From 47118a07267bdb8b20b20d3a09e1ad8f4be1bda4 Mon Sep 17 00:00:00 2001
From: Carson Katri
Date: Mon, 8 May 2023 18:22:10 -0400
Subject: [PATCH] Allow backends to report fixable errors

---
 .../coremltools-6.3.0.dist-info/INSTALLER | 1 +
 .../coremltools-6.3.0.dist-info/LICENSE.txt | 11 +
 .../coremltools-6.3.0.dist-info/METADATA | 60 +
 .../coremltools-6.3.0.dist-info/RECORD | 801 +
 .../coremltools-6.3.0.dist-info/REQUESTED | 0
 .../coremltools-6.3.0.dist-info/WHEEL | 5 +
 .../coremltools-6.3.0.dist-info/top_level.txt | 1 +
 .../coremltools/__init__.py | 114 +
 .../coremltools/_deps/__init__.py | 179 +
 .../coremltools/converters/__init__.py | 19 +
 .../converters/_converters_entry.py | 896 +
 .../coremltools/converters/_profile_utils.py | 80 +
 .../coremltools/converters/libsvm/__init__.py | 108 +
 .../converters/libsvm/_libsvm_converter.py | 199 +
 .../converters/libsvm/_libsvm_util.py | 37 +
 .../coremltools/converters/mil/__init__.py | 16 +
 .../mil/_deployment_compatibility.py | 165 +
 .../converters/mil/backend/__init__.py | 4 +
 .../converters/mil/backend/backend_helper.py | 74 +
 .../converters/mil/backend/mil/__init__.py | 4 +
 .../converters/mil/backend/mil/helper.py | 329 +
 .../converters/mil/backend/mil/load.py | 535 +
 .../mil/backend/mil/passes/__init__.py | 7 +
 .../passes/adjust_io_to_supported_types.py | 204 +
 .../mil/passes/fuse_activation_silu.py | 82 +
 .../passes/insert_image_preprocessing_op.py | 67 +
 .../mil/passes/sanitize_name_strings.py | 22 +
 .../mil/backend/mil/passes/test_passes.py | 888 +
 .../converters/mil/backend/mil/test_helper.py | 27 +
 .../backend/mil/test_model_input_params.py | 195 +
 .../converters/mil/backend/nn/__init__.py | 4 +
 .../converters/mil/backend/nn/load.py | 313 +
 .../backend/nn/mil_to_nn_mapping_registry.py | 19 +
 .../converters/mil/backend/nn/op_mapping.py | 3837 +++
 .../mil/backend/nn/passes/__init__.py | 14 +
 .../nn/passes/alert_return_type_cast.py | 48 +
 .../backend/nn/passes/commingle_loop_vars.py | 75 +
 .../backend/nn/passes/conv1d_decomposition.py | 101 +
 .../passes/handle_return_inputs_as_outputs.py | 62 +
 .../nn/passes/handle_return_unused_inputs.py | 59 +
 .../backend/nn/passes/handle_unused_inputs.py | 50 +
 .../mil/backend/nn/passes/mlmodel_passes.py | 467 +
 .../backend/nn/passes/test_mlmodel_passes.py | 1052 +
 .../mil/backend/nn/passes/test_passes.py | 227 +
 .../coremltools/converters/mil/conftest.py | 12 +
 .../coremltools/converters/mil/converter.py | 341 +
 .../converters/mil/debugging_utils.py | 175 +
 .../converters/mil/experimental/__init__.py | 4 +
 .../mil/experimental/passes/README.md | 587 +
 .../mil/experimental/passes/__init__.py | 4 +
 .../passes/generic_conv_batchnorm_fusion.py | 169 +
 .../passes/generic_conv_bias_fusion.py | 367 +
 .../passes/generic_conv_scale_fusion.py | 244 +
 ...c_layernorm_instancenorm_pattern_fusion.py | 457 +
 .../passes/generic_linear_bias_fusion.py | 133 +
 .../passes/generic_pass_infrastructure.py | 221 +
 .../converters/mil/frontend/__init__.py | 6 +
 .../converters/mil/frontend/_utils.py | 410 +
 .../mil/frontend/milproto/__init__.py | 6 +
 .../mil/frontend/milproto/helper.py | 65 +
 .../converters/mil/frontend/milproto/load.py | 429 +
 .../mil/frontend/milproto/test_load.py | 199 +
 .../mil/frontend/tensorflow/__init__.py | 23 +
 .../frontend/tensorflow/basic_graph_ops.py | 356 +
 .../mil/frontend/tensorflow/convert_utils.py | 211 +
 .../mil/frontend/tensorflow/converter.py | 466 +
 .../mil/frontend/tensorflow/dialect_ops.py | 173 +
 .../mil/frontend/tensorflow/dot_visitor.py | 149 +
 .../mil/frontend/tensorflow/load.py | 316 +
 .../mil/frontend/tensorflow/naming_utils.py | 35 +
 .../converters/mil/frontend/tensorflow/ops.py | 3546 +++
 .../mil/frontend/tensorflow/parse.py | 138 +
 .../mil/frontend/tensorflow/parsed_tf_node.py | 80 +
 .../tensorflow/ssa_passes/__init__.py | 6 +
 .../backfill_make_list_elem_type.py | 121 +
 .../tensorflow/ssa_passes/expand_tf_lstm.py | 225 +
 .../tensorflow/ssa_passes/test_passes.py | 56 +
 .../ssa_passes/tf_lstm_to_core_lstm.py | 308 +
 .../mil/frontend/tensorflow/test/__init__.py | 4 +
 .../tensorflow/test/test_composite_ops.py | 69 +
 .../tensorflow/test/test_custom_ops.py | 288 +
 .../frontend/tensorflow/test/test_graphs.py | 45 +
 .../mil/frontend/tensorflow/test/test_load.py | 435 +
 .../mil/frontend/tensorflow/test/test_ops.py | 7370 ++++++
 .../frontend/tensorflow/test/test_parse.py | 124 +
 .../tensorflow/test/test_parsed_tf_node.py | 65 +
 .../tensorflow/test/test_tf_conversion_api.py | 766 +
 .../frontend/tensorflow/test/testing_utils.py | 373 +
 .../tensorflow/tf_graph_pass/__init__.py | 19 +
 .../tensorflow/tf_graph_pass/cond_to_where.py | 130 +
 .../tf_graph_pass/constant_propagation.py | 163 +
 .../tf_graph_pass/delete_asserts.py | 68 +
 .../tf_graph_pass/delete_constant.py | 82 +
 .../delete_disconnected_nodes.py | 21 +
 .../tf_graph_pass/functionalize_loops.py | 469 +
 .../tf_graph_pass/fuse_dilation_conv.py | 215 +
 .../tf_graph_pass/insert_get_tuple.py | 111 +
 .../tf_graph_pass/quantization_pass.py | 63 +
 .../tf_graph_pass/tensor_array_transform.py | 78 +
 .../tf_graph_pass/variable_node_transform.py | 85 +
 .../tensorflow/tf_graph_pass/visitors.py | 233 +
 .../mil/frontend/tensorflow/tf_op_registry.py | 47 +
 .../mil/frontend/tensorflow/tfssa.py | 549 +
 .../mil/frontend/tensorflow2/__init__.py | 13 +
 .../mil/frontend/tensorflow2/converter.py | 40 +
 .../mil/frontend/tensorflow2/load.py | 346 +
 .../mil/frontend/tensorflow2/ops.py | 235 +
 .../tensorflow2/ssa_passes/__init__.py | 6 +
 .../ssa_passes/remove_vacuous_cond.py | 118 +
 .../tensorflow2/ssa_passes/test_v2_passes.py | 54 +
 .../mil/frontend/tensorflow2/test/__init__.py | 4 +
 .../test/test_tf2_conversion_api.py | 437 +
 .../frontend/tensorflow2/test/test_v2_load.py | 224 +
 .../frontend/tensorflow2/test/test_v2_ops.py | 792 +
 .../tensorflow2/test/test_v2_ops_tf_keras.py | 1739 ++
 .../tensorflow2/test/testing_utils.py | 290 +
 .../tensorflow2/tf_graph_pass/__init__.py | 7 +
 .../rewrite_control_flow_functions.py | 561 +
 .../converters/mil/frontend/torch/__init__.py | 13 +
 .../mil/frontend/torch/converter.py | 495 +
 .../mil/frontend/torch/dialect_ops.py | 219 +
 .../mil/frontend/torch/internal_graph.py | 336 +
 .../converters/mil/frontend/torch/load.py | 112 +
 .../converters/mil/frontend/torch/ops.py | 5734 +++++
 .../mil/frontend/torch/ssa_passes/__init__.py | 6 +
 .../ssa_passes/torch_tensor_assign_to_core.py | 64 +
 .../torch_upsample_to_core_upsample.py | 135 +
 .../mil/frontend/torch/test/__init__.py | 4 +
 .../mil/frontend/torch/test/test_api.py | 62 +
 .../frontend/torch/test/test_custom_ops.py | 144 +
 .../mil/frontend/torch/test/test_examples.py | 64 +
 .../torch/test/test_internal_graph.py | 1804 ++
 .../mil/frontend/torch/test/test_passes.py | 371 +
 .../torch/test/test_torch_conversion_api.py | 1401 ++
 .../mil/frontend/torch/test/test_torch_ops.py | 8442 +++++++
 .../mil/frontend/torch/test/testing_utils.py | 259 +
 .../mil/frontend/torch/torch_op_registry.py | 58 +
 .../mil/frontend/torch/torchir_passes.py | 322 +
 .../coremltools/converters/mil/input_types.py | 492 +
 .../converters/mil/mil/__init__.py | 19 +
 .../coremltools/converters/mil/mil/block.py | 894 +
 .../coremltools/converters/mil/mil/builder.py | 246 +
 .../converters/mil/mil/input_type.py | 382 +
 .../converters/mil/mil/operation.py | 603 +
 .../converters/mil/mil/ops/__init__.py | 4 +
 .../converters/mil/mil/ops/defs/__init__.py | 6 +
 .../converters/mil/mil/ops/defs/_op_reqs.py | 8 +
 .../converters/mil/mil/ops/defs/_utils.py | 548 +
 .../mil/mil/ops/defs/complex_dialect_ops.py | 744 +
 .../mil/mil/ops/defs/iOS15/__init__.py | 52 +
 .../mil/mil/ops/defs/iOS15/activation.py | 616 +
 .../mil/mil/ops/defs/iOS15/classify.py | 76 +
 .../mil/mil/ops/defs/iOS15/control_flow.py | 828 +
 .../converters/mil/mil/ops/defs/iOS15/conv.py | 428 +
 .../mil/ops/defs/iOS15/elementwise_binary.py | 638 +
 .../mil/ops/defs/iOS15/elementwise_unary.py | 898 +
 .../mil/mil/ops/defs/iOS15/image_resizing.py | 899 +
 .../mil/mil/ops/defs/iOS15/linear.py | 343 +
 .../mil/mil/ops/defs/iOS15/normalization.py | 381 +
 .../converters/mil/mil/ops/defs/iOS15/pool.py | 263 +
 .../mil/mil/ops/defs/iOS15/random.py | 294 +
 .../mil/mil/ops/defs/iOS15/recurrent.py | 519 +
 .../mil/mil/ops/defs/iOS15/reduction.py | 558 +
 .../mil/mil/ops/defs/iOS15/scatter_gather.py | 549 +
 .../mil/ops/defs/iOS15/tensor_operation.py | 1320 +
 .../ops/defs/iOS15/tensor_transformation.py | 1069 +
 .../mil/mil/ops/defs/iOS16/__init__.py | 15 +
 .../mil/mil/ops/defs/iOS16/constexpr_ops.py | 383 +
 .../mil/mil/ops/defs/iOS16/image_resizing.py | 86 +
 .../mil/mil/ops/defs/iOS16/scatter_gather.py | 170 +
 .../mil/ops/defs/iOS16/tensor_operation.py | 115 +
 .../ops/defs/iOS16/tensor_transformation.py | 186 +
 .../converters/mil/mil/ops/helper.py | 28 +
 .../converters/mil/mil/ops/registry.py | 190 +
 .../converters/mil/mil/ops/tests/__init__.py | 4 +
 .../mil/mil/ops/tests/test_activation.py | 1080 +
 .../mil/mil/ops/tests/test_const.py | 62 +
 .../mil/mil/ops/tests/test_constexpr_ops.py | 646 +
 .../mil/mil/ops/tests/test_control_flow.py | 419 +
 .../converters/mil/mil/ops/tests/test_conv.py | 940 +
 .../mil/ops/tests/test_elementwise_binary.py | 592 +
 .../mil/ops/tests/test_elementwise_unary.py | 688 +
 .../mil/mil/ops/tests/test_image_resizing.py | 934 +
 .../mil/mil/ops/tests/test_linear.py | 333 +
 .../mil/mil/ops/tests/test_normalization.py | 751 +
 .../converters/mil/mil/ops/tests/test_pool.py | 494 +
 .../mil/mil/ops/tests/test_random.py | 443 +
 .../mil/mil/ops/tests/test_recurrent.py | 790 +
 .../mil/mil/ops/tests/test_reduction.py | 356 +
 .../mil/mil/ops/tests/test_scatter_gather.py | 750 +
 .../mil/mil/ops/tests/test_slice.py | 394 +
 .../mil/ops/tests/test_tensor_operation.py | 1645 ++
 .../ops/tests/test_tensor_transformation.py | 1347 +
 .../mil/mil/ops/tests/test_utils.py | 262 +
 .../mil/mil/ops/tests/testing_utils.py | 159 +
 .../converters/mil/mil/passes/__init__.py | 43 +
 .../mil/mil/passes/defs/__init__.py | 4 +
 .../mil/mil/passes/defs/cleanup/__init__.py | 14 +
 .../passes/defs/cleanup/const_elimination.py | 103 +
 .../defs/cleanup/dead_code_elimination.py | 79 +
 .../defs/cleanup/dedup_op_and_var_names.py | 94 +
 .../passes/defs/cleanup/fuse_reduce_mean.py | 123 +
 .../cleanup/loop_invariant_elimination.py | 169 +
 .../passes/defs/cleanup/noop_elimination.py | 243 +
 .../defs/cleanup/remove_redundant_ops.py | 196 +
 .../defs/cleanup/remove_symbolic_reshape.py | 95 +
 .../defs/cleanup/topological_reorder.py | 169 +
 .../passes/defs/lower_complex_dialect_ops.py | 552 +
 .../mil/passes/defs/optimize_activation.py | 649 +
 .../mil/mil/passes/defs/optimize_conv.py | 1142 +
 .../defs/optimize_elementwise_binary.py | 321 +
 .../mil/mil/passes/defs/optimize_linear.py | 306 +
 .../mil/passes/defs/optimize_normalization.py | 851 +
 .../mil/passes/defs/optimize_repeat_ops.py | 1755 ++
 .../passes/defs/optimize_tensor_operation.py | 831 +
 .../mil/mil/passes/defs/preprocess.py | 362 +
 .../mil/mil/passes/defs/quantization.py | 857 +
 .../converters/mil/mil/passes/graph_pass.py | 73 +
 .../converters/mil/mil/passes/helper.py | 188 +
 .../mil/mil/passes/pass_pipeline.py | 380 +
 .../mil/mil/passes/pass_registry.py | 65 +
 .../mil/mil/passes/tests/__init__.py | 4 +
 .../tests/test_lower_complex_dialect_ops.py | 56 +
 .../mil/passes/tests/test_pass_pipeline.py | 113 +
 .../mil/mil/passes/tests/test_passes.py | 7475 ++++++
 .../tests/test_reduce_transposes_pass.py | 1967 ++
 .../coremltools/converters/mil/mil/program.py | 274 +
 .../converters/mil/mil/tests/__init__.py | 4 +
 .../converters/mil/mil/tests/test_block.py | 495 +
 .../converters/mil/mil/tests/test_debug.py | 302 +
 .../converters/mil/mil/tests/test_programs.py | 347 +
 .../converters/mil/mil/tests/test_types.py | 27 +
 .../converters/mil/mil/types/__init__.py | 33 +
 .../converters/mil/mil/types/annotate.py | 115 +
 .../converters/mil/mil/types/get_type_info.py | 59 +
 .../mil/mil/types/global_methods.py | 49 +
 .../converters/mil/mil/types/symbolic.py | 81 +
 .../converters/mil/mil/types/type_bool.py | 49 +
 .../converters/mil/mil/types/type_complex.py | 171 +
 .../converters/mil/mil/types/type_dict.py | 62 +
 .../converters/mil/mil/types/type_double.py | 162 +
 .../mil/mil/types/type_globals_pseudo_type.py | 12 +
 .../converters/mil/mil/types/type_int.py | 177 +
 .../converters/mil/mil/types/type_list.py | 69 +
 .../converters/mil/mil/types/type_mapping.py | 449 +
 .../converters/mil/mil/types/type_spec.py | 89 +
 .../converters/mil/mil/types/type_str.py | 22 +
 .../converters/mil/mil/types/type_tensor.py | 233 +
 .../converters/mil/mil/types/type_tuple.py | 53 +
 .../converters/mil/mil/types/type_unknown.py | 19 +
 .../converters/mil/mil/types/type_void.py | 12 +
 .../coremltools/converters/mil/mil/var.py | 397 +
 .../converters/mil/mil/visitors/__init__.py | 4 +
 .../mil/mil/visitors/dot_visitor.py | 206 +
 .../mil/test_flexible_shape_inputs.py | 146 +
 .../converters/mil/testing_reqs.py | 54 +
 .../converters/mil/testing_utils.py | 545 +
 .../converters/sklearn/_LinearSVC.py | 58 +
 .../converters/sklearn/_LinearSVR.py | 53 +
 .../coremltools/converters/sklearn/_NuSVC.py | 68 +
 .../coremltools/converters/sklearn/_NuSVR.py | 54 +
 .../coremltools/converters/sklearn/_SVC.py | 132 +
 .../coremltools/converters/sklearn/_SVR.py | 81 +
 .../converters/sklearn/__init__.py | 8 +
 .../converters/sklearn/_converter.py | 161 +
 .../converters/sklearn/_converter_internal.py | 350 +
 .../sklearn/_decision_tree_classifier.py | 68 +
 .../sklearn/_decision_tree_regressor.py | 51 +
 .../converters/sklearn/_dict_vectorizer.py | 113 +
 .../sklearn/_gradient_boosting_classifier.py | 102 +
 .../sklearn/_gradient_boosting_regressor.py | 74 +
 .../converters/sklearn/_imputer.py | 113 +
 .../sklearn/_k_neighbors_classifier.py | 291 +
 .../converters/sklearn/_linear_regression.py | 81 +
 .../sklearn/_logistic_regression.py | 108 +
 .../converters/sklearn/_normalizer.py | 82 +
 .../converters/sklearn/_one_hot_encoder.py | 264 +
 .../sklearn/_random_forest_classifier.py | 70 +
 .../sklearn/_random_forest_regressor.py | 58 +
 .../converters/sklearn/_ridge_regression.py | 53 +
 .../converters/sklearn/_sklearn_util.py | 37 +
 .../converters/sklearn/_standard_scaler.py | 89 +
 .../converters/sklearn/_svm_common.py | 37 +
 .../converters/sklearn/_tree_ensemble.py | 263 +
 .../converters/xgboost/__init__.py | 6 +
 .../coremltools/converters/xgboost/_tree.py | 93 +
 .../converters/xgboost/_tree_ensemble.py | 280 +
 .../coremltools/models/__init__.py | 37 +
 .../coremltools/models/_deprecation.py | 37 +
 .../coremltools/models/_feature_management.py | 354 +
 .../models/_interface_management.py | 211 +
 .../models/array_feature_extractor.py | 60 +
 .../coremltools/models/datatypes.py | 244 +
 .../coremltools/models/feature_vectorizer.py | 98 +
 .../coremltools/models/ml_program/__init__.py | 6 +
 .../models/ml_program/compression_utils.py | 609 +
 .../coremltools/models/model.py | 670 +
 .../models/nearest_neighbors/__init__.py | 6 +
 .../models/nearest_neighbors/builder.py | 664 +
 .../models/neural_network/__init__.py | 10 +
 .../models/neural_network/builder.py | 8857 +++++++
 .../neural_network/flexible_shape_utils.py | 738 +
 .../neural_network/optimization_utils.py | 255 +
 .../models/neural_network/printer.py | 114 +
 .../neural_network/quantization_utils.py | 1651 ++
 .../neural_network/spec_inspection_utils.py | 297 +
 .../neural_network/update_optimizer_utils.py | 191 +
 .../models/neural_network/utils.py | 140 +
 .../coremltools/models/pipeline.py | 305 +
 .../coremltools/models/tree_ensemble.py | 426 +
 .../coremltools/models/utils.py | 1097 +
 .../proto/ArrayFeatureExtractor_pb2.py | 71 +
 .../proto/AudioFeaturePrint_pb2.py | 142 +
 .../proto/BayesianProbitRegressor_pb2.py | 283 +
 .../proto/CategoricalMapping_pb2.py | 120 +
 .../proto/ClassConfidenceThresholding_pb2.py | 80 +
 .../coremltools/proto/CustomModel_pb2.py | 230 +
 .../coremltools/proto/DataStructures_pb2.py | 739 +
 .../coremltools/proto/DictVectorizer_pb2.py | 97 +
 .../coremltools/proto/FeatureTypes_pb2.py | 924 +
 .../proto/FeatureVectorizer_pb2.py | 118 +
 .../coremltools/proto/GLMClassifier_pb2.py | 215 +
 .../coremltools/proto/GLMRegressor_pb2.py | 154 +
 .../coremltools/proto/Gazetteer_pb2.py | 107 +
 .../coremltools/proto/Identity_pb2.py | 64 +
 .../coremltools/proto/Imputer_pb2.py | 182 +
 .../proto/ItemSimilarityRecommender_pb2.py | 238 +
 .../coremltools/proto/LinkedModel_pb2.py | 138 +
 .../coremltools/proto/MIL_pb2.py | 2086 ++
 .../coremltools/proto/Model_pb2.py | 1153 +
 .../coremltools/proto/NamedParameters_pb2.py | 393 +
 .../coremltools/proto/NearestNeighbors_pb2.py | 424 +
 .../coremltools/proto/NeuralNetwork_pb2.py | 12661 ++++++++++
 .../proto/NonMaximumSuppression_pb2.py | 206 +
 .../coremltools/proto/Normalizer_pb2.py | 100 +
 .../coremltools/proto/OneHotEncoder_pb2.py | 136 +
 .../coremltools/proto/Parameters_pb2.py | 235 +
 .../coremltools/proto/SVM_pb2.py | 739 +
 .../coremltools/proto/Scaler_pb2.py | 78 +
 .../proto/SoundAnalysisPreprocessing_pb2.py | 110 +
 .../coremltools/proto/TextClassifier_pb2.py | 107 +
 .../coremltools/proto/TreeEnsemble_pb2.py | 446 +
 .../proto/VisionFeaturePrint_pb2.py | 232 +
 .../coremltools/proto/WordEmbedding_pb2.py | 93 +
 .../coremltools/proto/WordTagger_pb2.py | 135 +
 .../coremltools/proto/__init__.py | 1 +
 .../coremltools/test/__init__.py | 4 +
 .../coremltools/test/api/__init__.py | 4 +
 .../coremltools/test/api/test_api_examples.py | 519 +
 .../test/api/test_api_visibilities.py | 230 +
 .../coremltools/test/blob/__init__.py | 4 +
 .../coremltools/test/blob/test_weights.py | 68 +
 .../coremltools/test/ml_program/__init__.py | 4 +
 .../test/ml_program/test_compression.py | 432 +
 .../coremltools/test/modelpackage/__init__.py | 4 +
 .../test/modelpackage/test_mlmodel.py | 74 +
 .../test/modelpackage/test_modelpackage.py | 519 +
 .../test/neural_network/__init__.py | 4 +
 .../neural_network/test_custom_neural_nets.py | 89 +
 .../test/neural_network/test_model.py | 569 +
 .../neural_network/test_neural_networks.py | 60 +
 .../test/neural_network/test_nn_builder.py | 627 +
 .../neural_network/test_numpy_nn_layers.py | 7086 ++++++
 .../test/neural_network/test_quantization.py | 562 +
 .../test_simple_nn_inference.py | 53 +
 .../test/neural_network/test_tf_numeric.py | 508 +
 .../coremltools/test/pipeline/__init__.py | 4 +
 .../test/pipeline/test_model_updatable.py | 796 +
 .../test/pipeline/test_pipeline.py | 277 +
 .../test/sklearn_tests/__init__.py | 4 +
 .../test/sklearn_tests/test_NuSVC.py | 309 +
 .../test/sklearn_tests/test_NuSVR.py | 224 +
 .../test/sklearn_tests/test_SVC.py | 369 +
 .../test/sklearn_tests/test_SVR.py | 259 +
 .../sklearn_tests/test_categorical_imputer.py | 78 +
 .../sklearn_tests/test_composite_pipelines.py | 85 +
 .../sklearn_tests/test_dict_vectorizer.py | 102 +
 .../test/sklearn_tests/test_feature_names.py | 30 +
 .../test/sklearn_tests/test_glm_classifier.py | 112 +
 .../test/sklearn_tests/test_imputer.py | 80 +
 .../test/sklearn_tests/test_io_types.py | 342 +
 .../test_k_neighbors_classifier.py | 277 +
 .../sklearn_tests/test_linear_regression.py | 136 +
 .../test_nearest_neighbors_builder.py | 418 +
 .../test/sklearn_tests/test_normalizer.py | 60 +
 .../sklearn_tests/test_one_hot_encoder.py | 290 +
 .../test_random_forest_classifier.py | 168 +
 .../test_random_forest_classifier_numeric.py | 141 +
 .../test_random_forest_regression.py | 88 +
 .../test_random_forest_regression_numeric.py | 107 +
 .../sklearn_tests/test_ridge_regression.py | 106 +
 .../sklearn_tests/test_standard_scalar.py | 65 +
 .../test/sklearn_tests/test_utils.py | 49 +
 .../test/xgboost_tests/__init__.py | 4 +
 .../test_boosted_trees_classifier.py | 342 +
 .../test_boosted_trees_classifier_numeric.py | 264 +
 .../test_boosted_trees_regression.py | 218 +
 .../test_boosted_trees_regression_numeric.py | 309 +
 .../test_decision_tree_classifier.py | 150 +
 .../test_decision_tree_classifier_numeric.py | 137 +
 .../test_decision_tree_regression.py | 87 +
 .../test_decision_tree_regression_numeric.py | 106 +
 .../coremltools/version.py | 7 +
 .../INSTALLER | 1 +
 .../LICENSE.md | 39 +
 .../METADATA | 486 +
 .../RECORD | 30 +
 .../REQUESTED | 0
 .../WHEEL | 5 +
 .../direct_url.json | 1 +
 .../top_level.txt | 2 +
 .../__init__.py | 1 +
 .../_version.py | 1 +
 .../chunk_mlprogram.py | 337 +
 .../controlnet.py | 244 +
 .../coreml_model.py | 119 +
 .../layer_norm.py | 80 +
 .../pipeline.py | 656 +
 .../torch2coreml.py | 1311 +
 .../python_coreml_stable_diffusion/unet.py | 1104 +
 .../scipy-1.10.1.dist-info/INSTALLER | 1 +
 .../scipy-1.10.1.dist-info/LICENSE.txt | 819 +
 .../scipy-1.10.1.dist-info/METADATA | 952 +
 .../scipy-1.10.1.dist-info/RECORD | 2037 ++
 .../scipy-1.10.1.dist-info/REQUESTED | 0
 .../scipy-1.10.1.dist-info/WHEEL | 4 +
 .../scipy/.dylibs/libgcc_s.1.1.dylib | Bin 0 -> 156896 bytes
 .../scipy/.dylibs/libgfortran.5.dylib | Bin 0 -> 1846176 bytes
 .../scipy/.dylibs/libopenblas.0.dylib | Bin 0 -> 20952288 bytes
 .../scipy/.dylibs/libquadmath.0.dylib | Bin 0 -> 349408 bytes
 .../.python_dependencies/scipy/__config__.py | 147 +
 .../.python_dependencies/scipy/__init__.py | 207 +
 .../scipy/_distributor_init.py | 10 +
 .../scipy/_lib/__init__.py | 14 +
 .../.python_dependencies/scipy/_lib/_bunch.py | 226 +
 .../scipy/_lib/_ccallback.py | 227 +
 .../scipy/_lib/_disjoint_set.py | 228 +
 .../scipy/_lib/_docscrape.py | 680 +
 .../scipy/_lib/_finite_differences.py | 145 +
 .../scipy/_lib/_gcutils.py | 105 +
 .../scipy/_lib/_pep440.py | 487 +
 .../scipy/_lib/_testutils.py | 217 +
 .../scipy/_lib/_threadsafety.py | 58 +
 .../scipy/_lib/_tmpdirs.py | 86 +
 .../scipy/_lib/_uarray/LICENSE | 29 +
 .../scipy/_lib/_uarray/__init__.py | 116 +
 .../scipy/_lib/_uarray/_backend.py | 703 +
 .../.python_dependencies/scipy/_lib/_util.py | 711 +
 .../scipy/_lib/decorator.py | 399 +
 .../scipy/_lib/deprecation.py | 107 +
 .../.python_dependencies/scipy/_lib/doccer.py | 275 +
 .../scipy/_lib/tests/__init__.py | 0
 .../scipy/_lib/tests/test__gcutils.py | 101 +
 .../scipy/_lib/tests/test__pep440.py | 67 +
 .../scipy/_lib/tests/test__testutils.py | 32 +
 .../scipy/_lib/tests/test__threadsafety.py | 51 +
 .../scipy/_lib/tests/test__util.py | 380 +
 .../scipy/_lib/tests/test_bunch.py | 163 +
 .../scipy/_lib/tests/test_ccallback.py | 197 +
 .../scipy/_lib/tests/test_deprecation.py | 10 +
 .../scipy/_lib/tests/test_import_cycles.py | 53 +
 .../scipy/_lib/tests/test_public_api.py | 326 +
 .../scipy/_lib/tests/test_scipy_version.py | 18 +
 .../scipy/_lib/tests/test_tmpdirs.py | 42 +
 .../scipy/_lib/tests/test_warnings.py | 131 +
 .../.python_dependencies/scipy/_lib/uarray.py | 31 +
 .../scipy/cluster/__init__.py | 29 +
 .../scipy/cluster/hierarchy.py | 4180 +++
 .../scipy/cluster/tests/__init__.py | 0
 .../cluster/tests/hierarchy_test_data.py | 145 +
 .../scipy/cluster/tests/test_disjoint_set.py | 201 +
 .../scipy/cluster/tests/test_hierarchy.py | 1121 +
 .../scipy/cluster/tests/test_vq.py | 336 +
 .../.python_dependencies/scipy/cluster/vq.py | 795 +
 .../.python_dependencies/scipy/conftest.py | 95 +
 .../scipy/constants/__init__.py | 343 +
 .../scipy/constants/_codata.py | 1756 ++
 .../scipy/constants/_constants.py | 357 +
 .../scipy/constants/codata.py | 32 +
 .../scipy/constants/constants.py | 61 +
 .../scipy/constants/tests/__init__.py | 0
 .../scipy/constants/tests/test_codata.py | 57 +
 .../scipy/constants/tests/test_constants.py | 35 +
 .../scipy/datasets/__init__.py | 90 +
 .../scipy/datasets/_download_all.py | 57 +
 .../scipy/datasets/_fetchers.py | 220 +
 .../scipy/datasets/_registry.py | 26 +
 .../scipy/datasets/_utils.py | 81 +
 .../scipy/datasets/tests/__init__.py | 0
 .../scipy/datasets/tests/test_data.py | 123 +
 .../scipy/fft/__init__.py | 113 +
 .../scipy/fft/_backend.py | 191 +
 .../.python_dependencies/scipy/fft/_basic.py | 1629 ++
 .../scipy/fft/_debug_backends.py | 22 +
 .../.python_dependencies/scipy/fft/_fftlog.py | 390 +
 .../scipy/fft/_fftlog_multimethods.py | 29 +
 .../.python_dependencies/scipy/fft/_helper.py | 101 +
 .../scipy/fft/_pocketfft/LICENSE.md | 25 +
 .../scipy/fft/_pocketfft/__init__.py | 9 +
 .../scipy/fft/_pocketfft/basic.py | 297 +
 .../scipy/fft/_pocketfft/helper.py | 216 +
 .../scipy/fft/_pocketfft/realtransforms.py | 110 +
 .../scipy/fft/_pocketfft/tests/__init__.py | 0
 .../scipy/fft/_pocketfft/tests/test_basic.py | 1022 +
 .../_pocketfft/tests/test_real_transforms.py | 493 +
 .../scipy/fft/_realtransforms.py | 693 +
 .../scipy/fft/tests/__init__.py | 0
 .../scipy/fft/tests/mock_backend.py | 59 +
 .../scipy/fft/tests/test_backend.py | 98 +
 .../scipy/fft/tests/test_fft_function.py | 43 +
 .../scipy/fft/tests/test_fftlog.py | 161 +
 .../scipy/fft/tests/test_helper.py | 300 +
 .../scipy/fft/tests/test_multithreading.py | 83 +
 .../scipy/fft/tests/test_numpy.py | 364 +
 .../scipy/fft/tests/test_real_transforms.py | 216 +
 .../scipy/fftpack/__init__.py | 104 +
 .../scipy/fftpack/_basic.py | 428 +
 .../scipy/fftpack/_helper.py | 112 +
 .../scipy/fftpack/_pseudo_diffs.py | 551 +
 .../scipy/fftpack/_realtransforms.py | 598 +
 .../scipy/fftpack/basic.py | 28 +
 .../scipy/fftpack/helper.py | 27 +
 .../scipy/fftpack/pseudo_diffs.py | 30 +
 .../scipy/fftpack/realtransforms.py | 27 +
 .../scipy/fftpack/tests/__init__.py | 0
 .../scipy/fftpack/tests/fftw_double_ref.npz | Bin 0 -> 162120 bytes
 .../fftpack/tests/fftw_longdouble_ref.npz | Bin 0 -> 296072 bytes
 .../scipy/fftpack/tests/fftw_single_ref.npz | Bin 0 -> 95144 bytes
 .../scipy/fftpack/tests/test.npz | Bin 0 -> 11968 bytes
 .../scipy/fftpack/tests/test_basic.py | 877 +
 .../scipy/fftpack/tests/test_helper.py | 54 +
 .../scipy/fftpack/tests/test_import.py | 31 +
 .../scipy/fftpack/tests/test_pseudo_diffs.py | 380 +
 .../fftpack/tests/test_real_transforms.py | 815 +
 .../scipy/integrate/__init__.py | 107 +
 .../scipy/integrate/_bvp.py | 1159 +
 .../scipy/integrate/_ivp/__init__.py | 8 +
 .../scipy/integrate/_ivp/base.py | 274 +
 .../scipy/integrate/_ivp/bdf.py | 470 +
 .../scipy/integrate/_ivp/common.py | 433 +
 .../integrate/_ivp/dop853_coefficients.py | 193 +
 .../scipy/integrate/_ivp/ivp.py | 678 +
 .../scipy/integrate/_ivp/lsoda.py | 192 +
 .../scipy/integrate/_ivp/radau.py | 565 +
 .../scipy/integrate/_ivp/rk.py | 587 +
 .../scipy/integrate/_ivp/tests/__init__.py | 0
 .../scipy/integrate/_ivp/tests/test_ivp.py | 1040 +
 .../scipy/integrate/_ivp/tests/test_rk.py | 37 +
 .../scipy/integrate/_ode.py | 1372 +
 .../scipy/integrate/_odepack_py.py | 260 +
 .../scipy/integrate/_quad_vec.py | 653 +
 .../scipy/integrate/_quadpack_py.py | 1244 +
 .../scipy/integrate/_quadrature.py | 1360 +
 .../scipy/integrate/dop.py | 28 +
 .../scipy/integrate/lsoda.py | 25 +
 .../scipy/integrate/odepack.py | 25 +
 .../scipy/integrate/quadpack.py | 32 +
 .../scipy/integrate/tests/__init__.py | 0
 .../scipy/integrate/tests/test__quad_vec.py | 204 +
 .../tests/test_banded_ode_solvers.py | 218 +
 .../scipy/integrate/tests/test_bvp.py | 709 +
 .../scipy/integrate/tests/test_integrate.py | 830 +
 .../scipy/integrate/tests/test_odeint_jac.py | 75 +
 .../scipy/integrate/tests/test_quadpack.py | 675 +
 .../scipy/integrate/tests/test_quadrature.py | 397 +
 .../scipy/integrate/vode.py | 28 +
 .../scipy/interpolate/__init__.py | 199 +
 .../scipy/interpolate/_bsplines.py | 2030 ++
 .../scipy/interpolate/_cubic.py | 864 +
 .../scipy/interpolate/_fitpack2.py | 2187 ++
 .../scipy/interpolate/_fitpack_impl.py | 1314 +
 .../scipy/interpolate/_fitpack_py.py | 788 +
 .../scipy/interpolate/_interpnd_info.py | 37 +
 .../scipy/interpolate/_interpolate.py | 2462 ++
 .../scipy/interpolate/_ndgriddata.py | 273 +
 .../scipy/interpolate/_pade.py | 67 +
 .../scipy/interpolate/_polyint.py | 743 +
 .../scipy/interpolate/_rbf.py | 289 +
 .../scipy/interpolate/_rbfinterp.py | 546 +
 .../scipy/interpolate/_rgi.py | 675 +
 .../scipy/interpolate/fitpack.py | 40 +
 .../scipy/interpolate/fitpack2.py | 46 +
 .../scipy/interpolate/interpolate.py | 52 +
 .../scipy/interpolate/ndgriddata.py | 33 +
 .../scipy/interpolate/polyint.py | 34 +
 .../scipy/interpolate/rbf.py | 33 +
 .../scipy/interpolate/tests/__init__.py | 0
 .../scipy/interpolate/tests/data/bug-1310.npz | Bin 0 -> 2648 bytes
 .../tests/data/estimate_gradients_hang.npy | Bin 0 -> 35680 bytes
 .../scipy/interpolate/tests/data/gcvspl.npz | Bin 0 -> 3138 bytes
 .../scipy/interpolate/tests/test_bsplines.py | 1639 ++
 .../scipy/interpolate/tests/test_fitpack.py | 462 +
 .../scipy/interpolate/tests/test_fitpack2.py | 1347 +
 .../scipy/interpolate/tests/test_gil.py | 65 +
 .../scipy/interpolate/tests/test_interpnd.py | 386 +
 .../interpolate/tests/test_interpolate.py | 2545 ++
 .../interpolate/tests/test_ndgriddata.py | 246 +
 .../scipy/interpolate/tests/test_pade.py | 101 +
 .../scipy/interpolate/tests/test_polyint.py | 808 +
 .../scipy/interpolate/tests/test_rbf.py | 221 +
 .../scipy/interpolate/tests/test_rbfinterp.py | 507 +
 .../scipy/interpolate/tests/test_rgi.py | 1019 +
 .../.python_dependencies/scipy/io/__init__.py | 117 +
 .../.python_dependencies/scipy/io/_fortran.py | 354 +
 .../scipy/io/_harwell_boeing/__init__.py | 17 +
 .../_harwell_boeing/_fortran_format_parser.py | 309 +
 .../scipy/io/_harwell_boeing/hb.py | 570 +
 .../io/_harwell_boeing/tests/__init__.py | 0
 .../tests/test_fortran_format.py | 74 +
 .../scipy/io/_harwell_boeing/tests/test_hb.py | 65 +
 .../.python_dependencies/scipy/io/_idl.py | 914 +
 .../.python_dependencies/scipy/io/_mmio.py | 996 +
 .../.python_dependencies/scipy/io/_netcdf.py | 1088 +
 .../scipy/io/arff/__init__.py | 28 +
 .../scipy/io/arff/_arffread.py | 905 +
 .../scipy/io/arff/arffread.py | 36 +
 .../scipy/io/arff/tests/__init__.py | 0
 .../scipy/io/arff/tests/data/iris.arff | 225 +
 .../scipy/io/arff/tests/data/missing.arff | 8 +
 .../scipy/io/arff/tests/data/nodata.arff | 11 +
 .../io/arff/tests/data/quoted_nominal.arff | 13 +
 .../tests/data/quoted_nominal_spaces.arff | 13 +
 .../scipy/io/arff/tests/data/test1.arff | 10 +
 .../scipy/io/arff/tests/data/test10.arff | 8 +
 .../scipy/io/arff/tests/data/test11.arff | 11 +
 .../scipy/io/arff/tests/data/test2.arff | 15 +
 .../scipy/io/arff/tests/data/test3.arff | 6 +
 .../scipy/io/arff/tests/data/test4.arff | 11 +
 .../scipy/io/arff/tests/data/test5.arff | 26 +
 .../scipy/io/arff/tests/data/test6.arff | 12 +
 .../scipy/io/arff/tests/data/test7.arff | 15 +
 .../scipy/io/arff/tests/data/test8.arff | 12 +
 .../scipy/io/arff/tests/data/test9.arff | 14 +
 .../scipy/io/arff/tests/test_arffread.py | 418 +
 .../scipy/io/harwell_boeing.py | 29 +
 .../.python_dependencies/scipy/io/idl.py | 29 +
 .../scipy/io/matlab/__init__.py | 63 +
 .../scipy/io/matlab/_byteordercodes.py | 73 +
 .../scipy/io/matlab/_mio.py | 358 +
 .../scipy/io/matlab/_mio4.py | 623 +
 .../scipy/io/matlab/_mio5.py | 892 +
 .../scipy/io/matlab/_mio5_params.py | 280 +
 .../scipy/io/matlab/_miobase.py | 429 +
 .../scipy/io/matlab/byteordercodes.py | 29 +
 .../scipy/io/matlab/mio.py | 29 +
 .../scipy/io/matlab/mio4.py | 33 +
 .../scipy/io/matlab/mio5.py | 37 +
 .../scipy/io/matlab/mio5_params.py | 37 +
 .../scipy/io/matlab/mio5_utils.py | 28 +
 .../scipy/io/matlab/mio_utils.py | 26 +
 .../scipy/io/matlab/miobase.py | 31 +
 .../scipy/io/matlab/streams.py | 27 +
 .../scipy/io/matlab/tests/__init__.py | 0
 .../io/matlab/tests/data/bad_miuint32.mat | Bin 0 -> 272 bytes
 .../tests/data/bad_miutf8_array_name.mat | Bin 0 -> 208 bytes
 .../scipy/io/matlab/tests/data/big_endian.mat | Bin 0 -> 273 bytes
 .../io/matlab/tests/data/broken_utf8.mat | Bin 0 -> 216 bytes
 .../tests/data/corrupted_zlib_checksum.mat | Bin 0 -> 276 bytes
 .../matlab/tests/data/corrupted_zlib_data.mat | Bin 0 -> 3451 bytes
 .../io/matlab/tests/data/japanese_utf8.txt | 5 +
 .../io/matlab/tests/data/little_endian.mat | Bin 0 -> 265 bytes
 .../io/matlab/tests/data/logical_sparse.mat | Bin 0 -> 208 bytes
 .../scipy/io/matlab/tests/data/malformed1.mat | Bin 0 -> 2208 bytes
 .../tests/data/miuint32_for_miint32.mat | Bin 0 -> 272 bytes
 .../matlab/tests/data/miutf8_array_name.mat | Bin 0 -> 208 bytes
 .../tests/data/nasty_duplicate_fieldnames.mat | Bin 0 -> 8168 bytes
 .../io/matlab/tests/data/one_by_zero_char.mat | Bin 0 -> 184 bytes
 .../scipy/io/matlab/tests/data/parabola.mat | Bin 0 -> 729 bytes
 .../matlab/tests/data/single_empty_string.mat | Bin 0 -> 171 bytes
 .../io/matlab/tests/data/some_functions.mat | Bin 0 -> 1397 bytes
 .../scipy/io/matlab/tests/data/sqr.mat | Bin 0 -> 679 bytes
 .../tests/data/test3dmatrix_6.1_SOL2.mat | Bin 0 -> 232 bytes
 .../tests/data/test3dmatrix_6.5.1_GLNX86.mat | Bin 0 -> 232 bytes
 .../tests/data/test3dmatrix_7.1_GLNX86.mat | Bin 0 -> 213 bytes
 .../tests/data/test3dmatrix_7.4_GLNX86.mat | Bin 0 -> 213 bytes
 .../matlab/tests/data/test_empty_struct.mat | Bin 0 -> 173 bytes
 .../matlab/tests/data/test_mat4_le_floats.mat | Bin 0 -> 38 bytes
 .../matlab/tests/data/test_skip_variable.mat | Bin 0 -> 20225 bytes
 .../io/matlab/tests/data/testbool_8_WIN64.mat | Bin 0 -> 185 bytes
 .../matlab/tests/data/testcell_6.1_SOL2.mat | Bin 0 -> 536 bytes
 .../tests/data/testcell_6.5.1_GLNX86.mat | Bin 0 -> 536 bytes
 .../matlab/tests/data/testcell_7.1_GLNX86.mat | Bin 0 -> 283 bytes
 .../matlab/tests/data/testcell_7.4_GLNX86.mat | Bin 0 -> 283 bytes
 .../tests/data/testcellnest_6.1_SOL2.mat | Bin 0 -> 568 bytes
 .../tests/data/testcellnest_6.5.1_GLNX86.mat | Bin 0 -> 568 bytes
 .../tests/data/testcellnest_7.1_GLNX86.mat | Bin 0 -> 228 bytes
 .../tests/data/testcellnest_7.4_GLNX86.mat | Bin 0 -> 228 bytes
 .../tests/data/testcomplex_4.2c_SOL2.mat | Bin 0 -> 176 bytes
 .../tests/data/testcomplex_6.1_SOL2.mat | Bin 0 -> 352 bytes
 .../tests/data/testcomplex_6.5.1_GLNX86.mat | Bin 0 -> 352 bytes
 .../tests/data/testcomplex_7.1_GLNX86.mat | Bin 0 -> 247 bytes
 .../tests/data/testcomplex_7.4_GLNX86.mat | Bin 0 -> 247 bytes
 .../tests/data/testdouble_4.2c_SOL2.mat | Bin 0 -> 103 bytes
 .../matlab/tests/data/testdouble_6.1_SOL2.mat | Bin 0 -> 272 bytes
 .../tests/data/testdouble_6.5.1_GLNX86.mat | Bin 0 -> 272 bytes
 .../tests/data/testdouble_7.1_GLNX86.mat | Bin 0 -> 232 bytes
 .../tests/data/testdouble_7.4_GLNX86.mat | Bin 0 -> 232 bytes
 .../tests/data/testemptycell_5.3_SOL2.mat | Bin 0 -> 472 bytes
 .../tests/data/testemptycell_6.5.1_GLNX86.mat | Bin 0 -> 472 bytes
 .../tests/data/testemptycell_7.1_GLNX86.mat | Bin 0 -> 218 bytes
 .../tests/data/testemptycell_7.4_GLNX86.mat | Bin 0 -> 218 bytes
 .../matlab/tests/data/testfunc_7.4_GLNX86.mat | Bin 0 -> 381 bytes
 .../matlab/tests/data/testhdf5_7.4_GLNX86.mat | Bin 0 -> 4168 bytes
 .../tests/data/testmatrix_4.2c_SOL2.mat | Bin 0 -> 151 bytes
 .../matlab/tests/data/testmatrix_6.1_SOL2.mat | Bin 0 -> 216 bytes
 .../tests/data/testmatrix_6.5.1_GLNX86.mat | Bin 0 -> 216 bytes
 .../tests/data/testmatrix_7.1_GLNX86.mat | Bin 0 -> 193 bytes
 .../tests/data/testmatrix_7.4_GLNX86.mat | Bin 0 -> 193 bytes
 .../matlab/tests/data/testminus_4.2c_SOL2.mat | Bin 0 -> 38 bytes
 .../matlab/tests/data/testminus_6.1_SOL2.mat | Bin 0 -> 200 bytes
 .../tests/data/testminus_6.5.1_GLNX86.mat | Bin 0 -> 200 bytes
 .../tests/data/testminus_7.1_GLNX86.mat | Bin 0 -> 184 bytes
 .../tests/data/testminus_7.4_GLNX86.mat | Bin 0 -> 184 bytes
 .../matlab/tests/data/testmulti_4.2c_SOL2.mat | Bin 0 -> 240 bytes
 .../tests/data/testmulti_7.1_GLNX86.mat | Bin 0 -> 276 bytes
 .../tests/data/testmulti_7.4_GLNX86.mat | Bin 0 -> 276 bytes
 .../matlab/tests/data/testobject_6.1_SOL2.mat | Bin 0 -> 800 bytes
 .../tests/data/testobject_6.5.1_GLNX86.mat | Bin 0 -> 672 bytes
 .../tests/data/testobject_7.1_GLNX86.mat | Bin 0 -> 306 bytes
 .../tests/data/testobject_7.4_GLNX86.mat | Bin 0 -> 306 bytes
 .../tests/data/testonechar_4.2c_SOL2.mat | Bin 0 -> 40 bytes
 .../tests/data/testonechar_6.1_SOL2.mat | Bin 0 -> 200 bytes
 .../tests/data/testonechar_6.5.1_GLNX86.mat | Bin 0 -> 200 bytes
 .../tests/data/testonechar_7.1_GLNX86.mat | Bin 0 -> 184 bytes
 .../tests/data/testonechar_7.4_GLNX86.mat | Bin 0 -> 184 bytes
 .../tests/data/testscalarcell_7.4_GLNX86.mat | Bin 0 -> 194 bytes
 .../io/matlab/tests/data/testsimplecell.mat | Bin 0 -> 220 bytes
 .../tests/data/testsparse_4.2c_SOL2.mat | Bin 0 -> 223 bytes
 .../matlab/tests/data/testsparse_6.1_SOL2.mat | Bin 0 -> 280 bytes
 .../tests/data/testsparse_6.5.1_GLNX86.mat | Bin 0 -> 328 bytes
 .../tests/data/testsparse_7.1_GLNX86.mat | Bin 0 -> 229 bytes
 .../tests/data/testsparse_7.4_GLNX86.mat | Bin 0 -> 229 bytes
 .../data/testsparsecomplex_4.2c_SOL2.mat | Bin 0 -> 294 bytes
 .../tests/data/testsparsecomplex_6.1_SOL2.mat | Bin 0 -> 304 bytes
 .../data/testsparsecomplex_6.5.1_GLNX86.mat | Bin 0 -> 400 bytes
 .../data/testsparsecomplex_7.1_GLNX86.mat | Bin 0 -> 241 bytes
 .../data/testsparsecomplex_7.4_GLNX86.mat | Bin 0 -> 241 bytes
 .../tests/data/testsparsefloat_7.4_GLNX86.mat | Bin 0 -> 219 bytes
 .../tests/data/teststring_4.2c_SOL2.mat | Bin 0 -> 375 bytes
 .../matlab/tests/data/teststring_6.1_SOL2.mat | Bin 0 -> 288 bytes
 .../tests/data/teststring_6.5.1_GLNX86.mat | Bin 0 -> 288 bytes
 .../tests/data/teststring_7.1_GLNX86.mat | Bin 0 -> 224 bytes
 .../tests/data/teststring_7.4_GLNX86.mat | Bin 0 -> 224 bytes
 .../tests/data/teststringarray_4.2c_SOL2.mat | Bin 0 -> 156 bytes
 .../tests/data/teststringarray_6.1_SOL2.mat | Bin 0 -> 232 bytes
 .../data/teststringarray_6.5.1_GLNX86.mat | Bin 0 -> 232 bytes
 .../tests/data/teststringarray_7.1_GLNX86.mat | Bin 0 -> 203 bytes
 .../tests/data/teststringarray_7.4_GLNX86.mat | Bin 0 -> 203 bytes
 .../matlab/tests/data/teststruct_6.1_SOL2.mat | Bin 0 -> 608 bytes
 .../tests/data/teststruct_6.5.1_GLNX86.mat | Bin 0 -> 552 bytes
 .../tests/data/teststruct_7.1_GLNX86.mat | Bin 0 -> 314 bytes
 .../tests/data/teststruct_7.4_GLNX86.mat | Bin 0 -> 314 bytes
 .../tests/data/teststructarr_6.1_SOL2.mat | Bin 0 -> 528 bytes
 .../tests/data/teststructarr_6.5.1_GLNX86.mat | Bin 0 -> 472 bytes
 .../tests/data/teststructarr_7.1_GLNX86.mat | Bin 0 -> 246 bytes
 .../tests/data/teststructarr_7.4_GLNX86.mat | Bin 0 -> 246 bytes
 .../tests/data/teststructnest_6.1_SOL2.mat | Bin 0 -> 496 bytes
 .../data/teststructnest_6.5.1_GLNX86.mat | Bin 0 -> 416 bytes
 .../tests/data/teststructnest_7.1_GLNX86.mat | Bin 0 -> 252 bytes
 .../tests/data/teststructnest_7.4_GLNX86.mat | Bin 0 -> 252 bytes
 .../tests/data/testunicode_7.1_GLNX86.mat | Bin 0 -> 357 bytes
 .../tests/data/testunicode_7.4_GLNX86.mat | Bin 0 -> 357 bytes
 .../io/matlab/tests/data/testvec_4_GLNX86.mat | Bin 0 -> 93 bytes
 .../io/matlab/tests/test_byteordercodes.py | 29 +
 .../scipy/io/matlab/tests/test_mio.py | 1291 +
 .../scipy/io/matlab/tests/test_mio5_utils.py | 180 +
 .../scipy/io/matlab/tests/test_mio_funcs.py | 51 +
 .../scipy/io/matlab/tests/test_mio_utils.py | 45 +
 .../scipy/io/matlab/tests/test_miobase.py | 32 +
 .../io/matlab/tests/test_pathological.py | 33 +
 .../scipy/io/matlab/tests/test_streams.py | 229 +
 .../.python_dependencies/scipy/io/mmio.py | 28 +
 .../.python_dependencies/scipy/io/netcdf.py | 33 +
 .../scipy/io/tests/__init__.py | 0
 .../scipy/io/tests/data/Transparent Busy.ani | Bin 0 -> 4362 bytes
 .../scipy/io/tests/data/array_float32_1d.sav | Bin 0 -> 2628 bytes
 .../scipy/io/tests/data/array_float32_2d.sav | Bin 0 -> 3192 bytes
 .../scipy/io/tests/data/array_float32_3d.sav | Bin 0 -> 13752 bytes
 .../scipy/io/tests/data/array_float32_4d.sav | Bin 0 -> 6616 bytes
 .../scipy/io/tests/data/array_float32_5d.sav | Bin 0 -> 7896 bytes
 .../scipy/io/tests/data/array_float32_6d.sav | Bin 0 -> 19416 bytes
 .../scipy/io/tests/data/array_float32_7d.sav | Bin 0 -> 3288 bytes
 .../scipy/io/tests/data/array_float32_8d.sav | Bin 0 -> 13656 bytes
 .../tests/data/array_float32_pointer_1d.sav | Bin 0 -> 2692 bytes
 .../tests/data/array_float32_pointer_2d.sav | Bin 0 -> 3256 bytes
 .../tests/data/array_float32_pointer_3d.sav | Bin 0 -> 13816 bytes
 .../tests/data/array_float32_pointer_4d.sav | Bin 0 -> 6680 bytes
 .../tests/data/array_float32_pointer_5d.sav | Bin 0 -> 7960 bytes
 .../tests/data/array_float32_pointer_6d.sav | Bin 0 -> 19480 bytes
 .../tests/data/array_float32_pointer_7d.sav | Bin 0 -> 3352 bytes
 .../tests/data/array_float32_pointer_8d.sav | Bin 0 -> 13720 bytes
 .../scipy/io/tests/data/example_1.nc | Bin 0 -> 1736 bytes
 .../scipy/io/tests/data/example_2.nc | Bin 0 -> 272 bytes
 .../io/tests/data/example_3_maskedvals.nc | Bin 0 -> 1424 bytes
 .../scipy/io/tests/data/fortran-3x3d-2i.dat | Bin 0 -> 451 bytes
 .../scipy/io/tests/data/fortran-mixed.dat | Bin 0 -> 40 bytes
 .../io/tests/data/fortran-sf8-11x1x10.dat | Bin 0 -> 888 bytes
 .../io/tests/data/fortran-sf8-15x10x22.dat | Bin 0 -> 26408 bytes
 .../scipy/io/tests/data/fortran-sf8-1x1x1.dat | Bin 0 -> 16 bytes
 .../scipy/io/tests/data/fortran-sf8-1x1x5.dat | Bin 0 -> 48 bytes
 .../scipy/io/tests/data/fortran-sf8-1x1x7.dat | Bin 0 -> 64 bytes
 .../scipy/io/tests/data/fortran-sf8-1x3x5.dat | Bin 0 -> 128 bytes
 .../io/tests/data/fortran-si4-11x1x10.dat | Bin 0 -> 448 bytes
 .../io/tests/data/fortran-si4-15x10x22.dat | Bin 0 -> 13208 bytes
 .../scipy/io/tests/data/fortran-si4-1x1x1.dat | Bin 0 -> 12 bytes
 .../scipy/io/tests/data/fortran-si4-1x1x5.dat | Bin 0 -> 28 bytes
 .../scipy/io/tests/data/fortran-si4-1x1x7.dat | Bin 0 -> 36 bytes
 .../scipy/io/tests/data/fortran-si4-1x3x5.dat | Bin 0 -> 68 bytes
 .../scipy/io/tests/data/invalid_pointer.sav | Bin 0 -> 1280 bytes
 .../scipy/io/tests/data/null_pointer.sav | Bin 0 -> 2180 bytes
 .../scipy/io/tests/data/scalar_byte.sav | Bin 0 -> 2076 bytes
 .../scipy/io/tests/data/scalar_byte_descr.sav | Bin 0 -> 2124 bytes
 .../scipy/io/tests/data/scalar_complex32.sav | Bin 0 -> 2076 bytes
 .../scipy/io/tests/data/scalar_complex64.sav | Bin 0 -> 2084 bytes
 .../scipy/io/tests/data/scalar_float32.sav | Bin 0 -> 2072 bytes
 .../scipy/io/tests/data/scalar_float64.sav | Bin 0 -> 2076 bytes
 .../io/tests/data/scalar_heap_pointer.sav | Bin 0 -> 2204 bytes
 .../scipy/io/tests/data/scalar_int16.sav | Bin 0 -> 2072 bytes
 .../scipy/io/tests/data/scalar_int32.sav | Bin 0 -> 2072 bytes
 .../scipy/io/tests/data/scalar_int64.sav | Bin 0 -> 2076 bytes
 .../scipy/io/tests/data/scalar_string.sav | Bin 0 -> 2124 bytes
 .../scipy/io/tests/data/scalar_uint16.sav | Bin 0 -> 2072 bytes
 .../scipy/io/tests/data/scalar_uint32.sav | Bin 0 -> 2072 bytes
 .../scipy/io/tests/data/scalar_uint64.sav | Bin 0 -> 2076 bytes
 .../scipy/io/tests/data/struct_arrays.sav | Bin 0 -> 2580 bytes
 .../tests/data/struct_arrays_byte_idl80.sav | Bin 0 -> 1388 bytes
 .../tests/data/struct_arrays_replicated.sav | Bin 0 -> 2936 bytes
 .../data/struct_arrays_replicated_3d.sav | Bin 0 -> 4608 bytes
 .../scipy/io/tests/data/struct_inherit.sav | Bin 0 -> 2404 bytes
 .../io/tests/data/struct_pointer_arrays.sav | Bin 0 -> 2408 bytes
 .../data/struct_pointer_arrays_replicated.sav | Bin 0 -> 2492 bytes
 .../struct_pointer_arrays_replicated_3d.sav | Bin 0 -> 2872 bytes
 .../scipy/io/tests/data/struct_pointers.sav | Bin 0 -> 2268 bytes
 .../tests/data/struct_pointers_replicated.sav | Bin 0 -> 2304 bytes
 .../data/struct_pointers_replicated_3d.sav | Bin 0 -> 2456 bytes
 .../scipy/io/tests/data/struct_scalars.sav | Bin 0 -> 2316 bytes
 .../tests/data/struct_scalars_replicated.sav | Bin 0 -> 2480 bytes
 .../data/struct_scalars_replicated_3d.sav | Bin 0 -> 3240 bytes
 .../data/test-44100Hz-2ch-32bit-float-be.wav | Bin 0 -> 3586 bytes
 .../data/test-44100Hz-2ch-32bit-float-le.wav | Bin 0 -> 3586 bytes
 .../tests/data/test-44100Hz-be-1ch-4bytes.wav | Bin 0 -> 17720 bytes
 ...4100Hz-le-1ch-4bytes-early-eof-no-data.wav | Bin 0 -> 72 bytes
 .../test-44100Hz-le-1ch-4bytes-early-eof.wav | Bin 0 -> 1024 bytes
 ...44100Hz-le-1ch-4bytes-incomplete-chunk.wav | Bin 0 -> 13 bytes
 .../tests/data/test-44100Hz-le-1ch-4bytes.wav | Bin 0 -> 17720 bytes
 .../test-48000Hz-2ch-64bit-float-le-wavex.wav | Bin 0 -> 7792 bytes
 .../data/test-8000Hz-be-3ch-5S-24bit.wav | Bin 0 -> 90 bytes
 .../test-8000Hz-le-1ch-10S-20bit-extra.wav | Bin 0 -> 74 bytes
 .../data/test-8000Hz-le-1ch-1byte-ulaw.wav | Bin 0 -> 70 bytes
 .../tests/data/test-8000Hz-le-2ch-1byteu.wav | Bin 0 -> 1644 bytes
 ...st-8000Hz-le-3ch-5S-24bit-inconsistent.wav | Bin 0 -> 90 bytes
 .../data/test-8000Hz-le-3ch-5S-24bit.wav | Bin 0 -> 90 bytes
 .../data/test-8000Hz-le-3ch-5S-36bit.wav | Bin 0 -> 120 bytes
 .../data/test-8000Hz-le-3ch-5S-45bit.wav | Bin 0 -> 134 bytes
 .../data/test-8000Hz-le-3ch-5S-53bit.wav | Bin 0 -> 150 bytes
 .../data/test-8000Hz-le-3ch-5S-64bit.wav | Bin 0 -> 164 bytes
 .../data/test-8000Hz-le-4ch-9S-12bit.wav | Bin 0 -> 116 bytes
 .../tests/data/test-8000Hz-le-5ch-9S-5bit.wav | Bin 0 -> 89 bytes
 .../io/tests/data/various_compressed.sav | Bin 0 -> 1015 bytes
 .../scipy/io/tests/test_fortran.py | 236 +
 .../scipy/io/tests/test_idl.py | 450 +
 .../scipy/io/tests/test_mmio.py | 759 +
 .../scipy/io/tests/test_netcdf.py | 543 +
 .../scipy/io/tests/test_paths.py | 93 +
 .../scipy/io/tests/test_wavfile.py | 416 +
 .../.python_dependencies/scipy/io/wavfile.py | 840 +
 .../.python_dependencies/scipy/linalg.pxd | 1 +
 .../scipy/linalg/__init__.py | 230 +
 .../scipy/linalg/_basic.py | 1815 ++
 .../scipy/linalg/_blas_subroutine_wrappers.f | 462 +
 .../scipy/linalg/_blas_subroutines.h | 166 +
 .../scipy/linalg/_cythonized_array_utils.pxd | 40 +
 .../scipy/linalg/_cythonized_array_utils.pyi | 16 +
 .../scipy/linalg/_decomp.py | 1603 ++
 .../scipy/linalg/_decomp_cholesky.py | 358 +
 .../scipy/linalg/_decomp_cossin.py | 224 +
 .../scipy/linalg/_decomp_ldl.py | 352 +
 .../scipy/linalg/_decomp_lu.py | 226 +
 .../scipy/linalg/_decomp_polar.py | 111 +
 .../scipy/linalg/_decomp_qr.py | 429 +
 .../scipy/linalg/_decomp_qz.py | 448 +
 .../scipy/linalg/_decomp_schur.py | 294 +
 .../scipy/linalg/_decomp_svd.py | 503 +
 .../scipy/linalg/_expm_frechet.py | 413 +
 .../scipy/linalg/_flinalg_py.py | 56 +
 .../scipy/linalg/_interpolative_backend.py | 1681 ++
 .../linalg/_lapack_subroutine_wrappers.f | 2031 ++
 .../scipy/linalg/_lapack_subroutines.h | 1523 ++
 .../scipy/linalg/_matfuncs.py | 881 +
 .../scipy/linalg/_matfuncs_expm.pyi | 6 +
 .../scipy/linalg/_matfuncs_inv_ssq.py | 886 +
 .../scipy/linalg/_matfuncs_sqrtm.py | 210 +
 .../scipy/linalg/_misc.py | 191 +
 .../scipy/linalg/_procrustes.py | 90 +
 .../scipy/linalg/_sketches.py | 179 +
 .../scipy/linalg/_solvers.py | 847 +
 .../scipy/linalg/_special_matrices.py | 1379 +
 .../scipy/linalg/_testutils.py | 63 +
 .../scipy/linalg/basic.py | 31 +
 .../.python_dependencies/scipy/linalg/blas.py | 484 +
 .../scipy/linalg/cython_blas.pxd | 314 +
 .../scipy/linalg/cython_blas.pyx | 1192 +
 .../scipy/linalg/cython_lapack.pxd | 3021 +++
 .../scipy/linalg/cython_lapack.pyx | 9293 +++++++
 .../scipy/linalg/decomp.py | 32 +
 .../scipy/linalg/decomp_cholesky.py | 29 +
 .../scipy/linalg/decomp_lu.py | 30 +
 .../scipy/linalg/decomp_qr.py | 27 +
 .../scipy/linalg/decomp_schur.py | 28 +
 .../scipy/linalg/decomp_svd.py | 28 +
 .../scipy/linalg/flinalg.py | 23 +
 .../scipy/linalg/interpolative.py | 1004 +
 .../scipy/linalg/lapack.py | 1036 +
 .../scipy/linalg/matfuncs.py | 32 +
 .../.python_dependencies/scipy/linalg/misc.py | 28 +
 .../scipy/linalg/special_matrices.py | 30 +
 .../scipy/linalg/tests/__init__.py | 0
 .../scipy/linalg/tests/data/carex_15_data.npz | Bin 0 -> 34462 bytes
 .../scipy/linalg/tests/data/carex_18_data.npz | Bin 0 -> 161487 bytes
 .../scipy/linalg/tests/data/carex_19_data.npz | Bin 0 -> 34050 bytes
 .../scipy/linalg/tests/data/carex_20_data.npz | Bin 0 -> 31231 bytes
 .../scipy/linalg/tests/data/carex_6_data.npz | Bin 0 -> 15878 bytes
 .../tests/data/gendare_20170120_data.npz | Bin 0 -> 2164 bytes
 .../scipy/linalg/tests/test_basic.py | 1714 ++
 .../scipy/linalg/tests/test_blas.py | 1096 +
 .../scipy/linalg/tests/test_cython_blas.py | 120 +
 .../scipy/linalg/tests/test_cython_lapack.py | 17 +
 .../tests/test_cythonized_array_utils.py | 121 +
 .../scipy/linalg/tests/test_decomp.py | 2904 +++
 .../linalg/tests/test_decomp_cholesky.py | 202 +
 .../scipy/linalg/tests/test_decomp_cossin.py | 155 +
 .../scipy/linalg/tests/test_decomp_ldl.py | 136 +
 .../scipy/linalg/tests/test_decomp_polar.py | 90 +
 .../scipy/linalg/tests/test_decomp_update.py | 1700 ++
 .../scipy/linalg/tests/test_fblas.py | 607 +
 .../scipy/linalg/tests/test_interpolative.py | 241 +
 .../scipy/linalg/tests/test_lapack.py | 3282 +++
 .../scipy/linalg/tests/test_matfuncs.py | 974 +
 .../linalg/tests/test_matmul_toeplitz.py | 125 +
 .../scipy/linalg/tests/test_misc.py | 5 +
 .../scipy/linalg/tests/test_procrustes.py | 191 +
 .../scipy/linalg/tests/test_sketches.py | 118 +
 .../scipy/linalg/tests/test_solve_toeplitz.py | 121 +
 .../scipy/linalg/tests/test_solvers.py | 766 +
 .../linalg/tests/test_special_matrices.py | 690 +
 .../scipy/misc/__init__.py | 67 +
 .../scipy/misc/_common.py | 342 +
 .../scipy/misc/ascent.dat | 749 +
 .../.python_dependencies/scipy/misc/common.py | 29 +
 .../.python_dependencies/scipy/misc/doccer.py | 29 +
 .../.python_dependencies/scipy/misc/ecg.dat | Bin 0 -> 119035 bytes
 .../.python_dependencies/scipy/misc/face.dat | Bin 0 -> 1581821 bytes
 .../scipy/misc/tests/__init__.py | 0
 .../scipy/misc/tests/test_common.py | 26 +
 .../scipy/misc/tests/test_config.py | 44 +
 .../scipy/misc/tests/test_doccer.py | 134 +
 .../scipy/ndimage/__init__.py | 169 +
 .../scipy/ndimage/_filters.py | 1635 ++
 .../scipy/ndimage/_fourier.py | 307 +
 .../scipy/ndimage/_interpolation.py | 960 +
 .../scipy/ndimage/_measurements.py | 1674 ++
 .../scipy/ndimage/_morphology.py | 2342 ++
 .../scipy/ndimage/_ni_docstrings.py | 208 +
 .../scipy/ndimage/_ni_support.py | 97 +
 .../scipy/ndimage/filters.py | 35 +
 .../scipy/ndimage/fourier.py | 29 +
 .../scipy/ndimage/interpolation.py | 31 +
 .../scipy/ndimage/measurements.py | 32 +
 .../scipy/ndimage/morphology.py | 35 +
 .../scipy/ndimage/tests/__init__.py | 15 +
 .../scipy/ndimage/tests/data/label_inputs.txt | 21 +
 .../ndimage/tests/data/label_results.txt | 294 +
 .../scipy/ndimage/tests/data/label_strels.txt | 42 +
 .../scipy/ndimage/tests/dots.png | Bin 0 -> 2114 bytes
 .../scipy/ndimage/tests/test_c_api.py | 94 +
 .../scipy/ndimage/tests/test_datatypes.py | 66 +
 .../scipy/ndimage/tests/test_filters.py | 1995 ++
 .../scipy/ndimage/tests/test_fourier.py | 151 +
 .../scipy/ndimage/tests/test_interpolation.py | 1328 +
 .../scipy/ndimage/tests/test_measurements.py | 1393 +
 .../scipy/ndimage/tests/test_morphology.py | 2371 ++
 .../scipy/ndimage/tests/test_splines.py | 65 +
 .../scipy/odr/__init__.py | 131 +
 .../scipy/odr/_add_newdocs.py | 30 +
 .../.python_dependencies/scipy/odr/_models.py | 315 +
 .../scipy/odr/_odrpack.py | 1142 +
 .../.python_dependencies/scipy/odr/models.py | 28 +
 .../.python_dependencies/scipy/odr/odrpack.py | 29 +
 .../scipy/odr/tests/__init__.py | 0
 .../scipy/odr/tests/test_odr.py | 533 +
 .../.python_dependencies/scipy/optimize.pxd | 1 +
 .../scipy/optimize/README | 87 +
 .../scipy/optimize/__init__.py | 441 +
 .../scipy/optimize/__nnls.pyi | 21 +
 .../scipy/optimize/_basinhopping.py | 741 +
 .../scipy/optimize/_cobyla_py.py | 293 +
 .../scipy/optimize/_constraints.py | 570 +
 .../optimize/_differentiable_functions.py | 616 +
 .../scipy/optimize/_differentialevolution.py | 1668 ++
 .../scipy/optimize/_direct_py.py | 279 +
 .../scipy/optimize/_dual_annealing.py | 711 +
 .../optimize/_hessian_update_strategy.py | 429 +
 .../scipy/optimize/_highs/__init__.py | 0
 .../optimize/_highs/src/cython/HConst.pxd | 107 +
 .../optimize/_highs/src/cython/Highs.pxd | 55 +
 .../optimize/_highs/src/cython/HighsIO.pxd | 21 +
 .../optimize/_highs/src/cython/HighsInfo.pxd | 23 +
 .../optimize/_highs/src/cython/HighsLp.pxd | 47 +
 .../_highs/src/cython/HighsLpUtils.pxd | 10 +
 .../_highs/src/cython/HighsModelUtils.pxd | 11 +
 .../_highs/src/cython/HighsOptions.pxd | 111 +
 .../_highs/src/cython/HighsRuntimeOptions.pxd | 10 +
 .../_highs/src/cython/HighsStatus.pxd | 13 +
 .../_highs/src/cython/SimplexConst.pxd | 96 +
 .../_highs/src/cython/highs_c_api.pxd | 8 +
 .../scipy/optimize/_lbfgsb_py.py | 494 +
 .../scipy/optimize/_linesearch.py | 881 +
 .../scipy/optimize/_linprog.py | 708 +
 .../scipy/optimize/_linprog_doc.py | 1435 ++
 .../scipy/optimize/_linprog_highs.py | 440 +
 .../scipy/optimize/_linprog_ip.py | 1128 +
 .../scipy/optimize/_linprog_rs.py | 572 +
 .../scipy/optimize/_linprog_simplex.py | 661 +
 .../scipy/optimize/_linprog_util.py | 1515 ++
 .../scipy/optimize/_lsq/__init__.py | 5 +
 .../scipy/optimize/_lsq/bvls.py | 183 +
 .../scipy/optimize/_lsq/common.py | 734 +
 .../scipy/optimize/_lsq/dogbox.py | 331 +
 .../scipy/optimize/_lsq/least_squares.py | 963 +
 .../scipy/optimize/_lsq/lsq_linear.py | 351 +
 .../scipy/optimize/_lsq/trf.py | 560 +
 .../scipy/optimize/_lsq/trf_linear.py | 249 +
 .../scipy/optimize/_milp.py | 387 +
 .../scipy/optimize/_minimize.py | 1038 +
 .../scipy/optimize/_minpack_py.py | 1016 +
 .../scipy/optimize/_nnls.py | 85 +
 .../scipy/optimize/_nonlin.py | 1566 ++
 .../scipy/optimize/_numdiff.py | 761 +
 .../scipy/optimize/_optimize.py | 3951 +++
 .../scipy/optimize/_qap.py | 724 +
 .../scipy/optimize/_remove_redundancy.py | 522 +
 .../scipy/optimize/_root.py | 717 +
 .../scipy/optimize/_root_scalar.py | 502 +
 .../scipy/optimize/_shgo.py | 1604 ++
 .../scipy/optimize/_shgo_lib/__init__.py | 0
 .../scipy/optimize/_shgo_lib/triangulation.py | 661 +
 .../scipy/optimize/_slsqp_py.py | 504 +
 .../scipy/optimize/_spectral.py | 257 +
 .../scipy/optimize/_tnc.py | 441 +
 .../scipy/optimize/_trlib/__init__.py | 12 +
 .../scipy/optimize/_trustregion.py | 301 +
 .../optimize/_trustregion_constr/__init__.py | 6 +
 .../canonical_constraint.py | 390 +
 .../equality_constrained_sqp.py | 217 +
 .../minimize_trustregion_constr.py | 545 +
 .../_trustregion_constr/projections.py | 405 +
 .../_trustregion_constr/qp_subproblem.py | 637 +
 .../optimize/_trustregion_constr/report.py | 52 +
 .../_trustregion_constr/tests/__init__.py | 0
 .../tests/test_canonical_constraint.py | 296 +
 .../tests/test_projections.py | 214 +
 .../tests/test_qp_subproblem.py | 645 +
 .../_trustregion_constr/tests/test_report.py | 32 +
 .../_trustregion_constr/tr_interior_point.py | 346 +
 .../scipy/optimize/_trustregion_dogleg.py | 122 +
 .../scipy/optimize/_trustregion_exact.py | 430 +
 .../scipy/optimize/_trustregion_krylov.py | 65 +
 .../scipy/optimize/_trustregion_ncg.py | 126 +
 .../scipy/optimize/_tstutils.py | 676 +
 .../scipy/optimize/_zeros_py.py | 1377 +
 .../scipy/optimize/cobyla.py | 31 +
 .../scipy/optimize/cython_optimize.pxd | 11 +
 .../optimize/cython_optimize/__init__.py | 132 +
 .../scipy/optimize/cython_optimize/_zeros.pxd | 33 +
 .../optimize/cython_optimize/c_zeros.pxd | 26 +
 .../scipy/optimize/lbfgsb.py | 37 +
 .../scipy/optimize/linesearch.py | 38 +
 .../scipy/optimize/minpack.py | 60 +
 .../scipy/optimize/minpack2.py | 29 +
 .../scipy/optimize/moduleTNC.py | 28 +
 .../scipy/optimize/nonlin.py | 65 +
 .../scipy/optimize/optimize.py | 72 +
 .../scipy/optimize/slsqp.py | 46 +
 .../scipy/optimize/tests/__init__.py | 0
 .../optimize/tests/test__basinhopping.py | 480 +
 .../tests/test__differential_evolution.py | 1485 ++
 .../optimize/tests/test__dual_annealing.py | 360 +
 .../tests/test__linprog_clean_inputs.py | 297 +
 .../scipy/optimize/tests/test__numdiff.py | 813 +
 .../optimize/tests/test__remove_redundancy.py | 255 +
 .../scipy/optimize/tests/test__root.py | 85 +
 .../scipy/optimize/tests/test__shgo.py | 812 +
 .../scipy/optimize/tests/test__spectral.py | 208 +
 .../scipy/optimize/tests/test_cobyla.py | 131 +
 .../tests/test_constraint_conversion.py | 267 +
 .../scipy/optimize/tests/test_constraints.py | 234 +
 .../optimize/tests/test_cython_optimize.py | 92 +
 .../tests/test_differentiable_functions.py | 731 +
 .../scipy/optimize/tests/test_direct.py | 318 +
 .../tests/test_hessian_update_strategy.py | 208 +
 .../optimize/tests/test_lbfgsb_hessinv.py | 43 +
 .../optimize/tests/test_lbfgsb_setulb.py | 116 +
 .../optimize/tests/test_least_squares.py | 811 +
 .../optimize/tests/test_linear_assignment.py | 116 +
 .../scipy/optimize/tests/test_linesearch.py | 312 +
 .../scipy/optimize/tests/test_linprog.py | 2437 ++
 .../scipy/optimize/tests/test_lsq_common.py | 297 +
 .../scipy/optimize/tests/test_lsq_linear.py | 269 +
 .../scipy/optimize/tests/test_milp.py | 370 +
 .../tests/test_minimize_constrained.py | 781 +
 .../scipy/optimize/tests/test_minpack.py | 973 +
 .../scipy/optimize/tests/test_nnls.py | 34 +
 .../scipy/optimize/tests/test_nonlin.py | 490 +
 .../scipy/optimize/tests/test_optimize.py | 2855 +++
 .../tests/test_quadratic_assignment.py | 431 +
 .../scipy/optimize/tests/test_regression.py | 40 +
 .../scipy/optimize/tests/test_slsqp.py | 604 +
 .../scipy/optimize/tests/test_tnc.py | 355 +
 .../scipy/optimize/tests/test_trustregion.py | 112 +
 .../optimize/tests/test_trustregion_exact.py | 352 +
 .../optimize/tests/test_trustregion_krylov.py | 170 +
 .../scipy/optimize/tests/test_zeros.py | 770 +
 .../scipy/optimize/tnc.py | 53 +
 .../scipy/optimize/zeros.py | 44 +
 .../scipy/signal/__init__.py | 386 +
 .../scipy/signal/_arraytools.py | 241 +
 .../scipy/signal/_bsplines.py | 683 +
 .../.python_dependencies/scipy/signal/_czt.py | 575 +
 .../scipy/signal/_filter_design.py | 5615 +++++
 .../scipy/signal/_fir_filter_design.py | 1296 +
 .../scipy/signal/_lti_conversion.py | 533 +
 .../scipy/signal/_ltisys.py | 3872 +++
 .../scipy/signal/_max_len_seq.py | 139 +
 .../scipy/signal/_peak_finding.py | 1311 +
 .../scipy/signal/_savitzky_golay.py | 357 +
 .../scipy/signal/_signaltools.py | 4565 ++++
 .../scipy/signal/_spectral.py | 83 +
 .../scipy/signal/_spectral_py.py | 2059 ++
 .../scipy/signal/_upfirdn.py | 216 +
 .../scipy/signal/_waveforms.py | 672 +
 .../scipy/signal/_wavelets.py | 492 +
 .../scipy/signal/bsplines.py | 32 +
 .../scipy/signal/filter_design.py | 42 +
 .../scipy/signal/fir_filter_design.py | 33 +
 .../scipy/signal/lti_conversion.py | 30 +
 .../scipy/signal/ltisys.py | 38 +
 .../scipy/signal/signaltools.py | 37 +
 .../scipy/signal/spectral.py | 32 +
 .../scipy/signal/spline.py | 26 +
 .../scipy/signal/tests/__init__.py | 0
 .../scipy/signal/tests/mpsig.py | 122 +
 .../scipy/signal/tests/test_array_tools.py | 111 +
 .../scipy/signal/tests/test_bsplines.py | 267 +
 .../scipy/signal/tests/test_cont2discrete.py | 420 +
 .../scipy/signal/tests/test_czt.py | 219 +
 .../scipy/signal/tests/test_dltisys.py | 598 +
 .../scipy/signal/tests/test_filter_design.py | 4156 +++
 .../signal/tests/test_fir_filter_design.py | 674 +
 .../scipy/signal/tests/test_ltisys.py | 1290 +
 .../scipy/signal/tests/test_max_len_seq.py | 65 +
 .../scipy/signal/tests/test_peak_finding.py | 887 +
 .../scipy/signal/tests/test_result_type.py | 52 +
 .../scipy/signal/tests/test_savitzky_golay.py | 358 +
 .../scipy/signal/tests/test_signaltools.py | 3575 +++
 .../scipy/signal/tests/test_spectral.py | 1602 ++
 .../scipy/signal/tests/test_upfirdn.py | 287 +
 .../scipy/signal/tests/test_waveforms.py | 351 +
 .../scipy/signal/tests/test_wavelets.py | 151 +
 .../scipy/signal/tests/test_windows.py | 852 +
 .../scipy/signal/waveforms.py | 29 +
 .../scipy/signal/wavelets.py | 28 +
 .../scipy/signal/windows/__init__.py | 52 +
 .../scipy/signal/windows/_windows.py | 2374 ++
 .../scipy/signal/windows/windows.py | 32 +
 .../scipy/sparse/__init__.py | 298 +
 .../scipy/sparse/_arrays.py | 98 +
 .../scipy/sparse/_base.py | 1331 +
 .../.python_dependencies/scipy/sparse/_bsr.py | 721 +
 .../scipy/sparse/_compressed.py | 1318 +
 .../scipy/sparse/_construct.py | 947 +
 .../.python_dependencies/scipy/sparse/_coo.py | 614 +
 .../.python_dependencies/scipy/sparse/_csc.py | 260 +
 .../.python_dependencies/scipy/sparse/_csr.py | 357 +
 .../scipy/sparse/_data.py | 402 +
 .../.python_dependencies/scipy/sparse/_dia.py | 470 +
 .../.python_dependencies/scipy/sparse/_dok.py | 456 +
 .../scipy/sparse/_extract.py | 169 +
 .../scipy/sparse/_index.py | 389 +
 .../.python_dependencies/scipy/sparse/_lil.py | 547 +
 .../scipy/sparse/_matrix_io.py | 151 +
 .../scipy/sparse/_spfuncs.py | 76 +
 .../scipy/sparse/_sputils.py | 413 +
 .../.python_dependencies/scipy/sparse/base.py | 42 +
 .../.python_dependencies/scipy/sparse/bsr.py | 46 +
 .../scipy/sparse/compressed.py | 54 +
 .../scipy/sparse/construct.py | 53 +
 .../.python_dependencies/scipy/sparse/coo.py | 47 +
 .../.python_dependencies/scipy/sparse/csc.py | 34 +
 .../scipy/sparse/csgraph/__init__.py | 208 +
 .../scipy/sparse/csgraph/_laplacian.py | 555 +
 .../scipy/sparse/csgraph/_validation.py | 56 +
 .../scipy/sparse/csgraph/setup.py | 38 +
 .../scipy/sparse/csgraph/tests/__init__.py | 0
 .../tests/test_connected_components.py | 99 +
 .../sparse/csgraph/tests/test_conversions.py | 61 +
 .../scipy/sparse/csgraph/tests/test_flow.py | 208 +
 .../csgraph/tests/test_graph_laplacian.py | 358 +
 .../sparse/csgraph/tests/test_matching.py | 239 +
 .../sparse/csgraph/tests/test_reordering.py | 70 +
 .../csgraph/tests/test_shortest_path.py | 395 +
 .../csgraph/tests/test_spanning_tree.py | 65 +
 .../sparse/csgraph/tests/test_traversal.py | 68 +
 .../.python_dependencies/scipy/sparse/csr.py | 36 +
 .../.python_dependencies/scipy/sparse/data.py | 33 +
 .../.python_dependencies/scipy/sparse/dia.py | 39 +
 .../.python_dependencies/scipy/sparse/dok.py | 42 +
 .../scipy/sparse/extract.py | 31 +
.../.python_dependencies/scipy/sparse/lil.py | 41 + .../scipy/sparse/linalg/__init__.py | 136 + .../scipy/sparse/linalg/_dsolve/__init__.py | 71 + .../sparse/linalg/_dsolve/_add_newdocs.py | 152 + .../scipy/sparse/linalg/_dsolve/linsolve.py | 715 + .../sparse/linalg/_dsolve/tests/__init__.py | 0 .../linalg/_dsolve/tests/test_linsolve.py | 799 + .../scipy/sparse/linalg/_eigen/__init__.py | 22 + .../scipy/sparse/linalg/_eigen/_svds.py | 563 + .../scipy/sparse/linalg/_eigen/_svds_doc.py | 398 + .../scipy/sparse/linalg/_eigen/arpack/COPYING | 45 + .../sparse/linalg/_eigen/arpack/__init__.py | 20 + .../sparse/linalg/_eigen/arpack/arpack.py | 1699 ++ .../linalg/_eigen/arpack/tests/__init__.py | 0 .../linalg/_eigen/arpack/tests/test_arpack.py | 725 + .../sparse/linalg/_eigen/lobpcg/__init__.py | 16 + .../sparse/linalg/_eigen/lobpcg/lobpcg.py | 982 + .../linalg/_eigen/lobpcg/tests/__init__.py | 0 .../linalg/_eigen/lobpcg/tests/test_lobpcg.py | 534 + .../sparse/linalg/_eigen/tests/__init__.py | 0 .../sparse/linalg/_eigen/tests/test_svds.py | 907 + .../scipy/sparse/linalg/_expm_multiply.py | 807 + .../scipy/sparse/linalg/_interface.py | 829 + .../scipy/sparse/linalg/_isolve/__init__.py | 20 + .../scipy/sparse/linalg/_isolve/_gcrotmk.py | 507 + .../scipy/sparse/linalg/_isolve/iterative.py | 881 + .../scipy/sparse/linalg/_isolve/lgmres.py | 237 + .../scipy/sparse/linalg/_isolve/lsmr.py | 486 + .../scipy/sparse/linalg/_isolve/lsqr.py | 587 + .../scipy/sparse/linalg/_isolve/minres.py | 392 + .../sparse/linalg/_isolve/tests/__init__.py | 0 .../linalg/_isolve/tests/test_gcrotmk.py | 165 + .../linalg/_isolve/tests/test_iterative.py | 794 + .../linalg/_isolve/tests/test_lgmres.py | 211 + .../sparse/linalg/_isolve/tests/test_lsmr.py | 228 + .../sparse/linalg/_isolve/tests/test_lsqr.py | 153 + .../linalg/_isolve/tests/test_minres.py | 97 + .../sparse/linalg/_isolve/tests/test_utils.py | 8 + .../scipy/sparse/linalg/_isolve/tfqmr.py | 184 + .../scipy/sparse/linalg/_isolve/utils.py | 127 + .../scipy/sparse/linalg/_matfuncs.py | 863 + .../scipy/sparse/linalg/_norm.py | 193 + .../scipy/sparse/linalg/_onenormest.py | 467 + .../scipy/sparse/linalg/_svdp.py | 321 + .../scipy/sparse/linalg/dsolve.py | 38 + .../scipy/sparse/linalg/eigen.py | 37 + .../scipy/sparse/linalg/interface.py | 30 + .../scipy/sparse/linalg/isolve.py | 30 + .../scipy/sparse/linalg/matfuncs.py | 30 + .../scipy/sparse/linalg/tests/__init__.py | 0 .../sparse/linalg/tests/propack_test_data.npz | Bin 0 -> 600350 bytes .../sparse/linalg/tests/test_expm_multiply.py | 345 + .../sparse/linalg/tests/test_interface.py | 449 + .../sparse/linalg/tests/test_matfuncs.py | 581 + .../scipy/sparse/linalg/tests/test_norm.py | 141 + .../sparse/linalg/tests/test_onenormest.py | 252 + .../scipy/sparse/linalg/tests/test_propack.py | 187 + .../sparse/linalg/tests/test_pydata_sparse.py | 241 + .../scipy/sparse/sparsetools.py | 106 + .../scipy/sparse/spfuncs.py | 29 + .../scipy/sparse/sputils.py | 52 + .../scipy/sparse/tests/__init__.py | 0 .../scipy/sparse/tests/data/csc_py2.npz | Bin 0 -> 846 bytes .../scipy/sparse/tests/data/csc_py3.npz | Bin 0 -> 851 bytes .../scipy/sparse/tests/test_array_api.py | 339 + .../scipy/sparse/tests/test_base.py | 4976 ++++ .../scipy/sparse/tests/test_construct.py | 582 + .../scipy/sparse/tests/test_csc.py | 98 + .../scipy/sparse/tests/test_csr.py | 169 + .../scipy/sparse/tests/test_extract.py | 42 + .../scipy/sparse/tests/test_matrix_io.py | 86 + .../scipy/sparse/tests/test_sparsetools.py | 337 + .../scipy/sparse/tests/test_spfuncs.py | 97 
+ .../scipy/sparse/tests/test_sputils.py | 188 + .../scipy/spatial/__init__.py | 124 + .../scipy/spatial/_ckdtree.pyi | 230 + .../scipy/spatial/_geometric_slerp.py | 239 + .../scipy/spatial/_kdtree.py | 920 + .../scipy/spatial/_plotutils.py | 269 + .../scipy/spatial/_procrustes.py | 132 + .../scipy/spatial/_qhull.pyi | 214 + .../scipy/spatial/_spherical_voronoi.py | 342 + .../scipy/spatial/_voronoi.pyi | 5 + .../scipy/spatial/ckdtree.py | 35 + .../scipy/spatial/distance.py | 2952 +++ .../scipy/spatial/distance.pyi | 216 + .../scipy/spatial/kdtree.py | 34 + .../scipy/spatial/qhull.py | 37 + .../scipy/spatial/qhull_src/COPYING.txt | 38 + .../scipy/spatial/tests/__init__.py | 0 .../scipy/spatial/tests/data/cdist-X1.txt | 10 + .../scipy/spatial/tests/data/cdist-X2.txt | 20 + .../tests/data/degenerate_pointset.npz | Bin 0 -> 22548 bytes .../scipy/spatial/tests/data/iris.txt | 150 + .../spatial/tests/data/pdist-boolean-inp.txt | 20 + .../tests/data/pdist-chebyshev-ml-iris.txt | 1 + .../spatial/tests/data/pdist-chebyshev-ml.txt | 1 + .../tests/data/pdist-cityblock-ml-iris.txt | 1 + .../spatial/tests/data/pdist-cityblock-ml.txt | 1 + .../tests/data/pdist-correlation-ml-iris.txt | 1 + .../tests/data/pdist-correlation-ml.txt | 1 + .../tests/data/pdist-cosine-ml-iris.txt | 1 + .../spatial/tests/data/pdist-cosine-ml.txt | 1 + .../spatial/tests/data/pdist-double-inp.txt | 20 + .../tests/data/pdist-euclidean-ml-iris.txt | 1 + .../spatial/tests/data/pdist-euclidean-ml.txt | 1 + .../spatial/tests/data/pdist-hamming-ml.txt | 1 + .../spatial/tests/data/pdist-jaccard-ml.txt | 1 + .../data/pdist-jensenshannon-ml-iris.txt | 1 + .../tests/data/pdist-jensenshannon-ml.txt | 1 + .../data/pdist-minkowski-3.2-ml-iris.txt | 1 + .../tests/data/pdist-minkowski-3.2-ml.txt | 1 + .../data/pdist-minkowski-5.8-ml-iris.txt | 1 + .../tests/data/pdist-seuclidean-ml-iris.txt | 1 + .../tests/data/pdist-seuclidean-ml.txt | 1 + .../spatial/tests/data/pdist-spearman-ml.txt | 1 + .../spatial/tests/data/random-bool-data.txt | 100 + .../spatial/tests/data/random-double-data.txt | 100 + .../spatial/tests/data/random-int-data.txt | 100 + .../spatial/tests/data/random-uint-data.txt | 100 + .../tests/data/selfdual-4d-polytope.txt | 27 + .../scipy/spatial/tests/test__plotutils.py | 54 + .../scipy/spatial/tests/test__procrustes.py | 116 + .../scipy/spatial/tests/test_distance.py | 2186 ++ .../scipy/spatial/tests/test_hausdorff.py | 172 + .../scipy/spatial/tests/test_kdtree.py | 1470 ++ .../scipy/spatial/tests/test_qhull.py | 1178 + .../scipy/spatial/tests/test_slerp.py | 416 + .../spatial/tests/test_spherical_voronoi.py | 355 + .../scipy/spatial/transform/__init__.py | 29 + .../scipy/spatial/transform/_rotation.pyi | 57 + .../spatial/transform/_rotation_groups.py | 140 + .../spatial/transform/_rotation_spline.py | 460 + .../scipy/spatial/transform/rotation.py | 33 + .../scipy/spatial/transform/tests/__init__.py | 0 .../spatial/transform/tests/test_rotation.py | 1370 + .../transform/tests/test_rotation_groups.py | 169 + .../transform/tests/test_rotation_spline.py | 161 + .../.python_dependencies/scipy/special.pxd | 1 + .../scipy/special/__init__.py | 710 + .../scipy/special/_add_newdocs.py | 13639 ++++++++++ .../scipy/special/_basic.py | 3020 +++ .../scipy/special/_ellip_harm.py | 208 + .../scipy/special/_lambertw.py | 106 + .../scipy/special/_logsumexp.py | 298 + .../scipy/special/_mptestutils.py | 447 + .../scipy/special/_orthogonal.py | 2557 ++ .../scipy/special/_orthogonal.pyi | 341 + .../scipy/special/_precompute/__init__.py | 0 
.../scipy/special/_precompute/cosine_cdf.py | 18 + .../scipy/special/_precompute/expn_asy.py | 54 + .../scipy/special/_precompute/gammainc_asy.py | 116 + .../special/_precompute/gammainc_data.py | 124 + .../scipy/special/_precompute/lambertw.py | 68 + .../scipy/special/_precompute/loggamma.py | 43 + .../special/_precompute/struve_convergence.py | 120 + .../scipy/special/_precompute/utils.py | 38 + .../special/_precompute/wright_bessel.py | 342 + .../special/_precompute/wright_bessel_data.py | 152 + .../scipy/special/_precompute/wrightomega.py | 41 + .../scipy/special/_precompute/zetac.py | 27 + .../scipy/special/_sf_error.py | 15 + .../scipy/special/_spfun_stats.py | 107 + .../scipy/special/_spherical_bessel.py | 349 + .../scipy/special/_test_internal.pyi | 10 + .../scipy/special/_testutils.py | 316 + .../scipy/special/_ufuncs.pyi | 520 + .../scipy/special/_ufuncs.pyx | 20949 ++++++++++++++++ .../scipy/special/_ufuncs_cxx.pxd | 41 + .../scipy/special/_ufuncs_cxx.pyx | 125 + .../scipy/special/_ufuncs_cxx_defs.h | 47 + .../scipy/special/_ufuncs_defs.h | 215 + .../scipy/special/add_newdocs.py | 23 + .../scipy/special/basic.py | 97 + .../scipy/special/cython_special.pxd | 259 + .../scipy/special/cython_special.pyi | 3 + .../scipy/special/cython_special.pyx | 3641 +++ .../scipy/special/orthogonal.py | 55 + .../scipy/special/sf_error.py | 28 + .../scipy/special/specfun.py | 51 + .../scipy/special/spfun_stats.py | 25 + .../scipy/special/tests/__init__.py | 0 .../scipy/special/tests/data/boost.npz | Bin 0 -> 1270643 bytes .../scipy/special/tests/data/gsl.npz | Bin 0 -> 51433 bytes .../scipy/special/tests/data/local.npz | Bin 0 -> 203438 bytes .../scipy/special/tests/test_basic.py | 3656 +++ .../scipy/special/tests/test_bdtr.py | 112 + .../scipy/special/tests/test_boxcox.py | 106 + .../scipy/special/tests/test_cdflib.py | 424 + .../special/tests/test_cdft_asymptotic.py | 49 + .../scipy/special/tests/test_cosine_distr.py | 84 + .../special/tests/test_cython_special.py | 352 + .../scipy/special/tests/test_data.py | 617 + .../scipy/special/tests/test_dd.py | 46 + .../scipy/special/tests/test_digamma.py | 42 + .../scipy/special/tests/test_ellip_harm.py | 278 + .../scipy/special/tests/test_erfinv.py | 89 + .../tests/test_exponential_integrals.py | 75 + .../scipy/special/tests/test_faddeeva.py | 85 + .../scipy/special/tests/test_gamma.py | 12 + .../scipy/special/tests/test_gammainc.py | 136 + .../scipy/special/tests/test_hyp2f1.py | 2180 ++ .../special/tests/test_hypergeometric.py | 140 + .../scipy/special/tests/test_kolmogorov.py | 412 + .../scipy/special/tests/test_lambertw.py | 109 + .../scipy/special/tests/test_log_softmax.py | 109 + .../scipy/special/tests/test_loggamma.py | 70 + .../scipy/special/tests/test_logit.py | 145 + .../scipy/special/tests/test_logsumexp.py | 194 + .../scipy/special/tests/test_mpmath.py | 2027 ++ .../scipy/special/tests/test_nan_inputs.py | 64 + .../scipy/special/tests/test_ndtr.py | 77 + .../scipy/special/tests/test_ndtri_exp.py | 94 + .../scipy/special/tests/test_orthogonal.py | 791 + .../special/tests/test_orthogonal_eval.py | 268 + .../scipy/special/tests/test_owens_t.py | 53 + .../scipy/special/tests/test_pcf.py | 24 + .../scipy/special/tests/test_pdtr.py | 48 + .../scipy/special/tests/test_powm1.py | 65 + .../special/tests/test_precompute_expn_asy.py | 24 + .../special/tests/test_precompute_gammainc.py | 109 + .../special/tests/test_precompute_utils.py | 36 + .../scipy/special/tests/test_round.py | 16 + .../scipy/special/tests/test_sf_error.py | 122 + 
.../scipy/special/tests/test_sici.py | 36 + .../scipy/special/tests/test_spence.py | 32 + .../scipy/special/tests/test_spfun_stats.py | 61 + .../scipy/special/tests/test_sph_harm.py | 37 + .../special/tests/test_spherical_bessel.py | 379 + .../scipy/special/tests/test_trig.py | 66 + .../scipy/special/tests/test_wright_bessel.py | 115 + .../scipy/special/tests/test_wrightomega.py | 117 + .../scipy/special/tests/test_zeta.py | 49 + .../scipy/stats/__init__.py | 515 + .../scipy/stats/_axis_nan_policy.py | 605 + .../scipy/stats/_biasedurn.pxd | 27 + .../scipy/stats/_binned_statistic.py | 795 + .../scipy/stats/_binomtest.py | 375 + .../scipy/stats/_boost/__init__.py | 53 + .../scipy/stats/_common.py | 6 + .../scipy/stats/_constants.py | 34 + .../scipy/stats/_continuous_distns.py | 10314 ++++++++ .../scipy/stats/_covariance.py | 629 + .../scipy/stats/_crosstab.py | 203 + .../scipy/stats/_discrete_distns.py | 1814 ++ .../scipy/stats/_distn_infrastructure.py | 4062 +++ .../scipy/stats/_distr_params.py | 281 + .../scipy/stats/_entropy.py | 399 + .../.python_dependencies/scipy/stats/_fit.py | 1284 + .../scipy/stats/_generate_pyx.py | 75 + .../scipy/stats/_hypotests.py | 2006 ++ .../.python_dependencies/scipy/stats/_kde.py | 725 + .../scipy/stats/_ksstats.py | 596 + .../scipy/stats/_levy_stable/__init__.py | 1200 + .../scipy/stats/_mannwhitneyu.py | 493 + .../scipy/stats/_morestats.py | 4186 +++ .../scipy/stats/_mstats_basic.py | 3521 +++ .../scipy/stats/_mstats_extras.py | 500 + .../scipy/stats/_multivariate.py | 5691 +++++ .../scipy/stats/_odds_ratio.py | 465 + .../scipy/stats/_page_trend_test.py | 476 + .../.python_dependencies/scipy/stats/_qmc.py | 2570 ++ .../scipy/stats/_qmc_cy.pyi | 54 + .../scipy/stats/_rcont/__init__.py | 5 + .../scipy/stats/_relative_risk.py | 264 + .../scipy/stats/_resampling.py | 1602 ++ .../scipy/stats/_result_classes.py | 34 + .../scipy/stats/_rvs_sampling.py | 172 + .../scipy/stats/_sobol.pyi | 54 + .../scipy/stats/_sobol_direction_numbers.npz | Bin 0 -> 589334 bytes .../scipy/stats/_stats.pxd | 9 + .../scipy/stats/_stats_mstats_common.py | 501 + .../scipy/stats/_stats_py.py | 9662 +++++++ .../scipy/stats/_tukeylambda_stats.py | 199 + .../scipy/stats/_unuran/__init__.py | 0 .../scipy/stats/_unuran/unuran.pxd | 1309 + .../scipy/stats/_unuran/unuran_wrapper.pyi | 178 + .../scipy/stats/_variation.py | 224 + .../scipy/stats/_warnings_errors.py | 38 + .../scipy/stats/biasedurn.py | 29 + .../scipy/stats/contingency.py | 419 + .../scipy/stats/distributions.py | 24 + .../.python_dependencies/scipy/stats/kde.py | 31 + .../scipy/stats/morestats.py | 42 + .../scipy/stats/mstats.py | 135 + .../scipy/stats/mstats_basic.py | 58 + .../scipy/stats/mstats_extras.py | 34 + .../.python_dependencies/scipy/stats/mvn.py | 31 + .../.python_dependencies/scipy/stats/qmc.py | 235 + .../scipy/stats/sampling.py | 51 + .../scipy/stats/statlib.py | 30 + .../.python_dependencies/scipy/stats/stats.py | 62 + .../scipy/stats/tests/__init__.py | 0 .../scipy/stats/tests/common_tests.py | 450 + .../tests/data/fisher_exact_results_from_r.py | 607 + .../levy_stable/stable-Z1-cdf-sample-data.npy | Bin 0 -> 183728 bytes .../levy_stable/stable-Z1-pdf-sample-data.npy | Bin 0 -> 183688 bytes .../stable-loc-scale-sample-data.npy | Bin 0 -> 9328 bytes .../stats/tests/data/nist_anova/AtmWtAg.dat | 108 + .../stats/tests/data/nist_anova/SiRstv.dat | 85 + .../stats/tests/data/nist_anova/SmLs01.dat | 249 + .../stats/tests/data/nist_anova/SmLs02.dat | 1869 ++ .../stats/tests/data/nist_anova/SmLs03.dat | 18069 
+++++++++++++ .../stats/tests/data/nist_anova/SmLs04.dat | 249 + .../stats/tests/data/nist_anova/SmLs05.dat | 1869 ++ .../stats/tests/data/nist_anova/SmLs06.dat | 18069 +++++++++++++ .../stats/tests/data/nist_anova/SmLs07.dat | 249 + .../stats/tests/data/nist_anova/SmLs08.dat | 1869 ++ .../stats/tests/data/nist_anova/SmLs09.dat | 18069 +++++++++++++ .../tests/data/nist_linregress/Norris.dat | 97 + .../data/studentized_range_mpmath_ref.json | 1499 ++ .../scipy/stats/tests/test_axis_nan_policy.py | 1044 + .../stats/tests/test_binned_statistic.py | 568 + .../scipy/stats/tests/test_boost_ufuncs.py | 44 + .../scipy/stats/tests/test_contingency.py | 241 + .../stats/tests/test_continuous_basic.py | 997 + .../scipy/stats/tests/test_crosstab.py | 115 + .../scipy/stats/tests/test_discrete_basic.py | 545 + .../scipy/stats/tests/test_discrete_distns.py | 566 + .../scipy/stats/tests/test_distributions.py | 7625 ++++++ .../scipy/stats/tests/test_entropy.py | 287 + .../scipy/stats/tests/test_fit.py | 850 + .../scipy/stats/tests/test_hypotests.py | 1712 ++ .../scipy/stats/tests/test_kdeoth.py | 604 + .../scipy/stats/tests/test_morestats.py | 2673 ++ .../scipy/stats/tests/test_mstats_basic.py | 1977 ++ .../scipy/stats/tests/test_mstats_extras.py | 150 + .../scipy/stats/tests/test_multivariate.py | 2905 +++ .../scipy/stats/tests/test_odds_ratio.py | 147 + .../scipy/stats/tests/test_qmc.py | 1326 + .../scipy/stats/tests/test_rank.py | 320 + .../scipy/stats/tests/test_relative_risk.py | 96 + .../scipy/stats/tests/test_resampling.py | 1651 ++ .../scipy/stats/tests/test_sampling.py | 1357 + .../scipy/stats/tests/test_stats.py | 8173 ++++++ .../stats/tests/test_tukeylambda_stats.py | 86 + .../scipy/stats/tests/test_variation.py | 158 + .../.python_dependencies/scipy/version.py | 12 + .../.python_dependencies/tests/__init__.py | 0 .../tests/test_stable_diffusion.py | 410 + __packaged__/coreml/README.md | 4 + __packaged__/coreml/__init__.py | 89 + __packaged__/coreml/actor.py | 233 + __packaged__/coreml/preferences.py | 7 + __packaged__/coreml/requirements.txt | 3 + __packaged__/coreml/test.png | Bin 0 -> 432702 bytes api/backend/backend.py | 51 +- api/models/__init__.py | 3 +- api/models/fix_it_error.py | 41 + api/models/generation_arguments.py | 104 + api/models/task.py | 33 +- diffusers_backend.py | 71 +- generator_process/actions/huggingface_hub.py | 36 + generator_process/models/__init__.py | 1 - generator_process/models/fix_it_error.py | 14 - operators/dream_texture.py | 4 +- operators/project.py | 4 +- property_groups/dream_prompt.py | 38 +- property_groups/dream_prompt_validation.py | 1 - ui/panels/dream_texture.py | 7 +- 1577 files changed, 704641 insertions(+), 88 deletions(-) create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/REQUESTED create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL create mode 100644 __packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt create mode 100644 __packaged__/coreml/.python_dependencies/coremltools/__init__.py create mode 100644 
create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_finite_differences.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_gcutils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_pep440.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_testutils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_threadsafety.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_tmpdirs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/LICENSE create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/_backend.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/_util.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/decorator.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/deprecation.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/doccer.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__gcutils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__pep440.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__testutils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__threadsafety.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__util.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_bunch.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_ccallback.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_deprecation.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_import_cycles.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_public_api.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_scipy_version.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_tmpdirs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_warnings.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/_lib/uarray.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/hierarchy.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/tests/hierarchy_test_data.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_disjoint_set.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_hierarchy.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_vq.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/cluster/vq.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/conftest.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/_codata.py create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/constants/_constants.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/codata.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/constants.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/tests/test_codata.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/constants/tests/test_constants.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/_download_all.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/_fetchers.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/_registry.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/_utils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/datasets/tests/test_data.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_backend.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_debug_backends.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_fftlog.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_fftlog_multimethods.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/LICENSE.md create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/realtransforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_real_transforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/_realtransforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/mock_backend.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_backend.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fft_function.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fftlog.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_multithreading.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_numpy.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fft/tests/test_real_transforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/__init__.py create 
mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/_basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/_helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/_pseudo_diffs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/_realtransforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/pseudo_diffs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/realtransforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_double_ref.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_longdouble_ref.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_single_ref.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_helper.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_import.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_pseudo_diffs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_real_transforms.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_bvp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/base.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/bdf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/common.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/dop853_coefficients.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/ivp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/lsoda.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/radau.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/rk.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_ivp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_rk.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_ode.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_odepack_py.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_quad_vec.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_quadpack_py.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/_quadrature.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/dop.py create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/integrate/lsoda.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/odepack.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/quadpack.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test__quad_vec.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_banded_ode_solvers.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_bvp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_integrate.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_odeint_jac.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadpack.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadrature.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/integrate/vode.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_bsplines.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_cubic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack2.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_impl.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_py.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_interpnd_info.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_interpolate.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_ndgriddata.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_pade.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_polyint.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_rbf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_rbfinterp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/_rgi.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack2.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/interpolate.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/ndgriddata.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/polyint.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/rbf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/bug-1310.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/estimate_gradients_hang.npy create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/gcvspl.npz create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_bsplines.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack.py 
create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack2.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_gil.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpnd.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpolate.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_ndgriddata.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_pade.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_polyint.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbfinterp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rgi.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_fortran.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/_fortran_format_parser.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/hb.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_fortran_format.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_hb.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_idl.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_mmio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/_netcdf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/_arffread.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/arffread.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/iris.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/missing.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/nodata.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal_spaces.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test1.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test10.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test11.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test2.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test3.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test4.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test5.arff create mode 
100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test6.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test7.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test8.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test9.arff create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/arff/tests/test_arffread.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/harwell_boeing.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/idl.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_byteordercodes.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio4.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5_params.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/_miobase.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/byteordercodes.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio4.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_params.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_utils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/mio_utils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/miobase.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/streams.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miuint32.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/big_endian.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/broken_utf8.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_data.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/japanese_utf8.txt create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/little_endian.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/logical_sparse.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/malformed1.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miuint32_for_miint32.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miutf8_array_name.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat create mode 
100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/one_by_zero_char.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/parabola.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/single_empty_string.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/some_functions.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/sqr.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_empty_struct.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_mat4_le_floats.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_skip_variable.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testbool_8_WIN64.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsimplecell.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_byteordercodes.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio5_utils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio_funcs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio_utils.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_miobase.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_pathological.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_streams.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/mmio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/netcdf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/Transparent Busy.ani create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_1d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_2d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_4d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_5d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_6d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_7d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_8d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_1d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_2d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_4d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_5d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_6d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_7d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_8d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_1.nc create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_2.nc create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_3_maskedvals.nc create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-3x3d-2i.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-mixed.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-11x1x10.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-15x10x22.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x1.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x5.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x7.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x3x5.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-11x1x10.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-15x10x22.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x1.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x5.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x7.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x3x5.dat create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/invalid_pointer.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/null_pointer.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte_descr.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex32.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex64.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float32.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float64.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_heap_pointer.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int16.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int32.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int64.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_string.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint16.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint32.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint64.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_byte_idl80.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated.sav create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_inherit.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated_3d.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/io/tests/data/various_compressed.sav create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_fortran.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_idl.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_mmio.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_netcdf.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_paths.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/tests/test_wavfile.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/io/wavfile.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg.pxd create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/__init__.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_basic.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutine_wrappers.f create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutines.h create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pxd create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pyi create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cholesky.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cossin.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_ldl.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_lu.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_polar.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qr.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qz.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_schur.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_svd.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_expm_frechet.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_flinalg_py.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_interpolative_backend.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutine_wrappers.f create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutines.h create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_expm.pyi create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_inv_ssq.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_sqrtm.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_misc.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_procrustes.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_sketches.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_solvers.py create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/_special_matrices.py create mode 100644 
__packaged__/coreml/.python_dependencies/scipy/linalg/_testutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/blas.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pyx
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pyx
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp_cholesky.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp_lu.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp_qr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp_schur.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/decomp_svd.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/flinalg.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/interpolative.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/lapack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/matfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/misc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/special_matrices.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_15_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_18_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_19_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_20_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_6_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/gendare_20170120_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_blas.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_blas.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_lapack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cythonized_array_utils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cholesky.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cossin.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_ldl.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_polar.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_update.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_fblas.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_interpolative.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_lapack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matmul_toeplitz.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_misc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_procrustes.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_sketches.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solve_toeplitz.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solvers.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_special_matrices.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/_common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/ascent.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/doccer.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/ecg.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/face.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/tests/test_common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/tests/test_config.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/misc/tests/test_doccer.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_filters.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_fourier.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_interpolation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_measurements.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_morphology.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_docstrings.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_support.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/filters.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/fourier.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/interpolation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/measurements.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/morphology.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_inputs.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_results.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_strels.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/dots.png
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_c_api.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_datatypes.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_filters.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_fourier.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_interpolation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_measurements.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_morphology.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_splines.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/_add_newdocs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/_models.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/_odrpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/models.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/odrpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/odr/tests/test_odr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/README
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/__nnls.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_basinhopping.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_cobyla_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_constraints.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_differentiable_functions.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_differentialevolution.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_direct_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_dual_annealing.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_hessian_update_strategy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HConst.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/Highs.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsIO.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsInfo.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLp.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsOptions.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsStatus.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/SimplexConst.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/highs_c_api.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lbfgsb_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linesearch.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_doc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_highs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_ip.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_rs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_simplex.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_util.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/bvls.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/dogbox.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/least_squares.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/lsq_linear.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf_linear.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_milp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_minimize.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_minpack_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_nnls.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_nonlin.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_numdiff.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_optimize.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_qap.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_remove_redundancy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_root.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_root_scalar.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_shgo.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/triangulation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_slsqp_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_spectral.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_tnc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trlib/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/canonical_constraint.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/projections.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/qp_subproblem.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/report.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_projections.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_report.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tr_interior_point.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_dogleg.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_exact.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_krylov.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_ncg.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_tstutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/_zeros_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/cobyla.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/_zeros.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/c_zeros.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/lbfgsb.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/linesearch.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/minpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/minpack2.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/moduleTNC.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/nonlin.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/optimize.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/slsqp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__basinhopping.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__differential_evolution.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__dual_annealing.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__linprog_clean_inputs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__numdiff.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__remove_redundancy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__root.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__shgo.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__spectral.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cobyla.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraint_conversion.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraints.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cython_optimize.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_differentiable_functions.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_direct.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_hessian_update_strategy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_hessinv.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_setulb.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_least_squares.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linear_assignment.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linesearch.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linprog.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_linear.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_milp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minimize_constrained.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nnls.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nonlin.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_optimize.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_quadratic_assignment.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_regression.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_slsqp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_tnc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_exact.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_krylov.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_zeros.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/tnc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/optimize/zeros.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_arraytools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_bsplines.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_czt.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_fir_filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_lti_conversion.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_ltisys.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_max_len_seq.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_peak_finding.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_savitzky_golay.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_signaltools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_spectral.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_spectral_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_upfirdn.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_waveforms.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/_wavelets.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/bsplines.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/fir_filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/lti_conversion.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/ltisys.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/signaltools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/spectral.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/spline.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/mpsig.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_array_tools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_bsplines.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_cont2discrete.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_czt.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_dltisys.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_fir_filter_design.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_ltisys.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_max_len_seq.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_peak_finding.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_result_type.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_savitzky_golay.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_signaltools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_spectral.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_upfirdn.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_waveforms.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_wavelets.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/tests/test_windows.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/waveforms.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/wavelets.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/windows/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/windows/_windows.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/signal/windows/windows.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_arrays.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_base.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_bsr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_compressed.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_construct.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_coo.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_csc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_csr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_data.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_dia.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_dok.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_extract.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_index.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_lil.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_matrix_io.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_spfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/_sputils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/base.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/bsr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/compressed.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/construct.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/coo.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_laplacian.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_validation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/setup.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_connected_components.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_conversions.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_flow.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_graph_laplacian.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_matching.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_reordering.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_shortest_path.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_spanning_tree.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_traversal.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/csr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/data.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/dia.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/dok.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/extract.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/lil.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/_add_newdocs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/linsolve.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds_doc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/COPYING
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/arpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/test_svds.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_expm_multiply.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_interface.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/_gcrotmk.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/iterative.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lgmres.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsmr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsqr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/minres.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_iterative.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lgmres.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsmr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsqr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_minres.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_utils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tfqmr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/utils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_matfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_norm.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_onenormest.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_svdp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/dsolve.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/eigen.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/interface.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/isolve.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/matfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/propack_test_data.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_expm_multiply.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_interface.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_matfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_norm.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_onenormest.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_propack.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_pydata_sparse.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/sparsetools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/spfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/sputils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py2.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py3.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_array_api.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_base.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_construct.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_extract.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_matrix_io.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sparsetools.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_spfuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sputils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_ckdtree.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_geometric_slerp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_kdtree.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_plotutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_procrustes.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_qhull.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_spherical_voronoi.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/_voronoi.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/ckdtree.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/distance.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/distance.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/kdtree.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/qhull.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/qhull_src/COPYING.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X1.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X2.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/degenerate_pointset.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-boolean-inp.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-double-inp.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-hamming-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jaccard-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-spearman-ml.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-bool-data.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-double-data.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-int-data.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-uint-data.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/selfdual-4d-polytope.txt
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__plotutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__procrustes.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_distance.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_hausdorff.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_kdtree.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_qhull.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_slerp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_spherical_voronoi.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_groups.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_spline.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/rotation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_groups.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_spline.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_add_newdocs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ellip_harm.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_lambertw.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_logsumexp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_mptestutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/cosine_cdf.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/expn_asy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_asy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_data.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/lambertw.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/loggamma.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/struve_convergence.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/utils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel_data.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/wrightomega.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_precompute/zetac.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_sf_error.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_spfun_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_spherical_bessel.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_test_internal.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_testutils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyx
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pyx
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx_defs.h
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_defs.h
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/add_newdocs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/cython_special.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyx
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/orthogonal.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/sf_error.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/specfun.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/spfun_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/data/boost.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/data/gsl.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/data/local.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_bdtr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_boxcox.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdflib.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdft_asymptotic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_cosine_distr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_cython_special.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_data.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_dd.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_digamma.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_ellip_harm.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_erfinv.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_exponential_integrals.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_faddeeva.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_gamma.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_gammainc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_hyp2f1.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_hypergeometric.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_kolmogorov.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_lambertw.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_log_softmax.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_loggamma.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_logit.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_logsumexp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_mpmath.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_nan_inputs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtri_exp.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal_eval.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_owens_t.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_pcf.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_pdtr.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_powm1.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_expn_asy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_gammainc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_utils.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_round.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_sf_error.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_sici.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_spence.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_spfun_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_sph_harm.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_spherical_bessel.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_trig.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_wright_bessel.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_wrightomega.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/special/tests/test_zeta.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_axis_nan_policy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_biasedurn.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_binned_statistic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_binomtest.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_boost/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_constants.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_continuous_distns.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_covariance.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_crosstab.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_discrete_distns.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_distn_infrastructure.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_distr_params.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_entropy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_fit.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_generate_pyx.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_hypotests.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_kde.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_ksstats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_levy_stable/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_mannwhitneyu.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_morestats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_mstats_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_mstats_extras.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_multivariate.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_odds_ratio.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_page_trend_test.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_qmc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_qmc_cy.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_rcont/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_relative_risk.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_resampling.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_result_classes.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_rvs_sampling.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_sobol.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_sobol_direction_numbers.npz
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_stats.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_stats_mstats_common.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_stats_py.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_tukeylambda_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_unuran/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran.pxd
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran_wrapper.pyi
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_variation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/_warnings_errors.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/biasedurn.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/contingency.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/distributions.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/kde.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/morestats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/mstats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/mstats_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/mstats_extras.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/mvn.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/qmc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/sampling.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/statlib.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/common_tests.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/fisher_exact_results_from_r.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/AtmWtAg.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SiRstv.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs01.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs02.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs03.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs04.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs05.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs06.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs07.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs08.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs09.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_linregress/Norris.dat
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/data/studentized_range_mpmath_ref.json
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_axis_nan_policy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_binned_statistic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_boost_ufuncs.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_contingency.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_continuous_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_crosstab.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_distns.py
 create mode 100755 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_distributions.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_entropy.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_fit.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_hypotests.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_kdeoth.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_morestats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_basic.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_extras.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_multivariate.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_odds_ratio.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_qmc.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_rank.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_relative_risk.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_resampling.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_sampling.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_tukeylambda_stats.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/stats/tests/test_variation.py
 create mode 100644 __packaged__/coreml/.python_dependencies/scipy/version.py
 create mode 100644 __packaged__/coreml/.python_dependencies/tests/__init__.py
 create mode 100644 __packaged__/coreml/.python_dependencies/tests/test_stable_diffusion.py
 create mode 100644 __packaged__/coreml/README.md
 create mode 100644 __packaged__/coreml/__init__.py
 create mode 100644 __packaged__/coreml/actor.py
 create mode 100644 __packaged__/coreml/preferences.py
 create mode 100644 __packaged__/coreml/requirements.txt
 create mode 100644 __packaged__/coreml/test.png
 create mode 100644 api/models/fix_it_error.py
 create mode 100644 api/models/generation_arguments.py
 delete mode 100644 generator_process/models/fix_it_error.py

diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER
b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt new file mode 100644 index 00000000..78a5fe85 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt @@ -0,0 +1,11 @@ +Copyright (c) 2020, Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA new file mode 100644 index 00000000..7a2179f8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA @@ -0,0 +1,60 @@ +Metadata-Version: 2.1 +Name: coremltools +Version: 6.3.0 +Summary: Community Tools for Core ML +Home-page: https://github.com/apple/coremltools +Author: Apple Inc. +Author-email: coremltools@apple.com +License: BSD +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +License-File: LICENSE.txt +Requires-Dist: numpy (>=1.14.5) +Requires-Dist: protobuf (<=4.0.0,>=3.1.0) +Requires-Dist: sympy +Requires-Dist: tqdm +Requires-Dist: packaging + +coremltools +=========== + +`Core ML `_ +is an Apple framework that allows developers to easily integrate +machine learning (ML) models into apps. Core ML is available on iOS, iPadOS, +watchOS, macOS, and tvOS. 
Core ML introduces a public file format (.mlmodel) +for a broad set of ML methods including deep neural networks (convolutional +and recurrent), tree ensembles (boosted trees, random forest, decision trees), +and generalized linear models. Core ML models can be directly integrated into +apps within Xcode. + +:code:`coremltools` is a Python package for creating, examining, and testing models in +the .mlmodel format. In particular, it can be used to: + +- Convert trained models from popular machine learning tools into Core ML format + (.mlmodel). +- Write models to Core ML format with a simple API. +- Make predictions using the Core ML framework (on select platforms) to + verify conversion. + +More Information +---------------- + +- `coremltools user guide and examples `_ +- `Core ML framework documentation `_ +- `Machine learning at Apple `_ + +License +------- +Copyright (c) 2020, Apple Inc. All rights reserved. + +Use of this source code is governed by the +`3-Clause BSD License `_ +that can be found in the LICENSE.txt file. diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD new file mode 100644 index 00000000..cdf94764 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD @@ -0,0 +1,801 @@ +coremltools-6.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +coremltools-6.3.0.dist-info/LICENSE.txt,sha256=66FzSUqZcxfHi3zmtVLo5EM-Me_29eqQk0GFJC63H90,1488 +coremltools-6.3.0.dist-info/METADATA,sha256=oTLMWokJJCBSYGmkl4bqAe5pF84UcZLGJQ8hpf-Sycs,2303 +coremltools-6.3.0.dist-info/RECORD,, +coremltools-6.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +coremltools-6.3.0.dist-info/WHEEL,sha256=bqKuSnIUrnP2Cm43W1aVJYmhiN5AXQEeSa7Zd-R-YnM,108 +coremltools-6.3.0.dist-info/top_level.txt,sha256=LpjwPWmxFPfhcovVTFV_nHNcZNnb4lN3DytAZ7KLgL8,12 +coremltools/__init__.py,sha256=MjUWXzLcsNS5WLZsNzIzBjOrIf2w_74jYYf_gXUUSD8,4561 +coremltools/__pycache__/__init__.cpython-310.pyc,, +coremltools/__pycache__/version.cpython-310.pyc,, +coremltools/_deps/__init__.py,sha256=rMyK1D_RfWjb2SYRr8Wm4ajG4UaOvhWK-5Zz2T0dDFI,5563 +coremltools/_deps/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/__init__.py,sha256=VUt_9erzq60Md56GOZYEry8JyHjcMTskJsg80-LTj4Y,490 +coremltools/converters/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/__pycache__/_converters_entry.cpython-310.pyc,, +coremltools/converters/__pycache__/_profile_utils.cpython-310.pyc,, +coremltools/converters/_converters_entry.py,sha256=kcryVpWGwEDgSFSdMphtj9tkhOOccfXHbYGHDaHLZpk,38179 +coremltools/converters/_profile_utils.py,sha256=j_N-n3H5E-ncyXmWqMw2TCXgebrv4Hi3VyLkxfcPxbQ,2415 +coremltools/converters/libsvm/__init__.py,sha256=5d5hKszt6hW6MmMDuRDS3o1qmdsRMLn6gBzD4vluU28,3399 +coremltools/converters/libsvm/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/libsvm/__pycache__/_libsvm_converter.cpython-310.pyc,, +coremltools/converters/libsvm/__pycache__/_libsvm_util.cpython-310.pyc,, +coremltools/converters/libsvm/_libsvm_converter.py,sha256=qSYudH0qCayM0OVld9z9AEDT9JM2RB4hwPU23RwuHQM,7202 +coremltools/converters/libsvm/_libsvm_util.py,sha256=Pprr6pb1BjMuICUsRBzDz-izxFpdRLWMVWrHpn1i7ak,971 +coremltools/converters/mil/__init__.py,sha256=VK_pHIuhb5HeMbYKMpNo2v62n0JplyFUPeJdLJEyxQ0,894 +coremltools/converters/mil/__pycache__/__init__.cpython-310.pyc,,
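For readers unfamiliar with the package being vendored here, the workflow its METADATA description above refers to (convert a trained model, then make a prediction to verify the conversion) looks roughly like the minimal sketch below. The toy :code:`torch.nn.Linear` model and the input name :code:`x` are illustrative stand-ins, not anything used by this patch; :code:`ct.convert`, :code:`ct.TensorType`, and :code:`MLModel.predict` are documented coremltools entry points, and :code:`predict` runs only on the select platforms (such as macOS) noted above::

    import coremltools as ct
    import torch

    # Illustrative stand-in model; any traceable torch.nn.Module would do.
    model = torch.nn.Linear(3, 1).eval()
    example = torch.rand(1, 3)
    traced = torch.jit.trace(model, example)

    # Convert the traced model into Core ML format.
    mlmodel = ct.convert(
        traced,
        inputs=[ct.TensorType(name="x", shape=example.shape)],
    )

    # Make a prediction to verify the conversion (only on platforms
    # where the Core ML framework is available, e.g. macOS).
    print(mlmodel.predict({"x": example.numpy()}))

Tracing against a concrete example input fixes the graph that :code:`ct.convert` consumes; flexible input shapes would instead be declared through :code:`ct.RangeDim` inside the :code:`ct.TensorType` shape.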
+coremltools/converters/mil/__pycache__/_deployment_compatibility.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/conftest.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/debugging_utils.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/input_types.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/test_flexible_shape_inputs.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/testing_reqs.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/_deployment_compatibility.py,sha256=CGT3AkXVyQaBHor7l5y41pUch-UvYFN9yX6i0jCHi90,5944 +coremltools/converters/mil/backend/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/backend/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/__pycache__/backend_helper.cpython-310.pyc,, +coremltools/converters/mil/backend/backend_helper.py,sha256=hfDxGw9QwTWZHhKu2n6zJDsnJP8S8X6HkrP6zDd1ZBE,3868 +coremltools/converters/mil/backend/mil/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/backend/mil/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/test_helper.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/test_model_input_params.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/helper.py,sha256=eASB_rm9LoFzjqR073Yxkzh5J-CkJFd3vGx4XtvNaIg,12647 +coremltools/converters/mil/backend/mil/load.py,sha256=sfiSQyyY3ygM84MG9fTbFapOF7CWgJqweyzmH5Ws0oM,23014 +coremltools/converters/mil/backend/mil/passes/__init__.py,sha256=KHEqbZx-4q-53RegXVIDZfxonilAe0-361yeVMo_p-E,355 +coremltools/converters/mil/backend/mil/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/adjust_io_to_supported_types.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/fuse_activation_silu.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/insert_image_preprocessing_op.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/sanitize_name_strings.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py,sha256=KzYYhYlx95GhpPI_DfxPP5KBc1q7D4kPMdoWxi5aLsw,10346 +coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py,sha256=Phgi9Ci8db4tJyM6QbVLEKD-CqGIoDMOnD0JZGr3E-Q,2744 +coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py,sha256=amH7rQjeLlTXfAw3ccK8fS3SgzJW-tcsbe7plLpIzww,3434 +coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py,sha256=QGqKu7Eh69ebgIJosT6WeNMf9FrZKQB09R8SWRJWOzs,1014 +coremltools/converters/mil/backend/mil/passes/test_passes.py,sha256=VrJ-kIyVhT_YGBW66KTSiO2oLIZuCYEepp4nQrpGoIA,31333 +coremltools/converters/mil/backend/mil/test_helper.py,sha256=I_g44hmrxPTYDmtwQS735peHrAfzPS9WmkMr0uCtxt4,1265 +coremltools/converters/mil/backend/mil/test_model_input_params.py,sha256=8Q1IPsOUTbV1rl3P7nXD9ZeHZhZlEn2oDzz34ZXle-c,14116 +coremltools/converters/mil/backend/nn/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 
+coremltools/converters/mil/backend/nn/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/mil_to_nn_mapping_registry.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/op_mapping.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/load.py,sha256=eBP4i2OnCkrXcEMWzM302c1kGD-4vKDcOvFJQI2ER9c,13539 +coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py,sha256=1Zn-vZtr94KghOsMg6wbvYd4hf9HS0eBjy7-CnwRigw,739 +coremltools/converters/mil/backend/nn/op_mapping.py,sha256=7iQEpxeh-PxOYc46AhJ-mQV9CI1sd1i_PN0rxe04--E,129681 +coremltools/converters/mil/backend/nn/passes/__init__.py,sha256=5v0mIELxvFz0asqZRg5vC3loZrPIwbRlciRMKbICIVU,432 +coremltools/converters/mil/backend/nn/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/alert_return_type_cast.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/commingle_loop_vars.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/conv1d_decomposition.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_return_inputs_as_outputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_return_unused_inputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_unused_inputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/mlmodel_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/test_mlmodel_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py,sha256=r_ESDoZI1cTSFgB4nmmqPyhpsX7V6Rxg3FOQ9hd7pks,1698 +coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py,sha256=xFtEETz312GRoOj9G4MF88qR9Seg2weE0aIcEMWFNq0,2528 +coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py,sha256=TonBSpywmGI7nj18VAAWJ2Mjk7mIEI59TKBWBK0Cxx0,3705 +coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py,sha256=2rU5JAoP82u4lNIFjEUKbZgTATrH3Px52zud5Ql4CV4,2153 +coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py,sha256=pVvJDU6d5gTDnApVXofs1vSLK6xDY7Q_8HiOueFHaTk,2086 +coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py,sha256=JMEQBCGCCwspXXOrQVDO6zSU3z7ey8AcPqnesPT7bKc,1641 +coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py,sha256=MMkar87LfR2RqzJq1Gr7IjB1zS-cW6hYcYTGZz0kukQ,18489 +coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py,sha256=1YiFX-wL0vYxZECBLOd1bx0x2dAPdKQeWN4hwpiVveI,37205 +coremltools/converters/mil/backend/nn/passes/test_passes.py,sha256=-QTwAhNVRnR5nemZv8ck7jpQuKFmkgKiuHrIBMt_zpI,7734 +coremltools/converters/mil/conftest.py,sha256=b3s_rk8_X7Qef5qPqjQvD4Jh-0dvKogwCBza3__fLEg,460 +coremltools/converters/mil/converter.py,sha256=fLAKgvyjmRSnVcWvXrsbzT8zM6_CWkOl0hYd0_LWtF8,12340 +coremltools/converters/mil/debugging_utils.py,sha256=wseDDiowencLrucDa4fndMVzKhDmLK6HgfeWHvq0xhI,6991 +coremltools/converters/mil/experimental/__init__.py,sha256=FctNmlyIxwYyQaMKDFsO_9yRk-3zSd4DvigWLjq1i3A,218 +coremltools/converters/mil/experimental/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/README.md,sha256=Rl-HDN3V9SOeUpQb7pnE2ol7VNnLk6rVcIlOL_aO4_k,31488 
+coremltools/converters/mil/experimental/passes/__init__.py,sha256=FctNmlyIxwYyQaMKDFsO_9yRk-3zSd4DvigWLjq1i3A,218 +coremltools/converters/mil/experimental/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_batchnorm_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_bias_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_scale_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_layernorm_instancenorm_pattern_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_linear_bias_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_pass_infrastructure.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py,sha256=spiMvPiZIm6c95hRzLQ0QJV0m4byezGuJ7HqTUYMhkM,6064 +coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py,sha256=kw2uEqdvwaWhAtXMDTw8_r24lb98GAlioWTpinMrX1U,12523 +coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py,sha256=06IAMF3pmxC1x86Q09AZ-fAefyL1Cc89W7EpT4zPpJ0,8575 +coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py,sha256=76ER1S9R5c_xLr-Nk40NTCmerH880AezFD0oPqY-sG4,21031 +coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py,sha256=MzvMgbjshh_Q1qpzdAXJdxvcjsBOfP2i02_tv_BpQZQ,4994 +coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py,sha256=zEsWPj8wIcqQ8WlGhNZTRSiz4QBjhN_Bvotknrx96GM,9865 +coremltools/converters/mil/frontend/__init__.py,sha256=RNrekscuLoD1IlRtDtknffbXW-HpBgRUPeQvJZ36xCE,264 +coremltools/converters/mil/frontend/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/__pycache__/_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/_utils.py,sha256=makq6wWuKNEuu6bmzSULCXfrLRZde3eREmELjNO3LIw,15826 +coremltools/converters/mil/frontend/milproto/__init__.py,sha256=KmeSYtj2APlznOBRP6nMj3dWsZYytRNl3usX3h9TFrA,239 +coremltools/converters/mil/frontend/milproto/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/test_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/helper.py,sha256=0PfRznC9qaThNNiky0GVc3Bxj71HZYoQrFE-xzWsyGQ,2434 +coremltools/converters/mil/frontend/milproto/load.py,sha256=xoH-n-oPIqwoQia0nLG0px76jzdSzGC_QuFxRMKEe68,17069 +coremltools/converters/mil/frontend/milproto/test_load.py,sha256=Enlsku2bIL7sAyhdiyubUAd96P9JDbud1U6Jnk_yrk4,8796 +coremltools/converters/mil/frontend/tensorflow/__init__.py,sha256=zk4a9ntbs4MmR810UzPz1ME0j3cFIFQ_8R6NnOU1icM,762 +coremltools/converters/mil/frontend/tensorflow/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/basic_graph_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/convert_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/dot_visitor.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/naming_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/parse.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/parsed_tf_node.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/tf_op_registry.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/tfssa.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py,sha256=DkqK2JZ3hBhZhM8-ADL3ai8e4zounF0hwY9d_7dXiS8,10999 +coremltools/converters/mil/frontend/tensorflow/convert_utils.py,sha256=Z6fsFcif-E5kz0zR1CM301fOV9UQARXNrY5y5L72pBk,7497 +coremltools/converters/mil/frontend/tensorflow/converter.py,sha256=8JdirgUo4woQZsBh0hIaLDlDyzAXAjLDQ6T79dFHWjw,21705 +coremltools/converters/mil/frontend/tensorflow/dialect_ops.py,sha256=mIEP6BkcTA6-ssQOl1A4I5PC0f1FWUDlUznSvD7Avjc,6464 +coremltools/converters/mil/frontend/tensorflow/dot_visitor.py,sha256=5Tg5P4jIHLfBLeyxKetUoXrH2ajvhStZzvWmLowWzow,4544 +coremltools/converters/mil/frontend/tensorflow/load.py,sha256=r68_hpdeAUtTWaIEg1RvO2gKFJCCw-jUEVmzklGFBKI,12848 +coremltools/converters/mil/frontend/tensorflow/naming_utils.py,sha256=8YpEU53uohT-JxZJBeIIUWBhbfW7lmdh9jP9ybQSCfg,993 +coremltools/converters/mil/frontend/tensorflow/ops.py,sha256=ewDgy2sAum-VjwTuFkJBB1phu9pl6cti5IHKSp_0QLM,122775 +coremltools/converters/mil/frontend/tensorflow/parse.py,sha256=r-DsewTvFfRKb78VZD5iPjkHEVyxT-TOoK1yBEz7C0M,4082 +coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py,sha256=qVeJqKXTkD4_P0SCpwcI1fnaeK7u40MMRFMKJArvnjg,3234 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py,sha256=YXqZ3yZ0uLdeDQolnlqAxtXu6QXijrNygfAktKyLeao,300 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/backfill_make_list_elem_type.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/expand_tf_lstm.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/tf_lstm_to_core_lstm.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py,sha256=VvsUVaSRRC5cJwKUNgkvfZDnX56PdxwbJ8WTCRyjme8,4788 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py,sha256=Rral6EZC_Fjnsbr6Mk_VyRS2LwzoKxovot3Vq9e0XQk,7617 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py,sha256=eiL-O4Ln5mydvomONP42C-mTo2oK4FHB1lhH4B6TPVM,2071 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py,sha256=m02EXj12T_aIIa9b9kSz4im5oL1gdh-FT-qIulADeMM,12437 +coremltools/converters/mil/frontend/tensorflow/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_composite_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_custom_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_graphs.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_parse.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_parsed_tf_node.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_tf_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py,sha256=oyUgbGcs1KD5rR7qGsJHLO_Dylj0No70IqhR-v7pGAE,2415 +coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py,sha256=hDQsXdH0lQnwz-bF6G13T8H5JyY_H2Qpxc5PUTTL1Tc,11068 +coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py,sha256=G_o4pMUduHq-bO_aoTi3NtJqUa9_r3wIjk9APdZuS-A,1463 +coremltools/converters/mil/frontend/tensorflow/test/test_load.py,sha256=M_m7DsGbRC09xFS_ABZrqdbvm6aM5z1MnXAiV8w7sSI,16127 +coremltools/converters/mil/frontend/tensorflow/test/test_ops.py,sha256=P991du_fGn2urOVRyOktaT03OPRQfoHruLvHtqJIwWA,251335 +coremltools/converters/mil/frontend/tensorflow/test/test_parse.py,sha256=dyxsv6vDOmD-u9KGD3VnCA14zLN03NsIq5xG0poacVo,5013 +coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py,sha256=-0-rwXYlsUKqRLHdtJXDszTJWZBisWpwkG5yU4eV4U0,2187 +coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py,sha256=PHNQrzTSaaGlDXMSOYM4ektUc3_Cr1q4N_nqwZvTGBo,38934 +coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py,sha256=1ojjkIsJBgkmxO-YWkIVvFPQ4yQU6-Qwm6ZR9Cs9dZE,14271 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py,sha256=034IrrS2tps3c8Fk78KUZUXz1jUGI8dFylgNqKAUbCQ,847 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/cond_to_where.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/constant_propagation.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_asserts.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_constant.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_disconnected_nodes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/functionalize_loops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/fuse_dilation_conv.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/insert_get_tuple.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/quantization_pass.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/tensor_array_transform.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/variable_node_transform.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/visitors.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py,sha256=kBG2Og-V3RNMQLiXOAyDhtqB7brCBC7-cLot8Cqjpa4,4377 
+coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py,sha256=SdKMbyaW67C2LGjWZ5VfJijnMXm7GASIMG_xuo0JzTo,6880 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py,sha256=1EBB-arega95PY7Lb7P2sSRq5T7RAK60bjg5sj-Kwhw,2386 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py,sha256=a9E5QPSdlsccpAhUcwuYJSAfyeyst6SsXFMetOjYTKs,3059 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py,sha256=eV7xam2sY2swOXkscTVQyO7VAJnUJD5UZr6uXi--QQQ,669 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py,sha256=qwMTLvVVHbFLqwx8u9lZn1ETmVl70Ft_WJAxQ7LTMTk,19047 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py,sha256=YzBKuHHd894T4PVek2HrWn34Lj4mleXlklu0_WubRd0,7675 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py,sha256=9KlQQGhZodQTAh6Zn8DD4q110a_pkN-WDgLazQjt0IU,3398 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py,sha256=cRa_rYajqS9q74XPkF8fwgwmE5PQdvQpU2AFRLmowuM,2966 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py,sha256=K05rgqEm8vhfRKkrjukFZYpYwGanWMQhnW33Qa0kSuE,3649 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py,sha256=-9SfcPttBV7jk8WHxPTL5a6iwwKoq3mVObfDJstzTM4,2898 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py,sha256=GcYQEti8-Pbkb9yx7gvYqrZtfNEFf_E49ZN69UzhXPQ,6501 +coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py,sha256=kVaXMUKnzA7mntR4C_Y7mrY9vg0dMUkdNFQg7C_j3xk,1769 +coremltools/converters/mil/frontend/tensorflow/tfssa.py,sha256=Cs607FC4dk1CjQ2Qr3jh2jmdd-dOlOjyjbk-dK5zo8M,21046 +coremltools/converters/mil/frontend/tensorflow2/__init__.py,sha256=egD5DxwrT6jNiUGCiyhrCKiF7alfE8-dSBcUtGn7djY,455 +coremltools/converters/mil/frontend/tensorflow2/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/converter.py,sha256=CWj1lr2ee4DdiLU_rZYRcsMTD8eNj0IEFYyo6imSKnE,1540 +coremltools/converters/mil/frontend/tensorflow2/load.py,sha256=SsnyT62Cm_oT0AyNdHr1BD1JnqLUQo2gpzwLrczAGF4,14784 +coremltools/converters/mil/frontend/tensorflow2/ops.py,sha256=uxaNzzNYfGqZSNDr8lTP4h8LQ1mcViEDfvNY-kK5YtU,8460 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py,sha256=w3I7aJMf6J9tFjXVBkU6LL-xBLMryvNyug7z2elobvs,253 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/remove_vacuous_cond.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/test_v2_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py,sha256=XjbWSKCvtaVtqcFkkUtFrvY0LP7PxFiYiQ-lb3C_fI8,4645 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py,sha256=0PccXazU4ifnlPDbXPvO3y7SCN5EGud4t39zYP1IRWM,1850 +coremltools/converters/mil/frontend/tensorflow2/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/__init__.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_tf2_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_ops_tf_keras.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py,sha256=09bLYLJJPWa9KlMy6fWW23Gp_yhEhqwbM2Qbg4s7lVM,17555 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py,sha256=gcYEMjyZ0NotJrH2Nbyam6AbCfvc1FNt7LVeyfvw2Y4,8379 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py,sha256=9KZIamhFRA0rPnLo0aIV8N3RxjqQUGm6j6ijLQO9AJI,25555 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py,sha256=OrUIgqhuQyj1-3TSb-DCZ6i726xZqH5bTpoQRJKLqM4,59928 +coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py,sha256=u3MJhV-abwCNAxW3uwqcaq7gdFkoEQ2M4SdzdKxNk4w,10465 +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py,sha256=iDhveVfdgf0DQZiiGKbrv2AqqXhxxIH02lpAAW7ysoU,371 +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__pycache__/rewrite_control_flow_functions.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py,sha256=sS5Eu_vctUn9aUJJ7GrzPZKz9XBKxez_csb8LQtv89Q,20086 +coremltools/converters/mil/frontend/torch/__init__.py,sha256=Pb2LcDa-lprF5s4A9aui9gV1Wd9Hubh1r9pKgbviDwA,494 +coremltools/converters/mil/frontend/torch/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/internal_graph.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/torch_op_registry.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/torchir_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/converter.py,sha256=a6NhfSjvjisr6DwW9v4EQD05QbtXDocvNhrncdYQZIk,20820 +coremltools/converters/mil/frontend/torch/dialect_ops.py,sha256=hufPXwH_ciov7fhTGc-bMnwIw7CQxI662vOZQda_988,8408 +coremltools/converters/mil/frontend/torch/internal_graph.py,sha256=aW5A5A1KspQ_uy8vwotGfEDM_-1hvjnI08K4Ngto9rc,12949 +coremltools/converters/mil/frontend/torch/load.py,sha256=xwbY8KbZoNGOByXNtOCJE9B_fW9Vaj2NB9c5NCCPz_0,4629 +coremltools/converters/mil/frontend/torch/ops.py,sha256=yp4S2smZu81rvxhm-onZhDVDHEywNrYcTHuwpp-aptQ,194098 +coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py,sha256=VKaxWdiSJkPcA59djF6299M2Zebt7YrchEallFmusr4,294 +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/torch_tensor_assign_to_core.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/torch_upsample_to_core_upsample.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py,sha256=348dCWHvgqbRsuNKSvxi--2X93UaBqMRQCvBDbOM8lw,2582 +coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py,sha256=NdL3cinpWzxerHVtKe0Vc46S6knPQZAmls_ddsTGhxQ,4525 +coremltools/converters/mil/frontend/torch/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/torch/test/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_custom_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_examples.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_internal_graph.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_torch_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_torch_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/test_api.py,sha256=WMS4nlkduCpIEj0HFFIxQThgDfocYlBD1AdSnehK-BQ,1753 +coremltools/converters/mil/frontend/torch/test/test_custom_ops.py,sha256=ibMiwNeQkMcoEY1wWLfo6LKPm1RiYsDS3WbD_U_ZGnU,5626 +coremltools/converters/mil/frontend/torch/test/test_examples.py,sha256=AkdE7NNX3vbMKm6GGk4vqK0pZ60LJqKcmsKjAz8suSU,1880 +coremltools/converters/mil/frontend/torch/test/test_internal_graph.py,sha256=Bo2RTg0gzvAFFbtFmDgJ-jUh_VWBATZytsAzzbE58so,66729 +coremltools/converters/mil/frontend/torch/test/test_passes.py,sha256=WZKBvSOzDfB-bGSV1Z-A-XWnFx3MJG-1q3zeKTfAUD0,12355 +coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py,sha256=JXLgrTfCBG5PFgBZTIvlDUUz0a4DlehYcUmpeyTSiVA,65369 +coremltools/converters/mil/frontend/torch/test/test_torch_ops.py,sha256=C_T4gHQgMp_6w0sVS4848cUpV_RaYKokVo-wYvHtBaw,269720 +coremltools/converters/mil/frontend/torch/test/testing_utils.py,sha256=1PdmCOLXH99hrGwWvVMk2eEzmjHiqO3QyKBG7gkZ-KQ,9054 +coremltools/converters/mil/frontend/torch/torch_op_registry.py,sha256=OH4O2Wc74U7HiD8P4o2Yg6H6Fodhlm_RnMOLPzrXt6M,2276 +coremltools/converters/mil/frontend/torch/torchir_passes.py,sha256=UfllOFdJd4b-U4CvRlvETzQ7ZJJw7NQxVihzWafi4x8,12932 +coremltools/converters/mil/input_types.py,sha256=j78OHR-vDJQqMPSwT4BbrHz6xa1fshOLHCsXCOtq4Rw,18307 +coremltools/converters/mil/mil/__init__.py,sha256=5pSgAVXTX2o5s7uT2hi1goKhU1V5CAXWVoqMLL9HO7Q,833 +coremltools/converters/mil/mil/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/block.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/builder.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/input_type.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/operation.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/program.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/var.cpython-310.pyc,, +coremltools/converters/mil/mil/block.py,sha256=_tRC6YJPHAlXlzBsTydpCjZFyJkxPhYNOy-zKR9baDM,32383 +coremltools/converters/mil/mil/builder.py,sha256=D0ERESJiDeWVLP-em8Jmpmka19MhCieS2ZSneq4x9Mg,8906 +coremltools/converters/mil/mil/input_type.py,sha256=D4U9sQXKVlZPUt--fazMy85rNpomqHG1fxqUH_A_LgA,11733 +coremltools/converters/mil/mil/operation.py,sha256=7MdTAjxP9bvHCBiJhQQ4CVGEaGijZFCxwfIJN7ngysc,22407 
+coremltools/converters/mil/mil/ops/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/ops/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/__pycache__/registry.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__init__.py,sha256=H9x2ZQ0aQ3iJbD6GcQ3atZ6JkyCYny0wq3zYL3VeaaM,267 +coremltools/converters/mil/mil/ops/defs/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/_op_reqs.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/_op_reqs.py,sha256=zVg-z94rkUCBkGyxBD3dMXkk0jIDW1zHIQnc0XcQjzg,354 +coremltools/converters/mil/mil/ops/defs/_utils.py,sha256=DFsyGosV0c143ueKl_Ziiq6q9Ma6mNyW2AEf1YDZluI,21615 +coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py,sha256=gySvCLcaf3Ama_KjPllN0KCBE5XBP1hWS8A3TE0xQ4k,26614 +coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py,sha256=gZaKXRdzDh3fqDI-6auzI5-9Ij46ot2MuMAKandHRQw,3183 +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/activation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/classify.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/control_flow.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/conv.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/elementwise_unary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/linear.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/pool.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/random.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/recurrent.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/reduction.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/activation.py,sha256=iUo74qPL7T3ppffKcR9JhI4YGnHyXrITUpAPRnCEuA4,15088 +coremltools/converters/mil/mil/ops/defs/iOS15/classify.py,sha256=eejXnPeTeD3mWdUb-2NwqaryZixIrgQDg1COQMi7fyc,3283 +coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py,sha256=K_KMZL1drvIdPj5N9JtQuhGHHHwHuTpwJxgVoJvrwm0,29194 +coremltools/converters/mil/mil/ops/defs/iOS15/conv.py,sha256=BYfxie-T11e43TS_0E-wFjShtVLs9i9-lkkAQFZlY60,16798 +coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py,sha256=jHxUuo1XPNQ-NprycwiBUwnAMrw55QfP5pEXq9VFqnk,15713 +coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py,sha256=r0yoTfmDkQcmqMZADu8OFYT6EIvxzU7akUV6PUSkSAg,20155 
+coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py,sha256=ccdIxevYBva5zL5ORuLyenC4PzVJBMt0Xy4okuA85Hg,33876 +coremltools/converters/mil/mil/ops/defs/iOS15/linear.py,sha256=WX3L7tG7dShjCtDdhNimpfSL8PWLPreAVh6p6q567aA,12377 +coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py,sha256=193vZ0cm-909K4OKK3aHdHoIeYMTbyf9ddsrjgAknWY,12595 +coremltools/converters/mil/mil/ops/defs/iOS15/pool.py,sha256=2aub91zsj4qYs1F-fTicavq1JLQmWncO2LbvP4qs8SY,9122 +coremltools/converters/mil/mil/ops/defs/iOS15/random.py,sha256=R1yCaFuKJuX1cUsdYwr5Qdi-lcAVZZHyObkQfCq8koE,9058 +coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py,sha256=TBuFBK3cd16W6D7H2OWmVW1TapCorke5SzvoZ-3eNEE,20532 +coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py,sha256=JBUxB9XtMfWQq66Ul2-SSGevnQTopTrNVToebiEKi-I,15176 +coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py,sha256=nuBoZvW5FHkoLETaPmTzohjyGMp-sNqTLSc105SuVoo,16508 +coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py,sha256=rh2ifcTFw3E0Qi95qNbrLhaWQ_Ir2ACH_hGgWKt1s4I,41580 +coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py,sha256=FTQxh-5JyX1VCCgvlkfEObnhzifPEEmYGRI_O0uTzRI,35196 +coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py,sha256=9-uGmEr7gANNDX_ae5h-84NqCWwlQcARfWH6UsuNWdM,724 +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/constexpr_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py,sha256=QGDuDisrRlebkhKsWgSfAm96v11UuAwpfAzJc0sW3WE,14272 +coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py,sha256=K93EldkBrWTeIdkZxrRBcKMMIPu_DUPzuuZkiARDKnw,3335 +coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py,sha256=QRm9vh7dik47WZ1MaGyB0kHyTtNptvh5gM2IFeKw3Ec,5863 +coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py,sha256=d7QOIZ4spvY0ywvAFDXppOwenu5q9FAmxYHcL8vyuIQ,3930 +coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py,sha256=j_cAR2A8r2MRmtK7PiBX58ojp3QTxtDjaPXRUhtPwLE,6920 +coremltools/converters/mil/mil/ops/helper.py,sha256=X26CylbEZhqvmcE_BKUyBK3S9SM95eFKq_jEq1z82jY,1245 +coremltools/converters/mil/mil/ops/registry.py,sha256=5WPQk19fyFrjxzupmpkth-T5SxHZ8UbcNj_6qW2Ihfs,8910 +coremltools/converters/mil/mil/ops/tests/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/ops/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_activation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_const.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_constexpr_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_control_flow.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_conv.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_elementwise_unary.cpython-310.pyc,, 
+coremltools/converters/mil/mil/ops/tests/__pycache__/test_image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_linear.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_pool.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_random.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_recurrent.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_reduction.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_slice.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/test_activation.py,sha256=DXmTvhPuZkgel-XgTPEr0t_sKrFda69NCz5g2qSaBNs,35469 +coremltools/converters/mil/mil/ops/tests/test_const.py,sha256=RppHpqP6HuT4mrneuHfwHGU-KPRBX5EpmT3NsSHUvQQ,1851 +coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py,sha256=ZO5uCtJNKOQ4ofPyHY0rb7tSmLdWfiCemINms64cAJo,22658 +coremltools/converters/mil/mil/ops/tests/test_control_flow.py,sha256=C-9mLUesd4KQKCid-SQpK2-iAr2fgWAXkf72Op6ojh8,13602 +coremltools/converters/mil/mil/ops/tests/test_conv.py,sha256=_KbdkJY5_cFkKZUPzdPHAvIgVYuf-zQDe3rsO1likKI,30760 +coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py,sha256=TY5-4Gqhc8ZiQULDwS_QJRBqxiwuqSGBDC0_Roojj5g,21099 +coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py,sha256=qA5KE4ASEGyJtL4jPI1ur4d7ZesFTS8aPM6WMcXCvVI,24212 +coremltools/converters/mil/mil/ops/tests/test_image_resizing.py,sha256=mIXh4VF6nqPbX8-9d_IirlTnyTIXwVS7Wc79-NbfdPA,32525 +coremltools/converters/mil/mil/ops/tests/test_linear.py,sha256=IWQ6wiHlQXa2MT8hZQ1KYB9qokeMSZfSUJPrWhF96xE,12430 +coremltools/converters/mil/mil/ops/tests/test_normalization.py,sha256=e0d7pj2xI2PG06eomk9RDgHwg590VpMnfrfCxq0sSlY,25998 +coremltools/converters/mil/mil/ops/tests/test_pool.py,sha256=NGywf4NzVyHzR2FMMmUXxszPCS9U9Y1ESPkz2VP0lPE,17261 +coremltools/converters/mil/mil/ops/tests/test_random.py,sha256=NmnQSa52nreeSC49VqUVMv-quvP_6hqcPoNlM7x0zrI,14861 +coremltools/converters/mil/mil/ops/tests/test_recurrent.py,sha256=V318_xADa0fwaynlMPl3KUHcXhS2P2TMO1GrS8K95hA,25212 +coremltools/converters/mil/mil/ops/tests/test_reduction.py,sha256=eDx42BYueU4m5zZ9PQTUR2UK0pNxJ0dodw0bCYW9pd4,13720 +coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py,sha256=AAmHXx8vO_8raXPpUivr2w6iUA2GEayxuf-KYlZLjqY,26958 +coremltools/converters/mil/mil/ops/tests/test_slice.py,sha256=I6VuQBpcETSUtroEIb1hQKa3NTwhQGO7LfNaUAFO2ik,13981 +coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py,sha256=46gWhtx980Z3U9rMphLDq6_wJv9iBZDbqb3z4E0wOaE,54152 +coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py,sha256=qLBZb0dzWI_OzPyQCcYVp9jyRW8oSCM-kRV4HKWPwVw,46203 +coremltools/converters/mil/mil/ops/tests/test_utils.py,sha256=FApzl9gGXGxJ5QUHR5E9IOtVgP7gzN8g-uc8dKemPnM,8055 +coremltools/converters/mil/mil/ops/tests/testing_utils.py,sha256=OGIFAcwN4bDaD0TmB1aKxXpnYkuSDOx8aV6rznn2qFc,5734 
+coremltools/converters/mil/mil/passes/__init__.py,sha256=3UdPk7iQnrIx-JULKELn6gY1ySWUULIniwe55ZDV9RY,1415 +coremltools/converters/mil/mil/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/graph_pass.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/pass_pipeline.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/pass_registry.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__init__.py,sha256=L_PMcjeb4I6OmDYQGt8pMhoce2THRcdxyat78rql4to,218 +coremltools/converters/mil/mil/passes/defs/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/lower_complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_activation.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_conv.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_linear.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_repeat_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/preprocess.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/quantization.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py,sha256=wzYdDR8g9nsgNt9JUKNk29qzEXHQc2ixuoeYDlq_k1g,714 +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/const_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/dead_code_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/dedup_op_and_var_names.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/fuse_reduce_mean.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/loop_invariant_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/noop_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/remove_redundant_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/remove_symbolic_reshape.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/topological_reorder.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py,sha256=o5C3nhcC-eMO0sdth9DyJSHf-epH1MEoVnpqgwtT8f0,3996 +coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py,sha256=SRMP4yoJcF9Sdu3tggXzMSzIIjZSbeIHyNPs4HuqdaY,2902 +coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py,sha256=rDNSVWUe5QYdS7idJqxffi14fKSUO1_dsmZhg5yBhBw,3833 +coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py,sha256=Aqxv_72E9uHOiIRqRa_BG78mNC5FB9V6o0un2zzAfXE,4434 +coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py,sha256=hRJU-ZwcGhkohY-tNTenp3cCm15LqN1MYy3MQq4BzzA,6930 
+coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py,sha256=v6MtGmmL7jCpkMZW9wqgTP0XHBccqyhQYgVRJH8VQ7E,7962 +coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py,sha256=kMkM2xIKGXnfQ2SX-f6cndUQTFuSC7q6L4zlwBFobdE,8399 +coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py,sha256=j0DssL2n-WNOJZrYujbc6ePje-lmMV7AR0P0hsauRsA,3848 +coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py,sha256=Ea-SSm0xD-mY4_57bVSd9dSWOT4PqX4jN7D9XuXEx78,7620 +coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py,sha256=oeM3BoqpQfRK9GHSUaiPS16Xl26qCMcwalx1c67IjwM,21096 +coremltools/converters/mil/mil/passes/defs/optimize_activation.py,sha256=1SqRyWwYgZKShx2gzLyIA5zIW3VUIyaXkPAYfFZ1WV8,24562 +coremltools/converters/mil/mil/passes/defs/optimize_conv.py,sha256=QoLhYc0L54Wuz11Mup9i-RrAMvZY0RjTgMnZViott6U,41827 +coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py,sha256=m95s6dplNvoMywA_fgynCuC8_HNk54e_YU3KpjuAD7A,11714 +coremltools/converters/mil/mil/passes/defs/optimize_linear.py,sha256=KKymLQzMwrLu-QvQ1VR3VXUlDFYlAxUgyDeKtWH4YyE,11330 +coremltools/converters/mil/mil/passes/defs/optimize_normalization.py,sha256=dTPHtbYtCS-SqZYwSugE5N8-EJb-W_SKRWnw68eX4co,34386 +coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py,sha256=0pk9H4mVZ5wd-FuuC4LeGq_sBoxlRM76UBV4r-6J6II,72694 +coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py,sha256=f_V2gGTJ_YhIbtbKqtiiHxaUJ7rWnXwcFdsV2hk3kRA,29777 +coremltools/converters/mil/mil/passes/defs/preprocess.py,sha256=eBrN5Q1Y-FN-zivwnmuGCPaM8TQc6OWHvvcTTB3ydG0,15254 +coremltools/converters/mil/mil/passes/defs/quantization.py,sha256=KK0eq3nMUOFazWTIQUSTLIVIhnDambQsofMA0OXQIDk,32105 +coremltools/converters/mil/mil/passes/graph_pass.py,sha256=tyrIY8pXk7FsF4m-9ML0qIdy8DnUaZs7hqKbdLyrdms,2605 +coremltools/converters/mil/mil/passes/helper.py,sha256=u-8DCYAbEkrhQwcs2D6PRMv18wqKtTahKgHxJkvJKGI,5761 +coremltools/converters/mil/mil/passes/pass_pipeline.py,sha256=bpOZyFG3CYjvn-aJUzd1Hujs3qPbvPlaZg9Fxiey-A4,15680 +coremltools/converters/mil/mil/passes/pass_registry.py,sha256=Hh8_orCulG-GmKWN4STn0UW8PZdhJdNbsoaHZovb0KQ,2275 +coremltools/converters/mil/mil/passes/tests/__init__.py,sha256=L_PMcjeb4I6OmDYQGt8pMhoce2THRcdxyat78rql4to,218 +coremltools/converters/mil/mil/passes/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_lower_complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_pass_pipeline.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_reduce_transposes_pass.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py,sha256=jqdSY6_DVaAEl1M9v_LtGEt_jaCU0zypqVcmqe94HZY,2082 +coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py,sha256=GmURas3N7XDTM-seKSF3KP_rWgoc7xaFQP6MBSIvWdk,5077 +coremltools/converters/mil/mil/passes/tests/test_passes.py,sha256=VV25MCOhes0y_ZJDZQDPd9ed-iFgMOKQiQoYOMXM4zI,271534 +coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py,sha256=jvBaMlpNMh8qp-3oOiIufEZ75sO-xLi5IyIA5kyGJHw,78845 +coremltools/converters/mil/mil/program.py,sha256=Nc_floSXfmC7f6gO8rmMZm7ey53F3UQaHG8MkMCW1A4,10745 +coremltools/converters/mil/mil/tests/__init__.py,sha256=hDZrITJ4jV2RaNfWRvkN7mI2mF4QCA5OiQ4MmCp6Gns,217 
+coremltools/converters/mil/mil/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_block.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_debug.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_programs.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_types.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/test_block.py,sha256=EPqbFOzCKf7-fSle65jHtMiUYyIyoVH8UVdhgYlNfdc,15736 +coremltools/converters/mil/mil/tests/test_debug.py,sha256=vqlRzqx88YHzgGM9F-FZw-5ZRE1chMFsZumS-Jwm6kI,10605 +coremltools/converters/mil/mil/tests/test_programs.py,sha256=oEb6no5KWuk9e4P4ja3Em0FB1GyzDUcsBaxFmKmlogI,13429 +coremltools/converters/mil/mil/tests/test_types.py,sha256=phOYAyvqsKoM5BQTdHCsQZ4XXUugyEsZIjdmLFhbhCc,1085 +coremltools/converters/mil/mil/types/__init__.py,sha256=Fqy2ofjkzm1iZ5viV9XGU9CjVhgFxeL459yId7XIKxY,1703 +coremltools/converters/mil/mil/types/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/annotate.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/get_type_info.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/global_methods.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/symbolic.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_bool.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_complex.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_dict.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_double.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_globals_pseudo_type.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_int.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_list.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_mapping.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_spec.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_str.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_tensor.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_tuple.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_unknown.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_void.cpython-310.pyc,, +coremltools/converters/mil/mil/types/annotate.py,sha256=3ZqR-mCpr2l1stnQlvGCDnY6H81o_th-hToFuNhZiu0,3411 +coremltools/converters/mil/mil/types/get_type_info.py,sha256=fM_y6Y5qvjUKthPkfE4HNdBaLHpI9ZJbIYwTgu2pHLY,2123 +coremltools/converters/mil/mil/types/global_methods.py,sha256=LgvWtHE26K6pNzDhVwWqxgTUuoLU_NBw1fwEgnNgxcM,1468 +coremltools/converters/mil/mil/types/symbolic.py,sha256=do8ZZ0tjmaxwXGDoiN98msijsdI9rnlqsr7oZ5Isr0A,2160 +coremltools/converters/mil/mil/types/type_bool.py,sha256=ddP0dQqFuq3gGBr8lcXqd0uji4KUnNEV1eidKfxe7Ls,1230 +coremltools/converters/mil/mil/types/type_complex.py,sha256=rFu8FATeQr2pjkQtIdq067BxnXhkobDvRPixnQeL5aU,5705 +coremltools/converters/mil/mil/types/type_dict.py,sha256=kQZ9m_sP3K1XXza94XghSKgjTQKIhjw_zJZXo0ysycU,1665 +coremltools/converters/mil/mil/types/type_double.py,sha256=_avobC6xf1Z7GIVZ0CEbTscGpUbYBbI41Yq7gbmU6qg,5119 +coremltools/converters/mil/mil/types/type_globals_pseudo_type.py,sha256=jEiaHuzPOMjmwuQWC0IeKRZSyqR-wG2LwPjU9VLGa_c,370 +coremltools/converters/mil/mil/types/type_int.py,sha256=W16HHH0u2UU5rzdRDUpZNDHEcDcqzYITWVQxgriVBOk,5439 
+coremltools/converters/mil/mil/types/type_list.py,sha256=m32goMJcjMDgfuMgaP719tEO58hdXOGoFIv7vusI308,1974 +coremltools/converters/mil/mil/types/type_mapping.py,sha256=4528XrAWkxcR9jL1ylSniYINCEWo0xeTjJqjXO-brZY,12886 +coremltools/converters/mil/mil/types/type_spec.py,sha256=_zm-Uf3g0z4VRr9o2bm7D1ND2uCJETtHLg0pJiumQHQ,2994 +coremltools/converters/mil/mil/types/type_str.py,sha256=ximDBJgjn35kVqeGm65IM3PSS6yjr98_Xu4udpSlXVQ,641 +coremltools/converters/mil/mil/types/type_tensor.py,sha256=gq6dycXjqYj85HzdrWwNnBdSuCQOEWKSEojuAqFrbvg,7524 +coremltools/converters/mil/mil/types/type_tuple.py,sha256=4Ma5Q6caZL7jigqPe9menbCU09nPC0RjKOIdLlAUBT0,1263 +coremltools/converters/mil/mil/types/type_unknown.py,sha256=Z8Ky3qY6oG_3AF1iw2Ec6AlWJXcETrSEM9LYkaHvLJs,468 +coremltools/converters/mil/mil/types/type_void.py,sha256=fOaI7s928HwZVqYIV_dmYIZBYv9DbYsD6hZEwVddzII,352 +coremltools/converters/mil/mil/var.py,sha256=EiEu9sjvJL8TqqzhDyww54UdDIoBIqSdGquZMOK33ek,12468 +coremltools/converters/mil/mil/visitors/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/visitors/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/visitors/__pycache__/dot_visitor.cpython-310.pyc,, +coremltools/converters/mil/mil/visitors/dot_visitor.py,sha256=zGPglQ2Aj5G07URoAf_uyvQuB-eOaxRAvg0eMs2JHtk,6100 +coremltools/converters/mil/test_flexible_shape_inputs.py,sha256=cNwy4Omd3iW87ONSxkvmeolkGc70JrynGFbov57Cmv8,6883 +coremltools/converters/mil/testing_reqs.py,sha256=xyXbk1lHscCKREjrjf0Y6wIMbWIvpfyVgLZS2K6hvKE,1839 +coremltools/converters/mil/testing_utils.py,sha256=f5vTHD-jGAedmDqWTVEWakKHq18LKRhGcd_XoCCODSA,20514 +coremltools/converters/sklearn/_LinearSVC.py,sha256=lCbB1yW0H8QBo1VT1HXzewdsSh0JJgXhA42UXize1NM,1500 +coremltools/converters/sklearn/_LinearSVR.py,sha256=DYLkoDJUy_KWPcTu0Zyz3zpwy1AYmvV1ocsI9rLD8jE,1405 +coremltools/converters/sklearn/_NuSVC.py,sha256=L5mj0xhEECZcYRs_Uc4z4d28Bf8o9k3Hl81iuWiu0KQ,1882 +coremltools/converters/sklearn/_NuSVR.py,sha256=Kuf2g6yI8txkwdwXZW5UNi1ZEpTmM6JUbSyYhSmtrh4,1458 +coremltools/converters/sklearn/_SVC.py,sha256=2EfcCNErT3F5ItFQvldQCKL6OeXBjhtS5jS26MaQri0,4021 +coremltools/converters/sklearn/_SVR.py,sha256=Nfgb5X2u5Q3im0UQ-_2MQD_rRO0TNKx18baI3crqdWI,2372 +coremltools/converters/sklearn/__init__.py,sha256=Pz9DffozoGDaptuA-3lFdNNXtbEB9DvmFs-y4ryRJ_g,294 +coremltools/converters/sklearn/__pycache__/_LinearSVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_LinearSVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_NuSVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_NuSVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_SVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_SVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_converter.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_converter_internal.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_decision_tree_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_decision_tree_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_dict_vectorizer.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_gradient_boosting_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_gradient_boosting_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_imputer.cpython-310.pyc,, 
+coremltools/converters/sklearn/__pycache__/_k_neighbors_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_linear_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_logistic_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_normalizer.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_one_hot_encoder.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_random_forest_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_random_forest_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_ridge_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_sklearn_util.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_standard_scaler.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_svm_common.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_tree_ensemble.cpython-310.pyc,, +coremltools/converters/sklearn/_converter.py,sha256=m8smydsWCW3Wn2j1kbhS-_cFqgF54rlqfq2eZOuBLlE,5912 +coremltools/converters/sklearn/_converter_internal.py,sha256=U1ysD14cmi4upr8EKaF1W5BBzhPMlsv6Si5utQOaOc4,12902 +coremltools/converters/sklearn/_decision_tree_classifier.py,sha256=BWsZDwFYTb6MC94uDuLPOUMIsuAt3VMcEEM-chc3Ois,1684 +coremltools/converters/sklearn/_decision_tree_regressor.py,sha256=Lzca6wEghmuvZgDhYttAHuH22P88CgtxV-NSV0hugCk,1442 +coremltools/converters/sklearn/_dict_vectorizer.py,sha256=RjrXEHrwJAAx3d7VPUOVslwkW5P7d1tdUBGEh5FTric,3581 +coremltools/converters/sklearn/_gradient_boosting_classifier.py,sha256=EbJF4ZnGazU826RGS3c5n6bUhbD0o1CaVlyk7IyGIMo,3369 +coremltools/converters/sklearn/_gradient_boosting_regressor.py,sha256=a4h6Zg2YE_vsFL1vZshICljxpst1s8WnufRRstT7NYU,2246 +coremltools/converters/sklearn/_imputer.py,sha256=pIniBOLcEafe4x5P-cedYjrnn7kVqu0AJ6XYfBQiDK8,3417 +coremltools/converters/sklearn/_k_neighbors_classifier.py,sha256=H5paYfKvFWd5npSbqNKBXh0Pbc2id7dwiAcle57qRGc,9525 +coremltools/converters/sklearn/_linear_regression.py,sha256=_jmLLDSOQPa_hsgAWRRwPDXxs9vR-QPy_ojWm6Bjsvw,2376 +coremltools/converters/sklearn/_logistic_regression.py,sha256=o1Vd04GAuEMYlk8iDikGNNY5KuLy1C4wz_v1K3V7maI,3120 +coremltools/converters/sklearn/_normalizer.py,sha256=vb9ms0eOIaA4rGj5BNUtEIMGbccp_S64CRy4s2UH_wU,2315 +coremltools/converters/sklearn/_one_hot_encoder.py,sha256=I5VouIP_lho_Xi-ZhjYUrQLtNO1dGEgd5t__3xoGcvE,9933 +coremltools/converters/sklearn/_random_forest_classifier.py,sha256=0XyMd4VOTeO1oul8MghEfLhEUdRAizFmGE6rrscw6pY,1916 +coremltools/converters/sklearn/_random_forest_regressor.py,sha256=TZ0QFwtmswyKK9yJ2ikA45CQJ8bPqkGabAdGDXIx4f4,1710 +coremltools/converters/sklearn/_ridge_regression.py,sha256=gzAvke90jYfT269or8dS5eQPLC31-nT1H3W0UQgoHn8,1422 +coremltools/converters/sklearn/_sklearn_util.py,sha256=LhOC-eDE7dZpB0wTFDRt_V2HQjc7KgIV-eEQ7kWP_fw,1032 +coremltools/converters/sklearn/_standard_scaler.py,sha256=WOjXmHAxgPYkGV3QuygqV0-LLgoTeTqIA8nvJB8Ed2k,2626 +coremltools/converters/sklearn/_svm_common.py,sha256=CXvMzz6o2aiT4D5H5_XZiiPdkCIzVfQ4q4OeYAOYGRM,1210 +coremltools/converters/sklearn/_tree_ensemble.py,sha256=DhTs5J_8TbsG2LO_URrsJbefB_mqt6DreE6ks5UAUTY,7783 +coremltools/converters/xgboost/__init__.py,sha256=b_yMPWvR26-dnC7cJZlavGS1gOmBkmJL2lBAYvjLvaM,243 +coremltools/converters/xgboost/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/xgboost/__pycache__/_tree.cpython-310.pyc,, +coremltools/converters/xgboost/__pycache__/_tree_ensemble.cpython-310.pyc,, 
+coremltools/converters/xgboost/_tree.py,sha256=8hWCttuSP_Ig1s36YE_KqCSVrowtWsxBiTxV0Ed39Mg,2760 +coremltools/converters/xgboost/_tree_ensemble.py,sha256=wRMABFYEwWRKvpkW4GUsoHVXj6NRwvKSi1Rkrm-CjDU,9537 +coremltools/libcoremlpython.so,sha256=d4nonEZiMH9jMAVlzzqAzRYd4YU16Q19eZYMjJbMuKs,1626896 +coremltools/libmilstoragepython.so,sha256=d2ZMpv2iF6EVcWOT0_DW-fmuvzEuu4E5tTbbfLwvNQY,213608 +coremltools/libmodelpackage.so,sha256=5doC_sP61YJ_TuAtV34x-bm3bmMqPDl4HHosj4Ej3Tc,305376 +coremltools/models/__init__.py,sha256=VdXZQNtZlSSF4gSe8OfXvvvfEpPnE_6VuF0Mo_-_5QY,1049 +coremltools/models/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/__pycache__/_deprecation.cpython-310.pyc,, +coremltools/models/__pycache__/_feature_management.cpython-310.pyc,, +coremltools/models/__pycache__/_interface_management.cpython-310.pyc,, +coremltools/models/__pycache__/array_feature_extractor.cpython-310.pyc,, +coremltools/models/__pycache__/datatypes.cpython-310.pyc,, +coremltools/models/__pycache__/feature_vectorizer.cpython-310.pyc,, +coremltools/models/__pycache__/model.cpython-310.pyc,, +coremltools/models/__pycache__/pipeline.cpython-310.pyc,, +coremltools/models/__pycache__/tree_ensemble.cpython-310.pyc,, +coremltools/models/__pycache__/utils.cpython-310.pyc,, +coremltools/models/_deprecation.py,sha256=t3tie2O6lyTUpfmB6ZTMRItmAT74yxIQ_3tO_R4zsws,1131 +coremltools/models/_feature_management.py,sha256=R-HmJX4i7R7Kr4KjlAzT4S35XP69T_gAZg6YMbB7Xd0,11762 +coremltools/models/_interface_management.py,sha256=OevbCPfC5x07eUiMbOmOjO2jrIRdZHbKTOWVMVCVSaI,7068 +coremltools/models/array_feature_extractor.py,sha256=IZDVYlt-3xPfAqsagb5alr5BLXgRxYL7wPB3jw4kfB8,2018 +coremltools/models/datatypes.py,sha256=SyZo_AiQr_DFpHivsjXL4Tw_5uVKwSHQOwf-wPQ9Pto,6761 +coremltools/models/feature_vectorizer.py,sha256=TahnTSreaUUdfnJ3Us28QR1HChPUlWubdNEa0YiBWow,3718 +coremltools/models/ml_program/__init__.py,sha256=oxlPVGZ3j0hBRHK01gavhSG5buxJepofMsluRjJ9ZCk,247 +coremltools/models/ml_program/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/ml_program/__pycache__/compression_utils.cpython-310.pyc,, +coremltools/models/ml_program/compression_utils.py,sha256=ZfKUJgh9xcA9cnPqnKdd_HzlmrJInSpujSXdiPBGr4k,25126 +coremltools/models/model.py,sha256=IYSNOWemf29N_x10Mf6L33vFeTFnG_k1ssBQGqKj5zw,26036 +coremltools/models/nearest_neighbors/__init__.py,sha256=gdGUD03yQ8QSXbiI5lQzQzCuOFSrenjX0Awszx89cSc,272 +coremltools/models/nearest_neighbors/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/nearest_neighbors/__pycache__/builder.cpython-310.pyc,, +coremltools/models/nearest_neighbors/builder.py,sha256=8d5naMN2BB7SsIKpCweqZhQitQYQIE-Cp1BFEO2U2_I,21314 +coremltools/models/neural_network/__init__.py,sha256=Pi2j7iOWOOv3tb1oMwry3iIuYI9Yar0XfVW4jpAzPrk,486 +coremltools/models/neural_network/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/builder.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/flexible_shape_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/optimization_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/printer.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/quantization_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/spec_inspection_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/update_optimizer_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/utils.cpython-310.pyc,, 
+coremltools/models/neural_network/builder.py,sha256=-Dlsymtrb_Q6TpShAmFbxpquT-TInG9YDNVY7zdLkao,337600 +coremltools/models/neural_network/flexible_shape_utils.py,sha256=B46ZaWbxBfNm4ZVLdJWsq7QqFi1aP7UyE3FyKoqGBZc,27323 +coremltools/models/neural_network/optimization_utils.py,sha256=KIRE1_EKzw5xgOZxuAZIWWYqitGgbgvO4PgpW-t1rhA,8194 +coremltools/models/neural_network/printer.py,sha256=TOfIgBgDp7xWUb6cA6udVEg03-by85C_w9Zwc0TIy3c,3748 +coremltools/models/neural_network/quantization_utils.py,sha256=YVYKpABtmom2LsN9COx6mDfem2dHNh8iLLzeOuGznY4,57685 +coremltools/models/neural_network/spec_inspection_utils.py,sha256=aEeoulN-oBEZvzfAXkM2vmPTJDobAEqXajKwR5vrcgA,10768 +coremltools/models/neural_network/update_optimizer_utils.py,sha256=qQZM0SjaHLZdVbth6kNcJOH5PUdIqiekMPKhish6nao,4775 +coremltools/models/neural_network/utils.py,sha256=Mq7lH7HLNTSu39aVAYuLj4VD7XqB76TzVdFWbWEGUk8,3967 +coremltools/models/pipeline.py,sha256=lLbm6IsZ7Lx1lpLoLdSGH2jwJJZ0MKfZwlNpv7rApKY,10916 +coremltools/models/tree_ensemble.py,sha256=fnnMaZBTUN79AQ27Y9MqUQo8DtaiBsHP3msYbKpqvzc,15764 +coremltools/models/utils.py,sha256=StUvsWc7thkPaMx8GPQ47zM4ArozER6NNEsOE5M8T4E,33597 +coremltools/proto/ArrayFeatureExtractor_pb2.py,sha256=n9QZfMjC8We5P-og9vRpYUMx2JlQXUrwxqODj61ikoo,2269 +coremltools/proto/AudioFeaturePrint_pb2.py,sha256=AveFTuEtE6soQiO0qKYVboVcU6sz2uEP4-1vtmscTXk,5197 +coremltools/proto/BayesianProbitRegressor_pb2.py,sha256=3862NhATie3vVRxR69S4SUoOH80UJrCHj2XfJc4uDVw,13073 +coremltools/proto/CategoricalMapping_pb2.py,sha256=v88lYib_p1kEM6kvJURtyxHQy89Ab6wcciw7TnLRWfU,5556 +coremltools/proto/ClassConfidenceThresholding_pb2.py,sha256=UeF1poavnXbfV4Sp6wcFGRzcvsqUkvNYxXZXua74SXI,2940 +coremltools/proto/CustomModel_pb2.py,sha256=XX4EwZBMIE-7HlFBIKYBuLRKa9H07uzsIyaqCk6femI,10557 +coremltools/proto/DataStructures_pb2.py,sha256=gy7-fsP8XCISGXQMbjHfWwSN3DiwmnYT6sxD9c8Mo1A,25856 +coremltools/proto/DictVectorizer_pb2.py,sha256=2ZnPXKbxdWy9PXpcuN8xnIkEsbeAOhqJPBsmz1EbyLs,3817 +coremltools/proto/FeatureTypes_pb2.py,sha256=GMIa8rmmyfm3tKS1tQQh807--FvjvYKV3WUmchBS0_g,38649 +coremltools/proto/FeatureVectorizer_pb2.py,sha256=T103AMfqJHZNCpNTKYW4O7CEGUSyNLRZOEXPcqN41og,4119 +coremltools/proto/GLMClassifier_pb2.py,sha256=VZ8IUgxc16YV-ixYBATdIW1pFhCOsMFahK99GN7eTVA,8780 +coremltools/proto/GLMRegressor_pb2.py,sha256=kHNL_QhJ1c0b9W9qtaVvzgNAhVeQ7gWn5HlzSaOKru8,5431 +coremltools/proto/Gazetteer_pb2.py,sha256=GEzYcai4VywOzKhyggMRgTcTULi6Kv7RTizbxTcaxlo,4337 +coremltools/proto/Identity_pb2.py,sha256=yLT0mbD-jLfiREQoZwqQWQr5R7vRXBC8lpcUgTx1S6s,1655 +coremltools/proto/Imputer_pb2.py,sha256=ud3XL7ea1C0_jePPNExH6X-ONtrrK-QoaMU3VeG3MHE,9310 +coremltools/proto/ItemSimilarityRecommender_pb2.py,sha256=oT_hrCPYTuI4-iCRLCD64JKvTuqkaLeik5dSBiCEQKE,11274 +coremltools/proto/LinkedModel_pb2.py,sha256=fh9o623qI2PT7rCqs9GZtWKG2Q96gmDiqDrJRtwkZ9A,5073 +coremltools/proto/MIL_pb2.py,sha256=P8Ykuz3H2-yteKySyjAbuxKIc5zr5QsZiIG4B8ShS48,83751 +coremltools/proto/Model_pb2.py,sha256=4zm7zLuL1thqU7zHwlCLGT_-ogsBdt5VtfaF6S_lDeQ,59302 +coremltools/proto/NamedParameters_pb2.py,sha256=L9sRHazQzNUsSg9E9AijBBsjCt0XryWus-xOVhPU9os,14471 +coremltools/proto/NearestNeighbors_pb2.py,sha256=0I7hB7wUBnKkGvpkdKH4N4LBBwWfDW59FB9qx-VPAPg,18991 +coremltools/proto/NeuralNetwork_pb2.py,sha256=e1SOqycUT7XWd-MJ13hhMKtTsI9m9oBFFvRxvEbCbvM,552750 +coremltools/proto/NonMaximumSuppression_pb2.py,sha256=QbK3qJpn_jD3bzM___wXzMBgdGwuNYiQw9WW5gBnsgs,10172 +coremltools/proto/Normalizer_pb2.py,sha256=c9ZjsaESTvzzfHB1rIokhwIYL7V5FjteAx9xZiVEPf0,3009 
+coremltools/proto/OneHotEncoder_pb2.py,sha256=EBuLvjKlGQV-8RvW92o5XiiJ87I625rsW3df5UdcJoc,5613 +coremltools/proto/Parameters_pb2.py,sha256=M4e4VNbF6qwft7kzXT4-qIwds9FwGpcM0GjimCpVEiw,8731 +coremltools/proto/SVM_pb2.py,sha256=eP1JHJzwAqNG4ZTsjydL0DGYLNyn0O20FfJXSkqy-Ik,29440 +coremltools/proto/Scaler_pb2.py,sha256=gl16Hx5vsmELjijDI3N0uB6AbfHrIv0jCUn8DhAvJO8,2397 +coremltools/proto/SoundAnalysisPreprocessing_pb2.py,sha256=qy-rjnqqCOZjen2QVUBn9PmVGXqs8Ls8uE_DSnLSbdQ,4090 +coremltools/proto/TextClassifier_pb2.py,sha256=YcsTT4e4UnxaAwNBsnlk4bNnLlH7EdB4RmEsKk_5Qe8,4464 +coremltools/proto/TreeEnsemble_pb2.py,sha256=TO3_Ix8-O8MMTkX_yUgvWoSPwkIyuiwD9Jmmnq1t_-8,20559 +coremltools/proto/VisionFeaturePrint_pb2.py,sha256=02WZADtAql6rvzXifW-6O6g72EtrU96VrYBZxzxnCdA,9188 +coremltools/proto/WordEmbedding_pb2.py,sha256=dCK32bm8W7Pn5UuTu7lBzCDjdysxPQLMQ0shWdA0cgo,3417 +coremltools/proto/WordTagger_pb2.py,sha256=hgRJyhmrGo9Bmm74uxqjRGCmoufpviBs0Y-DDJczH-g,6177 +coremltools/proto/__init__.py,sha256=Qp3qzHiz2yBHkv3ovvGcwliSoAsiRrL4GtR0TygN5k0,44 +coremltools/proto/__pycache__/ArrayFeatureExtractor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/AudioFeaturePrint_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/BayesianProbitRegressor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/CategoricalMapping_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/ClassConfidenceThresholding_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/CustomModel_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/DataStructures_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/DictVectorizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/FeatureTypes_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/FeatureVectorizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/GLMClassifier_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/GLMRegressor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Gazetteer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Identity_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Imputer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/ItemSimilarityRecommender_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/LinkedModel_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/MIL_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Model_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NamedParameters_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NearestNeighbors_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NeuralNetwork_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NonMaximumSuppression_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Normalizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/OneHotEncoder_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Parameters_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/SVM_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Scaler_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/SoundAnalysisPreprocessing_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/TextClassifier_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/TreeEnsemble_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/VisionFeaturePrint_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/WordEmbedding_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/WordTagger_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/test/__pycache__/__init__.cpython-310.pyc,, 
+coremltools/test/api/__init__.py,sha256=0v8QveTcIon5tuJ8MxBg0nR0YXo1dr1847H4W1-d62A,225 +coremltools/test/api/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/api/__pycache__/test_api_examples.cpython-310.pyc,, +coremltools/test/api/__pycache__/test_api_visibilities.cpython-310.pyc,, +coremltools/test/api/test_api_examples.py,sha256=_tFg2G4wvwOwi650eEdNB1PjXt79sPcC3gnAWhwgq0w,19747 +coremltools/test/api/test_api_visibilities.py,sha256=4hC_SmW5PnimDZ5khBU7X4jQRDkYQC-wFEsAYkeBltE,6967 +coremltools/test/blob/__init__.py,sha256=lnJqw4kPK2WsLMwTuKg6W3KQ7fsE9KDz8VkbdYwZGB0,225 +coremltools/test/blob/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/blob/__pycache__/test_weights.cpython-310.pyc,, +coremltools/test/blob/test_weights.py,sha256=JtynAC6byI2-CvCCUJzHkTN1kGQ2AGg2A3mwxbcOkSM,2604 +coremltools/test/ml_program/__init__.py,sha256=W3YJH2hWGWLcQ48vEYy5o2GcJgzS6rn6RogIBGMZ_Eo,214 +coremltools/test/ml_program/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/ml_program/__pycache__/test_compression.cpython-310.pyc,, +coremltools/test/ml_program/test_compression.py,sha256=dgHmBwgR0Gln9qCV9RkzATVa_0PtM-nw4rDwxEC3KNM,20484 +coremltools/test/modelpackage/__init__.py,sha256=lnJqw4kPK2WsLMwTuKg6W3KQ7fsE9KDz8VkbdYwZGB0,225 +coremltools/test/modelpackage/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/modelpackage/__pycache__/test_mlmodel.cpython-310.pyc,, +coremltools/test/modelpackage/__pycache__/test_modelpackage.cpython-310.pyc,, +coremltools/test/modelpackage/test_mlmodel.py,sha256=u9ITWH3xfE4npIgqPi6pIbpQAYiT3NF_IaXAItOWppQ,2137 +coremltools/test/modelpackage/test_modelpackage.py,sha256=z6fRzU_G9sFhWk3Uh1s_PxFWpMZzKQXdUuYA2m_EOiI,20790 +coremltools/test/neural_network/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/neural_network/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_custom_neural_nets.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_model.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_neural_networks.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_nn_builder.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_numpy_nn_layers.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_quantization.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_simple_nn_inference.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_tf_numeric.cpython-310.pyc,, +coremltools/test/neural_network/test_custom_neural_nets.py,sha256=OUACUnpr2-hzG9dHx-IZMEXAGw_4GTShO8xjS1cR8Rw,3321 +coremltools/test/neural_network/test_model.py,sha256=llY4nqxEmp9uLDTr7QymV9orpQm-meJXwzANX4jpjlo,22507 +coremltools/test/neural_network/test_neural_networks.py,sha256=cIrFyvfhVzJao1zZMvaNk8POr-M4ugyvbPemDUuqIkU,2061 +coremltools/test/neural_network/test_nn_builder.py,sha256=TrtngRAH6m3WGl4h1TU6-HP4GIteEhXInW0BE2AwJ6A,24047 +coremltools/test/neural_network/test_numpy_nn_layers.py,sha256=GoNLC9mDNVyRk8vWYo3kGS-9_ALr_jr0NOzfYUlV0js,280978 +coremltools/test/neural_network/test_quantization.py,sha256=JQaKJYO7QFz8hbP4gjl9IrBLthTXR-XY65GiQrGzV2Q,18696 +coremltools/test/neural_network/test_simple_nn_inference.py,sha256=e5mjXp1n9gtmNOYnkhFrCcBtNqK7jBWiYlWAYGkEEMI,1780 +coremltools/test/neural_network/test_tf_numeric.py,sha256=L9nl7z6tnXO_l2X7y_ttqqE3zUrpkoO5fQz3Eu0oF6c,19367 +coremltools/test/pipeline/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 
+coremltools/test/pipeline/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/pipeline/__pycache__/test_model_updatable.cpython-310.pyc,, +coremltools/test/pipeline/__pycache__/test_pipeline.cpython-310.pyc,, +coremltools/test/pipeline/test_model_updatable.py,sha256=eN_932HI1ngSZYKCVth4nrgXwCtffKra36sBHr_qSMg,28313 +coremltools/test/pipeline/test_pipeline.py,sha256=ldrHNNoDHTutYsbvewjt1-2dOSim1O6GcTNxfiIa5f0,9810 +coremltools/test/sklearn_tests/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/sklearn_tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_NuSVC.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_NuSVR.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_SVC.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_SVR.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_categorical_imputer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_composite_pipelines.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_dict_vectorizer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_feature_names.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_glm_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_imputer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_io_types.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_k_neighbors_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_linear_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_nearest_neighbors_builder.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_normalizer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_one_hot_encoder.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_classifier_numeric.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_regression_numeric.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_ridge_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_standard_scalar.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_utils.cpython-310.pyc,, +coremltools/test/sklearn_tests/test_NuSVC.py,sha256=Zf0QJcofmfHB9X8NdovCUcJKexPr9fGHKnZ0m5HNLok,11837 +coremltools/test/sklearn_tests/test_NuSVR.py,sha256=S58cT0v5_hfLrCCs7fAQqaxGnhBBeoOg3RpeUXq-Dww,7381 +coremltools/test/sklearn_tests/test_SVC.py,sha256=MYe9JUUkxRg8rvGbUHnB38b2rc3bT5YMGf2uzcD9aT0,14243 +coremltools/test/sklearn_tests/test_SVR.py,sha256=60n2wRXaX7tT-EI3qsfcfUVJJxsDoQoSk6k3ZKgymbQ,8747 +coremltools/test/sklearn_tests/test_categorical_imputer.py,sha256=_nVEHUKsMhL6Q8PG8cgl9_i7EzpknUFfOWRydXDiVt4,2599 +coremltools/test/sklearn_tests/test_composite_pipelines.py,sha256=QXfE_bwdPKnNffJdbFsR-YhEFh5HpT59DOIyQXaVV_k,3064 +coremltools/test/sklearn_tests/test_dict_vectorizer.py,sha256=b6x0sWfpsnesADVYkQYc1bRSHFYsUXLN0LdBiIsgm6E,3358 +coremltools/test/sklearn_tests/test_feature_names.py,sha256=IX-cWvErkBoaJiPHMZnQx1711gpbfs8MgCRMRLZ8nvM,1031 +coremltools/test/sklearn_tests/test_glm_classifier.py,sha256=Rv6n2zD6hyeFd0Sqk5xPHdKhWfhgx2DNOzbwTrRB27M,4400 
+coremltools/test/sklearn_tests/test_imputer.py,sha256=kokwW3tLOnRCa_PjJ9B3q_Z5WBOW5tgCzJ1YLeZpwoA,2545 +coremltools/test/sklearn_tests/test_io_types.py,sha256=qHq_6Zuz4hYMTVk0Oq_PCGg0k8YFg9YikMDLx_0dhC8,14446 +coremltools/test/sklearn_tests/test_k_neighbors_classifier.py,sha256=kUQPpEal6jnaTiLjgS5JIjQdf97IWdynpKdh78fytb8,11433 +coremltools/test/sklearn_tests/test_linear_regression.py,sha256=_dlpqZmOEOIf_VFLSO1OCtw-c5nGt5cRBAgwbnesUeg,5048 +coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py,sha256=oAuhQIvWSKhrJLxRfsswiSFm0EdmhNKjVHFJLn_oNyA,16089 +coremltools/test/sklearn_tests/test_normalizer.py,sha256=ufzr-6vOvOH9GJA7468MtwlsbvxvSril4hztuLWFZOk,1915 +coremltools/test/sklearn_tests/test_one_hot_encoder.py,sha256=kTZT3u0h3uf6ulHbvl8AW47m1YU249PUJBMqQlbcmbg,10354 +coremltools/test/sklearn_tests/test_random_forest_classifier.py,sha256=Ms_wdG4hFEt2CLE8AV3xP309TKZH6iwMRJLSCxvcaIw,6468 +coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py,sha256=_9c3KXkB9Zv_2rNrrOMN0NDSBSJZQFsynPxO_C_p33E,5055 +coremltools/test/sklearn_tests/test_random_forest_regression.py,sha256=Rqv51iL4lXjOM9NewQnMTcTekXO73LhlLLKKodMYcSE,3348 +coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py,sha256=7koorV7ORWMtYArqaNIoPFUJrGJ9v27Nq7TF5t-ARyA,3690 +coremltools/test/sklearn_tests/test_ridge_regression.py,sha256=o-dCWR7XEYLhUgBb2EshGp47CPkn7p4oa4fjZ4oXaQE,3884 +coremltools/test/sklearn_tests/test_standard_scalar.py,sha256=I0SMgMNiIdXqElNlzJiBArRKeXxW4w9hwJ7stHN7j-4,1960 +coremltools/test/sklearn_tests/test_utils.py,sha256=ju2vg0aN2SKn2Fm5EvmfVWR2GExqFIoI19-2SIe2NhI,1876 +coremltools/test/xgboost_tests/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/xgboost_tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_classifier.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_classifier_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_regression.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_regression_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_classifier.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_classifier_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_regression.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_regression_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/test_boosted_trees_classifier.py,sha256=eA7nO3AcZ_UAjnmM-l9oWwsFfHh4yJ7QhSb1tDpNaFw,12175 +coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py,sha256=kV8UPK5e7wmSftO1xE9D5pGvZaMXMKYet3lY1R65rYw,9737 +coremltools/test/xgboost_tests/test_boosted_trees_regression.py,sha256=MIxeTPUtKPpxRvQmmBIKAHmn3z69vGTVq7vOhiInbvI,7876 +coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py,sha256=xg7dqQtKBMFKXh4mGY6xh9d6x1sUwD4QDK0dxbNxxng,10997 +coremltools/test/xgboost_tests/test_decision_tree_classifier.py,sha256=IsP3qauvPvN62Nkw17Vv1it343lTJV3MwmLmG_spOpc,5295 +coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py,sha256=trzSdPcBuvS4RsWGbBis0W_VOXPM-AEm-_A0C6egAnc,5041 +coremltools/test/xgboost_tests/test_decision_tree_regression.py,sha256=r8vkwfJiMlNXP2HRh_tn9XVSeMQeWu7nzkB6l-RJvps,2999 +coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py,sha256=tJucU4K6vlGpdYp594JXjtd4ugSd7h2u9LyKmijpkSY,3755 
+coremltools/version.py,sha256=X-VlAgFopx9zcYo1V8MTQQ0OFrG68cMdesrft92v8lQ,257 diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL new file mode 100644 index 00000000..69da415f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: cp310-none-macosx_11_0_arm64 + diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt new file mode 100644 index 00000000..42075f79 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +coremltools diff --git a/__packaged__/coreml/.python_dependencies/coremltools/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/__init__.py new file mode 100644 index 00000000..d741975e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/__init__.py @@ -0,0 +1,114 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Core ML is an Apple framework which allows developers to simply and easily integrate machine +learning (ML) models into apps running on Apple devices (including iOS, watchOS, macOS, and +tvOS). Core ML introduces a public file format (.mlmodel) for a broad set of ML methods +including deep neural networks (both convolutional and recurrent), tree ensembles with boosting, +and generalized linear models. Models in this format can be directly integrated into apps +through Xcode. + +Coremltools is a python package for creating, examining, and testing models in the .mlpackage +and .mlmodel formats. In particular, it can be used to: + +* Convert existing models to .mlpackage or .mlmodel formats from popular machine learning tools including: + PyTorch, TensorFlow, scikit-learn, XGBoost and libsvm. +* Express models in .mlpackage and .mlmodel formats through a simple API. +* Make predictions with .mlpackage and .mlmodel files (on macOS). + +For more information: http://developer.apple.com/documentation/coreml +""" +from enum import Enum as _Enum +from logging import getLogger as _getLogger + +from .version import __version__ + +_logger = _getLogger(__name__) + +# This is the basic Core ML specification format understood by iOS 11.0 +SPECIFICATION_VERSION = 1 + +# New versions for iOS 11.2 features. Models which use these features should have these +# versions, but models created from this coremltools which do not use the features can +# still have the basic version. +_MINIMUM_CUSTOM_LAYER_SPEC_VERSION = 2 +_MINIMUM_FP16_SPEC_VERSION = 2 + +# New versions for iOS 12.0 features. Models which use these features should have these +# versions, but models created from this coremltools which do not use the features can +# still have the basic version. +_MINIMUM_CUSTOM_MODEL_SPEC_VERSION = 3 +_MINIMUM_QUANTIZED_MODEL_SPEC_VERSION = 3 +_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION = 3 + +# New versions for iOS 13.0. 
+_MINIMUM_NDARRAY_SPEC_VERSION = 4
+_MINIMUM_NEAREST_NEIGHBORS_SPEC_VERSION = 4
+_MINIMUM_LINKED_MODELS_SPEC_VERSION = 4
+_MINIMUM_UPDATABLE_SPEC_VERSION = 4
+_SPECIFICATION_VERSION_IOS_13 = 4
+
+# New versions for iOS 14.0
+_SPECIFICATION_VERSION_IOS_14 = 5
+
+# New versions for iOS 15.0
+_SPECIFICATION_VERSION_IOS_15 = 6
+
+# New versions for iOS 16.0
+_SPECIFICATION_VERSION_IOS_16 = 7
+
+class ComputeUnit(_Enum):
+    '''
+    The set of processing-unit configurations the model can use to make predictions.
+    '''
+    ALL = 1  # Allows the model to use all compute units available, including the neural engine
+    CPU_AND_GPU = 2  # Allows the model to use both the CPU and GPU, but not the neural engine
+    CPU_ONLY = 3  # Limit the model to only use the CPU
+    CPU_AND_NE = 4  # Allows the model to use both the CPU and neural engine, but not the GPU.
+                    # Only available on macOS >= 13.0
+
+# A dictionary that maps the CoreML model specification version to the MLProgram/MIL opset string
+_OPSET = {
+    _SPECIFICATION_VERSION_IOS_13: "CoreML3",
+    _SPECIFICATION_VERSION_IOS_14: "CoreML4",
+    _SPECIFICATION_VERSION_IOS_15: "CoreML5",
+    _SPECIFICATION_VERSION_IOS_16: "CoreML6",
+}
+
+# Default specification version for each backend
+_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK = _SPECIFICATION_VERSION_IOS_13
+_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM = _SPECIFICATION_VERSION_IOS_15
+
+
+# expose sub packages as directories
+from . import converters, models, proto
+
+# expose unified converter in coremltools package level
+from .converters import ClassifierConfig
+from .converters import ColorLayout as colorlayout
+from .converters import EnumeratedShapes, ImageType, RangeDim, Shape, TensorType, convert
+from .converters.mil._deployment_compatibility import AvailableTarget as target
+from .converters.mil.mil.passes.defs import quantization as transform
+from .converters.mil.mil.passes.pass_pipeline import PassPipeline
+from .converters.mil.mil.passes.defs.quantization import ComputePrecision as precision
+from .models import utils
+from .models.ml_program import compression_utils
+
+try:
+    from . import libcoremlpython
+except:
+    pass
+
+# Time profiling for functions in coremltools package, decorated with @profile
+import os as _os
+import sys as _sys
+
+from .converters._profile_utils import _profiler
+
+_ENABLE_PROFILING = _os.environ.get("ENABLE_PROFILING", False)
+
+if _ENABLE_PROFILING:
+    _sys.setprofile(_profiler)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py
new file mode 100644
index 00000000..9d59acbe
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+List of all external dependencies for this package. Imported as
+optional includes.
+"""
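+
+# Illustrative usage sketch (not upstream code): converter entry points
+# typically guard optional imports on the _HAS_* flags defined below, e.g.:
+#
+#     from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND
+#     if not _HAS_TORCH:
+#         raise RuntimeError(MSG_TORCH_NOT_FOUND)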
+import platform as _platform
+import re as _re
+import sys as _sys
+from distutils.version import StrictVersion as _StrictVersion
+
+from packaging import version
+
+from coremltools import _logger as logger
+
+
+def _get_version(version):
+    # matches versions such as 1.6.1, 1.6.1rc, and 1.6.1.dev
+    version_regex = r"^\d+\.\d+\.\d+"
+    version = _re.search(version_regex, str(version)).group(0)
+    return _StrictVersion(version)
+
+
+def _warn_if_above_max_supported_version(package_name, package_version, max_supported_version):
+    if _get_version(package_version) > _StrictVersion(max_supported_version):
+        logger.warning(
+            "%s version %s has not been tested with coremltools. You may run into unexpected errors. "
+            "%s %s is the most recent version that has been tested."
+            % (package_name, package_version, package_name, max_supported_version)
+        )
+
+
+# ---------------------------------------------------------------------------------------
+
+_IS_MACOS = _sys.platform == "darwin"
+_MACOS_VERSION = ()
+
+if _IS_MACOS:
+    ver_str = _platform.mac_ver()[0]
+    _MACOS_VERSION = tuple([int(v) for v in ver_str.split(".")])
+
+MSG_ONLY_MACOS = "Only supported on macOS"
+
+# ---------------------------------------------------------------------------------------
+_HAS_SKLEARN = True
+_SKLEARN_VERSION = None
+_SKLEARN_MIN_VERSION = "0.17"
+_SKLEARN_MAX_VERSION = "1.1.2"
+
+
+def __get_sklearn_version(version):
+    # matches versions such as 0.15b, 0.16bf, etc.
+    version_regex = r"^\d+\.\d+"
+    version = _re.search(version_regex, str(version)).group(0)
+    return _StrictVersion(version)
+
+
+try:
+    import sklearn
+
+    _SKLEARN_VERSION = __get_sklearn_version(sklearn.__version__)
+    if _SKLEARN_VERSION < _StrictVersion(
+        _SKLEARN_MIN_VERSION
+    ) or _SKLEARN_VERSION > _StrictVersion(_SKLEARN_MAX_VERSION):
+        _HAS_SKLEARN = False
+        logger.warning(
+            (
+                "scikit-learn version %s is not supported. Minimum required version: %s. "
+                "Maximum supported version: %s. "
+                "Disabling scikit-learn conversion API."
+            )
+            % (sklearn.__version__, _SKLEARN_MIN_VERSION, _SKLEARN_MAX_VERSION)
+        )
+except:
+    _HAS_SKLEARN = False
+MSG_SKLEARN_NOT_FOUND = "Sklearn not found."
+
+# ---------------------------------------------------------------------------------------
+_HAS_LIBSVM = True
+try:
+    from libsvm import svm
+except:
+    _HAS_LIBSVM = False
+MSG_LIBSVM_NOT_FOUND = "Libsvm not found."
+
+# ---------------------------------------------------------------------------------------
+_HAS_XGBOOST = True
+_XGBOOST_MAX_VERSION = "1.4.2"
+try:
+    import xgboost
+    _warn_if_above_max_supported_version("XGBoost", xgboost.__version__, _XGBOOST_MAX_VERSION)
+except:
+    _HAS_XGBOOST = False
+
+# ---------------------------------------------------------------------------------------
+_HAS_TF = True
+_HAS_TF_1 = False
+_HAS_TF_2 = False
+_TF_1_MIN_VERSION = "1.12.0"
+_TF_1_MAX_VERSION = "1.15.4"
+_TF_2_MIN_VERSION = "2.1.0"
+_TF_2_MAX_VERSION = "2.12.0"
+
+try:
+    import tensorflow
+
+    tf_ver = _get_version(tensorflow.__version__)
+
+    # TensorFlow
+    if tf_ver < _StrictVersion("2.0.0"):
+        _HAS_TF_1 = True
+
+    if tf_ver >= _StrictVersion("2.0.0"):
+        _HAS_TF_2 = True
+
+    if _HAS_TF_1:
+        if tf_ver < _StrictVersion(_TF_1_MIN_VERSION):
+            logger.warning(
+                (
+                    "TensorFlow version %s is not supported. Minimum required version: %s ."
+                    "TensorFlow conversion will be disabled."
+ ) + % (tensorflow.__version__, _TF_1_MIN_VERSION) + ) + _warn_if_above_max_supported_version("TensorFlow", tensorflow.__version__, _TF_1_MAX_VERSION) + elif _HAS_TF_2: + if tf_ver < _StrictVersion(_TF_2_MIN_VERSION): + logger.warning( + ( + "TensorFlow version %s is not supported. Minimum required version: %s ." + "TensorFlow conversion will be disabled." + ) + % (tensorflow.__version__, _TF_2_MIN_VERSION) + ) + _warn_if_above_max_supported_version("TensorFlow", tensorflow.__version__, _TF_2_MAX_VERSION) + +except: + _HAS_TF = False + _HAS_TF_1 = False + _HAS_TF_2 = False + +MSG_TF1_NOT_FOUND = "TensorFlow 1.x not found." +MSG_TF2_NOT_FOUND = "TensorFlow 2.x not found." + +# --------------------------------------------------------------------------------------- +_HAS_TORCH = True +_TORCH_MAX_VERSION = "2.0.0" +try: + import torch + _warn_if_above_max_supported_version("Torch", torch.__version__, _TORCH_MAX_VERSION) +except: + _HAS_TORCH = False +MSG_TORCH_NOT_FOUND = "PyTorch not found." + + +# --------------------------------------------------------------------------------------- +try: + import scipy +except: + _HAS_SCIPY = False +else: + _HAS_SCIPY = True + +# General utils +def version_ge(module, target_version): + """ + Example usage: + + >>> import torch # v1.5.0 + >>> version_ge(torch, '1.6.0') # False + """ + return version.parse(module.__version__) >= version.parse(target_version) + +def version_lt(module, target_version): + """See version_ge""" + return version.parse(module.__version__) < version.parse(target_version) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py new file mode 100644 index 00000000..bca49bbb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +# expose directories as imports +from . import libsvm +from . import sklearn +from . import xgboost +from ._converters_entry import convert +from .mil import ( + ClassifierConfig, + ColorLayout, + TensorType, + ImageType, + RangeDim, + Shape, + EnumeratedShapes, +) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py new file mode 100644 index 00000000..bc588de1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py @@ -0,0 +1,896 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import collections +import gc +import os +from typing import Optional, Text, Union + +from coremltools import ( + _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM, + _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK, +) +from coremltools import ComputeUnit as _ComputeUnit +from coremltools import __version__ as _ct_version +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil._deployment_compatibility import ( + AvailableTarget, + check_deployment_compatibility, +) +from coremltools.converters.mil.converter import mil_convert +from coremltools.converters.mil.input_types import ( + ClassifierConfig, + ImageType, + InputType, + TensorType, +) +from coremltools.converters.mil.mil import Program, types +from coremltools.converters.mil.mil.passes.defs.quantization import ComputePrecision as precision +from coremltools.converters.mil.mil.passes.defs.quantization import FP16ComputePrecision +from coremltools.converters.mil.mil.passes.graph_pass import PassOption as _PassOption +from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline +from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION +from coremltools.models.utils import _MLPACKAGE_EXTENSION + +if _HAS_TF_1: + import tensorflow as tf + + from coremltools.converters.mil.frontend.tensorflow.load import TF1Loader +if _HAS_TF_2: + import tensorflow as tf + + from coremltools.converters.mil.frontend.tensorflow2.load import TF2Loader + +if _HAS_TORCH: + import torch + + from coremltools.converters.mil.frontend.torch.load import \ + _torchscript_from_model as pytorch_load + + +@_profile +def convert( + model, + source="auto", + inputs=None, + outputs=None, + classifier_config=None, + minimum_deployment_target=None, + convert_to=None, + compute_precision=None, + skip_model_load=False, + compute_units=_ComputeUnit.ALL, + package_dir=None, + debug=False, + pass_pipeline: Optional[PassPipeline] = None, +): + """ + Convert a TensorFlow or PyTorch model to the Core ML model format as either + a neural network or an `ML program `_. + Some parameters and requirements differ for TensorFlow and PyTorch + conversions. + + Parameters + ---------- + + model : + TensorFlow 1, TensorFlow 2, or PyTorch model in one of the following + formats: + + * TensorFlow versions 1.x + + - Frozen `tf.Graph `_ + - Frozen graph (``.pb``) file path + - `tf.keras.Model `_ + - `HDF5 `_ file path (``.h5``) + - `SavedModel `_ directory path + + * TensorFlow versions 2.x + + - `tf.keras.Model `_ + - `HDF5 file path `_ (``.h5``) + - `SavedModel `_ directory path + - A `concrete function `_ + - A `GraphDef `_ + + * PyTorch + + - A `TorchScript `_ object + - Path to a ``.pt`` file + + source : str (optional) + + One of [``auto``, ``tensorflow``, ``pytorch``, ``milinternal``]. ``auto`` + determines the framework automatically for most cases. Raises + ``ValueError`` if it fails to determine the source framework. + + inputs : list of ``TensorType`` or ``ImageType`` + + * If you specify ``dtype`` with ``TensorType`` or ``ImageType``, it will + be applied to the input of the converted model. For example, the + following code snippet will produce a Core ML model with float 16 typed + inputs. + + .. 
sourcecode:: python
+
+            import coremltools as ct
+
+            mlmodel = ct.convert(
+                keras_model,
+                inputs=[ct.TensorType(dtype=np.float16)],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * The following code snippet will produce a Core ML model with the
+          ``GRAYSCALE_FLOAT16`` input image type:
+
+          .. sourcecode:: python
+
+            import coremltools as ct
+
+            # H: image height, W: image width
+            mlmodel = ct.convert(
+                torch_model,
+                inputs=[
+                    ct.ImageType(shape=(1, 1, H, W), color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
+                ],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * TensorFlow 1 and 2 (including tf.keras):
+
+          - The ``inputs`` parameter is optional. If not provided, the inputs
+            are placeholder nodes in the model (if the model is a frozen graph)
+            or function inputs (if the model is a ``tf.function``).
+          - If ``inputs`` is provided, it must be a flat list.
+          - The ``inputs`` must correspond to all or some of the placeholder nodes
+            in the TF model.
+          - If ``name`` is specified with ``TensorType`` or ``ImageType``, it
+            must correspond to a placeholder op in the TF graph. The input names
+            in the converted Core ML model can later be modified using the
+            ``ct.utils.rename_feature`` API.
+          - If ``dtype`` is not specified, it defaults to the ``dtype`` of the
+            inputs in the TF model.
+
+        * PyTorch:
+
+          - The ``inputs`` parameter is required.
+          - Number of elements in ``inputs`` must match the number of inputs
+            of the PyTorch model.
+          - ``inputs`` may be a nested list or tuple.
+          - ``TensorType`` and ``ImageType`` must have the ``shape`` specified.
+          - If the ``name`` argument is specified with ``TensorType`` or
+            ``ImageType``, the converted Core ML model will have inputs with
+            the same name.
+          - If ``dtype`` is missing, it defaults to float 32.
+
+    outputs : list of ``TensorType`` or ``ImageType`` (optional)
+
+        * If you specify ``dtype`` with ``TensorType`` or ``ImageType``,
+          it will be applied to the output of the converted model. For example,
+          to produce float 16 typed inputs and outputs:
+
+          .. sourcecode:: python
+
+              import coremltools as ct
+
+              mlmodel = ct.convert(
+                  keras_model,
+                  inputs=[ct.TensorType(dtype=np.float16)],
+                  outputs=[ct.TensorType(dtype=np.float16)],
+                  minimum_deployment_target=ct.target.macOS13,
+              )
+
+        * To produce image inputs and outputs:
+
+          .. sourcecode:: python
+
+              import coremltools as ct
+
+              # H: image height, W: image width
+              mlmodel = ct.convert(
+                  torch_model,
+                  inputs=[ct.ImageType(shape=(1, 3, H, W), color_layout=ct.colorlayout.RGB)],
+                  outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
+                  minimum_deployment_target=ct.target.macOS13,
+              )
+
+        * TensorFlow 1 and 2 (including tf.keras):
+
+          - If ``outputs`` is not specified, the converter infers outputs from
+            the sink nodes in the graph.
+          - If specified, the ``name`` with ``TensorType`` or ``ImageType``
+            must correspond to a node in the TF graph. In this case, the model
+            will be converted up to that node.
+
+        * PyTorch:
+
+          - If specified, the length of the list must match the number of
+            outputs returned by the PyTorch model.
+          - If ``name`` is specified, it is applied to the output names of the
+            converted Core ML model.
+
+    classifier_config : ClassifierConfig class (optional)
+        The configuration if the MLModel is intended to be a classifier.
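+
+        For example (an illustrative sketch; the class labels are placeholders):
+        ::
+            config = ct.ClassifierConfig(class_labels=["cat", "dog"])
+            mlmodel = ct.convert(source_model, classifier_config=config)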
+
+    minimum_deployment_target : coremltools.target enumeration (optional)
+        A member of the ``coremltools.target`` enum.
+        The value of this parameter determines the type of the model
+        representation produced by the converter. To learn about the differences
+        between neural networks and ML programs, see
+        `ML Programs `_.
+
+        - The converter produces a neural network (``neuralnetwork``) if:
+          ::
+             minimum_deployment_target <= coremltools.target.iOS14/
+                                          coremltools.target.macOS11/
+                                          coremltools.target.watchOS7/
+                                          coremltools.target.tvOS14
+
+        - The converter produces an ML program (``mlprogram``) if:
+          ::
+             minimum_deployment_target >= coremltools.target.iOS15/
+                                          coremltools.target.macOS12/
+                                          coremltools.target.watchOS8/
+                                          coremltools.target.tvOS15
+
+        - If neither the ``minimum_deployment_target`` nor the ``convert_to``
+          parameter is specified, the converter produces the neural network
+          model type with the lowest deployment target possible.
+        - If this parameter is specified and ``convert_to`` is also specified,
+          they must be compatible. The following are examples of invalid values:
+          ::
+              # Invalid:
+              convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
+              # Invalid:
+              convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14
+
+    convert_to : str (optional)
+        Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``].
+        The value of this parameter determines the type of the model
+        representation produced by the converter. To learn about the
+        differences between neural networks and ML programs, see
+        `ML Programs `_.
+
+        - ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``)
+          containing a NeuralNetwork proto, which is the original Core ML format.
+          The model saved from this returned object is executable either on
+          iOS13/macOS10.15/watchOS6/tvOS13 and newer, or on
+          iOS14/macOS11/watchOS7/tvOS14 and newer, depending on the layers used
+          in the model.
+        - ``'mlprogram'``: Returns an MLModel (``coremltools.models.MLModel``)
+          containing a MILSpec.Program proto, which is the Core ML program format.
+          The model saved from this returned object is executable on iOS15,
+          macOS12, watchOS8, and tvOS15.
+        - ``'milinternal'``: Returns an MIL program object
+          (``coremltools.converters.mil.Program``). An MIL program is primarily
+          used for debugging and inspection. It can be converted to an MLModel for
+          execution by using one of the following:
+          ::
+              ct.convert(mil_program, convert_to="neuralnetwork")
+              ct.convert(mil_program, convert_to="mlprogram")
+
+        - If neither the ``minimum_deployment_target`` nor the ``convert_to``
+          parameter is specified, the converter produces the neural network
+          model type with the lowest deployment target possible.
+
+    compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional)
+
+        Use this argument to control the storage precision of the tensors in the
+        ML program. Must be one of the following.
+
+        - ``coremltools.precision.FLOAT16`` enum: The following transform is
+          applied to produce a float 16 program; that is, a program in which all
+          the intermediate float tensors are of type float 16 (for ops that
+          support that type).
+          ::
+              coremltools.transform.FP16ComputePrecision(op_selector=
+                                                         lambda op: True)
+
+          The above transform iterates through all the ops, looking at each op's
+          inputs and outputs. If they are of type float 32, ``cast``
+          ops are injected to convert those tensors (also known as `vars`) to
+          type float 16.
+
+        - ``coremltools.precision.FLOAT32`` enum: No transform is applied.
+
+          The original float32 tensor dtype in the source model is preserved.
+          Opt into this option if the default converted model is displaying
+          numerical precision issues.
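+
+          For example, to request a float 32 program explicitly (an illustrative
+          sketch; ``source_model`` stands in for your model):
+          ::
+              model = ct.convert(
+                  source_model,
+                  compute_precision=ct.precision.FLOAT32,
+                  minimum_deployment_target=ct.target.iOS15,
+              )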
+
+        - ``coremltools.transform.FP16ComputePrecision(op_selector=...)``
+
+          Use this option to control which tensors are cast to float 16.
+          Before casting the inputs/outputs of any op from float32 to float 16,
+          the op_selector function is invoked on the op object. This function
+          must return a boolean value. By default it returns ``True`` for every op,
+          but you can customize this.
+
+          For example:
+          ::
+              coremltools.transform.FP16ComputePrecision(op_selector=
+                                                         lambda op: op.op_type != "linear")
+
+          The above casts all the float32 tensors to float 16, except
+          the input/output tensors to any ``linear`` op. See more examples
+          below.
+
+        - ``None``: The default
+
+          - When ``convert_to="mlprogram"``, the ``compute_precision`` parameter
+            defaults to ``coremltools.precision.FLOAT16``.
+          - When ``convert_to="neuralnetwork"``, the ``compute_precision`` parameter
+            needs to be ``None`` and has no meaning.
+          - For example, you can customize the float 16 precision transform to prevent
+            casting all the ``real_div`` ops in the program to float 16
+            precision:
+
+            .. sourcecode:: python
+
+                def skip_real_div_ops(op):
+                    if op.op_type == "real_div":
+                        return False
+                    return True
+
+
+                model = ct.convert(
+                    source_model,
+                    compute_precision=ct.transform.FP16ComputePrecision(op_selector=skip_real_div_ops),
+                    minimum_deployment_target=ct.target.iOS15,
+                )
+
+    skip_model_load : bool
+        Set to ``True`` to prevent coremltools from calling into the Core ML framework
+        to compile and load the model, post-conversion. In that case, the returned
+        model object cannot be used to make a prediction, but can be used to save
+        with ``model.save()``. This flag may be used to convert to a newer model type
+        on an older Mac, which may raise a runtime warning if done without
+        turning this flag on.
+
+        Example: Use this flag to suppress a runtime warning when converting to an
+        ML program model on macOS 11, since an ML program can only be compiled and
+        loaded from macOS12+.
+
+        Defaults to ``False``.
+
+    compute_units : coremltools.ComputeUnit
+
+        An enum with the following possible values.
+
+        - ``coremltools.ComputeUnit.ALL``: Use all compute units available, including the
+          neural engine.
+        - ``coremltools.ComputeUnit.CPU_ONLY``: Limit the model to use only the CPU.
+        - ``coremltools.ComputeUnit.CPU_AND_GPU``: Use both the CPU and GPU, but not the
+          neural engine.
+        - ``coremltools.ComputeUnit.CPU_AND_NE``: Use both the CPU and neural engine, but
+          not the GPU. Available only for macOS >= 13.0.
+
+    package_dir : str
+        Post conversion, the model is saved at a temporary location and
+        loaded to form the MLModel object ready for prediction.
+
+        * If ``package_dir`` is provided, the model will be saved at this location
+          rather than creating a temporary directory.
+        * If not ``None``, this must be a path to a directory with the extension
+          ``.mlpackage``.
+
+    debug : bool
+        This flag should generally be ``False`` except for debugging purposes.
+        Setting this flag to ``True`` produces the following behavior:
+
+        - For Torch conversion, it will print the list of supported and
+          unsupported ops found in the model if conversion fails due to an
+          unsupported op.
+        - For TensorFlow conversion, it will display extra logging
+          and visualizations.
+
+    pass_pipeline : PassPipeline
+        Manage graph passes. You can control which graph passes to run and the order of the
+        graph passes. You can also specify options for each pass.
See the details in the docstring of + PassPipeline (``coremltools/converters/mil/mil/passes/pass_pipeline.py``). + + * To avoid fusing the ``conv`` and ``batchnorm`` ops, skip the corresponding pass + as shown in the following example: + + .. sourcecode:: python + + pipeline = ct.PassPipeline() + pipeline.remove_passes({"common::fuse_conv_batchnorm"}) + ct.convert(model, pass_pipeline=pipeline) + + * To avoid folding too-large ``const`` ops that lead to a large model, set pass option + as shown in the following example: + + .. sourcecode:: python + + pipeline = ct.PassPipeline() + pipeline.set_options("common::const_elimination", {"skip_const_by_size": "1e6"}) + ct.convert(model, pass_pipeline=pipeline) + + Returns + ------- + + model : ``coremltools.models.MLModel`` or ``coremltools.converters.mil.Program`` + A Core ML MLModel object or MIL program object (see ``convert_to``). + + Examples + -------- + + TensorFlow 1, 2 (``model`` is a frozen graph): + + >>> with tf.Graph().as_default() as graph: + >>> x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input") + >>> y = tf.nn.relu(x, name="output") + + Automatically infer inputs and outputs: + + >>> mlmodel = ct.convert(graph) + >>> test_input = np.random.rand(1, 2, 3) - 0.5 + >>> results = mlmodel.predict({"input": test_input}) + >>> print(results['output']) + + TensorFlow 2 (``model`` is a tf.Keras model path): + + >>> x = tf.keras.Input(shape=(32,), name='input') + >>> y = tf.keras.layers.Dense(16, activation='softmax')(x) + >>> keras_model = tf.keras.Model(x, y) + + >>> keras_model.save(h5_path) + >>> mlmodel = ct.convert(h5_path) + + >>> test_input = np.random.rand(2, 32) + >>> results = mlmodel.predict({'input': test_input}) + >>> print(results['Identity']) + + PyTorch: + + >>> model = torchvision.models.mobilenet_v2() + >>> model.eval() + >>> example_input = torch.rand(1, 3, 256, 256) + >>> traced_model = torch.jit.trace(model, example_input) + + >>> input = ct.TensorType(name='input_name', shape=(1, 3, 256, 256)) + >>> mlmodel = ct.convert(traced_model, inputs=[input]) + >>> results = mlmodel.predict({"input": example_input.numpy()}) + >>> print(results['1651']) # 1651 is the node name given by PyTorch's JIT + + See `Conversion Options `_ for + more advanced options. + """ + _check_deployment_target(minimum_deployment_target) + outputs_as_strings, outputs_as_tensor_or_image_types = _validate_outputs_argument(outputs) + exact_source = _determine_source(model, source, + outputs_as_strings, + outputs_as_tensor_or_image_types, + outputs) + exact_target = _determine_target(convert_to, minimum_deployment_target) + _validate_conversion_arguments(model, exact_source, inputs, outputs_as_tensor_or_image_types, + classifier_config, compute_precision, + exact_target, minimum_deployment_target) + + if pass_pipeline is None: + pass_pipeline = PassPipeline() + if not _need_fp16_cast_pass(compute_precision, exact_target): + pass_pipeline.remove_passes({"common::add_fp16_cast"}) + if isinstance(compute_precision, FP16ComputePrecision): + # For backward compatibility with the `op_selector` param in FP16ComputePrecision. 
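+        # The selector is forwarded as an option to the "common::add_fp16_cast"
+        # pass, which invokes it on each op to decide whether that op's float 32
+        # tensors should be cast to float 16.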
+ pass_pipeline._pass_options["common::add_fp16_cast"] = [ + _PassOption(option_name="op_selector", option_val=compute_precision.op_selector) + ] + + if package_dir is not None: + _, ext = os.path.splitext(package_dir) + if ext != _MLPACKAGE_EXTENSION: + raise ValueError( + f"`package_dir` must have extension {_MLPACKAGE_EXTENSION} (not {ext})" + ) + + specification_version = minimum_deployment_target.value if minimum_deployment_target is not None else None + + if specification_version is None: + specification_version = _set_default_specification_version(exact_target) + + mlmodel = mil_convert( + model, + convert_from=exact_source, + convert_to=exact_target, + inputs=inputs, + outputs=outputs_as_tensor_or_image_types, # None or list[ct.ImageType/ct.TensorType] + classifier_config=classifier_config, + skip_model_load=skip_model_load, + compute_units=compute_units, + package_dir=package_dir, + debug=debug, + specification_version=specification_version, + main_pipeline=pass_pipeline, + ) + + if exact_target == 'milinternal': + return mlmodel # Returns the MIL program + + if minimum_deployment_target is not None: + check_deployment_compatibility( + spec=mlmodel.get_spec(), + representation=exact_target, + deployment_target=minimum_deployment_target, + ) + + gc.collect() + + mlmodel = _record_build_metadata(mlmodel, exact_source) + + return mlmodel + + +def _need_fp16_cast_pass( + compute_precision: Optional[Union[precision, FP16ComputePrecision]], convert_to: Text +) -> bool: + if convert_to not in ("mlprogram", "neuralnetwork", "milinternal", "milpython"): + raise NotImplementedError(f"Backend converter {convert_to} not implemented") + + if compute_precision is None: + return convert_to != "neuralnetwork" + elif compute_precision == precision.FLOAT32: + return False + elif compute_precision == precision.FLOAT16 or isinstance( + compute_precision, FP16ComputePrecision + ): + return True + else: + raise ValueError(f"Invalid value of the argument 'compute_precision': {compute_precision}") + + +def _set_default_specification_version(target): + if target == "neuralnetwork": + return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK + elif target == "mlprogram": + return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM + elif target in ("milinternal", "milpython"): + return None + else: + raise NotImplementedError("Backend converter {} not implemented".format(target)) + + +def _check_deployment_target(minimum_deployment_target): + if minimum_deployment_target is not None and not isinstance( + minimum_deployment_target, AvailableTarget + ): + msg = ( + "Unrecognized value of argument 'minimum_deployment_target': {}. " + "It needs to be a member of 'coremltools.target' enumeration. " + "For example, coremltools.target.iOS13" + ) + raise TypeError(msg.format(minimum_deployment_target)) + + +def _validate_outputs_argument(outputs): + """ + - validate properties that the "outputs" argument must satisfy, for instance, it should either be a list + of ct.ImageType/ct.TensorType or a list of strings, etc. 
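+      For example (illustrative names): outputs=["out1", "out2"] and
+      outputs=[ct.TensorType(name="out1"), ct.TensorType(name="out2")] are both
+      valid, while a mix such as outputs=["out1", ct.TensorType(name="out2")] is
+      rejected.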
+    - return : tuple
+      - (outputs_as_strings, outputs_as_tensor_or_image_types)
+      - outputs_as_strings: list[str]
+      - outputs_as_tensor_or_image_types : list[ct.ImageType] or list[ct.TensorType]
+    """
+    if outputs is None:
+        return None, None
+    else:
+        if not isinstance(outputs, list):
+            raise ValueError('"outputs" must be of type list')
+        if len(outputs) == 0:
+            return None, None
+        if not all(map(lambda t: isinstance(t, (ImageType, str, TensorType)), outputs)):
+            raise ValueError('Elements in "outputs" must be ct.TensorType or ct.ImageType or str')
+
+        msg_inconsistent_types = 'all elements of "outputs" must either be of type str ' \
+                                 'or of types ct.ImageType/ct.TensorType'
+        if isinstance(outputs[0], str):
+            # if one of the elements is a string, all elements must be strings
+            if not all([isinstance(t, str) for t in outputs]):
+                raise ValueError(msg_inconsistent_types)
+            return outputs, [TensorType(name=name) for name in outputs]
+
+        if isinstance(outputs[0], InputType):
+            if not all([isinstance(t, TensorType) or isinstance(t, ImageType) for t in outputs]):
+                raise ValueError(msg_inconsistent_types)
+            if any([t.shape is not None for t in outputs]):
+                msg = "The 'shape' argument must not be specified for the outputs, since it is " \
+                      "automatically inferred from the input shapes and the ops in the model"
+                raise ValueError(msg)
+            for out_ in outputs:
+                if isinstance(out_, TensorType):
+                    if out_.default_value is not None:
+                        raise ValueError(
+                            "The 'default_value' argument must not be specified for the outputs"
+                        )
+                if isinstance(out_, ImageType):
+                    if out_.scale != 1.0:
+                        raise ValueError("'scale' must be 1.0 for an output of ImageType")
+                    if not (out_.bias is None or out_.bias == 0.0 or out_.bias == [0.0, 0.0, 0.0]):
+                        raise ValueError("'bias' must be None or 0 for an output of ImageType")
+                    if out_.channel_first is not None:
+                        raise ValueError("'channel_first' must be None for an output of ImageType")
+            output_names = [t.name for t in outputs]
+            # verify that either all of the entries in output_names are None or none of them is None
+            msg_consistent_names = 'Either none or all the outputs must have the "name" argument specified'
+            if output_names[0] is None and not all([name is None for name in output_names]):
+                raise ValueError(msg_consistent_names)
+            if output_names[0] is not None and not all([name is not None for name in output_names]):
+                raise ValueError(msg_consistent_names)
+            if output_names[0] is not None:
+                if len(set(output_names)) != len(output_names):
+                    raise ValueError("Duplicate names provided in 'outputs'")
+            if output_names[0] is None:
+                return None, outputs
+            else:
+                return output_names, outputs
+
+
+def _validate_conversion_arguments(model,
+                                   exact_source,
+                                   inputs,
+                                   outputs,
+                                   classifier_config,
+                                   compute_precision,
+                                   convert_to,
+                                   minimum_deployment_target,
+                                   ):
+    """
+    Validate and process model, inputs, classifier_config based on
+    `exact_source` (which cannot be `auto`)
+    """
+
+    def raise_if_duplicated(input_list):
+        # Detect duplicated inputs
+        input_names = [t.name for t in input_list if t.name is not None]
+        dups = [
+            item
+            for item, count in collections.Counter(input_names).items()
+            if count > 1
+        ]
+        if len(dups) > 0:
+            raise ValueError("Duplicated inputs: {}".format(dups))
+
+    def _flatten_list(_inputs):
+        ret = []
+        for _input in _inputs:
+            if isinstance(_input, (list, tuple)):
+                ret.extend(_flatten_list(_input))
+            elif isinstance(_input, InputType):
+                ret.append(_input)
+            else:
+                raise ValueError(
InputType.".format( + type(_input) + ) + ) + return ret + + flat_inputs = None + if inputs is not None: + if not isinstance(inputs, list): + raise ValueError("`inputs` must be of type list") + + # get flattened inputs + flat_inputs = _flatten_list(inputs) + for t in flat_inputs: + if not isinstance(t, InputType): + raise ValueError("inputs must be a list of type ct.TensorType or ct.ImageType") + if t.dtype == types.fp16: + if not ( + minimum_deployment_target is not None + and minimum_deployment_target >= AvailableTarget.iOS16 + ): + raise TypeError( + "float16 dtype for inputs is only supported for deployment " + "target >= iOS16/macOS13/watchOS9/tvOS16" + ) + + if outputs is not None: + for t in outputs: + if t.dtype == types.fp16: + if not ( + minimum_deployment_target is not None + and minimum_deployment_target >= AvailableTarget.iOS16 + ): + raise TypeError( + "float16 dtype for outputs is only supported for deployment " + "target >= iOS16/macOS13/watchOS9/tvOS16" + ) + + if classifier_config is not None: + if not isinstance(classifier_config, ClassifierConfig): + raise ValueError("`classifier_config` must be of type ClassifierConfig") + + if convert_to.lower() == "neuralnetwork" and compute_precision is not None: + raise ValueError( + "compute_precision is only supported for mlprogram target and must be " + "None if target=='neuralnetwork'. Note that target may be implicitly set " + "depending on the minimum_deployment_target. See " + "minimum_deployment_target for more details." + ) + + if compute_precision is not None: + if compute_precision not in [precision.FLOAT32, precision.FLOAT16]: + if not isinstance(compute_precision, FP16ComputePrecision): + raise ValueError( + "'compute_precision' must be either coremltools.precision.FLOAT32 " + "or coremltools.precision.FLOAT16 or of type " + "coremltools.transform.FP16ComputePrecision()" + ) + + if exact_source in {"tensorflow", "tensorflow2"}: + if exact_source == "tensorflow" and not _HAS_TF_1: + raise ValueError( + 'Converter was called with source="tensorflow", but missing ' "tensorflow package" + ) + + if inputs is not None: + raise_if_duplicated(inputs) + + if inputs is not None and not all([isinstance(_input, InputType) for _input in inputs]): + raise ValueError("Input should be a list of TensorType or ImageType") + + elif exact_source == "pytorch": + if inputs is None: + raise ValueError('Expected argument for pytorch "inputs" not provided') + + raise_if_duplicated(flat_inputs) + if inputs is not None and not all( + [isinstance(_input, InputType) for _input in flat_inputs] + ): + raise ValueError( + "Input should be a list/tuple (or nested lists/tuples) of TensorType or ImageType" + ) + + elif exact_source == "milinternal": + if not isinstance(model, Program): + raise ValueError( + "Converter was asked to convert MIL input, but input is not a MIL " "program!" + ) + + +def _determine_source(model, source, + output_names, + outputs_as_tensor_or_image_types, + output_argument_as_specified_by_user): + """ + Infer source (which can be auto) to the precise framework. + """ + source = source.lower() + if source not in {"auto", "tensorflow", "pytorch", "milinternal"}: + raise ValueError( + f'Unrecognized value of argument "source": {source}. It must be one of ["auto", "tensorflow", "pytorch"].' 
+ ) + + # Determine tensorflow version + if source == "tensorflow" and _HAS_TF_2: + return "tensorflow2" + + if source != 'auto': + return source + + # Determine `auto` source + if source == "auto" and _HAS_TF_1: + try: + loader = TF1Loader(model, outputs=outputs_as_tensor_or_image_types) + loader._graph_def_from_model(output_names=output_names) + return "tensorflow" + except: + pass + + if source == "auto" and _HAS_TF_2: + try: + loader = TF2Loader(model, outputs=outputs_as_tensor_or_image_types) + loader._graph_def_from_model(output_names=output_names) + return "tensorflow2" + except: + pass + + if source == "auto" and _HAS_TORCH: + is_torch_load_successful = False + try: + pytorch_load(model) + is_torch_load_successful = True + except: + pass + if is_torch_load_successful: + # validate that the outputs passed by the user are of type ImageType/TensorType + if output_argument_as_specified_by_user is not None and not all( + [ + isinstance(t, TensorType) or isinstance(t, ImageType) + for t in output_argument_as_specified_by_user + ] + ): + raise ValueError( + '"outputs" must be a list of type ct.TensorType or ct.ImageType ' + "for pytorch conversion" + ) + return "pytorch" + + if source == "auto" and isinstance(model, Program): + return "milinternal" + + msg = ( + "Unable to determine the type of the model, i.e. the source framework. " + 'Please provide the value of argument "source", from one of ' + '["tensorflow", "pytorch", "milinternal"]. Note that model conversion requires the ' + "source package that generates the model. Please make sure you have " + "the appropriate version of source package installed. E.g., if you're " + "converting model originally trained with TensorFlow 1.14, make sure " + "you have `tensorflow==1.14` installed." + ) + raise ValueError(msg) + + +def _determine_target(convert_to, minimum_deployment_target): + """ + Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram`` + """ + if minimum_deployment_target is not None: + if convert_to == "mlprogram" and minimum_deployment_target < AvailableTarget.iOS15: + raise ValueError( + f"When 'convert_to' is {convert_to}, the minimum deployment target " + f"must be at least iOS15/macOS12/watchOS8/tvOS15" + ) + + if convert_to == "neuralnetwork" and minimum_deployment_target >= AvailableTarget.iOS15: + raise ValueError( + f"If minimum deployment target is iOS15/macOS12/watchOS8/tvOS15 or " + f"higher, then 'convert_to' cannot be {convert_to}. 
It must be " + f"'mlprogram'" + ) + + if convert_to is not None: + return convert_to + else: + if minimum_deployment_target is None: + return "neuralnetwork" + elif minimum_deployment_target <= AvailableTarget.iOS14: + return "neuralnetwork" + else: + return "mlprogram" + + +def _get_metadata_from_mlmodel(mlmodel): + # Copy from source mlmodel if metadata info exists + src_pkg_version = mlmodel.user_defined_metadata[_METADATA_SOURCE] + coremltools_version = mlmodel.user_defined_metadata[_METADATA_VERSION] + + src_pkg_version_list = src_pkg_version.split("==") + if len(src_pkg_version_list) == 0: + src_pkg, pkg_ver = None, None + elif len(src_pkg_version_list) == 1: + src_pkg, pkg_ver = src_pkg_version_list[0], "" + elif len(src_pkg_version_list) == 2: + src_pkg, pkg_ver = src_pkg_version_list + else: + raise AssertionError("Unable to parse src_pkg_version") + + build_info = { + "coremltools-version": _ct_version if not coremltools_version else coremltools_version + } + if src_pkg is not None and pkg_ver is not None: + build_info['coremltools-component-' + src_pkg] = str(pkg_ver) + + return build_info + + +def _record_build_metadata(mlmodel, exact_source): + # recording metadata: coremltools version, source framework and version + if exact_source in {"tensorflow", "tensorflow2"} and (_HAS_TF_1 or _HAS_TF_2): + src_pkg_version = "tensorflow=={0}".format(tf.__version__) + elif exact_source == "pytorch" and _HAS_TORCH: + src_pkg_version = "torch=={0}".format(torch.__version__) + elif exact_source == 'milinternal': + src_pkg_version = "milinternal" + else: + raise ValueError('Unsupported source {}'.format(exact_source)) + + mlmodel.user_defined_metadata[_METADATA_SOURCE] = src_pkg_version + mlmodel.user_defined_metadata[_METADATA_VERSION] = _ct_version + + build_info = _get_metadata_from_mlmodel(mlmodel) + + mlmodel._set_build_info_mil_attributes(build_info) + + return mlmodel diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py new file mode 100644 index 00000000..1f59c4a2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py @@ -0,0 +1,80 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import time + +_FUNCTION_PROFILE_REGISTRY = {} # str -> list (function name to time stack) +_ENABLE_PROFILING = os.environ.get("ENABLE_PROFILING", False) + + +def _profile(_f=None): + def func_wrapper(func): + f_name = func.__module__ + "." + func.__name__ + if f_name in _FUNCTION_PROFILE_REGISTRY: + raise ValueError( + "Function {} is already registered for profiling.".format(f_name) + ) + + _FUNCTION_PROFILE_REGISTRY[f_name] = [] + return func + + if _f is None: + return func_wrapper + return func_wrapper(_f) + + +_INITIAL_CALL = True + + +def _pr_color(skk, color="94m", end="\n"): + print("\033[{} {}\033[00m".format(color, skk), end=end) + + +def _profiler(frame, event, arg, indent=[0]): + if frame.f_globals.get("__name__", None) is None: + return + + package_name = __name__.split(".")[0] + + function_name = frame.f_globals["__name__"] + "." 
+ frame.f_code.co_name
+
+ profile_function = (
+ package_name in str(frame) and function_name in _FUNCTION_PROFILE_REGISTRY
+ )
+
+ if event == "call" and profile_function:
+ global _INITIAL_CALL
+ if _INITIAL_CALL:
+ _INITIAL_CALL = False
+ print("\n" * 2)
+
+ indent[0] += 3
+ _pr_color(
+ "{} call {} {}".format(
+ "=" * indent[0] + ">",
+ function_name.split(".")[-1],
+ " (" + ".".join(function_name.split(".")[2:-1]) + ")",
+ )
+ )
+ # time.clock() was removed in Python 3.8; use the monotonic perf counter
+ start_time = time.perf_counter()
+ _FUNCTION_PROFILE_REGISTRY[function_name].append(start_time)
+
+ elif event == "return" and profile_function:
+ duration = time.perf_counter() - _FUNCTION_PROFILE_REGISTRY[function_name][-1]
+ duration = round(duration, 4)
+ _pr_color(
+ "{} exit {} {} ".format(
+ "<" + "=" * indent[0],
+ function_name.split(".")[-1],
+ " (" + ".".join(function_name.split(".")[2:-1]) + ")",
+ ),
+ end="",
+ )
+ _pr_color(": Time spent {} seconds ".format(duration), color="91m")
+ indent[0] -= 3
+ _FUNCTION_PROFILE_REGISTRY[function_name].pop()
+
+ return _profiler
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py
new file mode 100644
index 00000000..3278bfce
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from ..._deps import _HAS_LIBSVM
+from . import _libsvm_converter, _libsvm_util
+
+if _HAS_LIBSVM:
+ from libsvm import svmutil as _svmutil
+
+
+def convert(
+ model,
+ input_names="input",
+ target_name="target",
+ probability="classProbability",
+ input_length="auto",
+):
+ """
+ Convert a LIBSVM model to Core ML format.
+
+ Parameters
+ ----------
+
+ model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
+ or string path to a saved model.
+
+ input_names: str | [str]
+ Name of the input column(s).
+ If a single string is used (the default), the input will be an array. The
+ length of the array is inferred from the model; this can be overridden
+ using the 'input_length' parameter.
+
+ target_name: str
+ Name of the output column.
+
+ probability: str
+ Name of the output class probability column.
+ Only used for C-SVC and nu-SVC models that have been trained with probability
+ estimates enabled.
+
+ input_length: int
+ Set the length of the input array.
+ This parameter should only be used when the input is an array (i.e. when
+ 'input_names' is a single string).
+
+ Returns
+ -------
+ model: MLModel
+ Model in Core ML format.
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ # Make a LIBSVM model
+ >>> from libsvm import svmutil
+ >>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
+ >>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
+
+ # Convert using default input and output names
+ >>> import coremltools
+ >>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
+
+ # Save the CoreML model to a file.
+ >>> coreml_model.save('./my_model.mlmodel')
+
+ # Convert using user-specified input names
+ >>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
+ """
+ if not (_HAS_LIBSVM):
+ raise RuntimeError("libsvm not found. libsvm conversion API is disabled.")
+
+ if isinstance(model, str):
+ libsvm_model = _libsvm_util.load_model(model)
+ else:
+ libsvm_model = model
+ if not isinstance(libsvm_model, _svmutil.svm_model):
+ raise TypeError(
+ "Expected 'model' of type '%s' (got %s)"
+ % (_svmutil.svm_model, type(libsvm_model))
+ )
+
+ if not isinstance(target_name, str):
+ raise TypeError(
+ "Expected 'target_name' of type str (got %s)" % type(target_name)
+ )
+
+ if input_length != "auto" and not isinstance(input_length, int):
+ raise TypeError(
+ "Expected 'input_length' of type int (got %s)" % type(input_length)
+ )
+
+ if input_length != "auto" and not isinstance(input_names, str):
+ raise ValueError(
+ "'input_length' should not be used unless the input is a single array."
+ )
+
+ if not isinstance(probability, str):
+ raise TypeError(
+ "Expected 'probability' of type str (got %s)" % type(probability)
+ )
+
+ return _libsvm_converter.convert(
+ libsvm_model, input_names, target_name, input_length, probability
+ )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py
new file mode 100644
index 00000000..8f476adc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import __version__ as ct_version
+from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_LIBSVM
+
+
+def _infer_min_num_features(model):
+ # find the largest index of all the support vectors
+ max_index = 0
+ for i in range(model.l):
+ j = 0
+ while model.SV[i][j].index != -1:
+ cur_last_index = model.SV[i][j].index
+ j += 1
+ if cur_last_index > max_index:
+ max_index = cur_last_index
+ return max_index
+
+
+def convert(libsvm_model, feature_names, target, input_length, probability):
+ """
+ Convert a support vector machine (SVM) model to the protobuf spec.
+
+ Supports:
+ * C-SVC
+ * nu-SVC
+ * Epsilon-SVR
+ * nu-SVR
+
+ Parameters
+ ----------
+ libsvm_model: svm_model
+ Libsvm representation of the model.
+
+ feature_names : [str] | str
+ Names of each of the features.
+
+ target: str
+ Name of the predicted class column.
+
+ input_length: int | 'auto'
+ Length of the input array, or 'auto' to infer it from the model.
+
+ probability: str
+ Name of the class probability column. Only used for C-SVC and nu-SVC.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_LIBSVM):
+ raise RuntimeError("libsvm not found. libsvm conversion API is disabled.")
+
+ from libsvm import svm as _svm
+
+ from ...models import MLModel
+ from ...proto import Model_pb2
+
+ svm_type_enum = libsvm_model.param.svm_type
+
+ # Create the spec
+ export_spec = Model_pb2.Model()
+ export_spec.specificationVersion = SPECIFICATION_VERSION
+
+ if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR:
+ svm = export_spec.supportVectorRegressor
+ else:
+ svm = export_spec.supportVectorClassifier
+
+ # Set the feature names
+ inferred_length = _infer_min_num_features(libsvm_model)
+ if isinstance(feature_names, str):
+ # input will be a single array
+ if input_length == "auto":
+ print(
+ "[WARNING] Inferring an input length of %d. 
If this is not correct," + " use the 'input_length' parameter." % inferred_length + ) + input_length = inferred_length + elif inferred_length > input_length: + raise ValueError( + "An input length of %d was given, but the model requires an" + " input of at least %d." % (input_length, inferred_length) + ) + + input = export_spec.description.input.add() + input.name = feature_names + input.type.multiArrayType.shape.append(input_length) + input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE + + else: + # input will be a series of doubles + if inferred_length > len(feature_names): + raise ValueError( + "%d feature names were given, but the model requires at" + " least %d features." % (len(feature_names), inferred_length) + ) + for cur_input_name in feature_names: + input = export_spec.description.input.add() + input.name = cur_input_name + input.type.doubleType.MergeFromString(b"") + + # Set target + output = export_spec.description.output.add() + output.name = target + + # Set the interface types + if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR: + export_spec.description.predictedFeatureName = target + output.type.doubleType.MergeFromString(b"") + nr_class = 2 + + elif svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC: + export_spec.description.predictedFeatureName = target + output.type.int64Type.MergeFromString(b"") + + nr_class = len(libsvm_model.get_labels()) + + for i in range(nr_class): + svm.numberOfSupportVectorsPerClass.append(libsvm_model.nSV[i]) + svm.int64ClassLabels.vector.append(libsvm_model.label[i]) + + if probability and bool(libsvm_model.probA): + output = export_spec.description.output.add() + output.name = probability + output.type.dictionaryType.MergeFromString(b"") + output.type.dictionaryType.int64KeyType.MergeFromString(b"") + export_spec.description.predictedProbabilitiesName = probability + + else: + raise ValueError( + "Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR" + ) + + if libsvm_model.param.kernel_type == _svm.LINEAR: + svm.kernel.linearKernel.MergeFromString( + b"" + ) # Hack to set kernel to an empty type + elif libsvm_model.param.kernel_type == _svm.RBF: + svm.kernel.rbfKernel.gamma = libsvm_model.param.gamma + elif libsvm_model.param.kernel_type == _svm.POLY: + svm.kernel.polyKernel.degree = libsvm_model.param.degree + svm.kernel.polyKernel.c = libsvm_model.param.coef0 + svm.kernel.polyKernel.gamma = libsvm_model.param.gamma + elif libsvm_model.param.kernel_type == _svm.SIGMOID: + svm.kernel.sigmoidKernel.c = libsvm_model.param.coef0 + svm.kernel.sigmoidKernel.gamma = libsvm_model.param.gamma + else: + raise ValueError( + "Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid." 
+ ) + + # set rho + # also set probA/ProbB only for SVC + if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC: + num_class_pairs = nr_class * (nr_class - 1) // 2 + for i in range(num_class_pairs): + svm.rho.append(libsvm_model.rho[i]) + if bool(libsvm_model.probA) and bool(libsvm_model.probB): + for i in range(num_class_pairs): + svm.probA.append(libsvm_model.probA[i]) + svm.probB.append(libsvm_model.probB[i]) + else: + svm.rho = libsvm_model.rho[0] + + # set coefficents + if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC: + for _ in range(nr_class - 1): + svm.coefficients.add() + for i in range(libsvm_model.l): + for j in range(nr_class - 1): + svm.coefficients[j].alpha.append(libsvm_model.sv_coef[j][i]) + else: + for i in range(libsvm_model.l): + svm.coefficients.alpha.append(libsvm_model.sv_coef[0][i]) + + # set support vectors + for i in range(libsvm_model.l): + j = 0 + cur_support_vector = svm.sparseSupportVectors.vectors.add() + while libsvm_model.SV[i][j].index != -1: + cur_node = cur_support_vector.nodes.add() + cur_node.index = libsvm_model.SV[i][j].index + cur_node.value = libsvm_model.SV[i][j].value + j += 1 + + model = MLModel(export_spec) + + from libsvm import __version__ as libsvm_version + + libsvm_version = "libsvm=={0}".format(libsvm_version) + model.user_defined_metadata[_METADATA_VERSION] = ct_version + model.user_defined_metadata[_METADATA_SOURCE] = libsvm_version + + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py new file mode 100644 index 00000000..fb75c05e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_LIBSVM + + +def load_model(model_path): + """Load a libsvm model from a path on disk. + + This currently supports: + * C-SVC + * NU-SVC + * Epsilon-SVR + * NU-SVR + + Parameters + ---------- + model_path: str + Path on disk where the libsvm model representation is. + + Returns + ------- + model: libsvm_model + A model of the libsvm format. + """ + if not (_HAS_LIBSVM): + raise RuntimeError("libsvm not found. libsvm conversion API is disabled.") + + import os + + from svmutil import svm_load_model # From libsvm + + if not os.path.exists(model_path): + raise IOError("Expected a valid file path. %s does not exist" % model_path) + return svm_load_model(model_path) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py new file mode 100644 index 00000000..64a17d12 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from .mil import (SPACES, SUPPORT_FLOAT_TYPES, SUPPORT_INT_TYPES, Block, + Builder, DefaultInputs, Function, InputSpec, InternalVar, + ListInputType, ListVar, Operation, Placeholder, Program, + Symbol, TupleInputType, Var, builder, curr_block, + get_existing_symbol, get_new_symbol, get_new_variadic_symbol, + mil_list, register_op) +from .input_types import (ClassifierConfig, ColorLayout, EnumeratedShapes, + ImageType, InputType, RangeDim, Shape, TensorType) +from .frontend.tensorflow.tf_op_registry import register_tf_op +from .frontend.torch import register_torch_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py new file mode 100644 index 00000000..e3a8f498 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py @@ -0,0 +1,165 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from enum import IntEnum + +from coremltools import (_SPECIFICATION_VERSION_IOS_13, + _SPECIFICATION_VERSION_IOS_14, + _SPECIFICATION_VERSION_IOS_15, + _SPECIFICATION_VERSION_IOS_16) + + +class AvailableTarget(IntEnum): + # iOS versions + iOS13 = _SPECIFICATION_VERSION_IOS_13 + iOS14 = _SPECIFICATION_VERSION_IOS_14 + iOS15 = _SPECIFICATION_VERSION_IOS_15 + iOS16 = _SPECIFICATION_VERSION_IOS_16 + + # macOS versions (aliases of iOS versions) + macOS15 = _SPECIFICATION_VERSION_IOS_13 + macOS16 = _SPECIFICATION_VERSION_IOS_14 + macOS10_15 = _SPECIFICATION_VERSION_IOS_13 + macOS10_16 = _SPECIFICATION_VERSION_IOS_14 + macOS11 = _SPECIFICATION_VERSION_IOS_14 + macOS12 = _SPECIFICATION_VERSION_IOS_15 + macOS13 = _SPECIFICATION_VERSION_IOS_16 + + # watchOS versions (aliases of iOS versions) + watchOS6 = _SPECIFICATION_VERSION_IOS_13 + watchOS7 = _SPECIFICATION_VERSION_IOS_14 + watchOS8 = _SPECIFICATION_VERSION_IOS_15 + watchOS9 = _SPECIFICATION_VERSION_IOS_16 + + # tvOS versions (aliases of iOS versions) + tvOS13 = _SPECIFICATION_VERSION_IOS_13 + tvOS14 = _SPECIFICATION_VERSION_IOS_14 + tvOS15 = _SPECIFICATION_VERSION_IOS_15 + tvOS16 = _SPECIFICATION_VERSION_IOS_16 + + # customized __str__ + def __str__(self): + original_str = super().__str__() + new_str = original_str.replace(type(self).__name__, "coremltools.target") + return new_str + + +_get_features_associated_with = {} + + +def register_with(name): + def decorator(func): + if name not in _get_features_associated_with: + _get_features_associated_with[name] = func + else: + raise ValueError("Function is already registered with {}".format(name)) + return func + + return decorator + + +@register_with(AvailableTarget.iOS14) +def iOS14Features(spec): + features_list = [] + + if spec.WhichOneof("Type") == "neuralNetwork": + nn_spec = spec.neuralNetwork + elif spec.WhichOneof("Type") in "neuralNetworkClassifier": + nn_spec = spec.neuralNetworkClassifier + elif spec.WhichOneof("Type") in "neuralNetworkRegressor": + nn_spec = spec.neuralNetworkRegressor + else: + raise ValueError("Invalid neural network specification for the model") + + # Non-zero default optional values + for idx, input in enumerate(spec.description.input): 
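+ # Track the largest default value declared across the optional input's
+ # numeric dtypes; a non-zero default requires the iOS14 spec version.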
+ value = 0
+ if input.type.isOptional:
+ value = max(value, input.type.multiArrayType.floatDefaultValue)
+ value = max(value, input.type.multiArrayType.doubleDefaultValue)
+ value = max(value, input.type.multiArrayType.intDefaultValue)
+
+ if value != 0:
+ msg = "Support of non-zero default optional values for inputs."
+ features_list.append(msg)
+ break
+
+ # Layers or modifications introduced in iOS14
+ new_layers = [
+ "oneHot",
+ "cumSum",
+ "clampedReLU",
+ "argSort",
+ "pooling3d",
+ "convolution3d",
+ "globalPooling3d",
+ ]
+ for layer in nn_spec.layers:
+ layer_type = layer.WhichOneof("layer")
+
+ msg = ""
+
+ if layer_type in new_layers:
+ msg = "{} {}".format(layer_type.capitalize(), "operation")
+
+ if layer_type == "tile" and len(layer.input) == 2:
+ msg = "Dynamic Tile operation"
+
+ if layer_type == "upsample" and layer.upsample.linearUpsampleMode in [1, 2]:
+ msg = "Upsample operation with Align Corners mode"
+
+ if layer_type == "reorganizeData" and layer.reorganizeData.mode == 2:
+ msg = "Pixel Shuffle operation"
+
+ if layer_type == "sliceDynamic" and layer.sliceDynamic.squeezeMasks:
+ msg = "Squeeze mask for dynamic slice operation"
+
+ if layer_type == "sliceStatic" and layer.sliceStatic.squeezeMasks:
+ msg = "Squeeze mask for static slice operation"
+
+ if layer_type == "concatND" and layer.concatND.interleave:
+ msg = "Concat layer with interleave operation"
+
+ if msg != "" and (msg not in features_list):
+ features_list.append(msg)
+
+ return features_list
+
+
+def check_deployment_compatibility(spec, representation, deployment_target):
+
+ if not isinstance(deployment_target, AvailableTarget):
+ raise TypeError(
+ "Argument for deployment_target must be an enumeration from Enum class AvailableTarget"
+ )
+
+ for any_target in AvailableTarget:
+
+ if any_target > deployment_target and any_target in _get_features_associated_with:
+ missing_features = _get_features_associated_with[any_target](spec)
+
+ if missing_features:
+ msg = (
+ "The provided minimum deployment target requires the model to be of version {}, but the "
+ "converted model uses the following features, which are only available from version {} "
+ "onwards. Please use a higher minimum deployment target to convert. \n ".format(
+ deployment_target.value, any_target.value
+ )
+ )
+
+ for i, feature in enumerate(missing_features):
+ msg += " {}. {}\n".format(i + 1, feature)
+
+ raise ValueError(msg)
+
+ # Fallback: raise even when the specific feature behind the spec version bump cannot be identified
+ if spec.specificationVersion > deployment_target.value:
+ msg = (
+ "The provided deployment target requires the model to be of version {}, but the converted "
+ "model has version {}, which is only suitable for later releases".format(
+ deployment_target.value, spec.specificationVersion,
+ )
+ )
+ raise ValueError(msg)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py new file mode 100644 index 00000000..8a75c904 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.input_types import ColorLayout +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer +from coremltools.proto import FeatureTypes_pb2 as ft + + +def _get_probability_var_for_classifier(prog, classifier_config): + ''' + Return the var which will be used to construct the dictionary for the classifier. + :param prog: mil program + :param classifier_config: an instance of coremltools.ClassifierConfig class + :return: var + ''' + block = prog.functions["main"] + probability_var = None + if classifier_config.predicted_probabilities_output is None \ + or classifier_config.predicted_probabilities_output == "": + # user has not indicated which tensor in the program to use as probabilities + # (i.e which tensor to link to the classifier output) + # in this case, attach the last non const op to the classify op + for op in reversed(block.operations): + if op.op_type != 'const' and len(op.outputs) == 1: + probability_var = op.outputs[0] + break + if probability_var is None: + raise ValueError("Unable to determine the tensor in the graph " + "that corresponds to the probabilities for the classifier output") + else: + # user has indicated which tensor in the program to use as probabilities + # (i.e which tensor to link to the classifier output) + # Verify that it corresponds to a var produced in the program + predicted_probabilities_output = NameSanitizer().sanitize_name(classifier_config.predicted_probabilities_output) + for op in block.operations: + for out in op.outputs: + if out.name == predicted_probabilities_output: + probability_var = out + break + if probability_var is None: + msg = "'predicted_probabilities_output', '{}', provided in 'ClassifierConfig', does not exist in the MIL program." + raise ValueError(msg.format(predicted_probabilities_output)) + return probability_var + + +def _get_colorspace_enum(color_layout): + if color_layout == ColorLayout.GRAYSCALE: + return ft.ImageFeatureType.ColorSpace.GRAYSCALE + elif color_layout == ColorLayout.GRAYSCALE_FLOAT16: + return ft.ImageFeatureType.ColorSpace.GRAYSCALE_FLOAT16 + elif color_layout == ColorLayout.BGR: + return ft.ImageFeatureType.ColorSpace.BGR + else: + return ft.ImageFeatureType.ColorSpace.RGB + +def _validate_image_input_output_shapes(color_layout, shape, name, is_input=True): + io_str = "input" if is_input else "output" + if len(shape) != 4: + raise ValueError("Image {}, '{}', must have rank 4. Instead it has rank {}". + format(io_str, name, len(shape))) + if color_layout in (ColorLayout.BGR, ColorLayout.RGB): + if shape[1] != 3 or shape[0] != 1: + raise ValueError("Shape of the RGB/BGR image {}, '{}', must be of kind (1, 3, H, W), " + "i.e., first two dimensions must be (1, 3), instead they are: {}". 
+ format(io_str, name, shape[:2])) + elif color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + if shape[1] != 1 or shape[0] != 1: + raise ValueError("Shape of the Grayscale image {}, '{}', must be of kind (1, 1, H, W), " + "i.e., first two dimensions must be (1, 1), instead they are: {}". + format(io_str, name, shape[:2])) + else: + raise KeyError("Unrecognized color_layout {}".format(color_layout)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py new file mode 100644 index 00000000..b03708cd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py @@ -0,0 +1,329 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +import coremltools.proto.FeatureTypes_pb2 as ft +import coremltools.proto.MIL_pb2 as pm +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import (builtin_to_proto_types, + builtin_to_string, + numpy_type_to_builtin_type, + type_to_builtin_type) +from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type +from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME + + +def create_valuetype_scalar(data_type): + """ + Return pm.ValueType with DataType set + """ + v_type = pm.ValueType() + update_tensortype(v_type.tensorType, (), data_type) + return v_type + + +def update_listtype(l_type, length, elem_shape, dtype): + """ + Update in-place of l_type (ListType) to length and type. + """ + + elem_type = create_valuetype_tensor(elem_shape, dtype) + l_type.type.CopyFrom(elem_type) + + l_dim = l_type.length + set_proto_dim(l_dim, length) + +def create_valuetype_list(length, elem_shape, dtype): + """ + Return pm.ValueType with List (ListType) set. + length: length of list (int) + """ + v_type = pm.ValueType() + update_listtype(v_type.listType, length, elem_shape, dtype) + return v_type + +def create_valuetype_dict(key_type, value_type): + """ + Return pm.ValueType with dict (dictionaryType) set + """ + v_type = pm.ValueType() + v_type.dictionaryType.keyType.CopyFrom(types_to_proto(key_type)) + v_type.dictionaryType.valueType.CopyFrom(types_to_proto(value_type)) + return v_type + + +def create_valuetype_tensor(shape, data_type): + """ + Return pm.ValueType with tensor (TensorType) set. 
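+
+ For example (an illustrative call; "pm" here is coremltools.proto.MIL_pb2):
+ create_valuetype_tensor((2, 3), pm.DataType.FLOAT32)
+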
+ shape: list of ints + """ + v_type = pm.ValueType() + update_tensortype(v_type.tensorType, shape, data_type) + return v_type + + +def set_proto_dim(proto_dim, dim): + if isinstance(dim, (int, np.integer)): + proto_dim.constant.size = dim + else: + dim_str = str(dim) + if len(dim_str) > 0: + if dim_str[0] == "*" or (len(dim_str) >= 3 and dim_str[0:3] == "..."): + proto_dim.unknown.variadic = True + return + proto_dim.unknown.variadic = False + + +def update_tensortype(t_type, shape, data_type): + """ + Update in-place of t_type (TensorType) to shape and data_type. + """ + t_type.dataType = data_type + t_type.rank = len(shape) + t_type.ClearField("dimensions") + for s in shape: + t_dim = t_type.dimensions.add() + set_proto_dim(t_dim, s) + +def _tensor_field_by_type(tensor_val, builtin_type): + if builtin_type == types.bool: + return tensor_val.bools.values + elif types.is_int(builtin_type): + if (builtin_type == types.int64 or builtin_type == types.uint64): + return tensor_val.longInts.values + if builtin_type in (types.int8, types.uint8, types.uint32): + return tensor_val.bytes.values + return tensor_val.ints.values + elif types.is_float(builtin_type): + if (builtin_type == types.fp64): + return tensor_val.doubles.values + elif (builtin_type == types.fp32): + return tensor_val.floats.values + elif (builtin_type == types.fp16): + return tensor_val.bytes.values + else: + raise TypeError( + "Unsupported float dtype for MIL proto serialization: {}".format(builtin_to_string(builtin_type))) + elif builtin_type == types.str: + return tensor_val.strings.values + else: + raise NotImplementedError("Unimplemented tensor type for: " + str(builtin_type)) + +def _set_empty_tensor_field_by_type(tensor_val, builtin_type): + if builtin_type == types.bool: + tensor_val.bools.SetInParent() + elif types.is_int(builtin_type): + if (builtin_type == types.int64 or builtin_type == types.uint64): + tensor_val.longInts.SetInParent() + elif builtin_type in (types.int8, types.uint8, types.uint32): + tensor_val.bytes.SetInParent() + else: + tensor_val.ints.SetInParent() + elif types.is_float(builtin_type): + if (builtin_type == types.fp64): + tensor_val.doubles.SetInParent() + elif (builtin_type == types.fp32): + tensor_val.floats.SetInParent() + elif (builtin_type == types.fp16): + tensor_val.bytes.SetInParent() + else: + raise TypeError("Unsupported float dtype for MIL proto serialization: {}".format(builtin_to_string(builtin_type))) + elif builtin_type == types.str: + tensor_val.strings.SetInParent() + else: + raise NotImplementedError("Unimplemented tensor type for: " + str(builtin_type)) + +def create_tensor_value(np_tensor): + """ + Return TensorValue. 
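+
+ For example (illustrative):
+ create_tensor_value(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))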
+ """ + builtin_type = numpy_type_to_builtin_type(np_tensor.dtype) + + value_type = create_valuetype_tensor(np_tensor.shape, types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + t_val = val.immediateValue.tensor + + # Copy the tensor values from the input tensor + t_field = _tensor_field_by_type(t_val, builtin_type) + + if 0 not in np_tensor.shape: + if builtin_type == types.str: + for x in np.nditer(np_tensor): + t_field.append(x.encode("utf-8")) + elif builtin_type in (types.fp16, types.int8, types.uint8, types.uint32): + val.immediateValue.tensor.bytes.values = np_val_to_py_type(np_tensor) + else: + for x in np_tensor.flatten(): + t_field.append(np_val_to_py_type(x)) + else: # This is an "empty" tensor (tensor with a dimension being size 0) + _set_empty_tensor_field_by_type(t_val, builtin_type) + return val + + +def create_scalar_value(py_scalar): + """ + Return TensorValue (since there's no ScalarValue) + """ + # Create the "scalar" (rank 0) tensor + builtin_type = type_to_builtin_type(type(py_scalar)) + value_type = create_valuetype_scalar(types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + t_val = val.immediateValue.tensor + + # Set the tensor value + t_field = _tensor_field_by_type(t_val, builtin_type) + if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32): + val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar) + else: + if builtin_type == types.str: + py_scalar = py_scalar.encode("utf-8") + t_field.append(np_val_to_py_type(py_scalar)) + + return val + + +def create_tuple_value(py_tuple): + """ + Return type of Tuple + """ + tp_val = pm.TupleValue() + for t in py_tuple: + item_val = tp_val.values.add() + item_type = item_val.type # ValueType + if isinstance(t, int): + v = create_scalar_value(t) + item_val.immediateValue.i = t + item_type = v.type + elif isinstance(t, np.ndarray): + v = create_tensor_value(t) + item_val.immediateValue.tensor.CopyFrom(v.immediateValue.tensor) + item_type.tensorType.CopyFrom(v.type.tensorType) + else: + raise NotImplementedError() + return tp_val + +def create_list_scalarvalue(py_list, np_type): + """ + Return a Value of type List, which holds scalar values + """ + builtin_type = numpy_type_to_builtin_type(np_type) + value_type = create_valuetype_list(length=len(py_list), + elem_shape=(), + dtype=types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + + list_val = val.immediateValue.list + for v in py_list: + item_val = list_val.values.add() + item_val.CopyFrom(create_scalar_value(v)) + + return val + +def create_file_value_tensor(file_name, offset, dim, data_type): + """ + Create a Value Type to store File Value + """ + val = pm.Value( + blobFileValue=pm.Value.BlobFileValue(fileName=file_name, offset=offset), + type=create_valuetype_tensor(dim, data_type), + ) + return val + + +def types_to_proto_primitive(valuetype): + if valuetype not in builtin_to_proto_types: + additional_error_msg = "" + if valuetype in (types.complex64, types.complex128): + additional_error_msg = ( + "(MIL doesn't support complex data as model's output, please extract real and " + "imaginary parts explicitly.) " + ) + raise ValueError( + f"Unknown map from SSA type {valuetype} to Proto type. 
{additional_error_msg}" + ) + return builtin_to_proto_types[valuetype] + + +def types_to_proto(valuetype): + if types.is_tensor(valuetype): + primitive = types_to_proto_primitive(valuetype.get_primitive()) + return create_valuetype_tensor(valuetype.get_shape(), primitive) + elif types.is_tuple(valuetype): + v_type = pm.ValueType() + t_type = v_type.tupleType + for t in valuetype.T: + new_v_type = t_type.types.add() + new_v_type.CopyFrom(types_to_proto(t)) + return v_type + elif types.is_list(valuetype): + elem = valuetype.T[0] + length = valuetype.T[1] + if types.is_tensor(elem): + dtype = types_to_proto_primitive(elem.get_primitive()) + elem_shape = elem.get_shape() + elif types.is_scalar(elem): + dtype = types_to_proto_primitive(valuetype) + elem_shape = () + elif types.is_str(elem): + dtype = types_to_proto_primitive(elem) + elem_shape = () + else: + raise NotImplementedError("Only list of either tensors or scalars supported. " + "Got element of type {}".format(elem.__type_info__())) + return create_valuetype_list(length=length, elem_shape=elem_shape, dtype=dtype) + elif types.is_dict(valuetype): + return create_valuetype_dict(valuetype.T[0], valuetype.T[1]) + else: + return create_valuetype_scalar(types_to_proto_primitive(valuetype)) + + +def create_file_value(output_var, blob_writer): + if output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 4: + offset = blob_writer.write_float_data(output_var.val.flatten()) + elif output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 2: + output_var_fp16_to_bytes_to_uint16 = np.frombuffer(output_var.val.flatten().tobytes(), np.uint16) + offset = blob_writer.write_fp16_data(output_var_fp16_to_bytes_to_uint16) + elif output_var.val.dtype.kind == "u" and output_var.val.dtype.itemsize == 1: + offset = blob_writer.write_uint8_data(output_var.val.flatten()) + elif output_var.val.dtype.kind == "i" and output_var.val.dtype.itemsize == 1: + offset = blob_writer.write_int8_data(output_var.val.flatten()) + else: + raise TypeError("Unsupported type, {}, for net buffer serialization.".format(output_var.val.dtype)) + + return create_file_value_tensor( + file_name=os.path.join(os.path.join('@model_path', _WEIGHTS_DIR_NAME), _WEIGHTS_FILE_NAME), + offset=offset, + dim=output_var.val.shape, + data_type=types_to_proto_primitive(output_var.sym_type.get_primitive()), + ) + +def create_immediate_value(var): + if types.is_tensor(var.sym_type): + return create_tensor_value(var.val) + elif types.is_list(var.sym_type): + if var.elem_type == types.str: + return create_list_scalarvalue(var.val, str) + elif var.elem_type == types.int64: + return create_list_scalarvalue(var.val, np.int64) + else: + raise NotImplementedError("List element type, {}, not supported yet.".format(var.sym_type.__type_info__())) + else: + return create_scalar_value(var.val) + +def cast_to_framework_io_dtype(var, is_output): + if var.dtype == types.fp32: + return ft.ArrayFeatureType.ArrayDataType.FLOAT32 + elif var.dtype == types.int32: + return ft.ArrayFeatureType.ArrayDataType.INT32 + elif var.dtype == types.fp16: + return ft.ArrayFeatureType.ArrayDataType.FLOAT16 + else: + ioname = "Output " if is_output else "Input " + ioname2 = "outputs" if is_output else "inputs" + raise NotImplementedError(ioname + var.name + " has data type " + builtin_to_string(var.dtype) + \ + ". 
ML Program models only support fp32 and int32 " + ioname2 + ".") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py new file mode 100644 index 00000000..e682e871 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py @@ -0,0 +1,535 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _OPSET, _SPECIFICATION_VERSION_IOS_15 +from coremltools import _logger as logger +from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier +from coremltools.converters.mil.backend.mil.helper import ( + cast_to_framework_io_dtype, + create_file_value, + create_immediate_value, + create_list_scalarvalue, + create_scalar_value, + types_to_proto, +) +from coremltools.converters.mil.backend.nn.load import _set_optional_inputs +from coremltools.converters.mil.input_types import EnumeratedShapes, ImageType, RangeDim, TensorType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, mil_list, types +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, any_variadic, is_symbolic +from coremltools.models.neural_network.flexible_shape_utils import ( + NeuralNetworkImageSize, + NeuralNetworkImageSizeRange, + add_enumerated_image_sizes, + add_multiarray_ndshape_enumeration, + set_multiarray_ndshape_range, + update_image_size_range, +) +from coremltools.models.utils import _WEIGHTS_FILE_NAME +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.proto import MIL_pb2 as pm +from coremltools.proto import Model_pb2 as ml + +from ..backend_helper import _get_colorspace_enum, _validate_image_input_output_shapes + +try: + from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter +except: + BlobWriter = None + + +def should_use_weight_file(val): + return ( + val is not None + and isinstance(val, (np.ndarray, np.generic)) + and val.size >= 10 + and val.dtype in ['float16', 'float32', 'uint8', 'int8'] + ) + + +def translate_const(op, blob_writer): + output_var = op.outputs[0] + + if should_use_weight_file(output_var.val): + value = create_file_value(output_var, blob_writer) + else: + value = create_immediate_value(output_var) + + return pm.Operation( + type="const", + attributes={"name": create_scalar_value(op.name), "val": value}, + outputs=[ + pm.NamedValueType( + name=output_var.name, type=types_to_proto(output_var.sym_type) + ) + ], + ) + + +def translate_constexpr(op, blob_writer): + + def get_value(var): + if should_use_weight_file(var.val): + value = create_file_value(var, blob_writer) + else: + value = create_immediate_value(var) + + return value + + output_var = op.outputs[0] + + attributes = {"name": create_scalar_value(op.name)} + attributes.update({k: get_value(v) for k, v in op.inputs.items()}) + + return pm.Operation( + type=op.op_type, + attributes=attributes, + outputs=[ + pm.NamedValueType( + name=output_var.name, type=types_to_proto(output_var.sym_type) + ) + ], + ) + + +def translate_generic_op(op, parameters, blob_writer, literal_params=[]): + inputs = {} + for param_name, vars 
in op.inputs.items(): + if param_name.startswith("_"): + continue + if not isinstance(vars, (list, tuple)): + vars = [vars] + + arguments = [] + for _var in vars: + binding = pm.Argument.Binding() + # use const value literals if requested + if param_name in literal_params: + binding.value.CopyFrom(create_immediate_value(_var)) + else: + binding.name = _var.name + arguments.append(binding) + + args = pm.Argument() + args.arguments.extend(arguments) + inputs[param_name] = args + + outputs = [ + pm.NamedValueType(name=v.name, type=types_to_proto(v.sym_type)) + for v in op.outputs + ] + blocks = None + if len(op.blocks) > 0: + blocks = [create_block(b, parameters, blob_writer) for b in op.blocks] + + op_type = op.op_type + attr_dict = {} + if op.op_type in SSAOpRegistry.custom_ops: + op_type = "custom_layer" + class_name = op.bindings.get("class_name", op.name) + input_order = op.bindings.get("input_order", []) + parameters = op.bindings.get("parameters", []) + weights = op.bindings.get("weights", []) + description = op.bindings.get("description", "") + + attr_dict["name"] = create_scalar_value(op.name) + attr_dict["class_name"] = create_scalar_value(class_name) + attr_dict["input_order"] = create_list_scalarvalue(input_order, str) + attr_dict["parameters"] = create_list_scalarvalue(parameters, str) + attr_dict["weights"] = create_list_scalarvalue(weights, str) + attr_dict["description"] = create_scalar_value(description) + + return pm.Operation( + type=op_type, + blocks=blocks, + inputs=inputs, + attributes=attr_dict, + outputs=outputs, + ) + +def create_block(block, parameters, blob_writer): + + def feeds_to_only_constexprs(op): + return (op.op_type == 'const') \ + and len(op.outputs[0].child_ops) > 0 \ + and all((child_op.op_type.startswith("constexpr_")) for child_op in op.outputs[0].child_ops) + + proto_ops = [] + + # Find the const op that generates classify's "label" / "class" string vec. + classify_const_classes_op = None + if len(block.operations) > 0: + # Classify is always the last operation in the block. + op = block.operations[-1] + op_cls_name = type(op).__name__ + if (op_cls_name == "classify"): + classes_var = op.inputs["classes"] + classify_const_classes_op = classes_var.op + if (len(classes_var.child_ops) != 1): + raise ValueError("Classify's labels/classes should be input to only 1 op (classify).") + + for op in block.operations: + op_cls_name = type(op).__name__ + if op_cls_name == "const": + if feeds_to_only_constexprs(op): + continue + # Do not serialize the const op that creates the var bound to the classifier's "classes" param. + # The variable's value will be bound directly to classify's "classes" param instead. + if op != classify_const_classes_op: + proto_ops.append(translate_const(op, blob_writer)) + elif op_cls_name.startswith("constexpr_"): + proto_ops.append(translate_constexpr(op, blob_writer)) + elif op_cls_name == "classify": + # Classify's "classes" param should be serialized as a value literal bound + # directly to the param, rather than as a const-generated variable. 
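+ # (translate_generic_op copies the const's value into the Argument.Binding via
+ # its literal_params argument, instead of referencing the const op by name.)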
+ proto_ops.append(translate_generic_op(op, parameters, blob_writer, ["classes"])) + elif op_cls_name == "reshape_like": + # The reshape_like should also be able to take value from a const op + # This is a workaround solution + # rdar://98689808 (Reshape_like should also accept const value from non literal input) + literal_params = ["begins", "ends", "end_masks"] + proto_ops.append(translate_generic_op(op, parameters, blob_writer, literal_params)) + else: + proto_ops.append(translate_generic_op(op, parameters, blob_writer)) + + inputs = [] + if not isinstance(block, Function): + # Function is subclass of Block, but function's block has no input, + # and hence skipping reading the block inputs. + for var in block.inputs: + proto_type = types_to_proto(var.sym_type) + inputs.append(pm.NamedValueType(name=var.name, type=proto_type)) + output_names = [v.name for v in block.outputs] + return pm.Block(inputs=inputs, outputs=output_names, operations=proto_ops) + + +def convert_function(function, parameters, blob_writer, opset): + block = create_block(function, parameters, blob_writer) + + inputs = [] + for name, var in function.inputs.items(): + proto_type = types_to_proto(var.sym_type) + inputs.append(pm.NamedValueType(name=name, type=proto_type)) + + return pm.Function(inputs=inputs, opset=opset, block_specializations={opset: block}) + +# Add a classify op to the output. +# Replaces the original probabilites output (in the containing MIL block) +# with the outputs of the classifier op. Returns the name of the original +# probabilities output variable. +def _add_classify_op(prog, classifier_config): + ''' + Add a "classify" op to the program, at the end of the main block + ''' + def remove_output(block, prob_var): + for i in range(len(block.outputs)): + if block.outputs[i] is prob_var: + block.outputs.pop(i) + break + + block = prog.functions["main"] + + message = "Class labels must be a list of integers / strings or a file path" + classes_in = classifier_config.class_labels + if isinstance(classes_in, str): + import os + + if not os.path.isfile(classes_in): + raise ValueError("Path to class labels (%s) does not exist." 
% classes_in) + with open(classes_in, "r") as f: + classes = f.read() + classes = classes.splitlines() + elif isinstance(classes_in, list): # list[int or str] + classes = classes_in + assert all([isinstance(x, (int, str)) for x in classes]), message + else: + raise ValueError(message) + + probability_var = _get_probability_var_for_classifier(prog, classifier_config) + + # add the classify op now + with block: + # cast the int label to np.int64 + if isinstance(classes[0], int): + classes = [np.int64(x) for x in classes] + classes_var = mb.const(val=mil_list(classes)) + if probability_var.dtype != types.fp32: + remove_output(block, probability_var) + probability_var = mb.cast(x=probability_var, dtype="fp32", name=probability_var.name + "_cast_to_fp32") + out = mb.classify(probabilities=probability_var, + classes=classes_var + ) + + predicted_feature_name = "classLabel" if classifier_config.predicted_feature_name is None \ + else classifier_config.predicted_feature_name + out[0].name = predicted_feature_name + out[1].name = predicted_feature_name + "_probs" + + # Remove probabilities from block outputs, replace with classify's outputs + remove_output(block, probability_var) + block.outputs[:0] = out + return out[0].name, out[1].name + + +def load(prog, weights_dir, resume_on_errors=False, specification_version=_SPECIFICATION_VERSION_IOS_15, **kwargs): + if BlobWriter is None: + raise RuntimeError("BlobWriter not loaded") + if "main" not in prog.functions: + raise ValueError("main function not found in program") + + # if user has specified "ClassifierConfig", then add the "classify" op to the prog + classifier_config = kwargs.get("classifier_config", None) + predicted_feature_name = None + predicted_probabilities_name = None + if classifier_config is not None: + predicted_feature_name, predicted_probabilities_name = _add_classify_op(prog, classifier_config) + + input_types = prog.main_input_types + output_types = prog.main_output_types + weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME) + blob_writer = BlobWriter(weight_path) + + opset = _OPSET[specification_version] + + function_protos = {} + for func_name, func in prog.functions.items(): + function_protos[func_name] = convert_function(func, prog.parameters, blob_writer, opset) + + proto = pm.Program( + version=1, + functions=function_protos, + ) + + desc = kwargs.get("model_description", None) + if desc and not isinstance(desc, ml.ModelDescription): + raise ValueError("Invalid model descriptor") + + if desc: + if classifier_config is not None: + raise AssertionError("Both model_description and classifier_config can't be provided") + model = ml.Model(description=desc, specificationVersion=specification_version) + model.mlProgram.CopyFrom(proto) + return model + + input_features = [] + output_features = [] + symbolic_inputs = [] + image_input_names = {} # these are the model inputs marked as image by the user + input_shape_map = {} + + for input_type in input_types: + if isinstance(input_type, ImageType): + image_input_names[input_type.name] = input_type + # error checking for input(s) marked as images + if input_type.name not in list(prog.functions["main"].inputs.keys()): + msg = "Provided image input '{}' is not one of the inputs of the MIL program" + raise ValueError(msg.format(input_type.name)) + input_shape_map[input_type.name] = input_type + + for name, var in prog.functions["main"].inputs.items(): + input_feature_type = ft.FeatureType() + + # error checking for input(s) marked as images + # an image input must be of type 
tensor in program proto + # (since an image type does not exist in MIL program) + if name in image_input_names and \ + not types.is_tensor(var.sym_type): + raise ValueError("For the image input, '{}', its type in the MIL program must be tensor. " + "Instead it is {}.".format(name, var.sym_type.__type_info__())) + + if types.is_tensor(var.sym_type): + shape = var.sym_type.get_shape() + if any_variadic(shape): + raise ValueError("Variable rank model inputs are not supported!") + if any_symbolic(shape): + symbolic_inputs.append(name) + # We extract the default input shape given by the user first + if name in input_shape_map: + shape = input_shape_map[name].shape.default + else: + logger.warning("Input shape not fully specified by enumerated shapes or range dim! 1 will be used for any dimension that is not specified.") + # If no input shape is provided (e.g. auto conversion of -1 in TensorFlow) + shape = [1 if is_symbolic(d) else d for d in shape] + + if name not in image_input_names: + # make a feature type of Type "multiArrayType" + array_type = ft.ArrayFeatureType(shape=shape, dataType=cast_to_framework_io_dtype(var, False)) + input_feature_type.multiArrayType.CopyFrom(array_type) + else: + # make a feature type of Type "imageType" + input_type = image_input_names[name] + _validate_image_input_output_shapes(input_type.color_layout, shape, name, is_input=True) + if not input_type.channel_first: + raise ValueError("Image input, '{}', must be in the channel_first format". + format(name)) + clr_space = _get_colorspace_enum(input_type.color_layout) + image_type = ft.ImageFeatureType(width=shape[-1], + height=shape[-2], + colorSpace=clr_space) + input_feature_type.imageType.CopyFrom(image_type) + + input_features.append( + ml.FeatureDescription(name=name, type=input_feature_type) + ) + elif types.is_scalar(var.sym_type): + array_type = ft.ArrayFeatureType(shape=[1], dataType=cast_to_framework_io_dtype(var, False)) + input_feature_type.multiArrayType.CopyFrom(array_type) + input_features.append(ml.FeatureDescription(name=var.name, type=input_feature_type)) + else: + raise NotImplementedError() + + if output_types is not None and classifier_config is None: + assert len(output_types) == len(prog.functions["main"].outputs), \ + "number of MIL program outputs does not match the number of outputs provided by the user" + + for i, var in enumerate(prog.functions["main"].outputs): + output_feature_type = ft.FeatureType() + if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type): + if output_types is not None and isinstance(output_types[i], ImageType): + if not types.is_tensor(var.sym_type): + raise ValueError("Image output, '{}', is a scalar, but it should be a tensor of rank 4".format( + var.name)) + shape = var.sym_type.get_shape() + if any_variadic(shape): + raise ValueError("Variable rank model outputs that are ImageTypes are not supported") + if any([is_symbolic(d) for d in shape]): + raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
+ format(var.name)) + _validate_image_input_output_shapes(output_types[i].color_layout, shape, var.name, is_input=False) + clr_space = _get_colorspace_enum(output_types[i].color_layout) + image_type = ft.ImageFeatureType(width=shape[-1], + height=shape[-2], + colorSpace=clr_space) + output_feature_type.imageType.CopyFrom(image_type) + output_features.append( + ml.FeatureDescription(name=var.name, type=output_feature_type) + ) + else: + dataType = None + if classifier_config is None or var.name != predicted_feature_name: + # Not a classifier output, make sure the model output type matches the ML Program type. + dataType = cast_to_framework_io_dtype(var, True) + else: + # Classifier outputs are set up separately, so default to fp32 for now. + dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32 + + array_type = ft.ArrayFeatureType(shape=None, dataType=dataType) + output_feature_type.multiArrayType.CopyFrom(array_type) + output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type)) + elif (types.is_dict(var.sym_type)): + output_feature_type.dictionaryType.MergeFromString(b"") + keytype, valtype = var.sym_type.T + if types.is_str(keytype): + output_feature_type.dictionaryType.stringKeyType.MergeFromString(b"") + elif (keytype == types.int64): + output_feature_type.dictionaryType.int64KeyType.MergeFromString(b"") + else: + raise ValueError("Dictionary key type not supported.") + output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type)) + else: + raise NotImplementedError() + + # Model description + desc = ml.ModelDescription(input=input_features, output=output_features) + if classifier_config is not None: + desc.predictedFeatureName = predicted_feature_name + desc.predictedProbabilitiesName = predicted_probabilities_name + + # Manually edit output type of predictedFeatureName. + # It doesn't use MLMultiArray and really uses a "primitive" type.
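+ # For illustration (hypothetical label values): with class_labels=[0, 1, 2] the + # predicted label output below is switched to int64Type, and with class_labels=["cat", "dog"] + # it becomes stringType.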
+ for output in desc.output: + if output.name == predicted_feature_name: + if type(classifier_config.class_labels[0]) == int: + output.type.int64Type.MergeFromString(b"") + else: + output.type.stringType.MergeFromString(b"") + break + + # Create ML Model + model = ml.Model(description=desc, specificationVersion=specification_version) + model.mlProgram.CopyFrom(proto) + + # Set symbolic shapes + for input_name in symbolic_inputs: + input_type = input_shape_map.get(input_name, None) + + if isinstance(input_type, ImageType): + if isinstance(input_type.shape, EnumeratedShapes): + enumerated_shapes = [] + for s in input_type.shape.shapes: + enumerated_shapes.append( + NeuralNetworkImageSize( + height=s.shape[-2], width=s.shape[-1] + ) + ) + add_enumerated_image_sizes( + model, input_name, sizes=enumerated_shapes + ) + else: + img_range = NeuralNetworkImageSizeRange() + H = input_type.shape.shape[-2] + W = input_type.shape.shape[-1] + + if isinstance(H, RangeDim): + img_range.add_height_range((H.lower_bound, H.upper_bound)) + elif is_symbolic(H): + img_range.add_height_range((1, -1)) + else: + img_range.add_height_range((H, H)) + if isinstance(W, RangeDim): + img_range.add_width_range((W.lower_bound, W.upper_bound)) + elif is_symbolic(W): + img_range.add_width_range((1, -1)) + else: + img_range.add_width_range((W, W)) + + update_image_size_range( + model, input_name, img_range + ) + elif isinstance(input_type, TensorType): + if isinstance(input_type.shape, EnumeratedShapes): + add_multiarray_ndshape_enumeration( + model, input_name, [tuple(s.shape) for s in input_type.shape.shapes] + ) + else: + lb = [] + ub = [] + for s in input_type.shape.shape: + if isinstance(s, RangeDim): + lb.append(s.lower_bound) + ub.append(s.upper_bound) + elif is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + model, input_name, lower_bounds=lb, upper_bounds=ub + ) + elif input_type is None: + sym_type = prog.functions["main"].inputs[input_name].sym_type + lb = [] + ub = [] + for s in sym_type.get_shape(): + if is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + model, input_name, lower_bounds=lb, upper_bounds=ub + ) + + # Set optional inputs + _set_optional_inputs(model, input_types) + + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py new file mode 100644 index 00000000..21f3261a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . 
import (adjust_io_to_supported_types, fuse_activation_silu, + insert_image_preprocessing_op, sanitize_name_strings) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py new file mode 100644 index 00000000..dd77dfed --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py @@ -0,0 +1,204 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types as types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="mil_backend") +class adjust_io_to_supported_types(AbstractGraphPass): + """ + Converts all dtypes to types that are supported by the CoreML runtime. + The runtime supports only fp16, fp32, int32, str, and bool variables. + + General rules: + * Integer vars that are not 32 bit are replaced with int32 types. + * All other types not in the list of runtime supported types are replaced with the fp32 dtype. + No casts are inserted; the previous type is replaced. The assumption is that all remaining + types are numerical and can be reasonably replaced with 32 bit float types. + + The "main" function has additional rules since its I/O is mapped to CoreML model I/O: + * if function.opset_version < coremltools.target.iOS16, then: + * Fp16 I/O is replaced with fp32 I/O. + Casts (fp32 input -> fp16) are inserted at the beginning of the program to preserve 16 bit inputs. + Casts (fp16 -> fp32 output) are inserted at the end of the program to preserve 16 bit computations. + + * All non-integer I/O that is not fp32 is replaced with fp32 I/O. + A cast (prev input type -> fp32) is inserted at the beginning of the program to preserve non-fp32 inputs. + A cast (prev type -> fp32 out) is inserted at the end of the program to preserve non-fp32 computations. + The assumption is that all remaining types are numerical and it is valid to cast them to/from fp32. + + * The only exception: Int64 outputs are allowed for the classifier op. This is to keep consistency with + the CoreML API, which uses 64 bit integers to represent classifier labels. + + ------ + + func main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + becomes + + func main(fp32 x, int32 y, fp32 z) { + bool x_casted = cast(x) + bool out__pre__output__fp32__cast = logical_not(x_casted) + fp32 out = cast(out__pre__output__fp32__cast) + } -> (out, y, z) + + ------ + + func not_main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + is unchanged. 
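+ + A usage sketch (the pass is normally applied automatically by the converter pipeline; invoking it directly through the pass registry, as the tests in this patch do, looks like this): + + from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY + PASS_REGISTRY["mil_backend::adjust_io_to_supported_types"](prog)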
+ """ + + def apply(self, prog): + for name, func in prog.functions.items(): + is_main_funtion = name == "main" + _adjust_io_to_supported_types(func, is_main_funtion) + +__RUNTIME_SUPPORTED_TYPES = [types.fp16, types.fp32, types.int32, types.str, types.bool] + +##### +# Main Function +##### +def _adjust_var_dtype_helper(var, dtype): + if (types.is_scalar(var.sym_type)): + var._sym_type = dtype + else: + var._sym_type = types.tensor(dtype, var.sym_type.get_shape()) + +@block_context_manager +def _adjust_main_inputs(func): + first_op = func.operations[0] if len(func.operations) > 0 else None + for input_name, input_var in func.inputs.items(): + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and input_var.dtype != types.fp32 \ + and input_var.dtype != types.int32: + input_dtype_str = types.builtin_to_string(input_var.dtype) + if types.is_int(input_var.dtype): + # Replace non-int32 input type with int32. + logger.warning("Input" + input_var.name + " is of dtype " + input_dtype_str +\ + ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\ + "This input will be assigned a dtype of int32. " +\ + "No cast will be inserted; the previous dtype will be replaced.") + _adjust_var_dtype_helper(input_var, types.int32) + elif input_var.dtype == types.fp64: + # Replace float64 input type with fp32. + logger.warning("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\ + "not supported by ML program models. This input will be assigned a dtype " +\ + "of fp32. No cast will be inserted; the previous dtype will be replaced.") + _adjust_var_dtype_helper(input_var, types.fp32) + elif input_var.dtype == types.fp16 \ + and func.opset_version >= target.iOS16: + pass # do nothing, since fp16 is a valid input type for CoreML + else: + # This is some other dtype. Change the type to fp32 and add a cast. + # This is only a limitation of main--other functions do not represent CoreML model inputs + # and do not have the same limitation on input types. + supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \ + "{int32, fp16, fp32}" + msg = "\nInput '{}' is of dtype {}. The " +\ + "CoreML runtime does not support inputs with this dtype " +\ + "(supported dtypes are: {}). This input will be assigned a dtype of " +\ + "fp32. 
A cast will be inserted at the beginning of the program to " +\ + "convert the input to the originally defined dtype.\n" + if input_var.dtype == types.fp16: + msg += "fp16 dtype input is supported if the function.opset_version is chosen to be at least " \ + "iOS16/macOS13.\n" + logger.warning(msg.format( + input_var.name, + input_dtype_str, + supported_dtypes)) + + casted_input_var = mb.cast(x=input_var, dtype=input_dtype_str, before_op=first_op) + func.replace_uses_of_var_after_op(anchor_op=casted_input_var.op, old_var=input_var, new_var=casted_input_var) + _adjust_var_dtype_helper(input_var, types.fp32) + +@block_context_manager +def _adjust_main_outputs(func): + new_outputs = [] + for output_var in func.outputs: + output_type = output_var.sym_type + if (types.is_tensor(output_type) or types.is_scalar(output_type)) \ + and output_var.dtype != types.fp32 \ + and output_var.dtype != types.int32 \ + and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16): + # since fp16 is a valid output type for coreml from ios16 spec onwards, no need to cast + output_dtype_str = types.builtin_to_string(output_var.dtype) + supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \ + "{int32, fp16, fp32}" + msg = "\nOutput '{}' is of dtype {}. The " +\ + "CoreML runtime does not support outputs with this dtype " +\ + "(supported dtypes are: {}). This output will be assigned a dtype " +\ + "of fp32. A cast will be inserted at the end of the program to convert " +\ + "the original output dtype to the dtype supported by the CoreML runtime.\n" + if output_var.dtype == types.fp16: + msg += "fp16 dtype output is supported if function.opset_version is chosen to be at least " \ + "iOS16/macOS13.\n" + logger.warning(msg.format( + output_var.name, + output_dtype_str, + supported_dtypes, + )) + + output_var_name = output_var.name + output_var.set_name(output_var_name + "__pre__output__fp32__cast") + # Convert the output to fp32, and add a cast. + output_var = mb.cast(x=output_var, dtype="fp32") + output_var.set_name(output_var_name) + new_outputs.append(output_var) + func.set_outputs(new_outputs) + + +##### +# General Functions and Blocks +##### +def _adjust_var(var): + """ + Changes the dtype of the provided variable according + to the rules outlined in the top level pass comment + (see adjust_io_to_supported_types). + """ + if (types.is_tensor(var.sym_type) or types.is_scalar(var.sym_type)) \ + and var.dtype not in __RUNTIME_SUPPORTED_TYPES: + dtype_str = types.builtin_to_string(var.dtype) + if types.is_int(var.dtype): + # Replace non-int32 input type with int32. + logger.warning("Input '" + var.name + "' is of dtype " + dtype_str +\ + ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\ + "This input will be assigned a dtype of int32. " +\ + "No cast will be inserted; the previous dtype will be replaced.") + _adjust_var_dtype_helper(var, types.int32) + else: + # This is some other unsupported dtype. Change the input type to fp32. + logger.warning("Var '" + var.name + "' is of dtype " + dtype_str + ". The CoreML runtime " +\ + "does not support this dtype (only fp16, fp32, bool, and int32 are supported). " +\ + "This input will be assigned a dtype of fp32.
No cast will be inserted; " +\ + "the previous dtype will be replaced.") + _adjust_var_dtype_helper(var, types.fp32) + + +def _adjust_func_inputs(func): + for input_name, input_var in func.inputs.items(): + _adjust_var(input_var) + +##### +# The Pass +##### +def _adjust_io_to_supported_types(func, is_main): + if is_main: + _adjust_main_inputs(func) + _adjust_main_outputs(func) + else: + _adjust_func_inputs(func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py new file mode 100644 index 00000000..5f9270df --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py @@ -0,0 +1,82 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _match_pattern(op): + if op.op_type == "sigmoid": + # abort fusion if op output is also a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + # find following op + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + mul_op_candidate = list(child_ops)[0] + if mul_op_candidate.op_type != "mul": + return None + mul_inputs_actual = {mul_op_candidate.x.name, mul_op_candidate.y.name} + mul_inputs_expect = {op.x.name, op.outputs[0].name} + if mul_inputs_actual != mul_inputs_expect: + return None + return mul_op_candidate + + return None + + +def _try_to_transform(sigmoid_op, mul_op, block): + out_name = mul_op.outputs[0].name + # create a new silu op + x = mb.silu(x=sigmoid_op.x, name=out_name, before_op=sigmoid_op) + mul_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=mul_op, old_var=mul_op.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops([sigmoid_op, mul_op]) + return True + + +@block_context_manager +def _fuse_activation_silu_block(block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = _fuse_activation_silu_block(b) + if len(op.blocks) > 0: + continue + + mul_op = _match_pattern(op) + if mul_op is not None: + fusion_status = _try_to_transform(op, mul_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="mil_backend") +class fuse_activation_silu(AbstractGraphPass): + """ + Fold x * sigmoid(x) into silu(x) + + Given: + %1 = sigmoid(x=%0) + %2 = mul(x=%0, y=%1) or mul(x=%1, y=%0) + ... + + Result: + %3 = silu(%0) + ... 
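+ + A usage sketch (assuming a Program named prog that contains the pattern above; the pass is normally applied by the converter pipeline): + + from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY + PASS_REGISTRY["mil_backend::fuse_activation_silu"](prog) + # op types go from ["sigmoid", "mul"] to ["silu"]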
+ """ + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = _fuse_activation_silu_block(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py new file mode 100644 index 00000000..b83a2b43 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py @@ -0,0 +1,67 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.input_types import ColorLayout, ImageType +from coremltools.converters.mil.mil import Builder as mb +# import mil internal ops to add it to the builder +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types import nptype_from_builtin + + +@register_pass(namespace="mil_backend") +class insert_image_preprocessing_ops(AbstractGraphPass): + """ + Insert preprocessing ops, right after the input if its of type Image + """ + def apply(self, prog): + for f_name, f in prog.functions.items(): + if f_name == 'main': + _insert_image_preprocessing_ops(f, prog) + +@block_context_manager +def _insert_image_preprocessing_ops(block, prog): + input_types = list(prog.main_input_types) + + for input_type in input_types: + if isinstance(input_type, ImageType): + if input_type.name not in block.inputs: + continue + + input_var = block.inputs[input_type.name] + placeholder_op = block.placeholder_inputs[input_type.name] + first_op = block.operations[0] + old_var = placeholder_op.outputs[0] + has_bias = np.any(np.array(input_type.bias) != 0) + last_output = input_var + input_nptype = nptype_from_builtin(type(last_output.dtype())) + if input_type.scale != 1: + last_output = mb.mul(x=last_output, + y=np.array(input_type.scale, dtype=input_nptype), + before_op=first_op, name=input_var.name + "__scaled__") + if has_bias: + if input_type.color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype), + before_op=first_op, name=input_var.name + "__biased__") + else: + if len(last_output.shape) == 3: + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]), + before_op=first_op, name=input_var.name + "__biased__") + elif len(last_output.shape) == 4: + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]), + before_op=first_op, name=input_var.name + "__biased__") + else: + raise TypeError("Unsupported rank for image input type.") + + if last_output != input_var: + block.replace_uses_of_var_after_op(anchor_op=last_output.op, + old_var=old_var, + new_var=last_output) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py new file mode 100644 
index 00000000..9ec89909 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="mil_backend") +class sanitize_name_strings(AbstractGraphPass): + """ + Sanitize the names of vars and ops to make sure + that they match the format described in the NameSanitizer class, i.e. + [a-zA-Z_][a-zA-Z0-9_]* + """ + def apply(self, prog): + for f in prog.functions.values(): + sanitizer_vars = NameSanitizer(prefix="var_") + sanitizer_ops = NameSanitizer(prefix="op_") + NameSanitizer.sanitize_block(f, sanitizer_vars, sanitizer_ops, prog.main_input_types) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py new file mode 100644 index 00000000..6d82ff38 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py @@ -0,0 +1,888 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import itertools + +import numpy as np +import pytest + +# import mil internal ops to add them to the builder +import coremltools as ct +# Set the testing backend +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, assert_model_is_valid, get_op_types_in_program) + + +class TestAdjustToSupportedTypes: + + def test_basic(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.bool), + mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32), + mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp32)]) + def prog(x, y, z): + out = mb.logical_not(x=x) + return (out, y, z) + prog.functions['not_main'] = copy.deepcopy(prog.functions['main']) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + """ + Input graph: + + func main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + becomes + + func main(fp32 x, int32 y, fp32 z) { + bool x_casted = cast(x) + bool out__pre__output__fp32__cast = logical_not(x_casted) + fp32 out = cast(out__pre__output__fp32__cast) + } -> (out, y, z) + """ + assert get_op_types_in_program(prev_prog) == ['logical_not'] + assert get_op_types_in_program(prog) == ['cast', 'logical_not', 'cast'] + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype ==
types.fp32 + for i in range(1, len(inputs)): + assert prev_inputs[i][1].name == inputs[i][1].name + assert prev_inputs[i][1].dtype == inputs[i][1].dtype + + prev_outputs = prev_prog.functions['main'].outputs + outputs = prog.functions['main'].outputs + assert prev_outputs[0].name == outputs[0].name + assert outputs[0].dtype == types.fp32 + for i in range(1, len(outputs)): + assert prev_outputs[i].name == outputs[i].name + assert prev_outputs[i].dtype == outputs[i].dtype + + """ + Input graph: + + func not_main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + is identical after the pass. + """ + assert get_op_types_in_program(prev_prog, 'not_main') == ['logical_not'] + assert get_op_types_in_program(prog, 'not_main') == ['logical_not'] + + prev_inputs = list(prev_prog.functions['not_main'].inputs.items()) + inputs = list(prog.functions['not_main'].inputs.items()) + for i in range(0, len(inputs)): + assert prev_inputs[i][1].name == inputs[i][1].name + assert prev_inputs[i][1].dtype == inputs[i][1].dtype + + prev_outputs = prev_prog.functions['not_main'].outputs + outputs = prog.functions['not_main'].outputs + for i in range(0, len(outputs)): + assert prev_outputs[i].name == outputs[i].name + assert prev_outputs[i].dtype == outputs[i].dtype + + def test_int64_input(self): + """ + Input graph: + + func main(int64 x) { + } -> (x) + + becomes + + func main(int32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int64)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.int32 + + def test_float64_input(self): + """ + Input graph: + + func main(float64 x) { + } -> (x) + + becomes + + func main(float32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp64)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.fp32 + + + @pytest.mark.parametrize( + "opset_version", + [None, target.iOS13, target.iOS16], + ) + def test_float16_input_output(self, opset_version): + """ + Input graph: + + main(%x: (1, 1, 1, 1, fp16)(Tensor)) { + block0() { + %relu_0: (1, 1, 1, 1, fp16)(Tensor) = relu(x=%x, name="relu_0") + } -> (%relu_0) + } + + Output graph (if opset_version < ios16): + + main(%x: (1, 1, 1, 1, fp32)(Tensor)) { + block0() { + %cast_0: (1, 1, 1, 1, fp16)(Tensor) = cast(x=%x, dtype="fp16", name="cast_0") + %relu_0__pre__output__fp32__cast: (1, 1, 1, 1, fp16)(Tensor) = relu(x=%cast_0, name="relu_0") + %relu_0: (1, 1, 1, 1, fp32)(Tensor) = cast(x=%relu_0__pre__output__fp32__cast, dtype="fp32", name="cast_1") + } -> (%relu_0) + } + + Output graph (if opset_version >= ios16): same as the input graph + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp16)], opset_version=opset_version) + def prog(x): + return mb.relu(x=x) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = 
list(prev_block.inputs.items()) + inputs = list(block.inputs.items()) + prev_outputs = prev_block.outputs + outputs = block.outputs + assert prev_inputs[0][1].name == inputs[0][1].name + assert outputs[0].name == prev_outputs[0].name + if opset_version is None or opset_version < target.iOS16: + assert get_op_types_in_program(prog) == ['cast', 'relu', 'cast'] + assert inputs[0][1].dtype == types.fp32 + assert outputs[0].dtype == types.fp32 + else: + assert get_op_types_in_program(prog) == ['relu'] + assert inputs[0][1].dtype == types.fp16 + assert block.outputs[0].dtype == types.fp16 + + def test_float16_input_output_with_opset_version_inference(self): + """ + Input graph: + + main(%x: (1, 1, 4, 4, fp16)(Tensor)) { + block0() { + %pixel_unshuffle_0: (1, 4, 2, 2, fp16)(Tensor) = pixel_unshuffle(x=%x, downscale_factor=2, name="pixel_unshuffle_0") + } -> (%pixel_unshuffle_0) + } + + This function would be inferred as an iOS16 function, and the graph pass should behave properly + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4), dtype=types.fp16)]) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_block.inputs.items()) + inputs = list(block.inputs.items()) + prev_outputs = prev_block.outputs + outputs = block.outputs + assert prev_inputs[0][1].name == inputs[0][1].name + assert outputs[0].name == prev_outputs[0].name + assert get_op_types_in_program(prog) == ['pixel_unshuffle'] + assert inputs[0][1].dtype == types.fp16 + assert block.outputs[0].dtype == types.fp16 + + def test_int8_input(self): + """ + Input graph: + + func main(int8 x) { + } -> (x) + + becomes + + func main(int32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int8)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.int32 + + def test_subblock(self): + """ + Input graph: + + func main(float64 a, float32 b) { + float64 out_0, float32 out_1 = while_loop(a, b, + (float64 a, float32 b) { + bool cond = less(a, b) + } -> (cond) + (float64 a, float32 b) { + float64 temp = const(1) + float64 out = add(a, b) + } -> (out, b) + ); + } -> (out_0, out_1) + + becomes + + func main(float32 a, float32 b) { + float32 out_0, float32 out_1 = while_loop(a, b, + (float32 a, float32 b) { + bool cond = less(a, b) + } -> (cond) + (float32 a, float32 b) { + float32 temp = const(1) + float32 out = add(a, b) + } -> (out, b) + ); + } -> (out_0, out_1) + """ + pytest.xfail("fp64 dtype not supported in MIL") + def body(a, b): + return mb.add(x=a, y=np.float64(1)), b + + def cond(a, b): + return mb.less(x=a, y=b) + + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp64), + mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + for i in range(0, len(prev_inputs)): + assert 
prev_inputs[i][1].name == inputs[i][1].name + assert inputs[i][1].dtype == types.fp32 + + assert get_op_types_in_program(prev_prog) == ['while_loop'] + assert get_op_types_in_program(prog) == ['while_loop'] + + def assert_block_inputs(prev_inputs, inputs): + for i in range(0, len(prev_inputs)): + assert prev_inputs[i].name == inputs[i].name + assert inputs[i].dtype == types.fp32 + + subblocks = prog.functions['main'].operations[0].blocks + prev_subblocks = prev_prog.functions['main'].operations[0].blocks + for i in range(0, len(subblocks)): + assert_block_inputs(prev_subblocks[i].inputs, subblocks[i].inputs) + + def test_adjust_cast(self): + """ + Input graph: + + func main(int32 x) { + fp64 y = cast(x=x, dtype="fp64") + } -> (y) + + becomes + + func main(int32 x) { + fp32 y = cast(x=x, dtype="fp32") + } -> (y) + """ + pytest.xfail("cast operation does not support casting to fp64") + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32)]) + def prog(x): + y = mb.cast(x=x, dtype="fp64") + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + assert get_op_types_in_program(prev_prog) == ['cast'] + assert get_op_types_in_program(prog) == ['cast'] + + prev_cast = prev_prog.functions['main'].operations[1] + cast = prog.functions['main'].operations[2] + + assert prev_cast.dtype.val == "fp64" + assert prev_cast.outputs[0].dtype == types.fp64 + + assert cast.dtype.val == "fp32" + assert cast.outputs[0].dtype == types.fp32 + + def test_adjust_redundant_cast(self): + """ + Input graph: + + func main(int32 x) { + int64 y = cast(x=x, dtype="int64") + } -> (y) + + becomes + + func main(int32 x) { + } -> (x) + """ + pytest.xfail("cast does not support dtype=`int64`") + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32)]) + def prog(x): + y = mb.cast(x=x, dtype="int64") + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + assert get_op_types_in_program(prev_prog) == ['cast'] + assert get_op_types_in_program(prog) == [] + +class TestImagePreprocessingPass: + + def test_program_grayscale(self): + """ + Input graph: + + main(x: ImageType(color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + + def test_program_grayscale_with_scale(self): + """ + Input graph: + + main(x: ImageType(scale=2.0, color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, 2) + y1 = relu(y) + y2 = relu(y) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) +
return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + scale=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + + def test_program_grayscale_with_bias(self): + """ + Input graph: + + main(x: ImageType(bias=2.0, color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = add(x, 2) + y1 = relu(y) + y2 = relu(y) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + bias=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["add", "relu", "relu", "add"] + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.val == 2.0 + + def test_program_grayscale_with_scale_bias(self): + """ + Input graph: + + main(x: ImageType(scale=2.0, bias=2.0, color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y_scaled = mul(x, 2) + y = add(y_scaled, 2) + y1 = relu(y) + y2 = relu(y) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + scale=2.0, + bias=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.val == 2.0 + + def test_program_rgb(self): + """ + Input graph: + + main(x: ImageType(color_layout="RGB", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + + def 
test_program_rgb_scale_bias(self): + """ + Input graph: + + main(x: ImageType(color_layout="RGB", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=2.0, + bias=[1.0, 2.0, 3.0], + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert np.all(add_op.y.val == np.array([1.0, 2.0, 3.0]).reshape([1, 3, 1, 1])) + + def test_program_bgr(self): + """ + Input graph: + + main(x: ImageType(color_layout="BGR", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + color_layout="BGR", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + + def test_program_bgr_scale_bias(self): + """ + Input graph: + + main(x: ImageType(color_layout="BGR", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=2.0, + bias=[1.0, 2.0, 3.0], + color_layout="BGR", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert np.all(add_op.y.val == np.array([1.0, 2.0, 3.0]).reshape([1, 3, 1, 1])) + + @pytest.mark.parametrize( + "scale_type, bias_type", itertools.product([np.float32, np.int32], [np.float32, np.int32]) + ) + def test_scale_bias_types(self, scale_type, bias_type): + """ + Input graph: + + main(x: 
ImageType(color_layout="RGB", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=scale_type(2.0), + bias=np.array([1, 2, 3]).astype(bias_type), + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.dtype() == prog.functions["main"].inputs["x"].dtype() + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.dtype() == prog.functions["main"].inputs["x"].dtype() + +class TestSanitizerPass: + + def test_sanitize_numeric_var_names(self): + """ + Input: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + %1: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="1") + %3: (1, 3, 20, fp32)(Tensor) = add(x=%Var_1!, y=%1, name="3") + } -> (%3) + } + + Output: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + %var_1: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="op_1") + %var_3: (1, 3, 20, fp32)(Tensor) = add(x=%var_1_, y=%var_1, name="op_3") + } -> (%var_3) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog(x): + y1 = mb.relu(x=x, name = "var_1!") + y2 = mb.relu(x=x, name = "1") + z = mb.add(x=y1, y=y2, name = "3") + return z + + PASS_REGISTRY["mil_backend::sanitize_name_strings"](prog) + block = prog.functions["main"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[1].outputs[0].name == "var_1" + assert prog["main"].outputs[0].name == "var_3" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + assert block.find_ops(op_type="relu")[1].name == "op_1" + assert block.find_ops(op_type="add")[0].name == "op_3" + + def test_sanitize_var_names_with_two_functions(self): + """ + Input: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + } -> (%var_1!) + } + + main_2(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + } -> (%var_1!) 
+ } + + + Output: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + } -> (%var_1_) + } + + main_2(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + } -> (%var_1_) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog(x): + z = mb.relu(x=x, name = "var_1!") + return z + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog_2(x): + z = mb.relu(x=x, name = "var_1!") + return z + + prog.add_function("main_2", prog_2.functions["main"]) + PASS_REGISTRY["mil_backend::sanitize_name_strings"](prog) + block = prog.functions["main"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert prog["main"].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + block = prog.functions["main_2"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert prog["main_2"].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + + +class TestPassFuseActivationSiLU: + """ + Input graph: + input --> sigmoid --> mul --> output + Output graph: + input --> silu --> output + """ + + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+") + @pytest.mark.parametrize( + "reverse_order", itertools.product([True, False]), + ) + def test_0(self, reverse_order): + x_shape = tuple(np.random.randint(low=1, high=4, size=5)) + + @mb.program(input_specs=[mb.TensorSpec(shape=x_shape)]) + def program(x): + sigmoid_x = mb.sigmoid(x=x) + if not reverse_order: + x = mb.mul(x=x, y=sigmoid_x) + else: + x = mb.mul(x=sigmoid_x, y=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + program, "mil_backend::fuse_activation_silu" + ) + + assert get_op_types_in_program(prev_prog) == ["sigmoid", "mul"] + assert get_op_types_in_program(program) == ["silu"] + + assert_model_is_valid( + program=program, + inputs={"x": x_shape}, + backend=("mlprogram", "fp32"), + expected_output_shapes={block.outputs[0].name: tuple(x_shape)}, + ) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py new file mode 100644 index 00000000..4a07c00b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer as _NameSanitizer + + +class TestNameSanitizer: + + def test_name_sanitizer(self): + input_and_expected_strings = [("1", "_1"), + ("abc", "abc"), + ("*asdf", "_asdf"), + ("*asd*f", "_asd_f"), + ("0abc2", "_0abc2"), + ("is8174 + 16", "is8174___16"), + ("a:abc", "a_abc"), + ("a.abc", "a_abc"), + ("dense_2_1/BiasAdd", "dense_2_1_BiasAdd"), + ("dense_2_1-BiasAdd", "dense_2_1_BiasAdd"), + ("key:0", "key_0"), + ] + + for i, in_and_out_str in enumerate(input_and_expected_strings): + out = _NameSanitizer().sanitize_name(in_and_out_str[0]) + assert out == in_and_out_str[1] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py new file mode 100644 index 00000000..5847e172 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py @@ -0,0 +1,195 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as np + +import coremltools as ct +from coremltools.converters.mil.mil.builder import Builder as mb +from coremltools.converters.mil.mil.program import Symbol +from coremltools.models.utils import _macos_version + + +class TestMILFlexibleShapes: + + @mb.program( + input_specs = [ + mb.TensorSpec(shape=[1, 3, Symbol("H"), Symbol("W")]) + ]) + def basic_network(x): + return mb.relu(x=x) + + def test_mil_enumerated_multiarray(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.TensorType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "enumeratedShapes", "Expected enumeratedShapes in ShapeFlexibility" + + spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape] + spec_enumerated_shapes = set() + for enumerated in input_spec[0].type.multiArrayType.enumeratedShapes.shapes: + spec_enumerated_shapes.add(tuple([s for s in enumerated.shape])) + assert spec_default_shape == [1, 3, 10, 10], "Expected default shape to be [1, 3, 10, 10], got {} instead".format(str(spec_default_shape)) + assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch" + + def test_mil_enumerated_multiarray_with_default(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.TensorType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", 
convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "enumeratedShapes", "Expected enumeratedShapes in ShapeFlexibility" + + spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape] + spec_enumerated_shapes = set() + for enumerated in input_spec[0].type.multiArrayType.enumeratedShapes.shapes: + spec_enumerated_shapes.add(tuple([s for s in enumerated.shape])) + assert spec_default_shape == [1, 3, 10, 30], "Expected default shape to be [1, 3, 10, 10], got {} instead".format(str(spec_default_shape)) + assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch" + + def test_mil_enumerated_image(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.ImageType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", "Expected enumeratedShapes in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 10, "expected [H, W] == [10, 10], got [{}, {}] instead".format(spec_H, spec_W) + + spec_enumerated_shapes = set() + for enumerated in input_spec[0].type.imageType.enumeratedSizes.sizes: + spec_enumerated_shapes.add(tuple([1, 3, enumerated.height, enumerated.width])) + assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch" + + def test_mil_enumerated_image_with_default(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.ImageType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", "Expected enumeratedShapes in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 30, "expected [H, W] == [10, 30], got [{}, {}] instead".format(spec_H, spec_W) + + spec_enumerated_shapes = set() + for 
+            spec_enumerated_shapes.add(tuple([1, 3, enumerated.height, enumerated.width]))
+        assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
+
+    def test_mil_ranged_multiarray(self):
+        input_shape = [ct.TensorType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30)))]
+        mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "shapeRange", "Expected shapeRange in ShapeFlexibility"
+
+        spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape]
+        ranged_shapes = [(1, 1), (3, 3), (10, 10), (10, 30)]
+        spec_ranged_shapes = []
+        for range_dim in input_spec[0].type.multiArrayType.shapeRange.sizeRanges:
+            spec_ranged_shapes.append(tuple([range_dim.lowerBound, range_dim.upperBound]))
+        assert spec_default_shape == [1, 3, 10, 10], "Expected default shape to be [1, 3, 10, 10], got {} instead".format(str(spec_default_shape))
+        assert spec_ranged_shapes == ranged_shapes, "Ranged shape mismatch"
+
+    def test_mil_ranged_multiarray_with_default(self):
+        input_shape = [ct.TensorType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))]
+        mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "shapeRange", "Expected shapeRange in ShapeFlexibility"
+
+        spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape]
+        ranged_shapes = [(1, 1), (3, 3), (10, 10), (10, 30)]
+        spec_ranged_shapes = []
+        for range_dim in input_spec[0].type.multiArrayType.shapeRange.sizeRanges:
+            spec_ranged_shapes.append(tuple([range_dim.lowerBound, range_dim.upperBound]))
+        assert spec_default_shape == [1, 3, 10, 20], "Expected default shape to be [1, 3, 10, 20], got {} instead".format(str(spec_default_shape))
+        assert spec_ranged_shapes == ranged_shapes, "Ranged shape mismatch"
+
+    def test_mil_ranged_image(self):
+        input_shape = [ct.ImageType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30)))]
+        mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "imageSizeRange", "Expected imageSizeRange in SizeFlexibility"
"imageSizeRange", "Expected imageSizeRange in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 10, "expected [H, W] == [10, 10], got [{}, {}] instead".format(spec_H, spec_W) + + spec_H_range = [input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound] + spec_W_range = [input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound] + assert spec_H_range == [10, 10], "Ranged height mismatch" + assert spec_W_range == [10, 30], "Ranged width mismatch" + + def test_mil_ranged_image_with_default(self): + input_shape = [ct.ImageType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "imageSizeRange", "Expected imageSizeRange in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 20, "expected [H, W] == [10, 20], got [{}, {}] instead".format(spec_H, spec_W) + + spec_H_range = [input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound] + spec_W_range = [input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound] + assert spec_H_range == [10, 10], "Ranged height mismatch" + assert spec_W_range == [10, 30], "Ranged width mismatch" + +class TestMILDefaultValues: + @mb.program( + input_specs = [ + mb.TensorSpec(shape=[1]), + mb.TensorSpec(shape=[1]) + ]) + def basic_network(x, y): + return mb.add(x=x, y=y, name="output") + + def test_mil_default_value_to_proto(self): + program_input_spec = [ct.TensorType(name="x", shape=[1], default_value=np.array([1.0]).astype(np.float32)), ct.TensorType(name="y", shape=[1])] + mlmodel = ct.convert(self.basic_network, convert_to="mlprogram", inputs=program_input_spec) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 2, "2 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue") == "floatDefaultValue", "Expected floatDefaultValue, got {} instead".format(input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue")) + assert input_spec[0].type.multiArrayType.floatDefaultValue == 1.0 + + def test_mil_default_value_runtime(self): + program_input_spec = [ct.TensorType(name="x", shape=[1], default_value=np.array([1.0]).astype(np.float32)), ct.TensorType(name="y", shape=[1])] + mlmodel = ct.convert(self.basic_network, convert_to="mlprogram", 
inputs=program_input_spec) + + if _macos_version() < (12, 0): + # Can only get predictions for ml program on macOS 12+ + return + + res = mlmodel.predict({"x": np.array([3.]), "y": np.array([2.])}) + assert res["output"][0] == 5.0 + + res = mlmodel.predict({"y": np.array([2.])}) + assert res["output"][0] == 3.0 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py new file mode 100644 index 00000000..a2447782 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py @@ -0,0 +1,313 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import coremltools as ct +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier +from coremltools.converters.mil.input_types import ( + ColorLayout, + EnumeratedShapes, + ImageType, + RangeDim, + Shape, +) +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, any_variadic, is_symbolic +from coremltools.models import MLModel +from coremltools.models import neural_network as neural_network +from coremltools.models.datatypes import Array +from coremltools.models.neural_network import flexible_shape_utils +from coremltools.models.neural_network.flexible_shape_utils import ( + add_enumerated_image_sizes, + add_multiarray_ndshape_enumeration, + set_multiarray_ndshape_range, +) + +from ..backend_helper import _get_colorspace_enum, _validate_image_input_output_shapes +from .op_mapping import convert_ops + + +def _convert_to_image_input(proto, inputs, skip_model_load=False): + tmp_model = MLModel(proto, skip_model_load=skip_model_load) + for input_type in inputs: + if isinstance(input_type, ImageType): + if input_type.color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + gray_bias = input_type.bias + red_bias, green_bias, blue_bias = 0.0, 0.0, 0.0 + elif input_type.color_layout == ColorLayout.RGB: + gray_bias = 0.0 + red_bias, green_bias, blue_bias = input_type.bias + elif input_type.color_layout == ColorLayout.BGR: + gray_bias = 0.0 + blue_bias, green_bias, red_bias = input_type.bias + tmp_model = neural_network.utils.make_image_input( + tmp_model, + input_type.name, + is_bgr=input_type.color_layout == ColorLayout.BGR, + image_format="NCHW" if input_type.channel_first else "NHWC", + red_bias=red_bias, + green_bias=green_bias, + blue_bias=blue_bias, + gray_bias=gray_bias, + scale=input_type.scale, + ) + return tmp_model.get_spec() + + +def _convert_to_classifier(proto, classifier_config, skip_model_load=False): + tmp_model = MLModel(proto, 
skip_model_load=skip_model_load) + tmp_model = neural_network.utils.make_nn_classifier( + tmp_model, + classifier_config.class_labels, + classifier_config.predicted_feature_name, + classifier_config.predicted_probabilities_output, + ) + return tmp_model.get_spec() + + +def _set_user_inputs(proto, inputs): + for input_type in inputs: + shape = input_type.shape + if isinstance(shape, EnumeratedShapes): + if isinstance(input_type, ImageType): + default_height, default_width = 0, 0 + for inp in proto.description.input: + if inp.name == input_type.name: + default_height = inp.type.imageType.height + default_width = inp.type.imageType.width + break + image_sizes = [] + if input_type.channel_first: + for s in shape.shapes: + if s.shape[-2] == default_height and s.shape[-1] == default_width: + continue + image_sizes.append( + flexible_shape_utils.NeuralNetworkImageSize( + height=s.shape[-2], width=s.shape[-1] + ) + ) + else: + for s in shape.shapes: + if s.shape[-3] == default_height and s.shape[-2] == default_width: + continue + image_sizes.append( + flexible_shape_utils.NeuralNetworkImageSize( + height=s.shape[-3], width=s.shape[-2] + ) + ) + add_enumerated_image_sizes( + proto, input_type.name, sizes=image_sizes + ) + else: + add_multiarray_ndshape_enumeration( + proto, input_type.name, [tuple(s.shape) for s in shape.shapes] + ) + elif isinstance(shape, Shape): + shape = shape.shape # This is shape in Shape + if all( + [ + not isinstance(s, RangeDim) and not is_symbolic(s) and s > 0 + for s in shape + ] + ): + continue + if isinstance(input_type, ImageType): + img_range = flexible_shape_utils.NeuralNetworkImageSizeRange() + if input_type.channel_first: + H = shape[-2] + W = shape[-1] + else: + H = shape[-3] + W = shape[-2] + + if isinstance(H, RangeDim): + img_range.add_height_range((H.lower_bound, H.upper_bound)) + elif is_symbolic(H): + img_range.add_height_range((1, -1)) + else: + img_range.add_height_range((H, H)) + if isinstance(W, RangeDim): + img_range.add_width_range((W.lower_bound, W.upper_bound)) + elif is_symbolic(W): + img_range.add_width_range((1, -1)) + else: + img_range.add_width_range((W, W)) + + flexible_shape_utils.update_image_size_range( + proto, input_type.name, img_range + ) + else: + lb = [] + ub = [] + for s in shape: + if isinstance(s, RangeDim): + lb.append(s.lower_bound) + ub.append(s.upper_bound) + elif is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + proto, input_type.name, lower_bounds=lb, upper_bounds=ub + ) + + +def _set_symbolic_inputs(proto, symbolic_inputs): + # Set symbolic input shapes by -1 infered from graph + for input_name, shape in symbolic_inputs.items(): + lb = [1 if is_symbolic(d) else d for d in shape] + ub = [-1 if is_symbolic(d) else d for d in shape] + set_multiarray_ndshape_range( + proto, input_name, lower_bounds=lb, upper_bounds=ub + ) + +def _set_optional_inputs(proto, input_types): + # Set default values for optional input_types + default_map = {} + for input_type in input_types: + if isinstance(input_type, ImageType): + continue + if input_type.default_value is not None: + default_map[input_type.name] = input_type.default_value + + for idx, input in enumerate(proto.description.input): + name = proto.description.input[idx].name + if name in default_map: + default_value = default_map[name] + proto.description.input[idx].type.isOptional = True + array_t = proto.description.input[idx].type.multiArrayType + default_fill_val = default_value.flatten()[0] + 
array_t.floatDefaultValue = default_fill_val + if default_fill_val != 0 or list(default_value.shape) != \ + array_t.shape: + # promote spec version to 5 and set the default value + proto.specificationVersion = max(proto.specificationVersion, + ct._SPECIFICATION_VERSION_IOS_14) + # array_t.shape is not empty. + array_t.ClearField('shape') + array_t.shape.extend(list(default_value.shape)) + + +@_profile +def load(prog, **kwargs): + if "main" not in prog.functions: + msg = "main function not found in program {}" + raise ValueError(msg.format(prog)) + if len(prog.functions) != 1: + msg = ( + "Program must have exactly one `main` function to " + "convert to NN. Program: {}" + ) + raise ValueError(msg.format(prog)) + + input_types = prog.main_input_types + output_types = prog.main_output_types + + v1_inputs = [] + symbolic_inputs = {} + for name, var in prog.functions["main"].inputs.items(): + if types.is_tensor(var.sym_type): + sym_shape = var.sym_type.get_shape() + if any_variadic(sym_shape): + raise NotImplementedError("Variadic rank is not supported") + if any_symbolic(sym_shape): + user_specified = False + for input_type in input_types: + if name == input_type.name: + sym_shape = input_type.shape.default + user_specified = True + break + # Use dummy static shape, and will set it later. + shape = [1 if is_symbolic(d) else d for d in sym_shape] + if not user_specified: + symbolic_inputs[name] = sym_shape + else: + shape = sym_shape + v1_inputs.append((name, Array(*shape))) + elif types.is_scalar(var.sym_type): + v1_inputs.append((name, Array(1))) + else: + raise NotImplementedError() + + v1_outputs = [] + for var in prog.functions["main"].outputs: + if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type): + # Disregard the output types + v1_outputs.append((var.name, None)) + else: + raise NotImplementedError() + + # create neural network builder + builder = neural_network.NeuralNetworkBuilder( + v1_inputs, + v1_outputs, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + + # const in V2 are added lazily to V1 by each op whenever needed. + # `const_context` stores the const names we've added so far and avoid + # adding a const more than once. + # const_context: list[set of str] (const name for v1 & v2 + # (the same)). Note that in NN in outer layer is visible from the inner + # layer, so the const_context is simply a stack of set. 
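+    # E.g. while converting the body of a cond op the stack could look like
+    # [{"w1", "b1"}, {"w2"}] (illustrative names); add_const() checks every
+    # set on the stack, so "w1" from the outer block stays visible inside the
+    # inner block and is never added twice.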
+    const_context = []
+    # Iterate through ops and add to builder
+    convert_ops(
+        const_context,
+        builder,
+        prog.functions["main"].operations,
+        prog.functions["main"].outputs,
+    )
+
+    proto = builder.spec
+    # image input
+    has_image_input = any([isinstance(s, ImageType) for s in input_types])
+    if has_image_input:
+        proto = _convert_to_image_input(proto, input_types,
+                                        skip_model_load=kwargs.get("skip_model_load", False))
+
+    # image output
+    if output_types is not None:
+        assert len(output_types) == len(prog.functions["main"].outputs), \
+            "number of mil program outputs does not match the number of outputs provided by the user"
+        for i, output_proto_desc in enumerate(proto.description.output):
+            output_var = prog.functions["main"].outputs[i]
+            if isinstance(output_types[i], ImageType):
+                if not types.is_tensor(output_var.sym_type):
+                    raise ValueError("Image output, '{}', is a scalar, but it should be a tensor of rank 4".format(
+                        output_var.name))
+                shape = output_var.sym_type.get_shape()
+                if any_variadic(shape):
+                    raise ValueError("Variable rank model outputs, that are ImageTypes, are not supported")
+                if any([is_symbolic(d) for d in shape]):
+                    raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
+                                              format(output_var.name))
+                _validate_image_input_output_shapes(output_types[i].color_layout, shape, output_var.name, is_input=False)
+                clr_space = _get_colorspace_enum(output_types[i].color_layout)
+                output_proto_desc.type.imageType.colorSpace = clr_space
+                output_proto_desc.type.imageType.width = shape[-1]
+                output_proto_desc.type.imageType.height = shape[-2]
+
+    # classifier flag
+    classifier_config = kwargs.get("classifier_config", None)
+    if classifier_config is not None:
+        # Verify classifier_config.predicted_probabilities_output, if it exists;
+        # if it is empty/None, fill it with the last non-const op's output.
+        # This is done in "_get_probability_var_for_classifier()".
+        probability_var = _get_probability_var_for_classifier(prog, classifier_config)
+        if classifier_config.predicted_probabilities_output != probability_var.name:
+            classifier_config.predicted_probabilities_output = probability_var.name
+        # add classifier related fields to the proto spec
+        proto = _convert_to_classifier(proto, classifier_config,
+                                       skip_model_load=kwargs.get("skip_model_load", False))
+
+    _set_user_inputs(proto, input_types)
+    _set_symbolic_inputs(proto, symbolic_inputs)
+    _set_optional_inputs(proto, input_types)
+
+    return proto
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py
new file mode 100644
index 00000000..f892583c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +MIL_TO_NN_MAPPING_REGISTRY = {} + +def register_mil_to_nn_mapping(func=None, override=False): + def func_wrapper(_func): + f_name = _func.__name__ + if not override and f_name in MIL_TO_NN_MAPPING_REGISTRY: + raise ValueError("MIL to NN mapping for MIL op {} is already registered.".format(f_name)) + MIL_TO_NN_MAPPING_REGISTRY[f_name] = _func + return _func + + if func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(func) \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py new file mode 100644 index 00000000..f2778f73 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py @@ -0,0 +1,3837 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + is_symbolic, + is_variadic) +from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type +from coremltools.models import neural_network as neural_network +from coremltools.models.neural_network.quantization_utils import \ + _convert_array_to_nbit_quantized_bytes +from coremltools.proto import NeuralNetwork_pb2 + +from .mil_to_nn_mapping_registry import (MIL_TO_NN_MAPPING_REGISTRY, + register_mil_to_nn_mapping) + + +def convert_ops(const_context, builder, ops, outputs): + """ + const_context: list[set of str]: const name for v1 & v2 (the same) + builder: neural_network.NeuralNetworkBuilder + ops: list[Operation], usually from Block.operations. + outputs: list[Var]. block outputs + """ + + const_context.append(set()) + custom_ops = SSAOpRegistry.custom_ops + for op in _tqdm(ops, desc="Translating MIL ==> NeuralNetwork Ops", unit=" ops"): + if op.op_type in custom_ops: + mapper = MIL_TO_NN_MAPPING_REGISTRY["custom_op"] + elif op.op_type in MIL_TO_NN_MAPPING_REGISTRY: + mapper = MIL_TO_NN_MAPPING_REGISTRY[op.op_type] + else: + msg = ("Op {} is used in the source model. This op is not supported " + "by the NeuralNetwork (compatibility with MacOS < 12, iOS < 15) model " + "type. To successfully convert this model, convert to the ML Program " + "model type (minimum target MacOS 12, iOS 15 and later).\n" + "Use coremltools.convert(..., convert_to=\"mlprogram\") to convert to ML Program.\n" + "block: {}") + raise NotImplementedError(msg.format(op.op_type, op.enclosing_block)) + # const is globally shared in nn. + mapper(const_context, builder, op) + + for ov in outputs: + # If block return value is a const, we need to add it. + if ov.op is None: + continue # placeholder + if ov.op.op_type == "const": + add_const(const_context, builder, ov.name, ov.val) + const_context.pop() + + +def make_input(const_context, builder, variables): + """ + Ensure that variables, if const, are added to builder. + + variables: list[Var] or Var or str. 
Inputs for an nn layer. + + Returns: + list[str] or str: variables' names. + """ + if isinstance(variables, (list, tuple)): + return [make_input(const_context, builder, v) for v in variables] + if isinstance(variables, str): + return variables + + v = variables # variables is Var + if v.op is not None and v.op.op_type == "const" and v.name not in const_context[-1]: + add_const(const_context, builder, v.name, v.val) + return v.name + + +def _convert_pool(const_context, builder, op, mode, exclude_padding_from_average=True): + num_spatial_dimensions = len(op.kernel_sizes.val) + op_pad = op.pad.val if op.pad_type.val == 'custom' \ + else [0] * num_spatial_dimensions * 2 + + padding_type = op.pad_type.val.upper() + same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY" + if padding_type == "SAME_LOWER": + if num_spatial_dimensions == 3: + msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for 3d pooling." + raise ValueError(msg) + padding_type = "SAME" + same_padding_asymmetry_mode = "TOP_LEFT_HEAVY" + + if num_spatial_dimensions == 1: + builder.add_expand_dims( + name=op.name + "_expanded", + input_name=op.x.name, + output_name=op.name + "_expanded", + axes=[-2], + ) + # nn's add_pool function does not support CUSTOM padding, + # but VALID padding supports user-defined padding amounts. + # Therefore we map CUSTOM padding to VALID padding. + padding_type = "VALID" if padding_type == "CUSTOM" else padding_type + builder.add_pooling( + name=op.name, + height=1, + width=op.kernel_sizes.val[-1], + stride_height=1, + stride_width=op.strides.val[-1], + layer_type=mode.upper(), + padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type, + input_name=make_input(const_context, builder, op.name + "_expanded"), + output_name=op.name + "_pool", + exclude_pad_area=exclude_padding_from_average, + padding_top=0, + padding_bottom=0, + padding_left=op_pad[0], + padding_right=op_pad[1], + is_global=False, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + ) + builder.add_squeeze( + name=op.name + "_squeeze", + input_name=op.name + "_pool", + output_name=op.outputs[0].name, + axes=[-2], + ) + elif num_spatial_dimensions == 2: + # nn's add_pool function does not support CUSTOM padding, + # but VALID padding supports user-defined padding amounts. + # Therefore we map CUSTOM padding to VALID padding. 
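+        # E.g. a custom pad of (top, bottom, left, right) = (1, 1, 2, 2) is
+        # emitted as padding_type="VALID" together with padding_top=1,
+        # padding_bottom=1, padding_left=2, padding_right=2, which spells the
+        # same geometry in add_pooling's vocabulary.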
+ padding_type = "VALID" if padding_type == "CUSTOM" else padding_type + builder.add_pooling( + name=op.name, + height=op.kernel_sizes.val[-2], + width=op.kernel_sizes.val[-1], + stride_height=op.strides.val[-2], + stride_width=op.strides.val[-1], + layer_type=mode.upper(), + padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + exclude_pad_area=exclude_padding_from_average, + padding_top=op_pad[0], + padding_bottom=op_pad[1], + padding_left=op_pad[2], + padding_right=op_pad[3], + is_global=False, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + ) + elif num_spatial_dimensions == 3: + builder.add_pooling3d( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + pooling_type=mode.upper(), + kernel_depth=op.kernel_sizes.val[-3], + kernel_height=op.kernel_sizes.val[-2], + kernel_width=op.kernel_sizes.val[-1], + stride_depth=op.strides.val[-3], + stride_height=op.strides.val[-2], + stride_width=op.strides.val[-1], + padding_mode=op.pad_type.val, + custom_padding_front=op_pad[0], + custom_padding_back=op_pad[1], + custom_padding_top=op_pad[2], + custom_padding_bottom=op_pad[3], + custom_padding_left=op_pad[4], + custom_padding_right=op_pad[5], + average_pooling_count_excludes_padding=exclude_padding_from_average, + ) + else: + raise ValueError( + "Unsupported number of spatial dimensions. Maximum is 3, but got %s" + % num_spatial_dimensions + ) + + +def _try_convert_global_pool(const_context, builder, op, mode): + """ + Optional performance optimization pass that tries to lower spatial + reduce_mean / reduce_max to global_avg_pool / global_max_pool. + Return True if the lowering happened, otherwise return False to + continue as normal reduction op. + """ + rank = op.x.rank + if is_variadic(rank) or rank not in {4, 5}: + return False + keep_dims = op.keep_dims.val + if keep_dims is False: + return False + + axes = None + if op.axes is not None and op.axes.val is not None: + axes = op.axes.val + else: + axes = list(range(rank)) + + if tuple(op.outputs[0].shape[:-2]) != tuple(op.inputs["x"].shape[:-2]): + return False + if not all([s == 1 for s in op.outputs[0].shape[-2:]]): + return False + + builder.add_pooling( + name=op.name, + height=0, + width=0, + stride_height=0, + stride_width=0, + layer_type=mode.upper(), + padding_type="valid".upper(), + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + is_global=True, + ) + return True + + +def add_const(const_context, builder, name, val): + """ + const_context (list of set of str): const names added to v1 builder. Const names are + identical between v2 and v1 + + name (str): name of const. Should be the same for v1 and v2. + val: np.ndarray + + No return values as `name` is the name of const in v1. + + Comment: we don't need to add scalar const as they are just fields in + layer proto message in NN. + If we really need a const scalar, we upcast it to rank-1. + + """ + for const_set in const_context: + if name in const_set: + logger.warning("Const {} was already added.".format(name)) + return + if not isinstance(val, (_np.ndarray, _np.generic)): + val = _np.array([val]) + if val.dtype != float: + # nn proto only supports float32 activation. 
(e.g., pred in cond op + # needs to be converted to float) + val = val.astype(float) + rank = len(val.shape) + if rank == 0: + builder.add_load_constant_nd( + name=name, output_name=name, constant_value=val.reshape([1]), shape=[1] + ) + else: + builder.add_load_constant_nd( + name=name, output_name=name, constant_value=val, shape=val.shape + ) + const_context[-1].add(name) + logger.info("added const {} for builder {}".format(name, builder)) + + +# Helper routines for recurrent layers +def _expand_dim(builder, node_name, input_name, axes): + builder.add_expand_dims( + name=node_name, input_name=input_name, output_name=node_name, axes=axes + ) + + +def _squeeze(builder, node_name, input_name, axes): + builder.add_squeeze( + name=node_name, input_name=input_name, output_name=node_name, axes=axes + ) + + +def _split(x, sections, axis=0): + if x is None: + return None + if x.shape[axis] % sections != 0: + raise ValueError( + "Cannot split axis {} into {} sections for input of shape {}".format( + axis, sections, x.shape + ) + ) + return _np.split(x, sections, axis=axis) + + +@register_mil_to_nn_mapping +def avg_pool(const_context, builder, op): + _convert_pool( + const_context=const_context, + builder=builder, + op=op, + mode="average", + exclude_padding_from_average=op.exclude_padding_from_average.val, + ) + + +@register_mil_to_nn_mapping +def band_part(const_context, builder, op): + builder.add_matrix_band_part( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + num_lower=op.lower.val, + num_upper=op.upper.val, + ) + + +@register_mil_to_nn_mapping +def batch_norm(const_context, builder, op): + channels = op.x.shape[1] + gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val + beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val + + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + is_batchnorm_1d = op.x.rank == 3 + is_batchnorm_2d = op.x.rank == 4 + is_batchnorm_3d = op.x.rank == 5 + + if is_batchnorm_1d: + x_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + out_name += "_batch_norm" + + if is_batchnorm_1d or is_batchnorm_2d: + # batch norm 1d / 2d + builder.add_batchnorm( + name=op.name, + channels=channels, + gamma=gamma, + beta=beta, + mean=op.mean.val, + variance=op.variance.val, + input_name=x_name, + output_name=out_name, + compute_mean_var=False, + instance_normalization=False, + epsilon=op.epsilon.val, + ) + elif is_batchnorm_3d: + # batch norm 3d + batch_size, channel, height, width, depth = op.x.shape + assert not is_symbolic(channel), "Channel dimension must be known for batchnorm layer." 
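+        # Both branches below compute
+        #     y = gamma * (x - mean) / sqrt(var + eps) + beta
+        # broadcast over the channel axis: with more than one symbolic
+        # dimension it is expanded into broadcastable add/mul layers with
+        # compile-time folded constants, otherwise the tensor is reshaped to
+        # 4-D so a single batchnorm layer can be used.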
+ symbolic_num = sum([is_symbolic(x) for x in op.x.shape]) + + if symbolic_num > 1: + gamma_expand = _np.expand_dims(gamma, axis=(0, 2, 3, 4)) + beta_expand = _np.expand_dims(beta, axis=(0, 2, 3, 4)) + mean_expand = _np.expand_dims(op.mean.val, axis=(0, 2, 3, 4)) + var_expand = _np.expand_dims(op.variance.val, axis=(0, 2, 3, 4)) + + # compute batch norm 3d by decomposing it into elementwise operations + negative_mean_name = op.name + "_negative_mean" + add_const(const_context, builder, negative_mean_name, -mean_expand) + + numerator_name = op.name + "_numerator" + builder.add_add_broadcastable( + name=numerator_name, + input_names=[x_name, negative_mean_name], + output_name=numerator_name, + ) + + var_expand = var_expand + op.epsilon.val + denominator = _np.sqrt(var_expand) + gamma_expand = gamma_expand / denominator + gamma_name = op.name + "_gamma" + add_const(const_context, builder, gamma_name, gamma_expand) + + mul_name = op.name + "_mul" + builder.add_multiply_broadcastable( + name=mul_name, + input_names=[numerator_name, gamma_name], + output_name=mul_name, + ) + + beta_name = op.name + "_beta" + add_const(const_context, builder, beta_name, beta_expand) + + builder.add_add_broadcastable( + name=out_name, + input_names=[mul_name, beta_name], + output_name=out_name, + ) + else: + is_batch_symbloic = is_symbolic(batch_size) + is_height_symbolic = is_symbolic(height) + is_width_symbolic = is_symbolic(width) + is_depth_symbolic = is_symbolic(depth) + + if is_batch_symbloic: + shape1 = [-1, channel, height * width, depth] + shape2 = [-1, channel, height, width, depth] + + elif is_height_symbolic: + shape1 = [batch_size, channel, -1, width*depth] + shape2 = [batch_size, channel, -1, width, depth] + + elif is_width_symbolic: + shape1 = [batch_size, channel, -1, height*depth] + shape2 = [batch_size, channel, height, -1, depth] + + elif is_depth_symbolic: + shape1 = [batch_size, channel, height * width, -1] + shape2 = [batch_size, channel, height, width, -1] + + else: + shape1 = [batch_size, channel, height*width, depth] + shape2 = [batch_size, channel, height, width, depth] + + reshape_4d_name = op.name + "_reshape_4d" + builder.add_reshape_static( + name=reshape_4d_name, + input_name=x_name, + output_name=reshape_4d_name, + output_shape=shape1, + ) + + batchnorm_name = op.name + "_batchnorm_4d" + builder.add_batchnorm( + name=batchnorm_name, + channels=channels, + gamma=gamma, + beta=beta, + mean=op.mean.val, + variance=op.variance.val, + input_name=reshape_4d_name, + output_name=batchnorm_name, + compute_mean_var=False, + instance_normalization=False, + epsilon=op.epsilon.val, + ) + + builder.add_reshape_static( + name=out_name, + input_name=batchnorm_name, + output_name=out_name, + output_shape=shape2, + ) + + # Squeeze added `Width` dimension for 1d case + if is_batchnorm_1d: + x_name = op.name + "_squeeze" + builder.add_squeeze( + name=x_name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + +@register_mil_to_nn_mapping +def const(const_context, builder, op): + # const in V2 are added to V1 lazily. 
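+    # They are materialized on demand via make_input()/add_const() at the
+    # first op that consumes them, so this mapping is intentionally a no-op.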
+ pass + + +def conv_helper(const_context, builder, op): + # v2 x: (n, C_in/groups, spatial_dims) + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + is_conv1d = op.x.rank == 3 + is_conv2d = op.x.rank == 4 + is_conv3d = op.x.rank == 5 + if not (is_conv1d or is_conv2d or is_conv3d): + raise ValueError( + "Input tensor rank '{}' is not one of '{}'.".format(op.x.rank, (3, 4, 5),) + ) + if is_conv1d: + x_name = op.name + "_expand_dim" + out_name += "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + # `x_name` is guaranteed to be (n, C_in/groups, spatial_dims) for 1D and 2D convolution + # W_v1 wil be np.ndarray (if W is const at compile time) or None + # (if W is not known at compile time). + weights = None + input_names = [x_name] + if op.weight.val is not None: + # v2 convolution (conv3d) expects weights to have shape (C_out, C_in/groups, spatial_dims) + # v1 convolution expects (H, W, C_in/groups, C_out) or (D, H, W, C_in/groups, C_out) + weights = op.weight.val + if is_conv1d: + weights = _np.expand_dims(op.weight.val, -2) + if is_conv1d or is_conv2d: + weights = _np.transpose(weights, [2, 3, 1, 0]) + else: + # op.weight is not const at compile time. + # When weight is dynamic, v1 convolution expects weight to be + # (C_out, C_in/groups, H, W) + # TODO 3D convolution doesn't support dynamic weights: + if is_conv3d: + raise ValueError("3D Convolution doesn't support dynamic weights.") + weights_name = op.weight.name + if is_conv1d: + weights_name += "_expand_dim" + builder.add_expand_dims( + name=weights_name, + input_name=op.weight.name, + output_name=weights_name, + axes=[-2], + ) + input_names.append(weights_name) + + # padding + padding_mode = op.pad_type.val + pad = {} + if padding_mode == "custom": + if is_conv1d: + padding_mode = "valid" + pad["padding_top"] = 0 + pad["padding_bottom"] = 0 + pad["padding_left"] = op.pad.val[0] + pad["padding_right"] = op.pad.val[1] + elif is_conv2d: + padding_mode = "valid" + pad["padding_top"] = op.pad.val[0] + pad["padding_bottom"] = op.pad.val[1] + pad["padding_left"] = op.pad.val[2] + pad["padding_right"] = op.pad.val[3] + else: + pad["padding_front"] = op.pad.val[0] + pad["padding_back"] = op.pad.val[1] + pad["padding_top"] = op.pad.val[2] + pad["padding_bottom"] = op.pad.val[3] + pad["padding_left"] = op.pad.val[4] + pad["padding_right"] = op.pad.val[5] + + same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY" + if padding_mode == "same_lower": + if is_conv3d: + msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for conv 3d." + raise ValueError(msg) + padding_mode = "same" + same_padding_asymmetry_mode = "TOP_LEFT_HEAVY" + + has_bias = op.bias is not None + groups = op.groups.val + + strides = op.strides.val.tolist() + dilations = op.dilations.val.tolist() + if is_conv1d: + dilations = dilations[:-1] + [1] + dilations[-1:] + strides = strides[:-1] + [1] + strides[-1:] + + if weights is not None and op.op_type == "conv_quantized": + nbits = op.nbits.val + weights = _convert_array_to_nbit_quantized_bytes(weights.flatten(), nbits).tobytes() + quantization_type = op.quantization_type.val + quant_bias = op.quant_bias.val + quant_scale = op.quant_scale.val + else: + quantization_type = None + nbits = None + quant_bias = None + quant_scale = None + + if is_conv1d or is_conv2d: + if weights is None and has_bias: + # weights are dyanmic. 
+ # In this case, bias, if present, cannot be part of the conv op + # it needs to be added separately via an add op + out_name += "_without_bias" + + if weights is None and groups > 1: + raise NotImplementedError("Convolution with dynamic weights and groups > 1 is not supported on the " + "neuralnetwork backend. Please use the mlprogram backend " + "(convert_to=\"mlprogram\")") + + builder.add_convolution( + name=out_name, + kernel_channels=op.weight.shape[1], + output_channels=op.weight.shape[0], + height= 1 if is_conv1d else op.weight.shape[2], + width= op.weight.shape[2] if is_conv1d else op.weight.shape[3], + stride_height=strides[0], + stride_width=strides[1], + border_mode=padding_mode, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + groups=groups, + W=weights, + b=op.bias.val if has_bias and weights is not None else None, + has_bias=has_bias if weights is not None else False, + is_deconv=False, + input_name=input_names, + output_name=out_name, + dilation_factors=dilations, + quantization_type=quantization_type, + nbits=nbits, + quant_bias=quant_bias, + quant_scale=quant_scale, + **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad` + ) + + # add bias if weights are dynamic + if weights is None and has_bias: + Cout = op.weight.shape[0] + assert op.bias.val.size == Cout, \ + "size of bias for convolution must be same as the number of output channels" + builder.add_load_constant_nd( + name=op.name + '_constant_bias', output_name=op.name + "_constant_bias", + constant_value=op.bias.val.reshape((Cout, 1, 1)), shape=(Cout, 1, 1) + ) + add_op_output_name = op.name + "_with_bias" if is_conv1d else op.outputs[0].name + builder.add_add_broadcastable( + name=add_op_output_name, + input_names=[out_name, op.name + "_constant_bias"], + output_name=add_op_output_name, + ) + if is_conv1d: + out_name = add_op_output_name + + # Squeeze added `Width` dimension for 1d case + if is_conv1d: + x_name = op.name + "expand_dim" + builder.add_squeeze( + name=op.name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + if is_conv3d: + builder.add_convolution3d( + name=op.name, + input_channels=op.weight.shape[1] * groups, + output_channels=op.weight.shape[0], + depth=op.weight.shape[2], + height=op.weight.shape[3], + width=op.weight.shape[4], + W=op.weight.val, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + groups=groups, + stride_depth=strides[0], + stride_height=strides[1], + stride_width=strides[2], + dilation_depth=dilations[0], + dilation_height=dilations[1], + dilation_width=dilations[2], + padding_mode=padding_mode, + is_deconv=False, + output_shape=None, + input_name=input_names, + output_name=out_name, + **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad` + ) + +@register_mil_to_nn_mapping +def conv(const_context, builder, op): + conv_helper(const_context, builder, op) + + +@register_mil_to_nn_mapping() +def conv_quantized(const_context, builder, op): + conv_helper(const_context, builder, op) + + +@register_mil_to_nn_mapping +def cumsum(const_context, builder, op): + input_names = make_input(const_context, builder, [op.x]) + builder.add_cumsum( + name=op.name, + input_names=input_names, + output_name=op.outputs[0].name, + axis=op.axis.val, + reverse=op.reverse.val, + exclusive=op.exclusive.val, + ) + + +def _add_elementwise_unary( + const_context, builder, op, mode, output_name=None, **kwargs +): + output_name = output_name if output_name else op.outputs[0].name + name = 
output_name if output_name else op.name + if mode in ["sqrt", "rsqrt", "inverse", "power", "exp", "log", "abs", "threshold"]: + builder.add_unary( + name=name, + input_name=make_input(const_context, builder, op.x), + output_name=output_name, + mode=mode, + **kwargs + ) + else: + add_func = getattr(builder, "add_" + mode, None) + if add_func is None: + logger.error( + "Elementwise unary method {} not found in builder.".format(mode) + ) + add_func( + name=name, + input_name=make_input(const_context, builder, op.x), + output_name=output_name, + **kwargs + ) + + +def _add_elementwise_binary( + const_context, builder, op, mode, output_name=None, **kwargs +): + output_name = output_name if output_name else op.outputs[0].name + name = output_name if output_name else op.name + if mode in ["add", "multiply"]: + params = {"name": name, "output_name": output_name, "mode": mode.upper()} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + builder.add_elementwise(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + builder.add_elementwise(**params) + return + elif mode in ["equal", "not_equal"]: + add_func = getattr(builder, "add_" + mode, None) + params = {"name": name, "output_name": output_name} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + add_func(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + add_func(**params) + return + elif mode in ["greater_than", "greater_equal", "less_than", "less_equal"]: + params = {"name": name, "output_name": output_name} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + if "less" in mode: + params["use_greater_than_equal"] = mode.endswith("_equal") + builder.add_greater_than(**params) + elif "greater" in mode: + params["use_less_than_equal"] = mode.endswith("_equal") + builder.add_less_than(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + if "greater" in mode: + params["use_greater_than_equal"] = mode.endswith("_equal") + builder.add_greater_than(**params) + elif "less" in mode: + params["use_less_than_equal"] = mode.endswith("_equal") + builder.add_less_than(**params) + return + + if op.x.can_be_folded_to_const(): + add_const(const_context, builder, 
op.x.name, op.x.val) + if op.y.can_be_folded_to_const(): + if mode == "pow": + _add_elementwise_unary( + const_context, + builder, + op, + "power", + output_name=output_name, + alpha=op.y.val, + ) + return + add_const(const_context, builder, op.y.name, op.y.val) + + if mode in {"add", "multiply", "max", "min"} and op.x.shape == op.y.shape: + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + mode=mode.upper(), + ) + return + + # the broadcast feature in the elementwise layer is hardcoded to 4D or less + # for the 5d tensor, we need to use broadcasable layers instead. + if mode in {"add", "multiply", "subtract"} and op.x.rank < 5 and op.y.rank < 5: + shape_x = _np.array([1] * (5 - op.x.rank) + list(op.x.shape)) + shape_y = _np.array([1] * (5 - op.y.rank) + list(op.y.shape)) + + internal_x = internal_y = None + if all(shape_x == 1): + internal_y = op.x + internal_x = op.y + elif all(shape_y == 1): + internal_x = op.x + internal_y = op.y + + for indices in ([1], [2], [3, 4], [2, 3, 4], [1, 2, 3, 4]): + if indices == [1, 2, 3, 4] and mode == "multiply": + # INTERNAL_MUL_XYKN not implemented + continue + if all(shape_x[indices] == shape_y[indices]): + if all([True if i in indices else s == 1 for i, s in enumerate(shape_x)]): + internal_y = op.x + internal_x = op.y + break + if all([True if i in indices else s == 1 for i, s in enumerate(shape_y)]): + internal_x = op.x + internal_y = op.y + break + + if internal_x is not None: + if mode in {"add", "multiply"}: + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [internal_x, internal_y]), + output_name=output_name, + mode=mode.upper(), + ) + elif mode == "subtract": + builder.add_activation( + name="_neg_y_" + name, + input_name=make_input(const_context, builder, op.y), + output_name="_neg_y_" + output_name, + non_linearity="LINEAR", + params=[-1, 0]) + if op.x == internal_y: + internal_x = "_neg_y_" + output_name + else: + internal_y = "_neg_y_" + output_name + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [internal_x, internal_y]), + output_name=output_name, + mode="ADD", + ) + return + + if mode in {"add", "multiply", "max", "min"}: + add_func = getattr(builder, "add_" + mode + "_broadcastable", None) + + if add_func is None: + msg = "Element-wise binary method {} not found in builder." + raise ValueError(msg.format(mode)) + + add_func( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + **kwargs + ) + else: + if mode in ["divide", "floor_div", "mod", "pow", "subtract"]: + add_func = getattr(builder, "add_" + mode + "_broadcastable", None) + elif mode == "less_equal": + add_func = builder.add_less_than + kwargs["use_less_than_equal"] = True + elif mode == "greater_equal": + add_func = builder.add_greater_than + kwargs["use_greater_than_equal"] = True + else: + add_func = getattr(builder, "add_" + mode, None) + + if add_func is None: + msg = "Element-wise binary method {} not found in builder." 
+ raise ValueError(msg.format(mode)) + + add_func( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + **kwargs + ) + + +def _add_logical(const_context, builder, op, mode): + input_names = [] + input_names.append(make_input(const_context, builder, op.x)) + if mode != "NOT": + input_names.append(make_input(const_context, builder, op.y)) + + builder.add_logical( + name=op.name, input_names=input_names, output_name=op.outputs[0].name, mode=mode + ) + + +@register_mil_to_nn_mapping +def abs(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "abs") + + +@register_mil_to_nn_mapping +def acos(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "acos") + + +@register_mil_to_nn_mapping +def add(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "add") + + +@register_mil_to_nn_mapping +def asin(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "asin") + + +@register_mil_to_nn_mapping +def atan(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "atan") + + +@register_mil_to_nn_mapping +def atanh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "atanh") + + +@register_mil_to_nn_mapping +def cast(const_context, builder, op): + if op.dtype.val in ["int32", "int64"]: + _add_elementwise_unary( + const_context, builder, op, "floor", output_name=op.name + "_floor" + ) + _add_elementwise_unary( + const_context, builder, op, "ceil", output_name=op.name + "_ceil" + ) + + builder.add_greater_than( + name=op.name + "_cond", + input_names=[make_input(const_context, builder, op.x)], + output_name=op.name + "_cond", + alpha=0.0, + ) + + builder.add_where_broadcastable( + name=op.name, + input_names=[op.name + i for i in ["_cond", "_floor", "_ceil"]], + output_name=op.outputs[0].name, + ) + elif op.dtype.val in ["fp16", "fp32", "fp64"]: + builder.add_activation( + name=op.name, + non_linearity="LINEAR", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[1.0, 0.0], + ) + elif op.dtype.val == "bool": + builder.add_not_equal( + name=op.name, + input_names=op.x.name, + output_name=op.outputs[0].name, + alpha=0.0, + ) + else: + raise NotImplementedError( + "Parameter dtype of the cast operation can be one of the {}. 
" + "Provided {}".format(["int32", "int64", "fp16", "fp32", "fp64"], op.dtype.val) + ) + + +@register_mil_to_nn_mapping +def ceil(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "ceil") + + +@register_mil_to_nn_mapping +def clip(const_context, builder, op): + _add_elementwise_unary( + const_context, + builder, + op, + "clip", + min_value=op.alpha.val, + max_value=op.beta.val, + ) + + +@register_mil_to_nn_mapping +def cos(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "cos") + + +@register_mil_to_nn_mapping +def cosh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "cosh") + +@register_mil_to_nn_mapping +def einsum(const_context, builder, op): + ''' + MIL einsum is either + - (B,C,H,W1) * (B,W1,H,W2) = (B,C,H,W2) + or + - (C,H,W1) * (W1,H,W2) = (C,H,W2) + + Hence to support it, first transpose the 2 inputs, so that the matrices + to be multiplied are on the last 2 axes, + then call bmm, and finally transpose the result again + ''' + rank = op.values[0].rank + perm = [0, 2, 1, 3] if rank == 4 else [1, 0, 2] + input_names = make_input(const_context, builder, op.values) + + output_name_1 = op.name + "_transpose_1" + output_name_2 = op.name + "_transpose_2" + builder.add_transpose(name=op.name + "_transpose_x", + axes=perm, + input_name=input_names[0], + output_name=output_name_1 + ) + builder.add_transpose(name=op.name + "_transpose_y", + axes=perm, + input_name=input_names[1], + output_name=output_name_2 + ) + builder.add_batched_mat_mul( + name=op.name + "_batch_matmul", + input_names=[output_name_1, output_name_2], + output_name=op.outputs[0].name + "_pre_transpose" + ) + builder.add_transpose(name=op.name, + axes=perm, + input_name=op.outputs[0].name + "_pre_transpose", + output_name=op.outputs[0].name + ) + + +@register_mil_to_nn_mapping +def equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "equal") + + +@register_mil_to_nn_mapping +def exp(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "exp") + + +@register_mil_to_nn_mapping +def exp2(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "exp2") + + +@register_mil_to_nn_mapping +def floor(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "floor") + + +@register_mil_to_nn_mapping +def floor_div(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "floor_div") + + +@register_mil_to_nn_mapping +def greater(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "greater_than") + + +@register_mil_to_nn_mapping +def greater_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "greater_equal") + + +@register_mil_to_nn_mapping +def inverse(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "inverse", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def less(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "less_than") + + +@register_mil_to_nn_mapping +def less_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "less_equal") + + +@register_mil_to_nn_mapping +def log(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "log", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def logical_and(const_context, builder, op): + _add_logical(const_context, builder, op, "AND") + + 
+@register_mil_to_nn_mapping +def logical_not(const_context, builder, op): + _add_logical(const_context, builder, op, "NOT") + + +@register_mil_to_nn_mapping +def logical_or(const_context, builder, op): + _add_logical(const_context, builder, op, "OR") + + +@register_mil_to_nn_mapping +def logical_xor(const_context, builder, op): + _add_logical(const_context, builder, op, "XOR") + + +@register_mil_to_nn_mapping +def maximum(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "max") + + +@register_mil_to_nn_mapping +def minimum(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "min") + + +@register_mil_to_nn_mapping +def mod(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "mod") + + +@register_mil_to_nn_mapping +def mul(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "multiply") + + +@register_mil_to_nn_mapping +def not_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "not_equal") + + +@register_mil_to_nn_mapping +def pow(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "pow") + + +@register_mil_to_nn_mapping +def real_div(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "divide") + + +@register_mil_to_nn_mapping +def round(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "round") + + +@register_mil_to_nn_mapping +def rsqrt(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "rsqrt", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def sign(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sign") + + +@register_mil_to_nn_mapping +def sin(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sin") + + +@register_mil_to_nn_mapping +def sinh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sinh") + + +@register_mil_to_nn_mapping +def slice_by_index(const_context, builder, op): + rank = op.x.rank + stride = [1] * rank if op.stride is None else op.stride.val + begin_mask = [False] * rank if op.begin_mask is None else op.begin_mask.val + end_mask = [False] * rank if op.end_mask is None else op.end_mask.val + squeeze_mask = [False] * rank if op.squeeze_mask is None else op.squeeze_mask.val + + if op.begin.val is not None and op.end.val is not None: + + # If only one dimension is sliced, we should use the slice layer instead of static_slice or dynamic_slice + # In general, slice has a better performance. 
+ begin = op.begin.val + end = op.end.val + slice_dim = [] + + for i in range(rank): + if (not begin_mask[i] and begin[i] != 0) or \ + (not end_mask[i] and end[i] != op.x.shape[i]) or \ + stride[i] != 1: + slice_dim.append(i) + + if len(slice_dim) == 1 and not any(squeeze_mask): + dim = slice_dim[0] - rank + if dim in [-3, -2, -1]: + # get the axis, only channel, width, and depth dimension are supported + axis = None + if dim == -1: + axis = "width" + elif dim == -2: + axis = "height" + elif dim == -3: + axis = "channel" + + start_index = 0 if begin_mask[dim] else begin[dim] + end_index = op.x.shape[dim] if end_mask[dim] else end[dim] + shape = op.x.shape + + if not is_symbolic(shape[dim]): + if start_index < 0: + start_index += shape[dim] + + if not is_symbolic(end_index) and start_index >= 0 and stride[dim] >= 1: + builder.add_slice( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=axis, + start_index=start_index, + end_index=end_index, + stride=stride[dim], + ) + return + + # use add_slice_static + builder.add_slice_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + begin_ids=op.begin.val, + end_ids=op.end.val, + strides=np_val_to_py_type(stride), + begin_masks=np_val_to_py_type(begin_mask), + end_masks=np_val_to_py_type(end_mask), + squeeze_masks=np_val_to_py_type(squeeze_mask), + ) + else: + builder.add_slice_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.begin, op.end]), + output_name=op.outputs[0].name, + strides=np_val_to_py_type(stride), + begin_masks=np_val_to_py_type(begin_mask), + end_masks=np_val_to_py_type(end_mask), + squeeze_masks=np_val_to_py_type(squeeze_mask), + ) + + +@register_mil_to_nn_mapping +def slice_by_size(const_context, builder, op): + """ + If the inputs satisfy + 1. op.x has static input shape for those dimension whose size is not -1 + 2. op.begin and op.size are both known during compile time + we use add_slice_static directly + + Otherwise, build a block of ops achieving slice_by_size with dynamic input x and size. 
+ """ + + # The static case + if op.begin.val is not None and op.size.val is not None: + begin = op.begin.val + size = op.size.val + rank = op.x.rank + end = [] + + for i in range(rank): + if size[i] == -1: + end.append(op.x.shape[i]) + else: + end.append(begin[i] + size[i]) + + if not any_symbolic(end): + builder.add_slice_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + begin_ids=begin, + end_ids=end, + strides=[1] * rank, + begin_masks=[False] * rank, + end_masks=[False] * rank, + squeeze_masks=[False] * rank, + ) + return + + # The dynamic case + # get the end_index of input x + # for instance, x with shape [2,3,4] results in [2,3,4] + end_index_name = op.name + "_end_index" + builder.add_get_shape( + name=end_index_name, + input_name=make_input(const_context, builder, op.x), + output_name=end_index_name, + ) + + # get the mask where size = -1 + # for instance, size = [-1,1,2] results in [1,0,0] + const_name = op.name + "_const_name" + add_const(const_context, builder, const_name, _np.array([-1] * op.x.rank)) + + is_end_mask_name = op.name + "_is_end_mask" + builder.add_equal( + name=is_end_mask_name, + input_names=make_input(const_context, builder, [const_name, op.size]), + output_name=is_end_mask_name, + ) + + # get the mask where size != -1 + # for instance, size = [-1,1,2] results in [0,1,1] + is_not_end_mask_name = op.name + "_is_not_end_mask" + builder.add_not_equal( + name=is_not_end_mask_name, + input_names=make_input(const_context, builder, [const_name, op.size]), + output_name=is_not_end_mask_name, + ) + + # get the end index for dimensions i where size[i] = -1 + # for size[i] != -1, just make it 0 + # for instance, x with shape [2,3,4] and size = [-1,1,2] + # results in [2,0,0] + end_index_with_mask_name = op.name + "_end_index_with_mask" + builder.add_elementwise( + name=end_index_with_mask_name, + input_names=[end_index_name, is_end_mask_name], + output_name=end_index_with_mask_name, + mode="MULTIPLY", + ) + + # get the end index for dimension i where size[i] != -1 + # for size[i] = 1, just make it 0 + # for instance, x with shape [2,3,4], size = [-1,1,2], + # begin = [0,1,1] results in [0,2,3] + end_ids = op.name + "_end_ids" + builder.add_elementwise( + name=end_ids, + input_names=make_input(const_context, builder, [op.begin, op.size]), + output_name=end_ids, + mode="ADD", + ) + + end_index_without_mask_name = op.name + "_end_index_without_mask" + builder.add_elementwise( + name=end_index_without_mask_name, + input_names=make_input(const_context, builder, [is_not_end_mask_name, end_ids]), + output_name=end_index_without_mask_name, + mode="MULTIPLY", + ) + + # add two end index array together to get the final index + final_end_index_name = op.name + "_final_index" + builder.add_elementwise( + name=final_end_index_name, + input_names=make_input( + const_context, + builder, + [end_index_with_mask_name, end_index_without_mask_name], + ), + output_name=final_end_index_name, + mode="ADD", + ) + + input_names = make_input( + const_context, builder, [op.x, op.begin, final_end_index_name] + ) + builder.add_slice_dynamic( + name=op.name, input_names=input_names, output_name=op.outputs[0].name + ) + + +@register_mil_to_nn_mapping +def sqrt(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sqrt") + + +@register_mil_to_nn_mapping +def square(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "power", alpha=2.0) + + +@register_mil_to_nn_mapping +def 
sub(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "subtract") + + +@register_mil_to_nn_mapping +def tan(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "tan") + + +@register_mil_to_nn_mapping +def threshold(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "threshold", alpha=op.alpha.val) + + +@register_mil_to_nn_mapping +def depth_to_space(const_context, builder, op): + builder.add_reorganize_data( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode="DEPTH_TO_SPACE", + block_size=op.block_size.val, + ) + + +@register_mil_to_nn_mapping +def expand_dims(const_context, builder, op): + builder.add_expand_dims( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=op.axes.val, + ) + + + +@register_mil_to_nn_mapping +def fill(const_context, builder, op): + if op.shape.val is None: + builder.add_fill_dynamic( + name=op.name, + input_name=make_input(const_context, builder, op.shape), + output_name=op.outputs[0].name, + value=op.value.val, + ) + else: + builder.add_fill_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + value=op.value.val, + ) + + +@register_mil_to_nn_mapping +def random_bernoulli(const_context, builder, op): + if op.shape.val is None: + builder.add_random_bernoulli_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + prob=op.prob.val, + seed=op.seed.val, + ) + else: + builder.add_random_bernoulli_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + prob=op.prob.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_categorical(const_context, builder, op): + builder.add_categorical_distribution( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + num_samples=op.size.val, + is_logits=(op.mode.val == "logits"), + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_normal(const_context, builder, op): + if op.shape.val is None: + builder.add_random_normal_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + mean=op.mean.val, + stddev=op.stddev.val, + seed=op.seed.val, + ) + else: + builder.add_random_normal_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + mean=op.mean.val, + stddev=op.stddev.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_uniform(const_context, builder, op): + if op.shape.val is None: + builder.add_random_uniform_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + minval=op.low.val, + maxval=op.high.val, + seed=op.seed.val, + ) + else: + builder.add_random_uniform_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + minval=op.low.val, + maxval=op.high.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def gru(const_context, builder, op): + make_input(const_context, builder, [op.x, op.initial_h]) + # Input shape: [b, s, I] + input_name = op.x.name + # Shape: [b, H] + initial_h = op.initial_h.name + + weight_ih = op.weight_ih.val + weight_hh = op.weight_hh.val + b = op.bias.val if op.bias is not None else None + direction = op.direction.val + output_sequence = 
op.output_sequence.val + + # Add expand dims for input, in + _expand_dim(builder, input_name + "_expanded", input_name, [3, 4]) + input_name += "_expanded" + + if direction not in {"forward", "reverse"}: + raise ValueError( + "Unknown direction {} for GRU layer. Supported are forward, reverse".format( + direction + ) + ) + + # Expand initial_h + _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4]) + initial_h += "_expanded" + + def roz_to_zro(x): + if x is None: + return None + r, o, z = _split(x, sections=3, axis=0) + return [z, r, o] + + # w_x: [H*I, H*I, H*I] + # w_h: [H*H, H*H, H*H] + # where, format is [Z, R, O] + # Z: Update gate, R: Reset gate, O: Output gate + w_x = roz_to_zro(weight_ih) + w_h = roz_to_zro(weight_hh) + # bias format: [3*H] + b = roz_to_zro(b) + + input_size = w_x[0].shape[1] + hidden_size = w_x[0].shape[0] + + # 2 outputs + # Y : [s/1, b, h, 1, 1] + # Y_h: [ 1, b, h, 1, 1] + output_names = [_output.name + "_5d" for _output in op.outputs] + builder.add_gru( + name=op.name, + W_h=w_h, + W_x=w_x, + b=b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[input_name, initial_h], + output_names=output_names, + inner_activation=op.recurrent_activation.val, + activation=op.activation.val, + output_all=output_sequence, + reverse_input=(direction == "reverse"), + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4]) + # Squeeze Output H and Output C + # to output shape of [Batch Size, Hidden Size] + _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4]) + + +@register_mil_to_nn_mapping +def squeeze(const_context, builder, op): + axes = op.axes.val if op.axes is not None else None + builder.add_squeeze( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=axes, + squeeze_all=axes is None, + ) + + +@register_mil_to_nn_mapping +def topk(const_context, builder, op): + builder.add_topk( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_names=[output.name for output in op.outputs], + k=op.k.val, + axis=op.axis.val, + use_bottom_k=op.ascending.val, + ) + + +@register_mil_to_nn_mapping +def l2_pool(const_context, builder, op): + _convert_pool(const_context=const_context, builder=builder, op=op, mode="l2") + + +@register_mil_to_nn_mapping +def linear(const_context, builder, op): + out_channels, in_channels = op.weight.shape + if op.x.rank and op.x.rank <= 3 and op.x.rank > 0: + has_bias = op.bias is not None and op.bias.val is not None + builder.add_inner_product( + name=op.name, + W=op.weight.val, + b=op.bias.val if has_bias else None, + input_channels=in_channels, + output_channels=out_channels, + has_bias=has_bias, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + else: + builder.add_batched_mat_mul( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_name=op.outputs[0].name, + W=op.weight.val.T, + bias=op.bias.val, + weight_matrix_rows=in_channels, + weight_matrix_columns=out_channels, + ) + +@register_mil_to_nn_mapping +def matmul(const_context, builder, op): + weight = None + rows, columns = 0, 0 + + if ( + op.y.val is not None + and op.y.rank == 2 + and len(op.y.child_ops) == 1 + and len(op.y.consuming_blocks) == 0 + ): + + weight = op.y.val + if op.transpose_y.val: + weight = weight.transpose((1, 0)) + + rows, columns = weight.shape + input_names = 
make_input(const_context, builder, [op.x]) + + if op.transpose_x.val: + perm = [i for i in range(op.x.rank)] + perm[-1], perm[-2] = perm[-2], perm[-1] + name = op.name + "_x_transpose" + builder.add_transpose( + name=name, axes=perm, input_name=input_names[0], output_name=name + ) + input_names = [name] + + else: + input_names = make_input(const_context, builder, [op.x, op.y]) + + builder.add_batched_mat_mul( + name=op.name, + input_names=input_names, + output_name=op.outputs[0].name, + transpose_a=op.transpose_x.val, + transpose_b=op.transpose_y.val, + W=weight, + weight_matrix_rows=rows, + weight_matrix_columns=columns, + ) + + +@register_mil_to_nn_mapping +def max_pool(const_context, builder, op): + _convert_pool(const_context=const_context, builder=builder, op=op, mode="max") + + +@register_mil_to_nn_mapping +def non_zero(const_context, builder, op): + builder.add_where_nonzero( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def lstm(const_context, builder, op): + make_input(const_context, builder, [op.x, op.initial_h, op.initial_c]) + # Input shape [b, s, I] + input_name = op.x.name + # Shape: [b, DIRECTION*H] + initial_h = op.initial_h.name + initial_c = op.initial_c.name + + wt_ih = op.weight_ih.val + wt_hh = op.weight_hh.val + b = op.bias.val if op.bias is not None else None + direction = op.direction.val + output_sequence = op.output_sequence.val + peephole = op.peephole.val if op.peephole is not None else None + # High enough clip value to be ineffective! + clip = 500.0 if op.clip is None else op.clip.val + + # Add expand dims for input, in + _expand_dim(builder, input_name + "_expanded", input_name, [3, 4]) + input_name += "_expanded" + + if direction in {"forward", "reverse"}: + # Expand initial_h and initial_c, + # from shape (B, H) to shape (1, Batch, H, 1, 1) + _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4]) + initial_h += "_expanded" + # initial_h may have the same name as initial_c (e.g., same Var). 
+ # Append a different string to avoid conflict + _expand_dim(builder, initial_c + "_expanded2", initial_c, [0, 3, 4]) + initial_c += "_expanded2" + + # w_x: [H*I, H*I, H*I, H*I] + # w_h: [H*H, H*H, H*H, H*H] + # where format is, [input gate, forget gate, output gate, cell gate] + w_x = _split(wt_ih, sections=4) + w_h = _split(wt_hh, sections=4) + # bias format: [4*H] + b = _split(b, sections=4) # ifoz layout + # peephole format: [3*H] + # where format is, [input gate, forget gate, output gate] + peephole = _split(peephole, sections=3) + + input_size = w_x[0].shape[1] + hidden_size = w_h[0].shape[1] + + # 3 outputs + # Y : [s/1, b, h, 1, 1] + # Y_h: [ 1, b, h, 1, 1] + # Y_c: [ 1, b, h, 1, 1] + output_names = [_output.name + "_5d" for _output in op.outputs] + builder.add_unilstm( + name=op.name, + W_h=w_h, + W_x=w_x, + b=b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[input_name, initial_h, initial_c], + output_names=output_names, + inner_activation=op.recurrent_activation.val, + cell_state_update_activation=op.cell_activation.val, + output_activation=op.activation.val, + peep=peephole, + output_all=output_sequence, + cell_clip_threshold=clip, + reverse_input=(direction == "reverse"), + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4]) + # Squeeze Output H and Output C + # to output shape of [Batch Size, Hidden Size] + _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4]) + _squeeze(builder, op.outputs[2].name, output_names[2], axes=[0, 3, 4]) + + elif direction == "bidirectional": + # Expand initial_h and initial_c + # Issue #810 + num_layer = len(builder.layers) + initial_h_expand = initial_h + "_expanded" + "_" + str(num_layer) + # from shape (B, 2*H) to shape (1, Batch, 2*H, 1, 1) + if not (initial_h_expand in set(builder.layers)): + _expand_dim(builder, initial_h_expand, initial_h, [0, 3, 4]) + initial_h = initial_h_expand + + # initial_h may have the same name as initial_c (e.g., same Var) + initial_c_expand = initial_c + "_expanded2" + "_" + str(num_layer) + if not (initial_c_expand in set(builder.layers)): + _expand_dim(builder, initial_c_expand, initial_c, [0, 3, 4]) + initial_c = initial_c_expand + + initial_h_f = initial_h + "_forward" + initial_h_r = initial_h + "_reverse" + initial_c_f = initial_c + "_forward" + initial_c_r = initial_c + "_reverse" + + # split input_h and input_c into two parts + builder.add_split_nd( + name=op.name + "_split_h", + input_name=initial_h, + output_names=[initial_h_f, initial_h_r], + axis=2, + ) + builder.add_split_nd( + name=op.name + "_split_c", + input_name=initial_c, + output_names=[initial_c_f, initial_c_r], + axis=2, + ) + + wt_ih_back = op.weight_ih_back.val + wt_hh_back = op.weight_hh_back.val + # Get weights here + # weight format: [I+H, 2*4*H] -> [I+H, 4*H (forward):4*H (backward)] + hidden_size = wt_hh.shape[1] + input_size = wt_ih.shape[1] + + # f_w_x and r_w_x: [H*I, H*I, H*I, H*I] + # f_w_h and r_w_h: [H*H, H*H, H*H, H*H] + # where format is, [input gate, forget gate, output gate, cell gate] + w_x = _split(wt_ih, sections=4) + w_h = _split(wt_hh, sections=4) + r_w_x = _split(wt_ih_back, sections=4) + r_w_h = _split(wt_hh_back, sections=4) + + # f_b and r_b format: [4*H] + b_back = op.bias_back.val if op.bias_back is not None else None + f_b, r_b = None, None + if b is not None: + f_b = _split(b, sections=4) + if b_back is not None: + r_b = _split(b_back, sections=4) + + # peephole format: 
[2*3*H] -> [3*H (forward) : 3*H (backward)]
+        peephole_back = op.peephole_back.val if op.peephole_back is not None else None
+        f_peephole, r_peephole = None, None
+        if peephole is not None:
+            f_peephole = _split(peephole, sections=3)
+        if peephole_back is not None:
+            r_peephole = _split(peephole_back, sections=3)
+
+        output_names = [
+            op.outputs[0].name + "_5d",          # Output Y           [s/1, b, 2*h, 1, 1]
+            op.outputs[1].name + "_5d_forward",  # Output Y_h         [  1, b,   h, 1, 1]
+            op.outputs[2].name + "_5d_forward",  # Output Y_c         [  1, b,   h, 1, 1]
+            op.outputs[1].name + "_5d_reverse",  # Output Y_h_reverse [  1, b,   h, 1, 1]
+            op.outputs[2].name + "_5d_reverse",  # Output Y_c_reverse [  1, b,   h, 1, 1]
+        ]
+
+        builder.add_bidirlstm(
+            name=op.name,
+            W_h=w_h,
+            W_x=w_x,
+            b=f_b,
+            W_h_back=r_w_h,
+            W_x_back=r_w_x,
+            b_back=r_b,
+            hidden_size=hidden_size,
+            input_size=input_size,
+            input_names=[
+                input_name,
+                initial_h_f,
+                initial_c_f,
+                initial_h_r,
+                initial_c_r,
+            ],
+            output_names=output_names,
+            inner_activation=op.recurrent_activation.val,
+            cell_state_update_activation=op.cell_activation.val,
+            output_activation=op.activation.val,
+            peep=f_peephole,
+            peep_back=r_peephole,
+            output_all=output_sequence,
+            cell_clip_threshold=clip,
+        )
+
+        # Squeeze Output
+        # to output shape of [Seq Len or 1, Batch Size, 2*Hidden Size]
+        _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4])
+
+        # Output H is of format
+        # 1, Batch_Size, Hidden_Size, 1, 1
+        # Concat to make it
+        # 1, Batch_Size, 2*Hidden_Size, 1, 1
+        builder.add_elementwise(
+            name=op.outputs[1].name + "_5d",
+            input_names=[output_names[1], output_names[3]],
+            output_name=op.outputs[1].name + "_5d",
+            mode="CONCAT",
+        )
+        # Output C is of format
+        # 1, Batch_Size, Hidden_Size, 1, 1
+        builder.add_elementwise(
+            name=op.outputs[2].name + "_5d",
+            input_names=[output_names[2], output_names[4]],
+            output_name=op.outputs[2].name + "_5d",
+            mode="CONCAT",
+        )
+
+        # Squeeze Output H and Output C
+        # to output shape of [Batch Size, 2*Hidden Size]
+        _squeeze(
+            builder, op.outputs[1].name, op.outputs[1].name + "_5d", axes=[0, 3, 4]
+        )
+        _squeeze(
+            builder, op.outputs[2].name, op.outputs[2].name + "_5d", axes=[0, 3, 4]
+        )
+    else:
+        raise ValueError(
+            "Unknown direction {} for LSTM layer. Supported are forward, reverse or bidirectional".format(
+                direction
+            )
+        )
+
+
+@register_mil_to_nn_mapping
+def reshape(const_context, builder, op):
+    if op.shape.val is None:
+        builder.add_reshape_dynamic(
+            name=op.name,
+            input_names=make_input(const_context, builder, [op.x, op.shape]),
+            output_name=op.outputs[0].name,
+        )
+    elif -1 in op.shape.val and len(op.shape.val) == op.x.rank:
+        # Supports both -1 and 0 in the target shape.
+        builder.add_rank_preserving_reshape(
+            name=op.name,
+            input_name=make_input(const_context, builder, op.x),
+            output_name=op.outputs[0].name,
+            output_shape=op.shape.val,
+        )
+    else:
+        if 0 in op.shape.val:
+            # Does not support 0 in shape
+            msg = "Use 0 in shape only if len(shape) == x.rank. Report bug."
+ raise ValueError(msg) + output_shape = (1,) if len(op.shape.val) == 0 or 0 in op.shape.shape else op.shape.val + builder.add_reshape_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + output_shape=output_shape, + ) + + +@register_mil_to_nn_mapping +def reduce_argmax(const_context, builder, op): + builder.add_argmax( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + keepdims=op.keep_dims.val, + ) + + +@register_mil_to_nn_mapping +def reduce_argmin(const_context, builder, op): + builder.add_argmin( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + keepdims=op.keep_dims.val, + ) + + +def _reduce_axes(const_context, builder, builder_op, op): + axes = op.axes.val if op.axes is not None else op.axes + builder_op( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=axes, + keepdims=op.keep_dims.val, + reduce_all=axes is None, + ) + + +@register_mil_to_nn_mapping +def reduce_l1_norm(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_l1, op) + + +@register_mil_to_nn_mapping +def reduce_l2_norm(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_l2, op) + + +@register_mil_to_nn_mapping +def reduce_log_sum(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_logsum, op) + + +@register_mil_to_nn_mapping +def reduce_log_sum_exp(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_logsumexp, op) + + +@register_mil_to_nn_mapping +def reduce_max(const_context, builder, op): + if not _try_convert_global_pool(const_context, builder, op, mode="max"): + _reduce_axes(const_context, builder, builder.add_reduce_max, op) + + +@register_mil_to_nn_mapping +def reduce_mean(const_context, builder, op): + if not _try_convert_global_pool(const_context, builder, op, mode="average"): + _reduce_axes(const_context, builder, builder.add_reduce_mean, op) + + +@register_mil_to_nn_mapping +def reduce_min(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_min, op) + + +@register_mil_to_nn_mapping +def reduce_prod(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_prod, op) + + +@register_mil_to_nn_mapping +def reduce_sum(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_sum, op) + + +@register_mil_to_nn_mapping +def reduce_sum_square(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_sumsquare, op) + + +@register_mil_to_nn_mapping +def reverse(const_context, builder, op): + reverse_dim = [False] * op.x.rank + if op.axes is None: + reverse_dim = [True] * op.x.rank + else: + for axis in op.axes.val: + reverse_dim[axis] = True + builder.add_reverse( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + reverse_dim=reverse_dim, + ) + + +@register_mil_to_nn_mapping +def reverse_sequence(const_context, builder, op): + builder.add_reverse_sequence( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.lengths]), + output_name=op.outputs[0].name, + batch_axis=op.batch_axis.val, + seq_axis=op.seq_axis.val, + ) + + +@register_mil_to_nn_mapping +def rnn(const_context, builder, op): + input_name = 
make_input(const_context, builder, op.x)  # [b, s, I]
+    initial_h = make_input(const_context, builder, op.initial_h)  # [b, H]
+
+    w_ih = op.weight_ih.val
+    w_hh = op.weight_hh.val
+    b = op.bias.val if op.bias is not None else None
+    direction = op.direction.val
+    output_sequence = op.output_sequence.val
+    activation = op.activation.val
+
+    # Add expand dims for input, in
+    _expand_dim(builder, input_name + "_expanded", input_name, [3, 4])
+    input_name += "_expanded"
+
+    if direction not in {"forward", "reverse"}:
+        raise ValueError(
+            "Unknown direction {} for RNN layer. Supported are forward and reverse".format(
+                direction
+            )
+        )
+
+    # Expand initial_h
+    _expand_dim(builder, initial_h + "_expanded", initial_h, [2, 3, 4])
+    initial_h += "_expanded"
+
+    # w_x: (H, I)
+    # w_h: (H, H)
+    hidden_size = w_hh.shape[0]
+    input_size = w_ih.shape[-1]
+
+    # 2 outputs
+    # Y  : [s/1, b, h, 1, 1]
+    # Y_h: [  1, b, h, 1, 1]
+    output_names = [_output.name + "_5d" for _output in op.outputs]
+    builder.add_simple_rnn(
+        name=op.name,
+        W_h=w_hh,
+        W_x=w_ih,
+        b=b,
+        hidden_size=hidden_size,
+        input_size=input_size,
+        input_names=[input_name, initial_h],
+        output_names=output_names,
+        activation=activation,
+        output_all=output_sequence,
+        reverse_input=(direction == "reverse"),
+    )
+
+    # Squeeze Output
+    # to output shape of [Seq Len or 1, Batch Size, Hidden Size]
+    _squeeze(builder, op.outputs[0].name, output_names[0], [3, 4])
+    # Squeeze Output H
+    # to output shape of [Batch Size, Hidden Size]
+    _squeeze(builder, op.outputs[1].name, output_names[1], [0, 3, 4])
+
+
+@register_mil_to_nn_mapping
+def select(const_context, builder, op):
+    builder.add_where_broadcastable(
+        name=op.name,
+        input_names=make_input(const_context, builder, [op.cond, op.a, op.b]),
+        output_name=op.outputs[0].name,
+    )
+
+
+@register_mil_to_nn_mapping
+def space_to_depth(const_context, builder, op):
+    builder.add_reorganize_data(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode="SPACE_TO_DEPTH",
+        block_size=op.block_size.val,
+    )
+
+
+@register_mil_to_nn_mapping
+def batch_to_space(const_context, builder, op):
+    block_size = op.block_shape.val
+    if block_size[0] != block_size[1]:
+        raise ValueError("batch_to_space non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.")
+    block_size = block_size[0]
+    if block_size == 1:
+        raise ValueError("batch_to_space block shape == 1 not supported in 'neuralnetwork' backend! 
Please change the convert_to to 'mlprogram'.") + + transpose_1_name = op.name + "_transpose_1" + builder.add_transpose( + name=transpose_1_name, + input_name=make_input(const_context, builder, op.x), + axes=[1, 0, 2, 3], + output_name=transpose_1_name, + ) + depth_to_space_name = op.name + "_depth_to_space" + builder.add_reorganize_data( + name=depth_to_space_name, + input_name=transpose_1_name, + output_name=depth_to_space_name, + mode="DEPTH_TO_SPACE", + block_size=block_size, + ) + crop_name = op.name + "_crop" + crops = op.crops.val + builder.add_crop( + name=crop_name, + input_names=[depth_to_space_name], + output_name=crop_name, + offset=0, + top=crops[0][0], + bottom=crops[0][1], + left=crops[1][0], + right=crops[1][1], + ) + transpose_2_name = op.name + "_transpose_2" + builder.add_transpose( + name=transpose_2_name, + input_name=crop_name, + axes=[1, 0, 2, 3], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def space_to_batch(const_context, builder, op): + block_size = op.block_shape.val + if block_size[0] != block_size[1]: + raise ValueError("space_to_batch non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.") + block_size = block_size[0] + if block_size == 1: + raise ValueError("space_to_batch block shape == 1 not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.") + + pad = op.paddings.val.flatten() + left, right = pad[2], pad[3] + top, bottom = pad[0], pad[1] + + pad_name = op.name + "_pad" + builder.add_padding( + name=pad_name, + left=left, + right=right, + top=top, + bottom=bottom, + input_name=make_input(const_context, builder, op.x), + output_name=pad_name, + padding_type="constant", + value=0., + ) + + transpose_1_name = op.name + "_transpose_1" + builder.add_transpose( + name=transpose_1_name, + input_name=pad_name, + axes=[1, 0, 2, 3], + output_name=transpose_1_name, + ) + space_to_depth_name = op.name + "_space_to_depth" + builder.add_reorganize_data( + name=space_to_depth_name, + input_name=transpose_1_name, + output_name=space_to_depth_name, + mode="SPACE_TO_DEPTH", + block_size=block_size, + ) + transpose_2_name = op.name + "_transpose_2" + builder.add_transpose( + name=transpose_2_name, + input_name=space_to_depth_name, + axes=[1, 0, 2, 3], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def transpose(const_context, builder, op): + builder.add_transpose( + name=op.name, + axes=op.perm.val, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def gather(const_context, builder, op): + is_embedding = False + + if op.x.val is not None: + W = op.x.val + if len(W.shape) == 2: + if op.axis.val == 0 or op.axis.val == -2: + if len(op.x.child_ops) == 1: + # the constant feeding into the gather doesn't go to any other op + is_embedding = True + + if is_embedding: + """" + The following: + %3 = gather(%1, %2, axis=0) # %1 is a constant matrix of shape (vocab_size, embedding_size) + can be mapped to: + %2_e = expand_dims(%2, axis=-1) + %3 = embeddingND(%2_e, weight=%1) + """ + builder.add_expand_dims( + name=op.name + "_expand_dims", + input_name=make_input(const_context, builder, op.indices), + output_name=op.name + "_expand_dims", + axes=[-1], + ) + + builder.add_embedding_nd( + name=op.name, + input_name=op.name + "_expand_dims", + output_name=op.outputs[0].name, + vocab_size=W.shape[0], + embedding_size=W.shape[1], + W=_np.transpose(W), + ) + + else: + 
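+        # For illustration (shapes are hypothetical, not from the original
+        # source): the generic path, e.g. gather(x, indices, axis=0) with x
+        # of shape (4, 5) and indices of shape (2, 3), yields an output of
+        # shape (2, 3, 5).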
builder.add_gather( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.indices]), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def scatter(const_context, builder, op): + builder.add_scatter( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates] + ), + output_name=op.outputs[0].name, + axis=op.axis.val, + mode=op.mode.val.upper(), + ) + + +@register_mil_to_nn_mapping +def gather_along_axis(const_context, builder, op): + builder.add_gather_along_axis( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.indices]), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def scatter_along_axis(const_context, builder, op): + builder.add_scatter_along_axis( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates] + ), + output_name=op.outputs[0].name, + axis=op.axis.val, + mode=op.mode.val.upper(), + ) + + +@register_mil_to_nn_mapping +def gather_nd(const_context, builder, op): + builder.add_gather_nd( + name=op.name, + input_names=make_input( + const_context, builder, [op.x, op.indices] + ), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def scatter_nd(const_context, builder, op): + builder.add_scatter_nd( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates], + ), + output_name=op.outputs[0].name, + mode=op.mode.val.upper(), + ) + +@register_mil_to_nn_mapping +def silu(const_context, builder, op): + ''' + silu is: + y = x * sigmoid(x) + ''' + inp = make_input(const_context, builder, op.x) + builder.add_activation( + name=op.name + "__silu_sigmoid__", + non_linearity="SIGMOID", + input_name=inp, + output_name=op.name + "__silu_sigmoid__", + ) + builder.add_elementwise( + name=op.name, + input_names=[inp, op.name + "__silu_sigmoid__"], + output_name=op.outputs[0].name, + mode='MULTIPLY', + ) + + +@register_mil_to_nn_mapping +def tile(const_context, builder, op): + inputs = [make_input(const_context, builder, op.x)] + if op.reps.val is None: + inputs.append(op.reps.name) + builder.add_tile( + name=op.name, + reps=op.reps.val, + input_name=inputs, + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def tanh(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="TANH", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def scaled_tanh(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SCALED_TANH", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def sigmoid(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SIGMOID", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def sigmoid_hard(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SIGMOID_HARD", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def erf(const_context, builder, op): + builder.add_erf( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + 
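+# For illustration only (a sketch, not part of the original source):
+# composite mappings such as silu and relu6 above follow a common pattern
+# when the NN backend lacks a single equivalent layer: emit a chain of
+# primitive layers, deriving intermediate tensor names from op.name so they
+# stay unique. A hypothetical mapping for a "mish" op
+# (y = x * tanh(softplus(x))) written in the same style would look like:
+#
+#   @register_mil_to_nn_mapping
+#   def mish(const_context, builder, op):
+#       inp = make_input(const_context, builder, op.x)
+#       builder.add_activation(
+#           name=op.name + "__mish_softplus__",
+#           non_linearity="SOFTPLUS",
+#           input_name=inp,
+#           output_name=op.name + "__mish_softplus__",
+#       )
+#       builder.add_activation(
+#           name=op.name + "__mish_tanh__",
+#           non_linearity="TANH",
+#           input_name=op.name + "__mish_softplus__",
+#           output_name=op.name + "__mish_tanh__",
+#       )
+#       builder.add_elementwise(
+#           name=op.name,
+#           input_names=[inp, op.name + "__mish_tanh__"],
+#           output_name=op.outputs[0].name,
+#           mode="MULTIPLY",
+#       )
+
+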
+@register_mil_to_nn_mapping +def thresholded_relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="THRESHOLDEDRELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def elu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="ELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def leaky_relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="LEAKYRELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val], + ) + + +@register_mil_to_nn_mapping +def gelu(const_context, builder, op): + builder.add_gelu( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode=op.mode.val, + ) + + +@register_mil_to_nn_mapping +def softplus(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SOFTPLUS", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def softmax(const_context, builder, op): + rank = op.x.rank + if op.axis.val == -3 or op.axis.val > 0 and op.axis.val == rank - 3: + builder.add_softmax( + name=op.name, input_name=op.x.name, output_name=op.outputs[0].name, + ) + else: + builder.add_softmax_nd( + name=op.name, + input_name=op.x.name, + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def softplus_parametric(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="PARAMETRICSOFTPLUS", + input_name=make_input(const_context, builder, op.x), + input_shape=op.x.shape, + input_rank=op.x.rank, + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def softsign(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SOFTSIGN", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def linear_activation(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="LINEAR", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="RELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def clamped_relu(const_context, builder, op): + builder.add_clamped_relu( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + alpha=op.alpha.val, + beta=op.beta.val, + ) + + +@register_mil_to_nn_mapping +def relu6(const_context, builder, op): + builder.add_activation( + name=op.name + "__relu6_relu__", + input_name=make_input(const_context, builder, op.x), + output_name=op.name + "__relu6_relu__", + non_linearity="RELU", + ) + builder.add_activation( + name=op.name + "__relu6_neg__", + input_name=op.name + "__relu6_relu__", + output_name=op.name + "__relu6_neg__", + non_linearity="LINEAR", + params=[-1, 0], + ) + builder.add_unary( + name=op.name + "__relu6_threshold6__", + input_name=op.name + 
"__relu6_neg__", + output_name=op.name + "__relu6_threshold6__", + mode="threshold", + alpha=-6, + ) + builder.add_activation( + name=op.name, + input_name=op.name + "__relu6_threshold6__", + output_name=op.outputs[0].name, + non_linearity="LINEAR", + params=[-1, 0], + ) + + +@register_mil_to_nn_mapping +def prelu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="PRELU", + input_name=make_input(const_context, builder, op.x), + input_shape=op.x.shape, + input_rank=op.x.rank, + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def pad(const_context, builder, op): + if len(op.pad.shape) != 1: + raise ValueError("Pad should be a 1D tensor.") + + pad = op.pad.val + mode = op.mode.val + constant_val = op.constant_val.val + + nn_mode_mapping = {"reflect": "reflection", "replicate": "replication"} + mode = nn_mode_mapping.get(mode, mode) + + if pad is not None: + missing_dims = op.x.rank - len(pad) // 2 + pad = [0, 0] * missing_dims + list(pad) + + + if pad is not None and op.x.rank > 1 and all(i == 0 for i in pad[:-4]): + pad = pad[-4:] + left, right = pad[2], pad[3] + top, bottom = pad[0], pad[1] + builder.add_padding( + name=op.name, + left=left, + right=right, + top=top, + bottom=bottom, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + padding_type=mode, + value=constant_val, + ) + elif mode == "constant": + if pad is None: + builder.add_constant_pad( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.pad]), + output_name=op.outputs[0].name, + value=constant_val + ) + else: + builder.add_constant_pad( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_name=op.outputs[0].name, + value=constant_val, + pad_amounts=pad, + ) + else: + raise ValueError("Unsupported mode for Pad layer! 
{}".format(mode)) + + +@register_mil_to_nn_mapping +def instance_norm(const_context, builder, op): + channels = op.x.shape[1] + gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val + beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val + + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + if op.x.rank == 3: + x_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + out_name += "_instance_norm" + + builder.add_batchnorm( + name=op.name, + channels=channels, + gamma=gamma, + beta=beta, + input_name=x_name, + output_name=out_name, + compute_mean_var=True, + instance_normalization=True, + epsilon=op.epsilon.val, + ) + + # Squeeze added `Height` dimension for 1d case + if op.x.rank == 3: + x_name = op.name + "_squeeze" + builder.add_squeeze( + name=x_name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + + + +@register_mil_to_nn_mapping +def l2_norm(const_context, builder, op): + builder.add_l2_normalize( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + epsilon=op.epsilon.val, + ) + + +@register_mil_to_nn_mapping +def layer_norm(const_context, builder, op): + + rank = op.x.rank + input_shape = [-1 if is_symbolic(dim) else dim for dim in list(op.x.shape)] + axes = list(range(op.x.rank)) if op.axes.val is None else op.axes.val + axes = [axis+rank if axis < 0 else axis for axis in op.axes.val] + epsilon = op.epsilon.val + + # if input shape = (X1, X2) or (X0, X1, X2), axes = [-1], X1 and X2 are known + # then the following operations are performed + # - reshape to (X1, 1, X2) / (X0, X1, 1, X2) + # - apply MVN layer, which normalizes across last 2 dims + # - apply scale layer + # - reshape back to (X1, X2) / (X0, X1, X2) + # Otherwise, we express the layer_norm as primitive operations + if rank in [2, 3] and len(axes) == 1 and axes[0] == rank - 1 and input_shape.count(-1) < 2 \ + and input_shape[-1] != -1 and input_shape[-2] != -1: + + reshaped_shape = input_shape[:] + # Insert a singleton dimension in the 'height' position + reshaped_shape.insert(-1, 1) + + # Scale layer can't take parameters of size [W], but can take [1, H, W], and H=1 in this case + gamma = _np.ones((1, 1, reshaped_shape[-1])) if op.gamma is None else _np.expand_dims(op.gamma.val, axis=(0, 1)) + beta = _np.zeros((1, 1, reshaped_shape[-1])) if op.beta is None else _np.expand_dims(op.beta.val, axis=(0, 1)) + + builder.add_reshape_static( + name=op.name + "_reshape", + input_name=make_input(const_context, builder, op.x), + output_name=op.name + "_reshape", + output_shape=reshaped_shape, + ) + + builder.add_mvn( + name=op.name + "_mvn", + input_name=op.name + "_reshape", + output_name=op.name + "_mvn", + across_channels=False, + normalize_variance=True, + epsilon=epsilon, + ) + + builder.add_scale( + name=op.name + "_scale", + input_name=op.name + "_mvn", + output_name=op.name + "_scale", + W=gamma, + b=beta, + has_bias=True, + shape_scale=_np.shape(gamma), + shape_bias=_np.shape(beta), + ) + + builder.add_reshape_static( + name=op.name, + input_name=op.name + "_scale", + output_name=op.outputs[0].name, + output_shape=input_shape, + ) + + else: # We don't meet the conditions for an MVN layer, so we use primitives + mean_name = op.name + "_mean" + builder.add_reduce_mean( + name=mean_name, + input_name=make_input(const_context, builder, op.x), + output_name=mean_name, + axes=axes, + keepdims=True, + 
reduce_all=False, + ) + + sub_mean_name = op.name + "_sub_mean" + builder.add_subtract_broadcastable( + name=sub_mean_name, + input_names=[op.x.name, mean_name], + output_name=sub_mean_name, + ) + + square_name = op.name + '_square' + builder.add_unary( + name=square_name, + input_name=sub_mean_name, + output_name=square_name, + mode="power", + alpha=2.0, + ) + + square_sum_name = op.name + '_square_sum' + builder.add_reduce_sum( + name=square_sum_name, + input_name=square_name, + output_name=square_sum_name, + axes=axes, + keepdims=True, + reduce_all=False, + ) + + normalized_shape = [op.x.shape[i] if i in axes else 1 for i in range(rank)] + if not any_symbolic(normalized_shape): + div_prod_name = op.name + '_div_constant' + add_const(const_context, builder, div_prod_name, _np.prod(normalized_shape)) + else: + raise NotImplementedError("dynamic shape input nor supported for layer_norm") + + div_square_sum_name = op.name + '_div_square_sum' + builder.add_divide_broadcastable( + name=div_square_sum_name, + input_names=[square_sum_name, div_prod_name], + output_name=div_square_sum_name + ) + + epsilon_const_name = op.name + '_epsilon' + add_const(const_context, builder, epsilon_const_name, epsilon) + add_epsilon_name = op.name + '_add_epsilon' + builder.add_elementwise( + name=add_epsilon_name, + input_names=[div_square_sum_name, epsilon_const_name], + output_name=add_epsilon_name, + mode="ADD", + ) + + sqrt_name = op.name + '_sqrt' + builder.add_unary( + name=sqrt_name, + input_name=add_epsilon_name, + output_name=sqrt_name, + mode="sqrt", + ) + + div_name = op.name + '_divide' + builder.add_divide_broadcastable( + name=div_name, + input_names=[sub_mean_name, sqrt_name], + output_name=div_name + ) + + gamma = _np.ones(normalized_shape) if op.gamma is None else _np.reshape(op.gamma.val, normalized_shape) + beta = _np.zeros(normalized_shape) if op.beta is None else _np.reshape(op.beta.val, normalized_shape) + + gamma_name = op.name + '_gamma' + beta_name = op.name + '_beta' + add_const(const_context, builder, gamma_name, gamma) + add_const(const_context, builder, beta_name, beta) + + mul_name = op.name + '_mul' + builder.add_multiply_broadcastable( + name=mul_name, + input_names=[div_name, gamma_name], + output_name=mul_name, + ) + + builder.add_add_broadcastable( + name=op.name, + input_names=[mul_name, beta_name], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def local_response_norm(const_context, builder, op): + builder.add_lrn( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + alpha=op.alpha.val, + beta=op.beta.val, + local_size=op.size.val, + k=op.k.val, + ) + + +@register_mil_to_nn_mapping +def conv_transpose(const_context, builder, op): + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + # Special handling for 1d conv transpose + is_conv_transpose_1d = op.x.rank == 3 + is_conv_transpose_2d = op.x.rank == 4 + is_conv_transpose_3d = op.x.rank == 5 + + if is_conv_transpose_1d: + x_name = op.name + "_expand_dim" + out_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2] + ) + + # Input names to be used + input_names = [x_name] + + # Kernel shape: [C_in, C_out, D, H, W] + weight = op.weight.val + kernel_channels = weight.shape[0] + output_channels = weight.shape[1] * op.groups.val + + if is_conv_transpose_1d: + weight = _np.expand_dims(weight, -2) + + # pyMIL Deconvolution format: [C_in, C_out / groups, 
spatial_dims] + # NN DeConvolution3D expects weights to have shape (C_out / groups, C_in, spatial_dims) + # NN DeConvolution2D/1D expects (spatial_dims, C_in, C_out/groups) + if is_conv_transpose_3d: + weight = _np.transpose(weight, [1, 0, 2, 3, 4]) + else: + weight = _np.transpose(weight, [2, 3, 0, 1]) + + strides = op.strides.val.tolist() + dilations = op.dilations.val.tolist() + + output_spatial_dims = list(op.outputs[0].shape[2:]) + if is_conv_transpose_1d: + dilations = dilations[:-1] + [1] + dilations[-1:] + strides = strides[:-1] + [1] + strides[-1:] + # Must be at least 2D + output_spatial_dims = output_spatial_dims[:-1] + [1] + output_spatial_dims[-1:] + + if any_symbolic(output_spatial_dims): + output_spatial_dims = None + + # padding + padding_mode = op.pad_type.val + pad = {} + if padding_mode == "custom": + if is_conv_transpose_1d: + padding_mode = "valid" + pad["padding_top"] = 0 + pad["padding_bottom"] = 0 + pad["padding_left"] = op.pad.val[0] # Left + pad["padding_right"] = op.pad.val[1] # Right + elif is_conv_transpose_2d: + padding_mode = "valid" + pad["padding_top"] = op.pad.val[0] # Top + pad["padding_bottom"] = op.pad.val[1] # Bottom + pad["padding_left"] = op.pad.val[2] # Left + pad["padding_right"] = op.pad.val[3] # Right + else: + pad["padding_front"] = op.pad.val[0] # Front + pad["padding_back"] = op.pad.val[1] # Back + pad["padding_top"] = op.pad.val[2] # Top + pad["padding_bottom"] = op.pad.val[3] # Bottom + pad["padding_left"] = op.pad.val[4] # Left + pad["padding_right"] = op.pad.val[5] # Right + + groups = op.groups.val + has_bias = op.bias is not None + + if is_conv_transpose_3d: + builder.add_convolution3d( + name=op.name, + input_channels=kernel_channels, + output_channels=output_channels, + depth=weight.shape[-3], + height=weight.shape[-2], + width=weight.shape[-1], + W=weight, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + groups=groups, + stride_depth=strides[0], + stride_height=strides[1], + stride_width=strides[2], + dilation_depth=dilations[0], + dilation_height=dilations[1], + dilation_width=dilations[2], + padding_mode=padding_mode, + is_deconv=True, + output_shape=output_spatial_dims, + input_name=input_names, + output_name=out_name, + **pad + ) + else: + builder.add_convolution( + name=out_name, + kernel_channels=kernel_channels, + output_channels=output_channels, + height=weight.shape[0], + width=weight.shape[1], + stride_height=strides[0], + stride_width=strides[1], + border_mode=padding_mode, + groups=groups, + W=weight, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + is_deconv=True, + output_shape=output_spatial_dims, + input_name=input_names, + output_name=out_name, + dilation_factors=dilations, + **pad + ) + + # Squeeze added `Height` dimension for 1d case + if is_conv_transpose_1d: + builder.add_squeeze( + name=op.name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + +@register_mil_to_nn_mapping +def range_1d(const_context, builder, op): + if op.start.val is not None and op.step.val is not None: + inputs = [op.end] + elif op.start.val is None and op.step.val is not None: + inputs = [op.end, op.start] + elif op.start.val is not None and op.step.val is None: + inputs = [op.end, op.start, op.step] + else: + inputs = [op.end, op.start, op.step] + + builder.add_range_dynamic( + name=op.name, + output_name=op.outputs[0].name, + input_names=make_input(const_context, builder, inputs), + start=op.start.val if op.start.val is not None else 0, + step=op.step.val if op.step.val is not None else 
1, + ) + + +@register_mil_to_nn_mapping +def one_hot(const_context, builder, op): + if op.one_hot_vector_size.val is not None: + inputs = [op.indices] + else: + inputs = [op.indices, op.one_hot_vector_size] + + builder.add_one_hot( + name=op.name, + input_names=make_input(const_context, builder, inputs), + output_name=op.outputs[0].name, + one_hot_vector_size=op.one_hot_vector_size.val, + axis=op.axis.val, + on_value=op.on_value.val, + off_value=op.off_value.val, + ) + + +@register_mil_to_nn_mapping +def non_maximum_suppression(const_context, builder, op): + builder.add_nms( + name=op.name, + input_names=make_input(const_context, builder, [op.boxes, op.scores]), + output_names=[op.outputs[i].name for i in range(4)], + iou_threshold=op.iou_threshold.val, + score_threshold=op.score_threshold.val, + max_boxes=op.max_boxes.val, + per_class_suppression=op.per_class_suppression.val, + ) + + +@register_mil_to_nn_mapping +def flatten2d(const_context, builder, op): + builder.add_flatten_to_2d( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def shape(const_context, builder, op): + builder.add_get_shape( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +def add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w): + mode = "NN" + linear_upsample_mode = "DEFAULT" + if _np.abs(_np.round(scale_factor_h) - scale_factor_h) < 1e-4 and scale_factor_h >= 1 - 1e-4: + scale_factor_h = int(scale_factor_h) + else: + logger.warning( + f"Unsupported float type 'scale_factor_height' ({scale_factor_h}) for neuralnetwork. " + "Falling back to bilinear interpolation." + ) + mode = "BILINEAR" + linear_upsample_mode = "ALIGN_CORNERS_TRUE" + if _np.abs(_np.round(scale_factor_w) - scale_factor_w) < 1e-4 and scale_factor_w >= 1 - 1e-4: + scale_factor_w = int(scale_factor_w) + else: + logger.warning( + f"Unsupported float type 'scale_factor_width' ({scale_factor_w}) for neuralnetwork. " + "Falling back to bilinear interpolation." 
+        )
+        mode = "BILINEAR"
+        linear_upsample_mode = "ALIGN_CORNERS_TRUE"
+
+    builder.add_upsample(
+        name=op.name,
+        scaling_factor_h=scale_factor_h,
+        scaling_factor_w=scale_factor_w,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode=mode,
+        linear_upsample_mode=linear_upsample_mode,
+    )
+
+
+@register_mil_to_nn_mapping
+def resize_nearest_neighbor(const_context, builder, op):
+    Hout, Wout = op.target_size_height.val, op.target_size_width.val
+    x_shape = op.x.shape
+    Hin, Win = x_shape[-2], x_shape[-1]
+
+    scale_factor_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin
+    scale_factor_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win
+
+    add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w)
+
+
+@register_mil_to_nn_mapping
+def upsample_nearest_neighbor(const_context, builder, op):
+    scale_factor_h = op.scale_factor_height.val
+    scale_factor_w = op.scale_factor_width.val
+
+    add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w)
+
+
+@register_mil_to_nn_mapping
+def upsample_bilinear(const_context, builder, op):
+    builder.add_upsample(
+        name=op.name,
+        scaling_factor_h=op.scale_factor_height.val,
+        scaling_factor_w=op.scale_factor_width.val,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode="BILINEAR",
+        linear_upsample_mode="ALIGN_CORNERS_TRUE" if op.align_corners.val else "ALIGN_CORNERS_FALSE",
+    )
+
+
+@register_mil_to_nn_mapping
+def resize_bilinear(const_context, builder, op):
+    grid_sampling_mode_map = {
+        "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE",
+        "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE",
+        "DEFAULT": "UPSAMPLE_MODE",
+        "OFFSET_CORNERS": "ROI_ALIGN_MODE"
+    }
+
+    if op.sampling_mode.val not in grid_sampling_mode_map:
+        raise NotImplementedError(
+            "Unsupported 'sampling_mode' ('{}') in neuralnetwork backend".format(
+                op.sampling_mode.val
+            )
+        )
+
+    builder.add_resize_bilinear(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        target_height=op.target_size_height.val,
+        target_width=op.target_size_width.val,
+        mode=grid_sampling_mode_map[op.sampling_mode.val],
+    )
+
+
+@register_mil_to_nn_mapping
+def cond(const_context, builder, op):
+    true_block = op.blocks[0]
+    false_block = op.blocks[1]
+
+    branch_layer = builder.add_branch(
+        name=op.name, input_name=make_input(const_context, builder, op.pred),
+    )
+    true_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=branch_layer.branch.ifBranch,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+    convert_ops(const_context, true_builder, true_block.operations, true_block.outputs)
+
+    # Copy block output to cond op output.
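+    # For illustration (names are hypothetical, not from the original
+    # source): a block output named "cond_0_true_out" is copied into the op
+    # output name "cond_0", since the branch sub-network must expose its
+    # results under the cond op's own output names.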
+ for block_out, op_out in zip(true_block.outputs, op.outputs): + true_builder.add_copy( + name=block_out.name + "_ret_copy", + # No need to make_input for block_out which is guaranteed + # to be a node + input_name=block_out.name, + output_name=op_out.name, + ) + + false_builder = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.elseBranch, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + convert_ops( + const_context, false_builder, false_block.operations, false_block.outputs + ) + + for block_out, op_out in zip(false_block.outputs, op.outputs): + false_builder.add_copy( + name=block_out.name + "_ret_copy", + input_name=block_out.name, + output_name=op_out.name, + ) + + +@register_mil_to_nn_mapping +def while_loop(const_context, builder, op): + cond_block = op.blocks[0] + body_block = op.blocks[1] + + # Assume that all loop vars aren't loop invariant (invariant loop vars + # should've be optimized away in graph passes). + for v_in, vx_in in zip(op.loop_vars, cond_block.inputs): + assert v_in.name != vx_in.name, "Loop invariant detected in {}".format(op) + builder.add_copy( + name=vx_in.name + "_input_copy", + input_name=make_input(const_context, builder, v_in), + output_name=vx_in.name, + ) + + loop_layer = builder.add_loop( + name=op.name, + # max_iterations=0 to use condition network. + max_iterations=0, + ) + + # Construct while_loop condition + cond_builder = neural_network.NeuralNetworkBuilder( + nn_spec=loop_layer.loop.conditionNetwork, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + cond_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in cond_block.inputs} + convert_ops( + const_context, + cond_builder, + cond_block.operations, + cond_block.outputs, + ) + + loop_layer.loop.conditionVar = cond_block.outputs[0].name + + # while_loop body produces loop_vars + body_builder = neural_network.NeuralNetworkBuilder( + nn_spec=loop_layer.loop.bodyNetwork, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + body_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in body_block.inputs} + convert_ops( + const_context, + body_builder, + body_block.operations, + body_block.outputs, + ) + + # Also assume all outputs are different from loop inputs (i.e., no loop + # invariant.) 
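+    # For illustration (names are hypothetical, not from the original
+    # source): with loop var "i" mapped to block input "i_x" and body output
+    # "i_out", the copy below writes "i_out" back into "i_x", so the next
+    # iteration (and the condition network) reads the updated value.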
+ for vx_in, vx_out in zip(body_block.inputs, body_block.outputs): + if vx_in.name == vx_out.name: + msg = "Loop invariant var {} detected in block {}" + logger.warning(msg.format(vx_in.name, body_block.name)) + continue + body_builder.add_copy( + name=vx_in.name + "_ret_copy", + input_name=make_input(const_context, builder, vx_out), + output_name=vx_in.name, + ) + + +@register_mil_to_nn_mapping +def identity(const_context, builder, op): + builder.add_copy( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def concat(const_context, builder, op): + # filter out input tensor with 0 size + values = [] + for v in op.values: + if len(v.shape) > 0 and v.shape[op.axis.val] == 0: + continue + values.append(v) + + if len(values) == 0: + raise NotImplementedError('0 size tensor unsupported.') + + if len(values) >= 2: + rank = values[0].rank + if op.interleave.val: + builder.add_concat_nd( + name=op.name, + input_names=make_input(const_context, builder, values), + output_name=op.outputs[0].name, + axis=op.axis.val, + interleave=True) + elif rank >= 4 and (op.axis.val == -3 or op.axis.val > 0 and op.axis.val == rank - 3): + builder.add_elementwise( + name=op.name, + input_names=make_input(const_context, builder, values), + output_name=op.outputs[0].name, + mode="CONCAT", + ) + else: + builder.add_concat_nd( + name=op.name, + input_names=make_input(const_context, builder, values), + output_name=op.outputs[0].name, + axis=op.axis.val) + else: + builder.add_copy( + name=op.name, + input_name=make_input(const_context, builder, values[0]), + output_name=op.outputs[0].name) + + +@register_mil_to_nn_mapping +def stack(const_context, builder, op): + builder.add_stack( + name=op.name, + input_names=make_input(const_context, builder, op.values), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def split(const_context, builder, op): + split = op.sizes + split = [size for size in split if size != 0] + has_equal_splits = all([size == split[0] for size in split]) + num_splits = len(split) + output_names = [op.outputs[i].name for i in range(len(op.sizes)) if op.sizes[i] != 0] + + if has_equal_splits: + builder.add_split_nd( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_names=output_names, + axis=op.axis.val, + num_splits=num_splits) + else: + builder.add_split_nd( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_names=output_names, + axis=op.axis.val, + split_sizes=list(split)) + + +@register_mil_to_nn_mapping +def argsort(const_context, builder, op): + axis = op.x.rank + op.axis.val if op.axis.val < 0 else op.axis.val + builder.add_argsort( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=axis, + descending=(not op.ascending.val), + ) + + +@register_mil_to_nn_mapping +def pixel_shuffle(const_context, builder, op): + builder.add_reorganize_data( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode="PIXEL_SHUFFLE", + block_size=op.upscale_factor.val, + ) + + +@register_mil_to_nn_mapping +def sliding_windows(const_context, builder, op): + builder.add_sliding_windows( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + window_size=op.size.val, + step=op.stride.val, + ) + + +@register_mil_to_nn_mapping +def crop(const_context, 
builder, op): + builder.add_crop( + name=op.name, + input_names=[op.x.name], + output_name=op.outputs[0].name, + offset=0, + left=op.crop_width.val[0], + right=op.crop_width.val[1], + top=op.crop_height.val[0], + bottom=op.crop_height.val[1], + ) + + +@register_mil_to_nn_mapping +def crop_resize(const_context, builder, op): + grid_sampling_mode_map = { + "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE", + "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE", + "DEFAULT": "UPSAMPLE_MODE", + "OFFSET_CORNERS": "ROI_ALIGN_MODE", + } + + if op.sampling_mode.val not in grid_sampling_mode_map: + raise NotImplementedError( + "Unsupported 'sampling_mode' ('{}') in neuralnetwork backend".format( + op.sampling_mode.val + ) + ) + + mode = grid_sampling_mode_map[op.sampling_mode.val] + + input_expanded = op.name + "_x_expand" + builder.add_expand_dims( + name=input_expanded, + input_name=make_input(const_context, builder, op.x), + output_name=input_expanded, + axes=[0], + ) + builder.add_crop_resize( + name=op.name, + input_names=make_input(const_context, builder, [input_expanded, op.roi]), + output_name=op.outputs[0].name, + target_height=op.target_height.val, + target_width=op.target_width.val, + mode=mode, + normalized_roi=op.normalized_coordinates.val, + box_indices_mode=op.box_coordinate_mode.val, + spatial_scale=op.spatial_scale.val, + ) + + +@register_mil_to_nn_mapping +def custom_op(const_context, builder, op): + class_name = op.bindings.get("class_name", op.name) + input_order = op.bindings.get("input_order", []) + parameters = op.bindings.get("parameters", []) + weights = op.bindings.get("weights", []) + description = op.bindings.get("description", "") + + if len(input_order) == 0: + raise ValueError("Inputs not provided for Custom Layer: {}".format(op.name)) + + # Get input names + inputs = [op.inputs[_name] for _name in input_order] + + # Get output names + output_names = [_output.name for _output in op.outputs] + + # Load custom params + params = NeuralNetwork_pb2.CustomLayerParams() + params.className = class_name + params.description = description + + # Load parameters + for _param in parameters: + param = op.inputs[_param] + param_val = param.val + if types.is_bool(param.dtype): + params.parameters[_param].boolValue = param_val + elif types.is_int(param.dtype): + params.parameters[_param].intValue = param_val + elif types.is_float(param.dtype): + params.parameters[_param].doubleValue = param_val + elif types.is_str(param.dtype): + params.parameters[_param].stringValue = param_val + else: + raise ValueError( + "Unknown parameter type for custom layer- " + "Op: {}, Parameter: {}, Type: {}".format(op.name, _param, param.dtype) + ) + + # Load weights + for _weight in weights: + wt = params.weights.add() + wt.floatValue.extend(map(float, _weight)) + + # Add a custom layer + builder.add_custom( + name=op.name, + input_names=make_input(const_context, builder, inputs), + output_names=output_names, + custom_proto_spec=params, + ) + + +@register_mil_to_nn_mapping +def make_list(const_context, builder, op): + # Set a initial size + size = op.init_length.val + + # set the dynamic dimensions to 1 for initialization + # Ex: op.elem_shape = [i0, 128] will result in [1, 128] + elem_shape = [1 if isinstance(dim_var.val, str) else + dim_var.val for dim_var in op.elem_shape] + + if size is not None: + array_size = size if size > 0 else 1 + array_shape = [array_size] + elem_shape + add_const( + const_context, + builder, + op.outputs[0].name, + val=_np.zeros(array_shape, dtype="float"), + ) + else: + if 
len(elem_shape) > 0: + node_es_name = op.name + "_element_shape" + add_const( + const_context, + builder, + node_es_name, + val=_np.array(elem_shape, dtype="float"), + ) + + # Concatenate list length of the input, should be a constant vector of size 1) with element shape + node_arr_shape_name = op.name + "_arr_shape" + builder.add_concat_nd( + name=node_arr_shape_name, + input_names=[op.init_length.name, node_es_name], + output_name=node_arr_shape_name, + axis=0, + ) + else: + raise ValueError("elem_shape should have length > 0.") + + builder.add_fill_dynamic( + name=op.name, input_name=node_arr_shape_name, output_name=op.outputs[0].name + ) + + +def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode): + # we do two things in this helper function + # (1) + # check if we need to re-initialize the tensorarray: + # it happens when the elem_shape is runtime determined and the runtime shape is not equal to + # the default shape. Ex: elem_shape is = [i0, 10] (initilized with [1, 10]) and at the runtime we get [2, 10]. + + # (2) + # If index_var >= len(ls_var), reallocate the array and copy over existing + # contents + + # index_var: str or Var + # ls_var: Var + + # check if elem_shape is runtime-determined + elem_shape = tuple(value_var.shape) + has_dynamic_shape = any([is_symbolic(i) for i in elem_shape]) + + # get the fill shape of the tensor array + # [length, elem_dim1, elem_dim2, ...] + full_shape_name = ls_var.name + "_full_shape" + builder.add_get_shape( + name=full_shape_name, + input_name=ls_var.name, # no need to make_input + output_name=full_shape_name, + ) + + # slice shape [length, elem_dim1, elem_dim2, ...] to get current length + curr_len_name = ls_var.name + "_length" + builder.add_slice_static( + name=curr_len_name, + input_name=full_shape_name, + output_name=curr_len_name, + begin_ids=[0], + end_ids=[1], + begin_masks=[False], + end_masks=[False], + strides=[1], + ) + + value_elem_shape_name = ls_var.name + '_value_elem_shape' + if has_dynamic_shape: + # get elem_shape from value if it is runtime-determined + # this is similar to what the backfill_make_list_elem_type tf graph pass does. + # if mode == "list_write", elem_shape equal to value.shape, + # if mode == "list_scatter", elem_shape equal to value.shape[1:] + if mode == "list_write": + builder.add_get_shape( + name=value_elem_shape_name, + input_name=make_input(const_context, builder, value_var), + output_name=value_elem_shape_name, + ) + elif mode == "list_scatter": + raw_value_elem_shape_name = ls_var.name + '_raw_value_elem_shape' + builder.add_get_shape( + name=raw_value_elem_shape_name, + input_name=make_input(const_context, builder, value_var), + output_name=raw_value_elem_shape_name, + ) + + builder.add_slice_static( + name=value_elem_shape_name, + input_name=raw_value_elem_shape_name, + output_name=value_elem_shape_name, + begin_ids=[1], + end_ids=[-1], + begin_masks=[False], + end_masks=[True], + strides=[1], + ) + else: + add_const(const_context, builder, value_elem_shape_name, _np.array(elem_shape)) + + # if elem_shape is runtime-determined, check if we need to re-initialize the array + + if has_dynamic_shape: + # slice shape [length, elem_dim1, elem_dim2, ...] 
to get list elem_shape + curr_elem_shape_name = ls_var.name + "_ls_elem_shape" + builder.add_slice_static( + name=curr_elem_shape_name, + input_name=full_shape_name, + output_name=curr_elem_shape_name, + begin_ids=[1], + end_ids=[-1], + begin_masks=[False], + end_masks=[True], + strides=[1], + ) + + # test if the runtime elem_shape from the list and value are equal + not_equal_name = ls_var.name + '_elem_shape_not_equal' + builder.add_not_equal( + name=not_equal_name, + input_names=[curr_elem_shape_name, value_elem_shape_name], + output_name=not_equal_name, + ) + + reduce_any_name = ls_var.name + '_reduce_any' + builder.add_reduce_sum( + name=reduce_any_name, + input_name=not_equal_name, + output_name=reduce_any_name, + axes=[0], + keepdims=False, + reduce_all=True, + ) + + # if the two elem_shape are different, then re initialize the list with elem_shape from the value + re_initialize_condition_name = ls_var.name + "_condition_re_initialize" + layer = builder.add_branch(name=re_initialize_condition_name, input_name=reduce_any_name) + true_builder = neural_network.NeuralNetworkBuilder( + nn_spec=layer.branch.ifBranch, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + + re_initialize_shape_name = ls_var.name + "_re_initialize_shape" + true_builder.add_concat_nd( + name=re_initialize_shape_name, + input_names=[curr_len_name, value_elem_shape_name], + output_name=re_initialize_shape_name, + axis=0, + ) + + re_initialize_name = ls_var.name + "_re_initialize" + true_builder.add_fill_dynamic( + name=re_initialize_name, + input_name=re_initialize_shape_name, + output_name=re_initialize_name, + value=0.0, + ) + + true_builder.add_copy( + name=ls_var.name + "_re_initialize_assign", + input_name=re_initialize_name, + output_name=ls_var.name + ) + + # after re-initialize the list, we now check if we need to reallocate the list + # check if the index > curr_length + is_growing_name = ls_var.name + "_is_growing" + builder.add_greater_than( + name=is_growing_name, + input_names=make_input(const_context, builder, [index_var, curr_len_name]), + output_name=is_growing_name, + use_greater_than_equal=True, + ) + + condition_name = ls_var.name + "_condition" + layer = builder.add_branch(name=condition_name, input_name=is_growing_name) + + true_builder = neural_network.NeuralNetworkBuilder( + nn_spec=layer.branch.ifBranch, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + + # alloc_length_name0 = index - list_length + alloc_length_name0 = ls_var.name + "_extra_length0" + true_builder.add_subtract_broadcastable( + name=alloc_length_name0, + input_names=make_input(const_context, builder, [index_var, curr_len_name]), + output_name=alloc_length_name0, + ) + + # alloc_length_name1 = index - list_length + 1 + alloc_length_name1 = ls_var.name + "_extra_length1" + true_builder.add_elementwise( + name=alloc_length_name1, + input_names=[alloc_length_name0], + mode="ADD", + output_name=alloc_length_name1, + alpha=1, + ) + + # alloc_shape_name = [alloc_length] + elem_shape + alloc_shape_name = ls_var.name + "_alloc_shape" + true_builder.add_concat_nd( + name=alloc_shape_name, + input_names=[alloc_length_name1, value_elem_shape_name], + output_name=alloc_shape_name, + axis=0, + ) + + # new_alloc_name is np.zeros([alloc_length] + elem_shape) + new_alloc_name = ls_var.name + "_alloc" + true_builder.add_fill_dynamic( + name=new_alloc_name, + input_name=alloc_shape_name, + output_name=new_alloc_name, + value=0.0, + ) + + # new_list_name is np.concat([old_list, new_alloc]) + new_list_name 
= ls_var.name + "_new" + true_builder.add_concat_nd( + name=new_list_name, + input_names=[ls_var.name, new_alloc_name], + output_name=new_list_name, + axis=0, + ) + + # Copy new_list_name to ls_var.name + true_builder.add_copy( + name=ls_var.name + "_assign", input_name=new_list_name, output_name=ls_var.name + ) + + +@register_mil_to_nn_mapping +def list_write(const_context, builder, op): + _realloc_list(const_context, builder, op.ls, op.index, op.value, "list_write") + + # expanded_value_name is [1, op.value] + expanded_value_name = op.ls.name + '_' + op.value.name + "_expanded" + builder.add_expand_dims( + name=expanded_value_name, + input_name=make_input(const_context, builder, op.value), + output_name=expanded_value_name, + axes=[0], + ) + + builder.add_scatter( + name=op.name, + input_names=make_input( + const_context, builder, [op.ls, op.index, expanded_value_name] + ), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def list_gather(const_context, builder, op): + builder.add_gather( + name=op.name, + input_names=make_input(const_context, builder, [op.ls, op.indices]), + output_name=op.outputs[0].name, + axis=0, + ) + + +@register_mil_to_nn_mapping +def list_scatter(const_context, builder, op): + max_idx_name = op.indices.name + "_max" + builder.add_reduce_max( + name=max_idx_name, + axes=[0], + keepdims=False, + input_name=make_input(const_context, builder, op.indices), + output_name=max_idx_name, + ) + _realloc_list(const_context, builder, op.ls, max_idx_name, op.value, "list_scatter") + builder.add_scatter( + name=op.name, + input_names=make_input(const_context, builder, [op.ls, op.indices, op.value]), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def list_read(const_context, builder, op): + # gathered_name has shape [1] + elem_shape + gathered_name = op.name + "_gathered" + builder.add_gather( + name=op.name, + input_names=make_input(const_context, builder, [op.ls, op.index]), + output_name=gathered_name, + axis=0, + ) + + # squeezed_name has shape elem_shape + squeezed_name = op.name + "_squeezed" + builder.add_squeeze( + name=squeezed_name, + input_name=gathered_name, + output_name=op.outputs[0].name, + axes=[0], + ) + + +@register_mil_to_nn_mapping +def list_length(const_context, builder, op): + # list_shape_name == [list_length] + elem_shape + list_shape_name = op.ls.name + "_shape" + builder.add_get_shape( + name=list_shape_name, + input_name=make_input(const_context, builder, op.ls), + output_name=list_shape_name, + ) + + # slice to get list_length + builder.add_slice_static( + name=op.name, + input_name=list_shape_name, + output_name=op.outputs[0].name, + begin_ids=[0], + end_ids=[1], + begin_masks=[False], + end_masks=[False], + strides=[1], + ) + +@register_mil_to_nn_mapping +def _const_symbolic(const_context, builder, op): + # do nothing + pass diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py new file mode 100644 index 00000000..d7ee0008 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . 
import ( + alert_return_type_cast, + commingle_loop_vars, + conv1d_decomposition, + handle_return_inputs_as_outputs, + handle_return_unused_inputs, + handle_unused_inputs, + mlmodel_passes, +) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py new file mode 100644 index 00000000..dc2fe7e2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py @@ -0,0 +1,48 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Var, types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="nn_backend") +class alert_return_type_cast(AbstractGraphPass): + """ + prog: Program + + # NN always implicitly cast return types to fp32. Detect any return + # types that are not builtin.fp32 and alert user of the implicit + # casting. This pass must be at the end. Example: + # + # Given: + # + # main(%x: (2, 3, fp32)) { + # block0() { + # %shape_0: (2,i32)* = const(val=[4, 7]) + # } -> (%shape_0) + # } + # + # (Notice that %shape_0 is i32, not fp32) + # + # Result: + # + # The same program. + # + # Alert messages about %shape_0 being implicitly cast from i32 to fp32. + # + # Comment: This pass should do more proper casting as backend supports more types. + """ + def apply(self, prog): + for f_name, f in prog.functions.items(): + for v in f.outputs: + if isinstance(v, Var) and v.dtype != types.fp32: + msg = ( + "Output var {} of type {} in function {} is " + "cast to type fp32" + ) + logger.warning( + msg.format(v.name, types.builtin_to_string(v.dtype), f_name) + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py new file mode 100644 index 00000000..7105ea09 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py @@ -0,0 +1,75 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _commingle_loop_vars_block(block): + for op in list(block.operations): + for b in op.blocks: + _commingle_loop_vars_block(b) + + if op.op_type != "while_loop": + continue + + for block in op.blocks: + for v_out, vx_in in zip(op.outputs, block.inputs): + # Disable check as v_out is not visible in block. 
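+                # (Using the example from the class docstring below: every use
+                # of %a.x inside the loop blocks is rewritten to use %loop:0.)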
+ block.replace_uses_of_var_after_op( + anchor_op=None, + old_var=vx_in, + new_var=v_out, + no_check_var_visibility=True, + ) + + # replace block inputs + block._block_inputs = op.outputs + + +@register_pass(namespace="nn_backend") +class commingle_loop_vars(AbstractGraphPass): + """ + prog: Program + + # NN backend expects output vars as loop vars. Example: + # + # Given: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \ + # while_loop(loop_vars=(%a, %b)) + # loop_cond(%a.x, %b.x) { + # %cond_var: (bool) = some_op(x=%a.x, y=%b.x) + # } -> (%cond_var) + # loop_body(%a.x, %b.x) { + # %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x) + # } -> (%add_0, %b.x) + # } -> (%loop:0, %loop:1) + # } + # + # Result: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \ + # while_loop(loop_vars=(%a, %b)) + # loop_cond(%loop:0, %loop:1) { + # %cond_var: (bool) = some_op(x=%loop:0, y=%loop:1) + # } -> (%cond_var) + # loop_body(%loop:0, %loop:1) { + # %add_0: (1, 2, fp32) = add(x=%loop:0, y=%loop:1) + # } -> (%add_0, %loop:1) + # } -> (%loop:0, %loop:1) + # } + # + # Comment: The resulting program is no longer SSA (multiple assignments on + # %loop:0). + """ + def apply(self, prog): + for f in prog.functions.values(): + _commingle_loop_vars_block(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py new file mode 100644 index 00000000..48c207c5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py @@ -0,0 +1,101 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Operation +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="nn_backend") +class decompose_conv1d(AbstractGraphPass): + """ + NeuralNetwork does not support conv1d natively, + instead it decomposes conv1d into expand_dims -> conv2d -> squeeze + + Let us decompose conv1d for NN, + so we may have a chance to optimize expand_dims -> conv2d -> squeeze + + Given: + %2 = conv(%1), %1.rank = 3 + ... + + Result: + %3 = expand_dims(%1, axes=-2) + %4 = conv(%3) + %2 = squeeze(%4, axes=-2) + ... 
+ + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._decompose_conv1d_block(f) + + @block_context_manager + def _decompose_conv1d_block(self, block: Block): + def help_decompose_conv1d_block(block: Block) -> bool: + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = help_decompose_conv1d_block(b) + + # must be conv1d + if op.op_type != "conv" or op.x.rank != 3: + continue + + if self._try_apply_transform(op, block): + # has to break as the downstream iterator is affected + return True + + return False + + block_changed = True + while block_changed: + block_changed = help_decompose_conv1d_block(block) + + @staticmethod + def _try_apply_transform(conv_op: Operation, block: Block) -> bool: + # create `expand_dims` + expand_out = mb.expand_dims(x=conv_op.x, axes=(-2,), before_op=conv_op) + + # prepare `conv2d` + conv_kwargs = {"x": expand_out, "before_op": conv_op} + + # inherit `pad_type`, `groups`, `bias` from `conv1d` + conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val + conv_kwargs["groups"] = conv_op.inputs["groups"].val + bias = conv_op.inputs.get("bias", None) + if bias is not None: + conv_kwargs["bias"] = bias + + # expand `weight`, `strides`, `pad`, `dilations` from `conv1d` + conv_kwargs["weight"] = mb.expand_dims( + x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op + ) + conv_kwargs["strides"] = (1, conv_op.inputs["strides"].val[-1]) + conv_kwargs["pad"] = (0, 0, conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1]) + conv_kwargs["dilations"] = (1, conv_op.inputs["dilations"].val[-1]) + + # compose `conv2d` + conv_out = mb.conv(**conv_kwargs) + + # create `squeeze` + squeeze_out = mb.squeeze( + x=conv_out, axes=(-2,), name=conv_op.outputs[0].name, before_op=conv_op + ) + + # try replacing `conv1d` output + # with the new `expand_dims` -> `conv2d` -> `squeeze` output + if conv_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=conv_op, old_var=conv_op.outputs[0], new_var=squeeze_out + ): + # remove `conv1d` + block.remove_ops([conv_op]) + return True + return False diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py new file mode 100644 index 00000000..1a5f42a5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py @@ -0,0 +1,62 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _handle_return_inputs_as_outputs_func(f): + returned_inputs = [] + for v_name, v in f.inputs.items(): + if v not in f.outputs: + continue + returned_inputs.append(v) + + with f: + for v in returned_inputs: + # copy twice since NN layer cannot have input name == output name + v_tmp = mb.identity(x=v, name=v.name + "_tmp") + res = mb.identity(x=v_tmp, name=v.name) + res.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=res.op, old_var=v, new_var=res + ) + +@register_pass(namespace="nn_backend") +class handle_return_inputs_as_outputs(AbstractGraphPass): + """ + prog: Program + + # NN cannot handle returning input as output. Insert an identity op for + # those cases. Example: + # + # Given: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %mul_0_y_0: (i32)* = const(val=2) + # %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0) + # } -> (%mul_0, %b) + # } + # + # (Notice that %b is returned from input. This causes error in NN) + # + # Result: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %mul_0_y_0: (i32)* = const(val=2) + # %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0) + # %b_tmp: (1, 2, fp32) = identity(x=%b) + # %b: (1, 2, fp32) = identity(x=%b_tmp) + # } -> (%mul_0, %b) + # } + # + # where identity is applied twice since NN layer cannot have + # input name == output name + """ + def apply(self, prog): + for f in prog.functions.values(): + _handle_return_inputs_as_outputs_func(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py new file mode 100644 index 00000000..3f8e2b9e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py @@ -0,0 +1,59 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _handle_return_unused_inputs_func(f): + + returned_unused_inputs = filter(lambda x: x in f.outputs, list(f.inputs.values())) + + with f: + for v in returned_unused_inputs: + # copy twice since NN layer cannot have input name == output name + v_tmp = mb.identity(x=v, name=v.name + "_tmp") + res = mb.identity(x=v_tmp, name=v.name) + res.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=res.op, old_var=v, new_var=res + ) + +@register_pass(namespace="nn_backend") +class handle_return_unused_inputs(AbstractGraphPass): + """ + prog: Program + + # NN cannot handle returning input as output. Insert an identity op for + # those cases. 
Example: + # + # Given: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %mul_0_y_0: (i32)* = const(val=2) + # %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0) + # } -> (%mul_0, %b) + # } + # + # (Notice that %b is returned from input. This causes error in NN) + # + # Result: + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32)) { + # block0() { + # %mul_0_y_0: (i32)* = const(val=2) + # %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0) + # %b_tmp: (1, 2, fp32) = identity(x=%b) + # %b: (1, 2, fp32) = identity(x=%b_tmp) + # } -> (%mul_0, %b) + # } + # + # where identity is applied twice since NN layer cannot have + # input name == output name + """ + def apply(self, prog): + for f in prog.functions.values(): + _handle_return_unused_inputs_func(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py new file mode 100644 index 00000000..2effac4f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py @@ -0,0 +1,50 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _handle_unused_inputs_func(f): + unused_inputs = [v for v_name, v in f.inputs.items() if len(v.child_ops) == 0] + + with f: + for v in unused_inputs: + # copy the input + v_tmp = mb.identity(x=v, name=v.name + "_tmp") + + +@register_pass(namespace="nn_backend") +class handle_unused_inputs(AbstractGraphPass): + """ + prog: Program + + # NN doesn't allow unused inputs. Insert an identity op to consume + # inputs (though its outputs are not used.). This pass must come after + # dead code elimination as all inserted code are "dead code". Example: + # + # Given: + # + # main(%x: (2, 3, fp32)) { + # block0() { + # %shape_0_const: (2,i32)* = const(val=[4, 7]) + # } -> (%shape_0_const) + # } + # + # (Notice that input %x is not consumed. This causes error in NN.) + # + # Result: + # + # main(%x: (2, 3, fp32)) { + # block0() { + # %unused_var: (2, 3, fp32) = identity(x=%x) + # %shape_0_const: (2,i32)* = const(val=[4, 7]) + # } -> (%shape_0_const) + # } + """ + def apply(self, prog): + for f in prog.functions.values(): + _handle_unused_inputs_func(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py new file mode 100644 index 00000000..9ab855e9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py @@ -0,0 +1,467 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def _get_nn_spec(spec):
+    if spec.WhichOneof("Type") == "neuralNetwork":
+        nn_spec = spec.neuralNetwork
+    elif spec.WhichOneof("Type") == "neuralNetworkClassifier":
+        nn_spec = spec.neuralNetworkClassifier
+    elif spec.WhichOneof("Type") == "neuralNetworkRegressor":
+        nn_spec = spec.neuralNetworkRegressor
+    else:
+        raise ValueError("Specification must contain a neural network")
+    return nn_spec
+
+
+def _get_blob_out_degree(spec):
+    """
+    Computes the use count of every tensor/node in the NN graph,
+    i.e., how many layers use it as an input
+
+    :param spec : NeuralNetworkSpecification
+    :returns use_count_dict : str -> int, a dictionary with node name as a key and its use count as a value
+    """
+
+    def _get_blob_out_degree_rec(nn_spec, out_degree):
+        nn_layers = nn_spec.layers
+        for layer in nn_layers:
+            layer_type = layer.WhichOneof("layer")
+            for inp in layer.input:
+                out_degree[inp] = out_degree.get(inp, 0) + 1
+            if layer_type == "loop":
+                out_degree[layer.loop.conditionVar] = (
+                    out_degree.get(layer.loop.conditionVar, 0) + 1
+                )
+                _get_blob_out_degree_rec(layer.loop.conditionNetwork, out_degree)
+                _get_blob_out_degree_rec(layer.loop.bodyNetwork, out_degree)
+            elif layer_type == "branch":
+                _get_blob_out_degree_rec(layer.branch.ifBranch, out_degree)
+                _get_blob_out_degree_rec(layer.branch.elseBranch, out_degree)
+
+    use_count_dict = {}
+    # Collect variable use count recursively
+    nn_spec = _get_nn_spec(spec)
+    _get_blob_out_degree_rec(nn_spec, use_count_dict)
+
+    # Network outputs count as uses
+    network_outputs = _get_network_output(spec)
+    for _output in network_outputs:
+        use_count_dict[_output] = use_count_dict.get(_output, 0) + 1
+    return use_count_dict
+
+
+def _is_layer(nn_layer, layer_type):
+    """
+    :param nn_layer : NN layer proto message
+    :param layer_type : str Layer type to check against
+    :returns True if nn_layer is of type `layer_type` otherwise False
+    """
+    return nn_layer.WhichOneof("layer") == layer_type
+
+
+def _get_input(layer, index=0):
+    """
+    :param layer : NN Layer Proto message
+    :param index : Layer input index (Default 0)
+    :returns name of input at provided index if present, otherwise None
+    """
+    if len(layer.input) <= index:
+        return None
+    return layer.input[index]
+
+
+def _get_output(layer, index=0):
+    """
+    :param layer : NN Layer Proto message
+    :param index : Layer output index (Default 0)
+    :returns name of output at provided index if present, otherwise None
+    """
+    if len(layer.output) <= index:
+        return None
+    return layer.output[index]
+
+
+def _get_network_output(spec):
+    """
+    :param spec : CoreML Specification
+    :returns network output names
+    """
+    network_output_names = []
+    for _out in spec.description.output:
+        network_output_names.append(_out.name)
+    return network_output_names
+
+
+def transform_conv_crop(spec):
+    """
+    Transforms Conv -> Crop -> BN (if present) -> Activation (if present) into
+    Conv -> BN (if present) -> Activation (if present) -> Crop.
+    This transformation will allow Conv -> BN -> Activation fusion by changing
+    the position of the crop layer, which does not affect the computation.
+    """
+    # Collect metadata
+    out_degree = _get_blob_out_degree(spec)
+    network_output_names = _get_network_output(spec)
+
+    nn_spec = _get_nn_spec(spec)
+    nn_layers = nn_spec.layers
+    for i in range(0, len(nn_layers) - 2):
+
+        # The Convolution output may be used as a network output or by more
+        # than one layer; that's acceptable
+        if not _is_layer(nn_layers[i], "convolution"):
+            continue
+
+        # The Crop layer's input must not be a network output, and its output
+        # must be consumed by exactly one layer
+        if not (
+            _is_layer(nn_layers[i + 1], "crop")
+            and _get_input(nn_layers[i + 1]) not in network_output_names
+            and out_degree[_get_output(nn_layers[i + 1])] == 1
+        ):
+            continue
+
+        layer_to_shuffle_with = -1
+
+        # Output of Batchnorm layer must not be a network output or used by more than one layer
+        if (
+            _is_layer(nn_layers[i + 2], "batchnorm")
+            and out_degree[_get_output(nn_layers[i + 2])] == 1
+        ):
+            layer_to_shuffle_with = i + 2
+
+        # Output of Activation layer must not be a network output or used by more than one layer
+        if (
+            i + 3 < len(nn_layers)
+            and _is_layer(nn_layers[i + 3], "activation")
+            and out_degree[_get_output(nn_layers[i + 3])] == 1
+        ):
+            layer_to_shuffle_with = i + 3
+
+        if layer_to_shuffle_with == -1:
+            continue
+        # restructure crop layer
+        # Conv ---> Crop ---> BN ---> Activation ---> Layer1
+        # In the following three steps
+        # 1. Conv --------------> BN ---> Activation ---> Layer1
+        #         \            /
+        #          ---> Crop --
+        nn_layers[i].output[0] = nn_layers[i + 1].output[0]
+        # 2. Conv ---> BN ---> Activation ---> Layer1
+        #         \                      /
+        #          -----------------Crop ----
+        nn_layers[i + 1].output[0] = nn_layers[layer_to_shuffle_with].output[0]
+        # 3. Conv ---> BN ---> Activation ---> Crop ---> Layer1
+        nn_layers[layer_to_shuffle_with].output[0] = nn_layers[i + 1].input[0]
+
+        # Add Crop layer at new position and remove from current position
+        crop_layer = nn_layers[i + 1]
+        nn_layers.remove(crop_layer)
+        nn_layers.insert(layer_to_shuffle_with, crop_layer)
+
+
+def remove_disconnected_layers(spec):
+    """
+    Removes layers from the model specification whose outputs are neither
+    connected to nor on a path to a network output.
+    """
+
+    def _remove_layers_from_spec(nn_spec, layers_to_delete):
+        nn_layers = nn_spec.layers
+        for _layer in layers_to_delete:
+            nn_layers.remove(_layer)
+
+    def _get_disconnected_layers_rec(nn_spec):
+        """
+        - Iterates over layers in bottom-up fashion
+        - Collects layers whose outputs are not used (marks them for lazy deletion)
+        - Recursively iterates over the NN spec if a layer is a Loop or Branch
+        """
+
+        def _decrease_input_degree(layer):
+            """
+            Helper routine to decrement the out-degree of the given layer's input nodes
+            """
+            for _input in layer.input:
+                out_degree[_input] -= 1
+                if out_degree[_input] == 0:
+                    del out_degree[_input]
+
+        nn_layers = nn_spec.layers
+        layers_to_delete = []
+        for _layer in reversed(nn_layers):
+            layer_type = _layer.WhichOneof("layer")
+            if layer_type == "loop":
+                condition_net_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.loop.conditionNetwork
+                )
+                body_net_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.loop.bodyNetwork
+                )
+                _remove_layers_from_spec(
+                    _layer.loop.conditionNetwork, condition_net_layers_to_delete
+                )
+                _remove_layers_from_spec(
+                    _layer.loop.bodyNetwork, body_net_layers_to_delete
+                )
+
+                # NOTE: Debatable?
+                # If the condition network or the body network is empty, delete the loop layer
+                if (
+                    len(_layer.loop.conditionNetwork.layers) == 0
+                    or len(_layer.loop.bodyNetwork.layers) == 0
+                ):
+                    layers_to_delete.append(_layer)
+                    _decrease_input_degree(_layer)
+                continue
+
+            if layer_type == "branch":
+                if_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.branch.ifBranch
+                )
+                else_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.branch.elseBranch
+                )
+
+                total_if_layers = len(_layer.branch.ifBranch.layers)
+                total_else_layers = len(_layer.branch.elseBranch.layers)
+
+                if (
+                    len(if_layers_to_delete) != total_if_layers
+                    and len(else_layers_to_delete) != total_else_layers
+                ):
+                    # If both branches are non-empty after dead-layer elimination,
+                    # remove the respective layers
+                    _remove_layers_from_spec(
+                        _layer.branch.ifBranch, if_layers_to_delete
+                    )
+                    _remove_layers_from_spec(
+                        _layer.branch.elseBranch, else_layers_to_delete
+                    )
+                elif (
+                    len(if_layers_to_delete) == total_if_layers
+                    and len(else_layers_to_delete) == total_else_layers
+                ):
+                    # If both branches are empty after dead-layer elimination,
+                    # remove the branch layer altogether
+                    layers_to_delete.append(_layer)
+                    _decrease_input_degree(_layer)
+                continue
+
+            output_is_used = False
+            for _output in _layer.output:
+                # If output is used, cannot remove current layer
+                if _output in out_degree:
+                    output_is_used = True
+                    break
+
+            # If no output from the current node is used,
+            # remove the layer and decrement the use count for all its inputs
+            if not output_is_used:
+                layers_to_delete.append(_layer)
+                _decrease_input_degree(_layer)
+
+        return layers_to_delete
+
+    def _remove_disconnected_layers_rec(nn_spec):
+        """
+        Entry point for removing disconnected layers
+        """
+        layers_to_delete = _get_disconnected_layers_rec(nn_spec)
+        # delete layers to be removed
+        _remove_layers_from_spec(nn_spec, layers_to_delete)
+
+    # Get the use count of each layer
+    out_degree = _get_blob_out_degree(spec)
+    nn_spec = _get_nn_spec(spec)
+    # Initiate removal from the top-level neural network spec
+    _remove_disconnected_layers_rec(nn_spec)
+
+
+def remove_redundant_transposes(spec):
+    """
+    Removes back-to-back transpose layers that compose to the identity
+    from the model specification.
+    """
+
+    def blob_name_to_layers(nn_layers):
+        """
+        output_to_layers: {str: layer_proto_message} : {blob name: layers that it feeds into}
+        input_to_parent_layers: {str: layer_proto_message} : {blob name: parent layer that produces it}
+        """
+        output_to_layers = {}
+        for layer in nn_layers:
+            for input in layer.input:
+                if not input in output_to_layers:
+                    output_to_layers[input] = [layer]
+                else:
+                    output_to_layers[input].append(layer)
+
+        input_to_parent_layers = {}
+        for layer in nn_layers:
+            for output in layer.output:
+                if not layer.WhichOneof("layer") == "copy":
+                    assert output not in input_to_parent_layers, \
+                        "'{}' blob is generated by more than one layer".format(output)
+                    input_to_parent_layers[output] = layer
+
+        return input_to_parent_layers, output_to_layers
+
+    def _delete_layers(nn_spec, layers_to_delete):
+        """
+        Given a neural network spec and sequences of transposes to remove, rewire
+        the network to bypass those transposes and remove them from the spec.
+        """
+        nn_layers = nn_spec.layers
+        _, output_to_layers = blob_name_to_layers(nn_layers)
+
+        # First pass: rewire layers to bypass those that will be deleted.
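+        # E.g. for a deletable sequence [t1, t2] where t2 consumes t1's output,
+        # every consumer of t2.output[0] is rewired to read t1.input[0] directly,
+        # bypassing both transposes.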
+        for layers in layers_to_delete:
+            start_layer = layers[0]
+            end_layer = layers[-1]
+
+            # Replace each child's input with start_layer's input
+            children = output_to_layers[end_layer.output[0]]
+            for child in children:
+                idx = [
+                    i
+                    for i, input in enumerate(child.input)
+                    if input == end_layer.output[0]
+                ]
+                assert len(idx) == 1
+                idx = idx[0]
+                child.input[idx] = start_layer.input[0]
+
+        # Second pass: delete the layers.
+        for layers in layers_to_delete:
+            for layer in layers:
+                nn_layers.remove(layer)
+
+    def _find_redundant_transposes(nn_spec):
+        """
+        Search the neural network spec for sequences of transposes that compose
+        to the identity, and return a list of those sequences.
+        """
+        nn_layers = nn_spec.layers
+        layers_to_delete = []
+
+        input_to_parent_layers, output_to_layers = blob_name_to_layers(nn_layers)
+
+        for layer in nn_layers:
+            # Only start with the last element of the transpose layers sequence
+            if not layer.WhichOneof("layer") == "transpose":
+                continue
+            if (
+                layer.output[0] in output_to_layers
+                and len(output_to_layers[layer.output[0]]) == 1
+                and output_to_layers[layer.output[0]][0].WhichOneof("layer")
+                == "transpose"
+            ):
+                continue
+
+            # Get the transpose layers sequence
+            layers = []
+            cursor = layer
+            while True:
+                if cursor.output[0] in output_to_layers:
+                    layers.append(cursor)
+                if not cursor.input[0] in input_to_parent_layers:
+                    break
+                cursor = input_to_parent_layers[cursor.input[0]]
+                if cursor.WhichOneof("layer") != "transpose":
+                    break
+                if len(output_to_layers[cursor.output[0]]) != 1:
+                    break
+            layers = layers[::-1]
+
+            if len(layers) == 0:
+                continue
+
+            # Optimize the number of layers which can be merged using dynamic programming
+            def solve_dp(layers):
+                """
+                The resulting dp[i] is the maximum length of a transpose sequence
+                starting at index i that composes to the identity.
+                For example, dp[0] = 0 means no sequence starting at 0 composes to the identity;
+                dp[10] = 5 means the longest identity sequence starting at 10 has length 5,
+                so [layers[10], layers[11], ..., layers[14]] is the longest identity sequence starting at 10.
+
+                # dic: {tuple:int}
+                # key is the net transpose axes pattern starting from the first layer
+                # value is the highest index of the layer which has this pattern
+                # e.g. if dic[(1,2,0)] = 34, it means that starting from the 1st layer,
+                # the net transpose pattern `(1,2,0)` is last seen at layer index 34. No layer
+                # after the 34th layer will result in the net pattern `(1,2,0)`
+                """
+                dim = len(layers[0].transpose.axes)
+                dp = [0] * len(layers)
+                dic = {}
+                axes = list(range(dim))
+                dic[tuple(axes)] = 0
+                for i in range(len(layers)):
+                    axes = [axes[k] for k in layers[i].transpose.axes]
+                    key = tuple(axes)
+                    if key in dic:
+                        dp[dic[key]] = i - dic[key] + 1
+                    dic[key] = i + 1
+                for i in range(len(layers) - 1, -1, -1):
+                    j = i + dp[i]
+                    if j < len(layers):
+                        dp[i] = dp[i] + dp[j]
+                return dp
+
+            dp = solve_dp(layers)
+
+            """
+            Once we know the longest identity sequence starting at each index, we solve
+            for the maximum total number of nodes we can remove.
+            There are many ways to do this, but we use DP again.
+            sol_num[i] keeps track of the maximum number of nodes that can be removed at or after index i.
+            For example, sol_num[10] = 5 means that at most 5 nodes can be removed from index 10 onward.
+            sol_bt[i] keeps the starting point of the first identity sequence which results in the
+            optimal solution at or after index i.
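+            (An added illustrative trace, assuming dp = [2, 0, 3, 0, 0]:
+             sol_num = [5, 3, 3, 0, 0] and sol_bt = [0, 2, 2, None, None],
+             so backtracking deletes layers[0:2] and then layers[2:5].)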
+            For example, if sol_bt[10] = 12, it means that to remove the maximum number of
+            nodes after index 10, the first identity sequence to delete starts at index 12.
+            After constructing sol_num and sol_bt by dynamic programming, we backtrack for the
+            optimal solution using sol_bt.
+            """
+            sol_num = [0] * len(dp)
+            sol_bt = [None] * len(dp)
+            if dp[-1] != 0:
+                sol_num[-1] = dp[-1]
+                sol_bt[-1] = len(dp) - 1
+            for i in range(len(sol_num) - 2, -1, -1):
+                if dp[i] == 0:
+                    sol_num[i] = sol_num[i + 1]
+                    sol_bt[i] = sol_bt[i + 1]
+                else:
+                    num = dp[i]
+                    j = i + dp[i]
+                    if j < len(sol_num):
+                        num += sol_num[j]
+                    if num > sol_num[i + 1]:
+                        sol_num[i] = num
+                        sol_bt[i] = i
+                    else:
+                        sol_num[i] = sol_num[i + 1]
+                        sol_bt[i] = sol_bt[i + 1]
+
+            # Get layers to delete using sol_bt
+            cursor = 0
+            while cursor < len(dp):
+                if sol_bt[cursor] is None:
+                    break
+                cursor = sol_bt[cursor]
+                tmp = [layers[i] for i in range(cursor, cursor + dp[cursor])]
+                layers_to_delete.append(tmp)
+                cursor += dp[cursor]
+
+        return layers_to_delete
+
+    nn_spec = _get_nn_spec(spec)
+    layers_to_delete = _find_redundant_transposes(nn_spec)
+    if len(layers_to_delete) > 0:
+        _delete_layers(nn_spec, layers_to_delete)
+        print("{} transpose pairs deleted".format(len(layers_to_delete)))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py
new file mode 100644
index 00000000..841460a9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py
@@ -0,0 +1,1052 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+import unittest
+from sys import platform
+
+import numpy as np
+
+import coremltools.models.datatypes as datatypes
+from coremltools import ComputeUnit
+from coremltools._deps import _IS_MACOS
+from coremltools.converters.mil.backend.nn.passes.mlmodel_passes import (
+    remove_disconnected_layers, remove_redundant_transposes,
+    transform_conv_crop)
+from coremltools.models import MLModel
+from coremltools.models import neural_network as neural_network
+from coremltools.models.neural_network.printer import print_network_spec
+from coremltools.models.utils import _macos_version
+
+DEBUG = False
+np.random.seed(10)
+
+
+class MLModelPassesTest(unittest.TestCase):
+    def test_load_constant_remove(self):
+        input_features = [("data", datatypes.Array(*(3, 4)))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("relu1", "RELU", "data", "relu1")
+        builder.add_load_constant_nd(
+            "const1", "c1", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_activation("relu2", "RELU", "relu1", "out")
+        builder.add_load_constant_nd(
+            "const2", "c2", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_load_constant_nd(
+            "const3", "c3", constant_value=np.ones((5,)), shape=(5,)
+        )
+        spec = builder.spec
+        np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
+        remove_disconnected_layers(spec)
+        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
+
+    def test_dead_layer_remove(self):
+        input_features = [("data", datatypes.Array(*(3, 4)))]
+        output_features = [("out", None)]
+        builder =
neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("relu1", "RELU", "data", "relu1") + builder.add_load_constant_nd( + "const1", "c1", constant_value=np.ones((5,)), shape=(5,) + ) + builder.add_load_constant_nd( + "const2", "c2", constant_value=np.ones((5,)), shape=(5,) + ) + builder.add_split_nd( + "splitnd1", "const2", ["s1", "s2", "s3"], axis=0, num_splits=3 + ) + builder.add_squeeze("squeeze", "s1", "squeeze_out") + builder.add_activation("relu4", "RELU", "s2", "relu4") + builder.add_activation("relu5", "RELU", "relu4", "relu5") + builder.add_load_constant_nd( + "const3", "c3", constant_value=np.ones((5,)), shape=(5,) + ) + builder.add_activation("relu2", "RELU", "relu1", "out") + spec = builder.spec + np.testing.assert_equal(9, len(spec.neuralNetwork.layers)) + remove_disconnected_layers(spec) + np.testing.assert_equal(2, len(spec.neuralNetwork.layers)) + + def test_dead_layer_remove_branch(self): + convergence_tolerance = 1e-8 + + input_features = [("input", datatypes.Array(*(2,)))] + output_features = [("out", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + # add condition to break from the loop, if convergence criterion is met + builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance) + branch_layer = builder.add_branch("branch_layer", "cond") + builder_ifbranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.ifBranch + ) + builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out") + builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out") + builder_elsebranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.elseBranch + ) + builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out") + builder_elsebranch.add_activation( + "linear2", "LINEAR", "linear1_out", "relu2_out" + ) + builder.add_squeeze("out", "input", "out", squeeze_all=True) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.random.rand(2,) + data_dict = {"input": data} + if _IS_MACOS: + before_pass_out = mlmodel.predict(data_dict)["out"] + if DEBUG: + print( + "\n mlmodel description before remove disconnected layers pass: \n" + ) + print_network_spec(builder.spec, style="coding") + remove_disconnected_layers(builder.spec) + if DEBUG: + print( + "\n mlmodel description after remove disconnected layers pass: \n" + ) + print_network_spec(builder.spec, style="coding") + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + after_pass_out = mlmodel.predict(data_dict)["out"] + + np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2) + np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1) + + def test_dead_layer_partial_branch(self): + convergence_tolerance = 1e-8 + + input_features = [("input", datatypes.Array(*(2,)))] + output_features = [("out", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + # add condition to break from the loop, if convergence criterion is met + builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance) + branch_layer = builder.add_branch("branch_layer", "cond") + builder_ifbranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.ifBranch + ) + builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out") + 
builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out") + builder_elsebranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.elseBranch + ) + builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out") + builder_elsebranch.add_activation( + "linear_red_1", "LINEAR", "input", "linear_red1_out" + ) + builder_elsebranch.add_activation( + "linear_red_2", "LINEAR", "linear_red1_out", "linear_red2_out" + ) + builder_elsebranch.add_activation( + "linear2", "LINEAR", "linear1_out", "relu2_out" + ) + builder.add_squeeze("out", "relu2_out", "out", squeeze_all=True) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. + return + + data = np.random.rand(2,) + data_dict = {"input": data} + before_pass_out = mlmodel.predict(data_dict)["out"] + if DEBUG: + print("\n mlmodel description before remove disconnected layers pass: \n") + print_network_spec(builder.spec, style="coding") + old_spec = copy.copy(builder.spec) + remove_disconnected_layers(builder.spec) + if DEBUG: + print("\n mlmodel description after remove disconnected layers pass: \n") + print_network_spec(builder.spec, style="coding") + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + after_pass_out = mlmodel.predict(data_dict)["out"] + + np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2) + np.testing.assert_equal( + len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers), + len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers), + ) + np.testing.assert_equal( + len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2 + ) + + def test_conv_crop_bn_to_conv_bn_crop(self): + input_features = [("data", datatypes.Array(1, 10, 10))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.ones((1, 2, 2, 2), dtype=np.float32) + builder.add_convolution( + name="conv", + kernel_channels=1, + output_channels=2, + height=2, + width=2, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="conv_out", + ) + builder.add_crop( + name="crop", + left=1, + right=1, + top=1, + bottom=1, + offset=0, + input_names=["conv_out"], + output_name="crop_out", + ) + builder.add_batchnorm( + name="bn", + channels=2, + gamma=np.ones(2,).astype(np.float32), + beta=np.ones(2,).astype(np.float32), + mean=np.ones(2,).astype(np.float32), + variance=np.ones(2,).astype(np.float32), + input_name="crop_out", + output_name="out", + ) + # Conv -> Crop -> BN + spec = builder.spec.neuralNetwork + np.testing.assert_equal("crop", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("batchnorm", spec.layers[2].WhichOneof("layer")) + + # Predict + if _IS_MACOS: + mlmodel = MLModel(builder.spec, dict, compute_units=ComputeUnit.CPU_ONLY) + data = np.random.rand(1, 10, 10) + data_dict = {"data": data} + before_pass_out = mlmodel.predict(data_dict)["out"] + + # transform the pattern + transform_conv_crop(builder.spec) + # Conv -> BN -> Crop + np.testing.assert_equal("batchnorm", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("crop", spec.layers[2].WhichOneof("layer")) + + if _IS_MACOS: + # Predict + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + after_pass_out = mlmodel.predict(data_dict)["out"] + np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=3) + + 
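+    # The next test extends the pattern above with a trailing activation:
+    # Conv -> Crop -> BN -> ReLU should become Conv -> BN -> ReLU -> Crop,
+    # with predictions unchanged.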
def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self): + input_features = [("data", datatypes.Array(1, 10, 10))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + W = np.ones((1, 2, 2, 2), dtype=np.float32) + builder.add_convolution( + name="conv", + kernel_channels=1, + output_channels=2, + height=2, + width=2, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="conv_out", + ) + builder.add_crop( + name="crop", + left=1, + right=1, + top=1, + bottom=1, + offset=0, + input_names=["conv_out"], + output_name="crop_out", + ) + builder.add_batchnorm( + name="bn", + channels=2, + gamma=np.ones(2,).astype(np.float32), + beta=np.ones(2,).astype(np.float32), + mean=np.ones(2,).astype(np.float32), + variance=np.ones(2,).astype(np.float32), + input_name="crop_out", + output_name="bn_out", + ) + builder.add_activation( + name="relu", non_linearity="RELU", input_name="bn_out", output_name="out" + ) + # Conv -> Crop -> BN -> ReLU + spec = builder.spec.neuralNetwork + np.testing.assert_equal("crop", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("batchnorm", spec.layers[2].WhichOneof("layer")) + np.testing.assert_equal("activation", spec.layers[3].WhichOneof("layer")) + + # Predict + if _IS_MACOS: + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.random.rand(1, 10, 10) + data_dict = {"data": data} + before_pass_out = mlmodel.predict(data_dict)["out"] + + # transform the pattern + transform_conv_crop(builder.spec) + # Conv -> BN -> ReLU -> Crop + np.testing.assert_equal("batchnorm", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("activation", spec.layers[2].WhichOneof("layer")) + np.testing.assert_equal("crop", spec.layers[3].WhichOneof("layer")) + + # Predict + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + if _IS_MACOS: + after_pass_out = mlmodel.predict(data_dict)["out"] + np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=3) + + +@unittest.skipIf( + platform != "darwin" or _macos_version() < (10, 15), "Requires MacOS 10.15 or later" +) +class Redundant_Transposees_Test(unittest.TestCase): + def _test_builder(self, builder, input_shape, expected_layer_num=None): + + data = np.random.rand(*input_shape) + + # Mlmodel before + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + output_before = mlmodel.predict({"data": data})["out"] + num_layers_before = len(builder.spec.neuralNetwork.layers) + + remove_redundant_transposes(builder.spec) + + layers = builder.spec.neuralNetwork.layers + if expected_layer_num is None: + self.assertTrue(len(layers) < num_layers_before) + else: + self.assertEqual(len(layers), expected_layer_num) + + # Mlmodel after + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + output_after = mlmodel.predict({"data": data})["out"] + + np.testing.assert_almost_equal(output_before, output_after, decimal=3) + + def test_output_edge_case(self): + + # For now for safety purpose, the node which are output should't be merged + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="first_transpose", + axes=[2, 0, 1], + input_name="data", + output_name="first_transpose_out", + ) 
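+        # The two transposes compose to the identity, but the second one
+        # produces the network output, so the pass must keep both layers
+        # (hence the expected layer count of 2 below).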
+ builder.add_transpose( + name="second_transpose", + axes=[1, 2, 0], + input_name="first_transpose_out", + output_name="out", + ) + + self._test_builder(builder, input_shape, 2) + + def test_output_edge_case_2(self): + + # For now for safety purpose, the node which are output should't be merged + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="ranspose", axes=[1, 2, 0], input_name="data", output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_single_identity_transpose(self): + + # A single identity transpose (like 0,1,2) should also be removed + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="uselss_transpose", + axes=[0, 1, 2], + input_name="data", + output_name="useless_transpose_out", + ) + builder.add_activation( + name="relu", + non_linearity="RELU", + input_name="useless_transpose_out", + output_name="out", + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_three_transpose(self): + + # Three transpose layer which can be removed + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [1, 0, 2], [2, 0, 1]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_thousands_identity_transpose(self): + + """ + INPUT + | + v + [t1] + | + v + [t2] + | + v + . + . + . + | + v + [t1000] + | + v + RELU + tk are all identity + Remove a sequence of 1000 identity transpose + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + num_layers = 1000 + input_name = "data" + for i in range(num_layers): + output_name = "layer_" + str(i) + "_output" + name = "layer_" + str(i) + builder.add_transpose( + name=name, + axes=[0, 1, 2], + input_name=input_name, + output_name=output_name, + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_thousands_identity_transpose_with_activation_between(self): + """ + INPUT + | + v + [t1] + | + v + . + . + . + [t500] + | + v + RELU_1 + | + v + . + . + . 
+          |
+          v
+        [t1000]
+          |
+          v
+        RELU_2
+        t1 through t1000 are all identities.
+        Remove a sequence of 1000 identity transposes, but with a RELU in the middle;
+        the final output should be
+        INPUT
+          |
+          v
+        RELU_1
+          |
+          v
+        RELU_2
+
+        """
+        input_shape = (1, 10, 5)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 1000
+        input_name = "data"
+        for i in range(num_layers):
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            builder.add_transpose(
+                name=name,
+                axes=[0, 1, 2],
+                input_name=input_name,
+                output_name=output_name,
+            )
+            input_name = output_name
+            if i == num_layers / 2:
+                builder.add_activation(
+                    name="relu_inter",
+                    non_linearity="RELU",
+                    input_name=input_name,
+                    output_name="relu_out",
+                )
+                input_name = "relu_out"
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, 2)
+
+    def test_remove_thousands_random_transpose_layers(self):
+        """
+        INPUT
+          |
+          v
+        [t_0]
+          |
+          v
+        [t_1]
+          |
+          v
+          .
+          .
+          .
+          |
+          v
+        [t_999]
+          |
+          v
+        RELU
+        t_k are randomly generated; with this particular seed, the result should be
+        INPUT
+          |
+          v
+        [t_0]
+          |
+          v
+        [t_1]
+          |
+          v
+        RELU
+        """
+
+        import random
+        from itertools import permutations
+
+        random.seed(1000)
+        input_shape = (3, 10, 5)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 1000
+        dim = 3
+        input_name = "data"
+        debug = []
+        for i in range(num_layers):
+            axes = list(permutations(range(dim)))
+            random.shuffle(axes)
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            debug.append(axes[0])
+            builder.add_transpose(
+                name=name, axes=axes[0], input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, None)
+
+    def test_remove_thousands_random_transpose_layers_case_2(self):
+        """
+        Same test as the previous one, but with more layers and dimensions.
+        """
+        import random
+        from itertools import permutations
+
+        random.seed(0)
+        input_shape = (3, 10, 5, 2, 4)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 5000
+        dim = 5
+        input_name = "data"
+        for i in range(num_layers):
+            axes = list(permutations(range(dim)))
+            random.shuffle(axes)
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            builder.add_transpose(
+                name=name, axes=axes[0], input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, None)
+
+    def test_branch_structure(self):
+        """
+        INPUT
+          |
+          v
+        [t_0]
+          |
+          v
+        [t_1]
+          |
+          v
+        [t_3] --.
+          |     |
+          v     v
+        [t_4]  RELU_1
+          |
+          v
+        [t_5]
+          |
+          v
+        RELU_2
+        t_0, t_1, t_3 can be merged.
+        t_4, t_5 can be merged.
+        The output should be
+        INPUT
+          |
+          .------.
+          |      |
+          v      v
+        RELU_2  RELU_1
+
+        """
+        input_shape = (1, 10, 5)
+        input_features = [("data", datatypes.Array(1, 10, 5))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        transpose = [[2, 1, 0], [2, 1, 0], [0, 1, 2], [2, 0, 1], [1, 2, 0]]
+        input_name = "data"
+        for i, axes in enumerate(transpose):
+            name = "transpose_" + str(i)
+            output_name = name + "_out"
+            builder.add_transpose(
+                name=name, axes=axes, input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        builder.add_activation(
+            name="dumpy",
+            non_linearity="RELU",
+            input_name="transpose_2_out",
+            output_name="dumpy",
+        )
+        self._test_builder(builder, input_shape, 2)
+
+    def test_branch_case_2(self):
+        """
+        INPUT
+          |
+          v
+        [t_0] --.
+          |     |
+          v     v
+        [t_1]  RELU_1
+          |
+          v
+        RELU_2
+        Even though t_0 and t_1 could be merged, there is a branch from t_0,
+        so we shouldn't remove anything here.
+
+        """
+        input_shape = (1, 10, 5)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        transpose = [[2, 1, 0], [2, 1, 0]]
+        input_name = "data"
+        for i, axes in enumerate(transpose):
+            name = "transpose_" + str(i)
+            output_name = name + "_out"
+            builder.add_transpose(
+                name=name, axes=axes, input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        builder.add_activation(
+            name="dumpy",
+            non_linearity="RELU",
+            input_name="transpose_0_out",
+            output_name="dumpy",
+        )
+        self._test_builder(builder, input_shape, 4)
+
+    def test_fork_structure_case_3(self):
+        """
+        INPUT
+          |
+          v
+        [t_0]
+          |
+          v
+        [t_1]--.
+          |    |
+          |    v
+          |  RELU_1
+          |
+          v
+        [t_2]--.
+          |    |
+          |    v
+          |  RELU_2
+        [t_3]
+          |
+          v
+        [t_4]--.
+          |    |
+          |    v
+          |  RELU_3
+          v
+        RELU_4
+
+        t_0 and t_1 can be merged, t_2 is an identity, and t_3 and t_4 can be merged.
+        The final output should be
+        INPUT
+          |
+          .------------.----------.
+          |      |     |          |
+          v      v     v          v
+        RELU_1 RELU_2 RELU_3  RELU_4
+
+        """
+        input_shape = (1, 10, 5)
+        input_features = [("data", datatypes.Array(1, 10, 5))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        transpose = [[2, 1, 0], [2, 1, 0], [0, 1, 2], [2, 1, 0], [2, 1, 0]]
+        input_name = "data"
+        for i, axes in enumerate(transpose):
+            name = "transpose_" + str(i)
+            output_name = name + "_out"
+            builder.add_transpose(
+                name=name, axes=axes, input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        builder.add_activation(
+            name="dumpy_1",
+            non_linearity="RELU",
+            input_name="transpose_1_out",
+            output_name="dumpy_1",
+        )
+        builder.add_activation(
+            name="dumpy_2",
+            non_linearity="RELU",
+            input_name="transpose_2_out",
+            output_name="dumpy_2",
+        )
+        builder.add_activation(
+            name="dumpy_4",
+            non_linearity="RELU",
+            input_name="transpose_4_out",
+            output_name="dumpy_4",
+        )
+
+        self._test_builder(builder, input_shape, 4)
+
+    def test_fork(self):
+        """
+        INPUT
+          |
+          .------.------.
+ | | + v v + [t_1] [t_3] + | | + v v + [t_2] [t_4] + | | + v v + RELU_1 RELU_2 + + t_1,t_2 can be merged and t_3,t_4 can be merged. + The result output would be + + INPUT + | + .------.------. + | | + v v + RELU_1 RELU_2 + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_branch_2_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu_branch_2", + non_linearity="RELU", + input_name=input_name, + output_name="out_branch_2", + ) + self._test_builder(builder, input_shape, 2) + + def test_fork_and_add(self): + """ + INPUT + | + .------.------. + | | + v v + [t_1] [t_3] + | | + v v + [t_2] [t_4] + | | + .-----. .-----. + | | + v v + Add + + t_1,t_2 can be merged and t_3,t_4 can be merged. + The result output would be + + INPUT + | + .------.------. + | | + .-----. .-----. + | | + v v + Add + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + input_1 = input_name + + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_branch_2_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + input_2 = input_name + + builder.add_add_broadcastable( + name="add", input_names=[input_1, input_2], output_name="out" + ) + self._test_builder(builder, input_shape, 1) + + def test_transpose(self): + def _build_and_test_network(input_size, transpose_layers, expected_layers): + """ + Helper function for testing transpose removal. + + Args: + input_size: Size of the input network tensor. + transpose_layers: Array of transpose axes definitions. + expected_layers: Array of indices into transpose_layers indicating + which of the transpose layers should be present after the + graph pass. 
+ """ + input_features = [("data", datatypes.Array(*input_size))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + spec = builder.spec.neuralNetwork.layers + + last_layer = "data" + for idx, axes in enumerate(transpose_layers): + name = "t{}".format(idx) + if idx == len(transpose_layers) - 1: + output_name = "out" + else: + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=last_layer, output_name=output_name + ) + last_layer = output_name + + spec = builder.spec.neuralNetwork + # Check the network before the graph pass. + for idx in range(len(transpose_layers)): + np.testing.assert_equal( + "transpose", spec.layers[idx].WhichOneof("layer") + ) + # Run the removal pass. + remove_redundant_transposes(builder.spec) + # Verify only the expected layers remain. + np.testing.assert_equal(len(spec.layers), len(expected_layers)) + for output_layer_idx, input_layer_idx in enumerate(expected_layers): + np.testing.assert_equal( + "transpose", spec.layers[output_layer_idx].WhichOneof("layer") + ) + np.testing.assert_array_equal( + transpose_layers[input_layer_idx], + spec.layers[output_layer_idx].transpose.axes, + ) + + _build_and_test_network( + input_size=[1, 10, 10], + # These transposes are not inverses. + transpose_layers=[[2, 0, 1], [2, 0, 1]], + expected_layers=[0, 1], + ) + + _build_and_test_network( + input_size=[1, 1, 10, 10, 3], + # First two are the identity, then an extra. + transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]], + expected_layers=[2], + ) + + # A slightly more complicated test case where there are two transposes + # in topological order, but are actually in parallel in the graph. + builder = neural_network.NeuralNetworkBuilder( + [("data", datatypes.Array(2, 4, 8))], [("out", None)] + ) + builder.add_transpose( + name="t1", axes=[0, 2, 1], input_name="data", output_name="t1" + ) + builder.add_transpose( + name="t2", axes=[0, 2, 1], input_name="data", output_name="t2" + ) + builder.add_stack(name="stack", input_names=["t1", "t2"], output_name="out") + spec = builder.spec.neuralNetwork + # Run the removal pass. + remove_redundant_transposes(builder.spec) + # Verify nothing was removed. + np.testing.assert_equal(len(spec.layers), 3) + + +if __name__ == "__main__": + RUN_ALL_TESTS = True + if RUN_ALL_TESTS: + unittest.main() + else: + suite = unittest.TestSuite() + suite.addTest(MLModelPassesTest("test_load_constant_remove")) + unittest.TextTestRunner().run(suite) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py new file mode 100644 index 00000000..5f85c24c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py @@ -0,0 +1,227 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil import testing_reqs
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.testing_utils import (
+    apply_pass_and_basic_check,
+    assert_model_is_valid,
+    assert_same_output_names,
+    get_op_types_in_program,
+)
+
+backends = testing_reqs.backends
+
+
+class TestConv1dDecompositionPasses:
+    @pytest.mark.parametrize(
+        "backend, has_strides, pad_type, has_pad, has_dilations, has_bias",
+        itertools.product(
+            backends,
+            (True, False),
+            ("valid", "custom", "same"),
+            (True, False),
+            (True, False),
+            (True, False),
+        ),
+    )
+    def test_conv1d_decomposition(
+        self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias
+    ):
+        """
+        Input graph:
+        input -> conv1d -> out
+
+        Output graph:
+        input -> expand_dims -> conv2d -> squeeze -> out
+        """
+        N, L = 2, 8
+        C_in, C_out = 3, 4
+        K = 3
+
+        conv_kwargs = {"weight": np.random.rand(C_out, C_in, K), "pad_type": pad_type}
+        if has_strides:
+            conv_kwargs["strides"] = (2,)
+        if has_pad:
+            conv_kwargs["pad"] = (1, 1)
+        if has_dilations:
+            conv_kwargs["dilations"] = (2,)
+        if has_bias:
+            conv_kwargs["bias"] = np.random.rand(C_out)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))])
+        def prog(x):
+            y = mb.conv(x=x, **conv_kwargs)
+            return y
+
+        assert get_op_types_in_program(prog) == ["conv"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "nn_backend::decompose_conv1d"
+        )
+        assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"]
+
+        # infer output shape
+        strides = conv_kwargs["strides"] if has_strides else (1,)
+        pad = conv_kwargs["pad"] if has_pad else (0, 0)
+        dilations = conv_kwargs["dilations"] if has_dilations else (1,)
+        L_out = None
+        if pad_type == "valid":
+            L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "custom":
+            L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "same":
+            L_out = np.ceil(L / strides[-1])
+        else:
+            raise Exception("unsupported pad type")
+        output_shape = (N, C_out, L_out)
+
+        assert_model_is_valid(
+            prog,
+            {"x": (N, C_in, L)},
+            expected_output_shapes={block.outputs[0].name: output_shape},
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize("backend", backends)
+    def test_conv1d_decomposition_dynamic_weight(self, backend):
+        """
+        Input graph:
+        input -> conv1d -> out
+
+        Output graph:
+        input -> expand_dims -> conv2d -> squeeze -> out
+        """
+        N, L = 2, 9
+        C_in, C_out = 4, 3
+        K = 4
+
+        strides = (2,)
+        pad = (1, 1)
+        # MIL convolution with dynamic weights does not support dilations != 1
+        # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py
+        dilations = (1,)
+
+        # infer L_out with pad_type fixed to custom
+        L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+
+        conv_kwargs = {
+            "strides": strides,
+            "pad_type": "custom",
+            "pad": pad,
+            "dilations": dilations,
+        }
+
+        @mb.program(
+            input_specs=[
+                mb.TensorSpec(shape=(N, C_in, L)),
+                mb.TensorSpec(shape=(C_out,
C_in, K)), + ] + ) + def prog(x, weight): + y = mb.conv(x=x, weight=weight, **conv_kwargs) + return y + + assert get_op_types_in_program(prog) == ["conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "nn_backend::decompose_conv1d" + ) + assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L), "weight": (C_out, C_in, K)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +def test_commingle_loop_vars(): + def body(a, b): + # b is a loop invariant + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::commingle_loop_vars"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert while_op.blocks[0].inputs[0].name == while_op.outputs[0].name + assert while_op.blocks[0].inputs[1].name == while_op.outputs[1].name + + prog.validate() + + # The program is not ssa and thus cannot be converted + + +def test_handle_return_inputs_as_outputs(): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.mul(x=a, y=2.), b + + prev_main_output_names = [o.name for o in prog["main"].outputs] + assert prog["main"].outputs[1].op is None # output comes from input + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::handle_return_inputs_as_outputs"](prog) + assert_same_output_names(prev_prog, prog) + + assert prog["main"].outputs[1].op is not None # output comes from an op + assert prog["main"].outputs[1].op.op_type == "identity" + + with pytest.raises(ValueError, match='used both as function\'s input and output'): + # prog has input and output names 'b' that refer to different vars + # This program can pass if we disable 'dedup_op_and_var_names' pass + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + +def test_handle_unused_inputs(): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)),] + ) + def prog(unused_input): + return mb.const(val=[3, 2]) + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::handle_unused_inputs"](prog) + assert_same_output_names(prev_prog, prog) + + id_op = prog.find_ops(op_type="identity", exactly_one=True)[0] + # Assert that input var is consumed by an identity op. 
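+    # handle_unused_inputs ties each otherwise-unused input to a no-op identity,
+    # presumably so that every function input feeds at least one op.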
+ assert id_op in prog["main"].inputs["unused_input"].child_ops + + assert_model_is_valid(prog, {"unused_input": (1, 2)}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py new file mode 100644 index 00000000..236ca03f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py @@ -0,0 +1,12 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +def pytest_make_parametrize_id(config, val, argname): + ''' + This function is a hook into pytest. It generates a user friendly string + representation of the parameterized values. + ''' + return "{}={}".format(argname, str(val)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py new file mode 100644 index 00000000..6642e200 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py @@ -0,0 +1,341 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import tempfile as _tempfile +import warnings as _warnings +from typing import Optional, Text, Tuple + +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil import Program +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.types.symbolic import k_num_internal_syms, k_used_symbols +from coremltools.models import MLModel +from coremltools.models.model import _create_mlpackage + +from . import ImageType, InputType +from .mil.passes.pass_pipeline import PassPipeline, PipelineManager + + +class ConverterRegistry: + frontends = {} + backends = {} + backend_alias_names = {} + + @staticmethod + def frontend(converter): + ConverterRegistry.frontends[converter.name] = converter + return converter + + @staticmethod + def backend(converter): + ConverterRegistry.backends[converter.name] = converter + if 'alias_names' in converter.__dict__: + for name in converter.alias_names: + ConverterRegistry.backend_alias_names[name] = converter.name + return converter + + +@ConverterRegistry.frontend +class MILFrontend: + name = "milinternal" + + def __call__(self, model, *args, **kwargs): + specification_version = kwargs.get("specification_version", None) + if specification_version is not None: + max_opset_version, op = model._get_max_opset_version_and_op() + if max_opset_version > specification_version: + msg = ( + "Please update the minimum_deployment_target to {!s}," + " since op {} is only available in opset {!s} or newer." 
+            ).format(max_opset_version, op.op_type, max_opset_version)
+            raise ValueError(msg)
+
+        if "inputs" in kwargs and kwargs["inputs"] is not None:
+            inputs = kwargs["inputs"]
+            if not isinstance(inputs, (list, tuple)):
+                raise ValueError(
+                    "Type of inputs should be list or tuple, got {} instead.".format(
+                        type(inputs)
+                    )
+                )
+            if not all([isinstance(i, InputType) for i in inputs]):
+                raise ValueError(
+                    "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
+                        [type(i) for i in inputs]
+                    )
+                )
+
+            for idx, inp in enumerate(inputs):
+                # We set the default image format in MIL as NCHW, since only NCHW is
+                # natively supported by MIL ops (ex. Conv/Pool/etc.)
+                if isinstance(inp, ImageType) and inputs[idx].channel_first is None:
+                    inputs[idx].channel_first = True
+            model.set_main_input_types(tuple(inputs))
+        return model
+
+
+@ConverterRegistry.frontend
+class TensorFlowFrontend:
+    name = "tensorflow"
+
+    def __call__(self, *args, **kwargs):
+        from .frontend.tensorflow.load import TF1Loader
+
+        tf1_loader = TF1Loader(*args, **kwargs)
+        return tf1_loader.load()
+
+
+@ConverterRegistry.frontend
+class TensorFlow2Frontend:
+    name = "tensorflow2"
+
+    def __call__(self, *args, **kwargs):
+        from .frontend.tensorflow2.load import TF2Loader
+
+        tf2_loader = TF2Loader(*args, **kwargs)
+        return tf2_loader.load()
+
+
+@ConverterRegistry.frontend
+class TorchFrontend:
+    name = "pytorch"
+
+    def __call__(self, *args, **kwargs):
+        from .frontend.torch.load import load
+
+        return load(*args, **kwargs)
+
+
+@ConverterRegistry.backend
+class NNProtoBackend:
+    name = "neuralnetwork"
+    alias_names = []
+
+    def __call__(self, *args, **kwargs):
+        from .backend.nn.load import load
+
+        return load(*args, **kwargs)
+
+
+@ConverterRegistry.backend
+class MILProtoBackend:
+    name = "mlprogram"
+    alias_names = []
+
+    def __call__(self, *args, **kwargs):
+        from .backend.mil.load import load as backend_load
+
+        return backend_load(*args, **kwargs)
+
+
+def _reset_conversion_state():
+    '''
+    Reset any stateful properties/variables that are populated during conversion.
+    '''
+
+    # Clear the "name_count" dict,
+    # which is used to generate unique op names in the mil builder class.
+    mb.name_count.clear()
+
+    # Clear the "k_used_symbols" dict, and the int counter "k_num_internal_syms" that are used to track symbolic names
+    global k_used_symbols
+    global k_num_internal_syms
+    k_used_symbols.clear()
+    k_num_internal_syms = 0
+
+
+@_profile
+def mil_convert(
+    model,
+    convert_from,
+    convert_to,
+    compute_units,
+    **kwargs
+):
+    """
+    Convert model from a specified frontend `convert_from` to a specified
+    converter backend `convert_to`.
+
+    Parameters
+    ----------
+    model: TF, PyTorch, or `coremltools.converters.mil.Program`.
+        See `coremltools.converters.convert`
+
+    convert_from: str
+        The value must be one of ['tensorflow', 'tensorflow2',
+        'pytorch', 'milinternal'] (aka name of a `ConverterRegistry.frontend`).
+
+    compute_units: coremltools.ComputeUnit
+        An enum with three possible values:
+            - coremltools.ComputeUnit.ALL - use all compute units available, including the
+                neural engine.
+            - coremltools.ComputeUnit.CPU_ONLY - limit the model to only use the CPU.
+            - coremltools.ComputeUnit.CPU_AND_GPU - use both the CPU and GPU, but not the
+                neural engine.
+ + convert_to: str + Value must be one of ['neuralnetwork', 'mlprogram', 'milinternal'] + See `coremltools.converters.convert` + + Returns + ------- + model: `coremltools.models.MLModel` or + `coremltools.converters.mil.Program` + See `coremltools.converters.convert` + """ + return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs) + + +def _mil_convert( + model, + convert_from, + convert_to, + registry, + modelClass, + compute_units, + **kwargs +): + + # Map "convert_to" values that correspond to the alias_names, to the actual supported registries + if convert_to in registry.backend_alias_names: + msg = "Please use '{}' instead of '{}' with the 'convert_to' argument. The latter will be removed in the future." + _warnings.warn(msg.format(registry.backend_alias_names[convert_to], convert_to)) + convert_to = registry.backend_alias_names[convert_to] + + if convert_to == 'mlprogram': + # mil_convert_to_proto places weight files inside the weights_dir + weights_dir = _tempfile.TemporaryDirectory() + kwargs["weights_dir"] = weights_dir.name + + proto, mil_program = mil_convert_to_proto( + model, + convert_from, + convert_to, + registry, + **kwargs + ) + + _reset_conversion_state() + + if convert_to == 'milinternal': + return mil_program # mil program + elif convert_to == 'milpython': + return proto # internal mil data structure + + elif convert_to == "mlprogram": + package_path = _create_mlpackage( + proto, kwargs.get("weights_dir"), kwargs.get("package_dir") + ) + return modelClass( + package_path, + is_temp_package=not kwargs.get("package_dir"), + mil_program=mil_program, + skip_model_load=kwargs.get("skip_model_load", False), + compute_units=compute_units, + ) + + return modelClass(proto, + mil_program=mil_program, + skip_model_load=kwargs.get('skip_model_load', False), + compute_units=compute_units) + + +def mil_convert_to_proto( + model, convert_from, convert_to, converter_registry, main_pipeline=None, **kwargs +) -> Tuple[Optional[MLModel], Program]: + """ + Convert model to proto object. + + Parameters + ---------- + model: See `mil_convert` + + convert_from: See `mil_convert` + + convert_to: See `mil_convert` + + converter_registry: `ConverterRegistry` + Available frontend and backend converters + + main_pipeline: `PassPipeline` + The main pipeline with options set by users. + """ + frontend_converter_type = converter_registry.frontends.get(convert_from.lower()) + if not frontend_converter_type: + raise NotImplementedError( + f'Frontend converter "{convert_from}" not implemented, must be ' + f"one of: {list(converter_registry.frontends.keys())}" + ) + + kwargs.setdefault("convert_to", convert_to) + + if main_pipeline is None: + # If the client calls `mil_convert` directly, the `pass_pipeline` is None. To keep the + # behaviour same as before, the quantization pass is removed in this situation. + # TODO: rdar://106111553 ([Infra] Quantization Pass is skipped when `mil_convert` is called directly.) 
+        main_pipeline = PassPipeline()
+        main_pipeline.remove_passes({"common::add_fp16_cast"})
+    frontend_pipeline, backend_pipeline = _construct_other_pipelines(
+        main_pipeline, convert_from, convert_to
+    )
+
+    frontend_converter = frontend_converter_type()
+    prog = frontend_converter(model, **kwargs)
+    PipelineManager.apply_pipeline(prog, frontend_pipeline)
+
+    PipelineManager.apply_pipeline(prog, main_pipeline)
+
+    prog._check_invalid_tensor_rank()
+
+    if convert_to == 'milinternal':
+        return None, prog
+
+    PipelineManager.apply_pipeline(prog, backend_pipeline)
+    backend_converter_type = converter_registry.backends.get(convert_to.lower())
+    if not backend_converter_type:
+        raise NotImplementedError(
+            f'Backend converter "{convert_to}" not implemented, must be '
+            f"one of: {list(converter_registry.backends.keys())}"
+        )
+    backend_converter = backend_converter_type()
+    out = backend_converter(prog, **kwargs)
+
+    return out, prog
+
+
+def _construct_other_pipelines(
+    main_pipeline: PassPipeline, convert_from: Text, convert_to: Text
+) -> Tuple[PassPipeline, PassPipeline]:
+    """
+    Construct the other pipelines based on the main pipeline. This includes:
+    - The frontend pipeline, which runs in the frontend converter
+    - The backend pipeline, which runs in the backend converter
+    As the main pipeline could have passes which also exist among the frontend/backend passes, we
+    need to make sure the pass options are set properly in all pipelines.
+    For example, if users set options to skip some vars in the `const_elimination` pass, we want to
+    make sure those vars are skipped not only in the main pipeline, but also in every other
+    pipeline where the `const_elimination` pass runs.
+
+    TODO: rdar://106046237 ([Infra] Expose Backend and Frontend Pipeline to External Users)
+    Currently users control only the passes in the main pipeline, through the `pass_pipeline` param.
+    There are two reasons why we don't expose the frontend/backend pipelines at the current stage:
+    - The frontend and backend specific passes need to be well documented.
+    - The interface needs more careful design, as we don't want to overwhelm users with too many
+      params, such as ct.convert(..., frontend_pipeline=xxx, backend_pipeline=xxx, main_pipeline=xxx).
+    """
+    # Set the main pipeline options specified by the user in the frontend/backend pipeline.
+    frontend_pipeline = PassPipeline.get_pipeline(f"frontend_{convert_from.lower()}")
+    frontend_pipeline.set_options_by_another_pipeline(main_pipeline)
+    backend_pipeline = PassPipeline.get_pipeline(f"backend_{convert_to.lower()}")
+    backend_pipeline.set_options_by_another_pipeline(main_pipeline)
+
+    # If a pass is skipped in the main pipeline, we also skip it in the frontend/backend pipeline.
+    default_main_pipeline = PassPipeline.get_pipeline("default")
+    passes_skipped_in_main = set(default_main_pipeline.passes) - set(main_pipeline.passes)
+    frontend_pipeline.remove_passes(passes_skipped_in_main)
+    backend_pipeline.remove_passes(passes_skipped_in_main)
+
+    return frontend_pipeline, backend_pipeline
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py
new file mode 100644
index 00000000..a2a59d8f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+from collections import OrderedDict
+from typing import List, Optional
+
+import coremltools as ct
+from coremltools.models import MLModel
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.frontend.milproto.load import \
+    load as milproto_to_pymil
+
+def extract_submodel(
+        model: MLModel,
+        outputs: List[str],
+        inputs: Optional[List[str]] = None,
+        function_name: str = "main"
+    ) -> MLModel:
+    """
+    This utility function allows the user to extract a submodel from a Core ML model.
+
+    For a NeuralNetwork model, only an in-memory Core ML model can be extracted: always call
+    this function on a model that comes directly from `ct.convert`. Calling it on a
+    NeuralNetwork model loaded from disk is not supported.
+
+    For an ML program model, both cases (in memory / from disk) are supported.
+
+    Parameters
+    ----------
+    model: MLModel
+        The Core ML model from which the submodel is extracted.
+
+    outputs: list[str]
+        A list of names of Vars, which are the outputs of the extracted submodel.
+
+    inputs: list[str] (Optional)
+        A list of names of Vars, which are the inputs of the extracted submodel.
+        If not provided, we use the inputs from the original model.
+
+    function_name: str (Optional)
+        Name of the function from which the subgraph is extracted. Default "main".
+
+    Examples
+    --------
+
+    NeuralNetwork:
+
+    >>> from coremltools.converters.mil.debugging_utils import extract_submodel
+    >>> mlmodel = ct.convert(model, convert_to="neuralnetwork")
+    >>> outputs = ["output_0", "output_1"]
+    >>> submodel = extract_submodel(mlmodel, outputs)
+
+    ML Program:
+
+    >>> from coremltools.converters.mil.debugging_utils import extract_submodel
+    >>> mlmodel = ct.convert(model, convert_to="mlprogram")
+    >>> outputs = ["output_0", "output_1"]
+    >>>
+    >>> # Directly extract model in memory
+    >>> submodel = extract_submodel(mlmodel, outputs)
+    >>>
+    >>> # Extract model loaded from disk
+    >>> mlmodel.save("model.mlpackage")
+    >>> mlmodel = coremltools.models.MLModel("model.mlpackage")
+    >>> submodel = extract_submodel(mlmodel, outputs)
+
+    """
+    def validate_inputs(func, input_vars):
+        reachable_vars = set(input_vars)
+        for op in func.operations:
+            if op.op_type == "const":
+                reachable_vars.add(op.outputs[0])
+
+        for op in func.operations:
+            if all([x in reachable_vars for x in op.inputs.values()]):
+                reachable_vars.update(op.outputs)
+
+        for out in func.outputs:
+            if out not in reachable_vars:
+                raise ValueError(f"output {out.name} not reachable from inputs")
+
+    @block_context_manager
+    def replace_inputs(func, input_vars):
+        func_inputs = {}
+        for input in input_vars:
+            name = input.name
+            func_inputs[name] = mb.placeholder(input.shape, dtype=input.dtype)
+            func.replace_uses_of_var_after_op(
+                anchor_op=input.op,
+                old_var=input,
+                new_var=func_inputs[name].outputs[0],
+                no_check_var_visibility=True,
+            )
+        func._input_dict = OrderedDict()
+        for k, v in func_inputs.items():
+            v.set_name(k)
+            func._input_dict[k] = v.outputs[0]
+
+    if not isinstance(outputs, (list, tuple)):
+        raise ValueError(f"outputs must be of type list/tuple. Got {type(outputs)}.")
+
+    for output in outputs:
+        if not isinstance(output, str):
+            raise ValueError(f"outputs must be a list of str. Got element {output} with type {type(output)}.")
+        if outputs.count(output) > 1:
+            raise ValueError(f"outputs must be a list of unique elements. '{output}' occurs {outputs.count(output)} times.")
+
+    model_spec = model.get_spec()
+    backend = "mlprogram" if model_spec.WhichOneof("Type") == "mlProgram" else "neuralnetwork"
+    if backend == "neuralnetwork":
+        if model._mil_program is None:
+            raise ValueError("A NeuralNetwork model loaded from disk is not supported by the extract_submodel util.")
+        program = model._mil_program
+    else:
+        assert backend == "mlprogram"
+        if model._mil_program is None:
+            program = milproto_to_pymil(
+                model_spec=model_spec,
+                specification_version=model_spec.specificationVersion,
+                file_weights_dir=model.weights_dir,
+            )
+        else:
+            program = model._mil_program
+
+    # extract the subgraph
+    prog = copy.deepcopy(program)
+    func = prog.functions[function_name]
+    vars = {}
+    new_outputs = []
+    for op in func.operations:
+        for o in op.outputs:
+            if o.name in outputs:
+                new_outputs.append(o)
+            vars[o.name] = o
+
+    if len(outputs) != len(new_outputs):
+        new_outputs_names = [o.name for o in new_outputs]
+        outputs_not_found = [name for name in outputs if name not in new_outputs_names]
+        raise ValueError(f"outputs {outputs_not_found} not found in the function.")
+
+    func.set_outputs(new_outputs)
+
+    # Clean up the graph
+    PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    # If inputs are provided, we extract the subgraph starting from them
+    if inputs is not None:
+        if not isinstance(inputs, (list, tuple)):
+            raise ValueError(f"inputs must be of type list/tuple. Got {type(inputs)}.")
+
+        input_vars = []
+        for input in inputs:
+            if not isinstance(input, str):
+                raise ValueError(f"inputs must be a list of str. Got element {input} with type {type(input)}.")
+            if inputs.count(input) > 1:
+                raise ValueError(f"inputs must be a list of unique elements. '{input}' occurs {inputs.count(input)} times.")
+            if input not in vars and input not in func.inputs:
+                raise ValueError(f"input {input} not found in the function.")
+            if input in vars:
+                input_vars.append(vars[input])
+            if input in func.inputs:
+                input_vars.append(func.inputs[input])
+
+        validate_inputs(func, input_vars)
+        replace_inputs(func, input_vars)
+        PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    prog.skip_all_passes = True
+    submodel = ct.convert(prog, convert_to=backend, compute_units=model.compute_unit)
+
+    return submodel
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py
new file mode 100644
index 00000000..545ac7e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md
new file mode 100644
index 00000000..aff706f8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md
@@ -0,0 +1,587 @@
+# Generic Pattern Matching Infrastructure Documentation
+
+## _**Introduction**_
+
+This document contains the **motivation**, **user flow**, **documentation**, and **instructions** for adding/running a pass with Arjun Singla’s Generic Pattern Matching Infrastructure.
+
+## _**What We Know**_
+
+* Existing TensorFlow and PyTorch models are converted to intermediate representations, GraphDef and TorchScript respectively, by the frameworks themselves when they are compiled. These intermediate representations are “verbose” - each operation is expanded into the combination of its most basic operations.
+* Then, our Apple infrastructure performs a one-to-one mapping, taking these intermediate representations and converting them into a MIL (Model Intermediate Language) representation. As this mapping is one-to-one, the MIL representation is “verbose” as well.
+* Now, the goal becomes to take these “verbose” MIL representations, and make them compact again - taking sets of simple operations and consolidating them into their more complicated cousins - the same ones that the user defined in the original TensorFlow and PyTorch models. These consolidations are executed when we convert the MIL representation into the final CoreML one.
+* The project
+    * My project is working on a very specific subproblem of this larger issue. The goal is to take these “verbose” MIL representations, detect **any** sequence of operations, and replace it with **any** other sequence of operations.
+
+## _**The User Flow: Documentation**_
+
+* We are assuming that the user has a very high understanding of PyMIL. So, we will have the user define a PyMIL program, which will be the pattern to detect in the larger machine learning model. Attached is a code snippet, taken from the PyMIL docs, on how to define a program:
+
+```
+# Import the builder
+from coremltools.converters.mil import Builder as mb
+
+# Input to MIL program is a list of tensors. Here we have one input with
+# shape (1, 100, 100, 3) and implicit dtype == fp32
+
+@mb.program(input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3)),])
+def prog(x):
+
+    # MIL operation takes named inputs (instead of positional inputs).
+    # Here the name argument is optional.
+
+    x = mb.relu(x=x, name='relu')
+    x = mb.transpose(x=x, perm=[0, 3, 1, 2], name='transpose')
+    x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name='reduce')
+    x = mb.log(x=x, name='log')
+    return x
+```
+
+* It is important that the user follows these constraints when writing their MIL program:
+    * **This program must only have one root variable**
+    * **This program has exactly one proper last operation topologically.**
+    * **Each operation in the program must have a UNIQUE NAME!!!**
+    ```
+    # Read from left to right, this pattern has two "last" operations,
+    # and is not permitted
+
+    --> op1 --- op2 --- op3 -->
+                 |
+                 | ---- op4 -->
+
+    # Read from left to right, this pattern has one "last" operation,
+    # and is permitted. The only thing that must be
+    # singular here is the last operation (and, of course, the root var)
+
+    --> op1 --- op2 --- op3 --- op5 -->
+                 |          |
+                 | ---- op4 -----|
+    ```
+
+* The second function the user needs to define is the following:
+`def var_constraints(pattern):`
+    * Parameters
+        * a `Pattern` object
+            * What is a pattern object, you may ask? Excellent question!
+            * A `Pattern` object stores the captured operations in the larger machine learning model. So, let’s say that the user defined a pattern `return mb.relu(x=x, name='mycoolrelu')`. Then, `pattern.mycoolrelu` would return the **captured** relu operation in the larger machine learning model!
+            * The pattern also has the following additional attributes:
+                * `pattern.root_var`, which is the root variable of the first operation of the captured pattern (and corresponds to the user defined pattern’s root variable)
+                * `pattern.final_op`, the operation in the larger machine learning model that corresponds to the last operation in the user defined pattern.
+                * `pattern.block`, the block in the larger machine learning model where the pattern was found
+                * `pattern.op_set`, a set of all the operations captured from the larger machine learning model. The user should call `pattern.op_list()` to return a list version of the set (without duplicates)
+            * Note: The user can add additional attributes to the pattern object using this method if they choose:
+            `pattern.add_attribute("attr_name", attribute)`
+    * Returns `True` if the pattern satisfies certain constraints (i.e. constant input values, rank, etc.) - basically, anything beyond its topological order with respect to operation types, which is already identical to that of the user defined pattern. Returns `False` otherwise.
+
+* The third function the user needs to define is the following:
+`def transform_pattern(pattern):`
+    * Parameters
+        * a `Pattern` object
+    * This function needs to replace the captured operations (stored in the pattern object) with whatever you want! Feel free to define another MIL program and replace the pattern with that second program.
+
+* The last thing the user needs to do is **call** the following function
+`register_generic_pass(ops_arrangement, var_constraints, transform_pattern, pass_name, namespace)`
+    * Parameters
+        * `ops_arrangement`: the user defined pattern
+        * `var_constraints`: the user defined function (see above)
+        * `transform_pattern`: the user defined function (see above)
+        * `pass_name`: a string representing the name of the pass.
+        * `namespace`: a string representing the namespace of the pass (i.e. `"common"`)
+    * Calling this function will register the pass with the given `pass_name` and `namespace`, so that it will be called when the passes are run.
+    * If you have multiple patterns to detect for a single pass, just call this function multiple times with the respective `ops_arrangement`, `var_constraints`, and `transform_pattern`, but have the `pass_name` and `namespace` be the same. That way, all of these “mini passes” will be registered under the same pass!
+
+
+## Gelu Example - Everything the User Does
+
+```
+# Full source @ coreml/coremltools/coremltools/converters/mil/experimental/passes/generic_gelu_tanh_approximation_fusion.py
+# This is a simple function defined by the user
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.mil.passes.helper import _check_var_scalar_value +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import register_generic_pass + +# This is the user defined pattern to detect +@mb.program(input_specs=[mb.TensorSpec(shape=([1, 1024, 4096])), ]) +def gelu_to_detect_1(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + pow = mb.pow(x=x, y=3.0, name="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x=0.5, y=add_1, name="mul") + mul_3 = mb.mul(x=mul, y=x, name="mul_3") + return mul_3 +""" +y = x * (0.5 * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1)) + + +[...] -----> pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) ----> mul (0.5) -----> mul ---> [...] + | ^ ^ + | | | + |------------------------------------------------------------------------------------------------------------------------ + +""" + +# This is another user defined pattern to detect +# In this pattern, 0.5 is first multiplied with the input which is then multiplied with the tanh term. +# In pattern1, 0.5 is first multiplied with the tanh term, and then multiplied with input +@mb.program(input_specs=[mb.TensorSpec(shape=([1, 1024, 4096])), ]) +def gelu_to_detect_2(x): + pow = mb.pow(x=x, y=3.0, name ="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x = 0.5, y=x, name="mul") + mul_3 = mb.mul(x=mul, y=add_1, name="mul_3") + return mul_3 + +""" +y = (0.5 * x) * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1) + + --------------------------------------------------------------------------------------------------------- + ^ | + | V + [...] -----> mul(0.5) pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) -----> mul ---> [...] 
+                             |                  ^                                                      ^
+                             |                  |                                                      |
+                             |-----------------------------------------------------------------------
+"""
+
+# Constraint enforcement
+def var_constraints(pattern):
+    passed = True
+
+    passed = passed and (_check_var_scalar_value(pattern.mul.y, 0.5) or _check_var_scalar_value(pattern.mul.x, 0.5))
+    passed = passed and _check_var_scalar_value(pattern.pow.y, 3.0)
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.mul_1.y, 0.044715) or
+        _check_var_scalar_value(pattern.mul_1.x, 0.044715)
+    )
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.mul_2.y, 0.79788) or
+        _check_var_scalar_value(pattern.mul_2.x, 0.79788)
+    )
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.add_1.y, 1) or
+        _check_var_scalar_value(pattern.add_1.x, 1)
+    )
+
+    return passed
+
+# Transformation Logic
+def transform_pattern(pattern):
+    # remove all the ops, and replace with a gelu op
+    out_name = pattern.mul_3.outputs[0].name
+    x = mb.gelu(x=pattern.root_var, mode="TANH_APPROXIMATION", name=out_name, before_op=pattern.mul)
+
+    pattern.mul_3.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.mul_3, old_var=pattern.mul_3.outputs[0], new_var=x
+    )
+
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+
+# Registering the Pass
+register_generic_pass(ops_arrangement=gelu_to_detect_1, var_constraints=var_constraints,
+                      transform_pattern=transform_pattern, pass_name="fuse_gelu_tanh_approximation", namespace="common")
+
+register_generic_pass(ops_arrangement=gelu_to_detect_2, var_constraints=var_constraints,
+                      transform_pattern=transform_pattern, pass_name="fuse_gelu_tanh_approximation", namespace="common")
+```
+
+
+
+## Linear Bias Example - Everything the User Does
+
+```
+# Full source @ coreml/coremltools/coremltools/converters/mil/mil/passes/linear_bias_fusion.py
+arbitrary_shape = (get_new_symbol(), get_new_symbol())
+np.random.seed()
+arbitrary_weight = np.random.rand(4,3)
+arbitrary_bias = np.random.rand(4)
+
+@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+def pattern_add(x):
+    """
+    Original:
+        %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                               # %3 is a rank-1 const tensor (bias)
+        ...
+        %6 = add(x=%4, y=%5)  # %5 is a const tensor with the same shape as %3
+
+    Result:
+        %8 = linear(x=%1, weight=%2, bias=%7)  # where %7 is a new const tensor with value
+                                               # %7 = %3 + %6
+    """
+    linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+    add_or_sub = mb.add(x=linear, y=arbitrary_bias, name="add_or_sub")
+    return add_or_sub
+
+@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+def pattern_sub(x):
+    """
+    Original:
+        %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                               # %3 is a rank-1 const tensor (bias)
+        ...
+        %6 = sub(x=%5, y=%4)  # %5 is a const tensor with a shape broadcastable with %3,
+        i.e. if %3 has shape (Dout), %5 could be (1, Dout).
+
+    Result:
+        %9 = linear(x=%1, weight=%7, bias=%8)  # where %7 is a new const tensor with value %7 = -%2
+                                               # %8 = %5 - %3
+    """
+    linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+    add_or_sub = mb.sub(x=linear, y=arbitrary_bias, name="add_or_sub")
+    return add_or_sub
+
+
+def var_constraints(pattern):
+    passed = True
+    passed = passed and (pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None)
+
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+
+    # check if the shape is broadcastable
+    passed = passed and np.prod(linear_bias.shape) == np.prod(bias.shape)
+    passed = passed and bias.shape[-1] == Dout
+    return passed
+
+
+def _get_is_sub_and_is_first_input(pattern):
+    is_sub = pattern.add_or_sub.op_type == "sub"
+    is_first_input = pattern.add_or_sub.x == pattern.linear.outputs[0]
+    return is_sub, is_first_input
+
+
+def _get_linear_bias_bias_Dout(pattern, is_first_input):
+    linear_bias = pattern.linear.bias.val
+    bias = pattern.add_or_sub.y.val if is_first_input else pattern.add_or_sub.x.val
+    Dout = linear_bias.shape[0]
+    return linear_bias, bias, Dout
+
+
+def transform_pattern(pattern):
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+    bias = np.reshape(bias, (Dout,))
+
+    if is_sub and is_first_input: bias = -bias
+    if is_sub and not is_first_input: linear_bias = -linear_bias
+
+    new_bias = linear_bias + bias
+
+    # compute the new weight
+    if is_sub and not is_first_input:
+        new_weight = -pattern.linear.weight.val
+    else:
+        new_weight = pattern.linear.weight.val
+
+    # create a new linear op with the new weight, bias value, copying rest of the attributes
+    out_name = pattern.add_or_sub.outputs[0].name
+    linear_kargs = {"weight": new_weight, "bias": new_bias, "name": out_name, "before_op": pattern.linear}
+
+    linear_kargs.update({k: v for k, v in pattern.linear.inputs.items() if k not in ["weight", "bias"]})
+
+    x = mb.linear(**linear_kargs)
+
+    pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x
+    )
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+
+
+register_generic_pass(
+    ops_arrangement=pattern_add,
+    var_constraints=var_constraints,
+    transform_pattern=transform_pattern,
+    pass_name="fuse_linear_bias",
+    namespace="common",
+)
+
+register_generic_pass(
+    ops_arrangement=pattern_sub,
+    var_constraints=var_constraints,
+    transform_pattern=transform_pattern,
+    pass_name="fuse_linear_bias",
+    namespace="common",
+)
+```
+
+
+
+## Layernorm/Instancenorm Fusion - Everything the User Does (for one of the patterns)
+
+```
+# Full source @ coreml/coremltools/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py
+@mb.program(input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3)),])
+def layernorm(x):
+
+    # MIL operation takes named inputs (instead of positional inputs).
+
+    y = mb.reduce_mean(x=x, keep_dims=False, name="reduce_mean")
+    x = mb.sub(x=x, y=y, name="sub")
+    ...
+    x = mb.add(x=x, y=sub, name="last_add")
+    return x
+
+# User defined helper function
+def _check_no_output_connection(block: Block, ops: List[Operation]) -> bool:
+    """
+    Check that none of the ops in this pattern are connected to the output
+    (except the last add op)
+
+    :param block: Block
+    :param ops: List of operations to check on.
+    """
+    for op in ops[:-1]:
+        for out in op.outputs:
+            if out in block.outputs:
+                return False
+    return True
+
+# User defined helper function
+def _check_reduce_op(reduce_op: Operation, mode: str = "reduce_mean") -> bool:
+    """
+    Check whether or not the reduction op satisfies the following conditions:
+    - Mode is expected.
+    - Does not change rank (keep_dims is True).
+    - Axes is known at compile time.
+
+    :param reduce_op: reduce op to check on
+    :param mode: reduce mode
+    """
+    if reduce_op is None:
+        return False
+    if reduce_op.op_type != mode:
+        return False
+    if reduce_op.keep_dims is None or reduce_op.keep_dims.val is None:
+        return False
+    if reduce_op.keep_dims.val is False:
+        return False
+    if reduce_op.axes is None or reduce_op.axes.val is None:
+        return False
+    return True
+
+
+def var_constraints(pattern) -> bool:
+
+    root_var = pattern.reduce_op.x
+    epsilon_var = pattern.add_op1.y if pattern.add_op1.x == pattern.reduce_op2.outputs[0] else pattern.add_op1.x
+    gamma_var = pattern.mul_op1.y if pattern.mul_op1.x == pattern.rsqrt_op.outputs[0] else pattern.mul_op1.x
+    beta_var = pattern.sub_op2.x
+    rank = len(root_var.shape)
+
+    passed = True
+
+    passed = passed and _check_no_output_connection(pattern.block, pattern.op_list())
+
+    passed = passed and root_var.shape is not None
+    passed = passed and rank == 4
+    passed = passed and _check_reduce_op(pattern.reduce_op)
+    passed = passed and not (epsilon_var.val is None or len(epsilon_var.val.shape) != 0)
+    passed = passed and gamma_var.val is not None
+    passed = passed and beta_var.val is not None
+
+    pattern.add_attribute('epsilon_var', epsilon_var)
+    pattern.add_attribute('gamma_var', gamma_var)
+    pattern.add_attribute('beta_var', beta_var)
+
+    return passed
+
+
+def transform_pattern(pattern):
+
+    # Insert instance_norm / layer_norm and delete all ops.
+
+    axes = pattern.reduce_op.axes.val
+    rank = len(pattern.reduce_op.x.shape)
+
+    # check whether the pattern is instance_norm or layer_norm
+    is_layernorm = False
+    is_instancenorm = False
+    is_require_rank4_transpose = False
+
+    negative_axes = [a - rank if a >= 0 else a for a in axes]
+    negative_axes.sort()
+
+    if len(pattern.gamma_var.val.shape) == len(axes) and len(pattern.beta_var.val.shape) == len(axes):
+        # axes for layer_norm must be [-1] or [-1, -2] or [-1, -2, -3] and so on
+        if negative_axes == list(range(-len(negative_axes), 0)):
+            is_layernorm = True
+
+    if rank == 4 and (negative_axes == [-2, -1] or negative_axes == [-3, -2]):
+        if (
+            len(np.squeeze(pattern.gamma_var.val).shape) == 1
+            and len(np.squeeze(pattern.beta_var.val).shape) == 1
+        ):
+            is_instancenorm = True
+        if negative_axes == [-3, -2]:
+            is_require_rank4_transpose = True
+
+    if not (is_instancenorm or is_layernorm):
+        return False
+
+    # remove all the ops, and replace with a layer_norm or instance_norm op
+    out_name = pattern.end_op.outputs[0].name
+
+    if is_require_rank4_transpose:
+        x = mb.transpose(
+            x=pattern.reduce_op.x,
+            perm=[0, 3, 1, 2],
+            name=out_name + "_transpose_nhwc_nchw",
+            before_op=pattern.end_op,
+        )
+    if is_instancenorm:
+        x = mb.instance_norm(
+            x=x if is_require_rank4_transpose else pattern.reduce_op.x,
+            gamma=np.squeeze(pattern.gamma_var.val),
+            beta=np.squeeze(pattern.beta_var.val),
+            epsilon=pattern.epsilon_var,
+            name=out_name + "_instancenorm" if is_require_rank4_transpose else out_name,
+            before_op=pattern.end_op,
+        )
+    else:  # is_layernorm
+        x = mb.layer_norm(
+            x=x if is_require_rank4_transpose else pattern.reduce_op.x,
+            axes=axes,
+            gamma=pattern.gamma_var,
+            beta=pattern.beta_var,
+            epsilon=pattern.epsilon_var,
+            name=out_name + "_layernorm" if is_require_rank4_transpose else out_name,
+            before_op=pattern.end_op,
+        )
+    if is_require_rank4_transpose:
+        x = mb.transpose(
+            x=x,
+            perm=[0, 2, 3, 1],
+            name=out_name + "_transpose_nchw_nhwc",
+            before_op=pattern.end_op,
+        )
+
+    pattern.end_op.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.end_op, old_var=pattern.end_op.outputs[0], new_var=x
+    )
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+    return True
+
+
+register_generic_pass(ops_arrangement=layernorm, var_constraints=var_constraints,
+                      transform_pattern=transform_pattern,
+                      pass_name="layernorm_pass", namespace="common")
+```
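+
+Once registered, a generic pass behaves like any other pass in the registry. As a quick sanity check (a sketch, assuming the `register_generic_pass` call above has already run), it can be applied directly to a PyMIL program through `PASS_REGISTRY`, the same way the built-in passes are invoked:
+
+```
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+
+# `prog` is a PyMIL program that contains the pattern to be fused.
+# The registry key is "<namespace>::<pass_name>" from the registration call.
+PASS_REGISTRY["common::layernorm_pass"](prog)
+```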
+* The second function, called by the one above:
+`fuse_all_blocks(ops_arrangement, var_constraints, transform_pattern, prog)`
+    * Parameters
+        * `ops_arrangement` : The user-defined MIL program we are trying to detect
+        * `var_constraints` : The user-defined constraint function described above
+        * `transform_pattern` : The user-defined replacement function described above
+        * `prog` : The large machine learning model (represented in MIL) in which we are trying to detect `ops_arrangement`
+    * Results
+        * This function replaces all instances of `ops_arrangement` in `prog` with the replacement code in `transform_pattern`
+* The third function, called by the one above:
+`fuse_one_block(block, ops_arrangement, var_constraints, transform_pattern)`
+    * Parameters
+        * `block` : The block in the main machine learning model that we are currently looking into
+        * `ops_arrangement`, `var_constraints`, `transform_pattern` : Same as above
+    * Results
+        * This function replaces the first instance of `ops_arrangement` in `block` with the replacement code in `transform_pattern`
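+A useful mental model for how these two drivers fit together is the sketch below. It is a simplified rendering of the assumed control flow, not the actual source (the real implementation also recurses into nested blocks, and `prog.functions` mapping names to functions is an assumption here):
+```
+# Sketch (assumed control flow): fuse_all_blocks repeatedly applies
+# fuse_one_block until a full sweep finds no more matches.
+def fuse_all_blocks(ops_arrangement, var_constraints, transform_pattern, prog):
+    for f in prog.functions.values():
+        block_changed = True
+        while block_changed:
+            # fuse_one_block reports whether it found and replaced one match
+            block_changed = fuse_one_block(f, ops_arrangement,
+                                           var_constraints, transform_pattern)
+```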
+* The fourth function, called by the one above:
+`detect_pattern(program_op, ops_arrangement_root_var, block)`
+    * Parameters
+        * `program_op` : A single operation in the main machine learning model
+        * `ops_arrangement_root_var` : The root variable of the user-defined MIL program we are trying to detect. **Assumption: this program has only one root variable**
+        * `block` : The block in the main machine learning model that we are currently looking into
+    * Results
+        * This function does the following:
+            * Creates a `Pattern` object to capture operations and other relevant details from the main machine learning model
+            * Sets the `Pattern` object's `block` and `root_var` attributes. `root_var` is the **single** input variable of `program_op` that corresponds to `ops_arrangement_root_var`. In other words, `ops_arrangement_root_var` is to `ops_arrangement` as `pattern.root_var` is to the main machine learning model. **Since we assume that the user-defined pattern has only one root variable, if `program_op` has more than one input variable, we loop through its inputs to find the one that corresponds to the root variable of the user-defined pattern. Here, "corresponds" means recursively having the same number and type of child operations, in the same topological order.**
+            * Sets the `Pattern` object's operation attributes. Each attribute name matches the name the user gave to the corresponding operation in the user-defined pattern.
+            * Sets the `Pattern` object's `final_op` attribute. This is the operation in the main machine learning model that corresponds to the last operation in the user-defined pattern. For this last operation, we only verify that the operation types are the same (we don't look at child operations).
+                * We also check that this is the only operation in the captured pattern whose output is also in the block's outputs. If it is not, we return `False, None`
+                * **Assumption: the user-defined pattern has exactly one proper last operation. If the user-defined pattern has multiple "last operations" (i.e., operations with 0 child operations), then `final_op` will be set to only one of them, and the check mentioned above will fail - therefore, not capturing the pattern.**
+            * Returns `True, pattern` if the user-defined pattern is found in the main machine learning model starting at `program_op`'s root variable, and `False, None` otherwise
+* The fifth function, called by the one above:
+`pattern_detected(pattern, program_op, pattern_op, program_root_var, pattern_root_var, block)`
+    * Parameters
+        * `pattern` : A `Pattern` object
+        * `program_op` : The current operation in the main machine learning model that we are looking at
+        * `pattern_op` : The current operation in the user-defined pattern that we are looking at
+        * `program_root_var` : The variable in the main machine learning model that is analogous to the root variable of the user-defined pattern
+        * `pattern_root_var` : The root variable of the user-defined pattern
+        * `block` : The block in the main machine learning model that we are currently looking into
+    * Results
+        * This function recursively looks at operations and their children in the main machine learning model, and returns True if the following conditions are met, and False otherwise:
+            * Every operation in the user-defined pattern has the same operation type and number of outputs as its counterpart in the main machine learning model
+            * Every operation in the user-defined pattern has the same number and type of child operations as its counterpart in the main machine learning model (recursive call). This constraint is not enforced if the operation in the user-defined pattern has 0 children.
+        * **Assumption: if a `program_op` and a `pattern_op` have the same number of outputs, we assume that, if there is a match, those outputs are stored in the same order. Child operations do not have to be ordered.**
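+To make the recursion concrete, here is a stripped-down sketch of the matching idea under the assumptions above (ordered outputs, unordered children; it assumes each output `Var` exposes its consuming ops via `child_ops`). It omits the real bookkeeping, such as capturing matched ops into the `Pattern` object and pairing each pattern child with a distinct program child:
+```
+def ops_match(program_op, pattern_op):
+    # Types and output counts must agree.
+    if program_op.op_type != pattern_op.op_type:
+        return False
+    if len(program_op.outputs) != len(pattern_op.outputs):
+        return False
+    # Outputs are compared in order (assumption); children are unordered,
+    # so every pattern child must find some matching program child.
+    for prog_out, pat_out in zip(program_op.outputs, pattern_op.outputs):
+        if len(pat_out.child_ops) == 0:
+            continue  # a leaf of the pattern ends the recursion
+        if len(prog_out.child_ops) != len(pat_out.child_ops):
+            return False
+        for pat_child in pat_out.child_ops:
+            if not any(ops_match(prog_child, pat_child)
+                       for prog_child in prog_out.child_ops):
+                return False
+    return True
+```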
+* The sixth function, called by the one above:
+`lists_op_equality(oplist1, oplist2)`
+    * Parameters
+        * `oplist1` : A list of operations
+        * `oplist2` : A list of operations
+    * Results
+        * Returns True if the operations in `oplist1` are in the same order and have the same operation types as the operations in `oplist2`, and False otherwise.
+* The `Pattern` class
+    * Stores the captured operations, and in addition has `root_var`, `block`, `op_set`, and `final_op` attributes. The user can, of course, add more attributes to the pattern in their functions if they wish, using `pattern.add_attribute(attribute_name, attribute)`
+    * `pattern.op_list()` returns a list of all unique operations stored in the pattern
+* The `PassContainer` class
+    * In the new infrastructure, each new pattern that the user wants to detect needs to be defined and registered separately. If the user wants to group several of these "subpasses" together, they can register them with the same name and namespace, and all the "subpasses" will be stored in a `PassContainer` instance, where they will eventually all be executed.
+    * `PassContainer(pass_name)` : makes a new `PassContainer` object with a single pass name (a string)
+    * `passContainer.add(pass_func)` : adds a pass function to the `PassContainer`'s list of pass functions. A pass function takes a machine learning model as a parameter and transforms it into the compressed, transformed machine learning model; here it is a partial function of `fuse_all_blocks`, defined above.
+    * `PassContainer.__call__(prog)` : executes all pass functions stored in this `PassContainer` object against the given machine learning model
+
+## _**How to Add/Run a Pass**_
+
+* Write the pass, and save it in a file in the `coreml/coremltools/coremltools/converters/mil/experimental/passes` folder
+* Add an import line to the `coreml/coremltools/coremltools/converters/mil/mil/passes/__init__.py` file
+* Run the experimental (generic) passes by setting the `ENABLE_EXPERIMENTAL_PASSES` environment variable to 1, which will override the regular passes with the same names
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py
new file mode 100644
index 00000000..545ac7e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py
new file mode 100644
index 00000000..498b0810
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass + +""" +Fuse the following batch_norm layer into conv and conv_transpose +That is, convert conv + batch_norm to conv, by modifying the weight and bias in the conv layer +Given: + %2 = conv(%1) + ... + %3 = batch_norm(%2) + ... + +Result: + %3 = conv(%1) + ... +""" + +arbitrary_cin = 5 +arbitrary_cout = 8 +np.random.seed() +arbitrary_input = (3, arbitrary_cin, 224, 224) +arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10) +arbitrary_mean= np.random.rand(arbitrary_cout) +arbitrary_variance = np.random.rand(arbitrary_cout) + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_batchnorm(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + batch_norm = mb.batch_norm(x=conv, mean=arbitrary_mean, variance=arbitrary_variance, name="batchnorm") + return batch_norm + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_batchorm(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + batch_norm = mb.batch_norm(x=conv, mean=arbitrary_mean, variance=arbitrary_variance, name="batchnorm") + return batch_norm + + +def var_constraints(pattern): + return pattern.conv.weight.val is not None + + +def transform_pattern(pattern): + # get parameters from batch_norm layer + gamma = pattern.batchnorm.gamma.val + beta = pattern.batchnorm.beta.val + mean = pattern.batchnorm.mean.val + variance = pattern.batchnorm.variance.val + epsilon = pattern.batchnorm.epsilon.val + # get weight, bias and groups from conv layer + + conv_weight = pattern.conv.weight.val + conv_bias = pattern.conv.bias + groups = pattern.conv.groups.val + + # get type of the conv layer + is_deconv = pattern.conv.op_type == 'conv_transpose' + is_conv_1d = len(conv_weight.shape) == 3 + + # D_in denotes the spatial dimensions for conv kernel weight + # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in] + # for conv, conv_weight has shape [Cout, Cin / groups, *D_in] + if is_deconv: + Cout = conv_weight.shape[1] * groups + Cin = conv_weight.shape[0] + else: + Cout = conv_weight.shape[0] + Cin = conv_weight.shape[1] * groups + + # get the type of the conv weight + conv_weight_type = conv_weight.dtype + + # create bias for conv if not exist + if conv_bias is None: + conv_bias = np.zeros(Cout) + else: + conv_bias = conv_bias.val + conv_bias = conv_bias.astype(conv_weight_type) + + # get the original shape of weight and bias + origin_weight_shape = conv_weight.shape + origin_bias_shape = conv_bias.shape + + # update the weight for conv layer + new_conv_weight = [] + new_conv_bias = [] + + if is_deconv: + conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])) + + for i in range(Cout): + # get batch norm parameters for each channel + _gamma = gamma[i] + _beta = beta[i] + _mean = mean[i] + _variance = variance[i] + _scale = _gamma / np.sqrt(_variance + epsilon) + + # get conv weight and bias for each channel + 
_conv_weight = conv_weight[i] + _conv_bias = conv_bias[i] + + # update the conv weight and bias + _conv_weight = _conv_weight * _scale + _conv_bias = _scale * (_conv_bias - _mean) + _beta + new_conv_weight.append(_conv_weight) + new_conv_bias.append(_conv_bias) + + new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type) + new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type) + + if is_deconv: + new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])) + new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + + # make sure the updated weight and bias have the same shape as the original ones + assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_batchnorm pass." + assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_batchnorm pass." + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = pattern.batchnorm.outputs[0].name + conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + pattern.batchnorm.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.batchnorm, old_var=pattern.batchnorm.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv('ENABLE_EXPERIMENTAL_PASSES') == '1': + register_generic_pass( + ops_arrangement=conv_batchnorm, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_batchnorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_batchorm, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_batchnorm", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py new file mode 100644 index 00000000..ca8977c4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py @@ -0,0 +1,367 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass +from coremltools.converters.mil.mil import types + +""" +Fold add/sub into bias of conv and conv_transpose +That is, convert conv + add/sub to conv, when add/sub is adding a constant + +There are two main patterns supported now. The first one is: + +Pattern 1: +Given: + %2 = conv(%1) + ... + %3 = add(%2, constant) # where constant has shape (1,C,1)/(C,1) for 1d conv, (1,C,1,1)/(C,1,1) for 2d conv etc + ... + +Result: + %3 = conv(%1) + ... + +The second one is: + +Pattern 2: + Given: + %2 = conv(%1) + %3 = transpose(%2) + ... 
+    %4 = add(%3, constant)    # where constant has a broadcastable shape
+    ...
+
+    Result:
+    %2 = conv(%1)
+    %4 = transpose(%2)
+    ...
+
+When taking all of the conv/conv_transpose, transpose/no transpose, and add/sub cases into account,
+we end up with a total of 8 patterns (2^3). These patterns are parameterized by the pattern_to_detect
+function below.
+"""
+
+arbitrary_cin = 5
+arbitrary_cout = 8
+arbitrary_scalar = 5
+np.random.seed()
+arbitrary_perm = [0, 1, 2, 3]
+arbitrary_input = (3, arbitrary_cin, 224, 224)
+arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10)
+
+
+def pattern_to_detect(conv_transpose, transpose, sub):
+    """
+    Wrapper that creates the 8 patterns to detect, for conciseness.
+    """
+
+    @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)])
+    def conv_bias_pattern(x):
+        if not conv_transpose:
+            conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
+        else:
+            conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
+
+        if transpose:
+            transpose_layer = mb.transpose(x=conv, perm=arbitrary_perm, name="transpose")
+
+        if sub:
+            add_or_sub = mb.sub(x=transpose_layer if transpose else conv, y=arbitrary_scalar, name="add_or_sub")
+        else:
+            add_or_sub = mb.add(x=transpose_layer if transpose else conv, y=arbitrary_scalar, name="add_or_sub")
+        return add_or_sub
+
+    return conv_bias_pattern
+
+
+def var_constraints(pattern):
+    bias_value = _get_bias_var(pattern).val
+    rank = pattern.conv.x.rank
+    is_bias_scalar = not isinstance(bias_value, np.ndarray)
+    old_bias = pattern.conv.inputs.get("bias", None)
+    old_bias_value = old_bias.val if old_bias is not None and old_bias.val is not None else None
+
+    passed = True
+    passed = passed and isinstance(bias_value, (np.ndarray, np.generic))
+    passed = passed and rank is not None
+    passed = passed and (rank == 3 or rank == 4 or rank == 5)
+
+    # check compatibility of the bias value with the rank of the conv op
+    # either the bias value should be a scalar or:
+    # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1)
+    # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1)
+    # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1)
+    if not is_bias_scalar:
+        # check that there is at most one dimension in the shape that is not 1
+        passed = passed and len(np.squeeze(bias_value).shape) <= 1
+        # check that the addition is not happening on the batch dimension
+        passed = passed and (len(bias_value) != rank or bias_value.shape[0] == 1)
+        # check that the last rank-2 entries in the shape vector are all 1s
+        passed = passed and np.prod(bias_value.shape[-(rank - 2):]) == 1
+
+    bias_value = np.array([bias_value]) if is_bias_scalar else np.squeeze(bias_value)
+
+    passed = passed and (
+        old_bias is not None
+        or np.prod(bias_value.shape) != 1
+        or pattern.conv.weight.val is not None
+    )
+
+    if old_bias is not None:
+        try:
+            # verify that the two biases can actually be added together
+            new_bias_value = old_bias_value + bias_value
+        except Exception:
+            return False
+
+    return passed
+
+
+def var_constraints_tranpose(pattern):
+    bias = pattern.add_or_sub.x.val if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y.val
+    Cout = pattern.conv.outputs[0].shape[1]
+
+    passed = True
+    passed = passed and (pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None)
+    passed = passed and _bias_mod_and_validity(bias, Cout, pattern) is not None
+    return passed
+
+
+def transform_pattern(pattern):
+    bias_value = _get_bias_var(pattern).val
+
+    is_conv_op = (pattern.conv.op_type == "conv")
+
+    is_bias_scalar =
False + if not isinstance(bias_value, np.ndarray): + is_bias_scalar = True + + bias_value = np.array([bias_value]) if is_bias_scalar else np.squeeze(bias_value) + + if pattern.add_or_sub.op_type == "sub": + bias_value *= -1 + + # everything looks good, now find the new updated bias + old_bias = pattern.conv.inputs.get("bias", None) + old_bias_value = None + if old_bias is not None and old_bias.val is not None: + old_bias_value = old_bias.val + if old_bias is None: + # need to create a fresh numpy array for bias + if np.prod(bias_value.shape) == 1: + # its a scalar bias + # need to find the value of Cout to form a new bias + # conv_transpose has weight format [K, C_out, spatial dims] + # conv has weight format [C_out, K, spatial dims] + Cout = pattern.conv.weight.val.shape[0 if is_conv_op else 1] + new_bias_value = np.broadcast_to(bias_value, (Cout,)) + else: + new_bias_value = bias_value + else: + # just need to update the existing bias array + new_bias_value = old_bias_value + bias_value + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = pattern.add_or_sub.outputs[0].name + if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16: + # cast the bias to match the weight type + weight_np_type = types.nptype_from_builtin(pattern.conv.inputs["weight"].sym_type.get_primitive()) + logger.warning("conv_bias_fusion pass: casting bias " + "from {} to {} to match the dtype of the weight of the conv layer".format( + new_bias_value.dtype, weight_np_type + ) + ) + new_bias_value = new_bias_value.astype(weight_np_type) + new_bias_var = mb.const(val=new_bias_value, before_op=pattern.conv) + + conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k == "bias": + continue + conv_kargs[k] = v + + if is_conv_op: + x = mb.conv(**conv_kargs) + else: + x = mb.conv_transpose(**conv_kargs) + + pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +def transform_transpose_pattern(pattern): + is_deconv = pattern.conv.op_type == "conv_transpose" + + # get the bias + bias = pattern.add_or_sub.x.val if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y.val + is_first_input = pattern.add_or_sub.y.val is not None + is_sub = pattern.add_or_sub.op_type == "sub" + + # get the conv bias/weight + conv_shape = pattern.conv.outputs[0].shape + Cout = conv_shape[1] + conv_weight = pattern.conv.weight.val + conv_weight_type = conv_weight.dtype + conv_bias = np.zeros(Cout).astype(conv_weight_type) if pattern.conv.bias is None else pattern.conv.bias.val + + bias = _bias_mod_and_validity(bias, Cout, pattern) + + # compute the new bias + if is_sub: + if is_first_input: + bias = -bias + else: + conv_bias = -conv_bias + + new_bias = conv_bias + bias + + # compute the new weight + if is_sub and not is_first_input: + new_weight = -conv_weight + else: + new_weight = conv_weight + + # create a new conv op with the new weight, bias value, copying rest of the attributes + conv_kargs = {"weight": new_weight, "bias": new_bias, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + # create a new transpose op + out_name = 
pattern.add_or_sub.outputs[0].name + tranpose_kargs = {"x": x, "name": out_name, "before_op": pattern.transpose} + for k, v in pattern.transpose.inputs.items(): + if k == "x": + continue + tranpose_kargs[k] = v + x = mb.transpose(**tranpose_kargs) + + pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x + ) + + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + +def _bias_mod_and_validity(bias, Cout, pattern): + # check if the bias is compatible for fusion + is_bias_scalar = True + if isinstance(bias, np.ndarray): + if bias.shape == (): + bias = bias.tolist() + elif np.prod(bias.shape) == 1: + bias = np.squeeze(bias).tolist() + else: + is_bias_scalar = False + + if not is_bias_scalar: + if np.prod(bias.shape) != Cout: + return None + rank = pattern.transpose.outputs[0].rank + cout_dim = pattern.transpose.perm.val.tolist().index(1) - rank + if bias.shape[cout_dim] != Cout: + return None + bias = np.reshape(bias, (Cout)) + + return bias + +def _get_bias_var(pattern): + if pattern.add_or_sub.op_type == "sub": + bias_var = pattern.add_or_sub.y + else: + bias_var = pattern.add_or_sub.x if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y + + return bias_var + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + + # conv -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(False, False, False), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(False, False, True), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(True, False, False), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(True, False, True), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(False, True, False), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> transpse -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(False, True, True), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(True, True, False), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> transpose -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(True, True, True), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py new file mode 100644 index 00000000..744e2d2f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py @@ -0,0 +1,244 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass + +""" +Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers. + +The scale const can be a single number (scalar) or a vector with a broacasable shape, +for instance, if the output of the conv/deconv layer is (B, Cout, H, W), +const of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed. + +Given: + %2 = conv(%1) + ... + %3 = mul(%2, constant) # where constant is the scale constant + ... + +Result: + %3 = conv(%1) + ... +""" + +arbitrary_cin = 5 +arbitrary_cout = 8 +arbitrary_scalar = 5 +np.random.seed() +arbitrary_input = (3, arbitrary_cin, 224, 224) +arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10) + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_scale_mul(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale") + return mul + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_scale_mul(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale") + return mul + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_scale_div(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale") + return real_div + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_scale_div(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale") + return real_div + + +def _cin_cout(pattern): + # D_in denotes the spatial dimensions for conv kernel weight + # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in] + # for conv, conv_weight has shape [Cout, Cin / groups, *D_in] + is_deconv = pattern.conv.op_type == "conv_transpose" + groups = pattern.conv.groups.val + conv_weight = pattern.conv.weight.val + if is_deconv: + Cout = conv_weight.shape[1] * groups + Cin = conv_weight.shape[0] + else: + Cout = conv_weight.shape[0] + Cin = conv_weight.shape[1] * groups + + return Cin, Cout + + +def _is_scalar(pattern): + # for the scalar case, the scalar can be either + # 1. a python int/float + # 2. a 0d numpy array + # 3. 
a 1d numpy array with shape (1,) + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + is_scalar = True + if isinstance(scale, np.ndarray): + if scale.shape == (): + scale = scale.tolist() + elif scale.shape == (1) or scale.shape == (1,): + scale = scale[0] + else: + is_scalar = False + + return is_scalar + + +def var_constraints(pattern): + passed = True + passed = passed and pattern.scale.x.val is not None or pattern.scale.y.val is not None + passed = passed and pattern.conv.weight.val is not None + + is_scalar = _is_scalar(pattern) + Cin, Cout = _cin_cout(pattern) + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + + # for the vector scale case, check if the shape is broacastable + if not is_scalar: + conv_weight = pattern.conv.weight.val + passed = passed and ( + np.product(scale.shape) == Cout + or (len(scale.shape) == len(conv_weight.shape) and scale.shape[1] == Cout) + or (len(scale.shape) == len(conv_weight.shape) - 1 and scale.shape[0] == Cout) + ) + + return passed + + +def transform_pattern(pattern): + # get the scale + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + is_scalar = _is_scalar(pattern) + + # get weight and bias and groups from conv layer + conv_weight = pattern.conv.weight.val + conv_bias = pattern.conv.bias + groups = pattern.conv.groups.val + + # get type of the conv layer + is_deconv = pattern.conv.op_type == "conv_transpose" + is_conv_1d = len(conv_weight.shape) == 3 + + Cin, Cout = _cin_cout(pattern) + + # transform the scale to 1./scale for the real_div case + if pattern.scale.op_type == "real_div": + scale = 1.0 / scale + + # get the type of the conv weight + conv_weight_type = conv_weight.dtype + + # create bias for conv if not exist + if conv_bias is None: + conv_bias = np.zeros(Cout) + else: + conv_bias = conv_bias.val + conv_bias = conv_bias.astype(conv_weight_type) + + # get the original shape of weight and bias + origin_weight_shape = conv_weight.shape + origin_bias_shape = conv_bias.shape + + # update the weight/bias for conv layer + if is_scalar: + new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type) + new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type) + + else: + scale = np.reshape(scale, (Cout)) + new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type) + new_conv_weight = [] + if is_deconv: + conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])) + + for i in range(Cout): + _conv_weight = conv_weight[i] * scale[i] + new_conv_weight.append(_conv_weight) + new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type) + + if is_deconv: + new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])) + new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + + # make sure the updated weight and bias have the same shape as the original ones + assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass." + assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass." 
+ + # create a new conv op with the new weight, bias value, copying rest of the attributes + out_name = pattern.scale.outputs[0].name + conv_kargs = { + "weight": new_conv_weight, + "bias": new_conv_bias, + "name": out_name, + "before_op": pattern.conv, + } + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + pattern.scale.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.scale, old_var=pattern.scale.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + register_generic_pass( + ops_arrangement=conv_scale_mul, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_scale_mul, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_scale_div, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_scale_div, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py new file mode 100644 index 00000000..890dacf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py @@ -0,0 +1,457 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass +from coremltools.converters.mil.mil import get_new_symbol + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + shape = (get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()) + +def _check_reduce_op(reduce_op, mode="reduce_mean") -> bool: + """ + Check whether or not the reduction op satisfy following conditions: + - Mode is expected. + - Does not change rank (keep_dims is True). + - Axes are known at compile time. 
+
+    :param reduce_op: reduce op to check on
+    :param mode: reduce mode
+    """
+    if reduce_op is None:
+        return False
+    if reduce_op.op_type != mode:
+        return False
+    if reduce_op.keep_dims.val is False:
+        return False
+    if reduce_op.axes is None or reduce_op.axes.val is None:
+        return False
+    return True
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+    def instancenorm_or_layernorm(x):
+        """
+        Identify the pattern:
+
+        y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
+
+        y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
+
+        x --> main_reduce --> sub --> square --> reduce_mean_2 --> add(epsilon) --> rsqrt
+        |        |             ^                                                      |
+        |        |             |                                                      V
+        |-----------------------                                                 mul (gamma)
+        |        |                                                                    |
+        |        |                                                  --------|---------
+        |        |                                                  |               |
+        |        |                                                  |               V
+        |        |------------------------------------------------------------> mul_3
+        |        |                                                  |               |
+        |        V                                                  |               |
+        |----------------------------------------------------------------> mul_2   |
+        |                                                                           V
+        |                                                    sub (beta) --> add_2 --> [...]
+        |                                                                     ^
+        |-------------------------------
+
+        This pattern corresponds to either layer_norm or instance_norm.
+
+        It is instance_norm if all of the following are true:
+        - input is rank 4
+        - axes of reduce_mean is [-2, -1] or [-3, -2]
+          (when [-3, -2], a channel first to channel last transpose would be inserted)
+        - gamma and beta are rank 1, after squeeze
+
+        It is layer_norm if all of the following are true:
+        - axes is either [-1] or [-1, -2] or [-1, -2, -3] and so on
+        - rank of gamma and beta is equal to the length of the axes
+        """
+        main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce")
+        sub = mb.sub(x=x, y=main_reduce, name="sub")
+        square = mb.square(x=sub, name="square")
+        reduce_mean_2 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="reduce_mean_2")
+        add_epsilon = mb.add(x=reduce_mean_2, y=1e-5, name="add_epsilon")
+        rsqrt = mb.rsqrt(x=add_epsilon, epsilon=1e-12, name="rsqrt")
+        mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, 5, 1, 1), name="mul_gamma")
+        mul_2 = mb.mul(x=x, y=mul_gamma, name="mul_2")
+        mul_3 = mb.mul(x=main_reduce, y=mul_gamma, name="mul_3")
+        sub_beta = mb.sub(x=np.random.rand(1, 5, 1, 1), y=mul_3, name="sub_beta")
+        add_2 = mb.add(x=sub_beta, y=mul_2, name="add_2")
+        return add_2
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+    def instancenorm_2(x):
+        """
+        Identify the pattern:
+        y = (x - mean) / pow(variance + epsilon) * gamma + beta
+
+        This pattern corresponds to, and should be fused as, instance_norm.
+        All of the following must be satisfied:
+        1) Input is a rank 4 tensor
+        2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a
+           channel first to channel last transpose would be inserted in such case)
+        3) Gamma and beta are both shape (C,) after squeeze, where C is the number of channels
+
+        |----> sub0 ----------|                              const (0.5)
+        |        ^            |                                   |
+        |        |            V                                   V
+        x ---> main_reduce  square --> mean1 --> add_eps ---> pow    const_gamma    const_beta
+        |        |                                             |         |              |
+        |        V                                             V         V              V
+        |----> sub1 ----------------------------------------> real_div --> mul_gamma --> add_beta --> ...
+ """ + + main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + sub0 = mb.sub(x=x, y=main_reduce, name="sub0") + sub1 = mb.sub(x=x, y=main_reduce, name="sub1") + square = mb.square(x=sub0, name="square") + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="mean1") + add_epsilon = mb.add(x=mean1, y=1e-5, name="add_epsilon") + pow = mb.pow(x=add_epsilon, y=0.5, name="pow") + real_div = mb.real_div(x=sub1, y=pow, name="real_div") + mul_gamma = mb.mul(x=np.random.rand(1, 5, 1, 1), y=real_div, name="mul_gamma") + add_beta = mb.add(x=np.random.rand(1, 5, 1, 1), y=mul_gamma, name="add_beta") + return add_beta + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_3(x): + """ + Detect InstanceNorm pattern in TensorFlow-Addons. + + This pattern corresponds to, should be fused as instance_norm. + All of the following must be satisty: + 1) Input is rank 4 tensor + 2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a + channel first to channel last transpose would be inserted in such case) + 3) Gamma and beta are absent. Default values for gamma and beta would be used. + + |-------------------------------------------------------| + | | + | V + x --> main_reduce square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----------| | | + | V V + |--------------------------------------------------> mul1 -------------> add --> ... + """ + + main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + sub = mb.sub(x=x, y=main_reduce, name="sub") + square = mb.square(x=sub, name="square") + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="mean1") + add_epsilon = mb.add(x=mean1, y=1e-5, name="add_epsilon") # epsilon + rsqrt = mb.rsqrt(x=add_epsilon, name="rsqrt") + mul1 = mb.mul(x=rsqrt, y=x, name="mul1") + mul2 = mb.mul(x=main_reduce, y=rsqrt, name="mul2") + mul_sub = mb.mul(x=mul2, y=-1, name="mul_sub") + add = mb.add(x=mul1, y=mul_sub, name="add") + return add + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_4(x): + """ + Identify the pattern: + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + This pattern corresponds to, should be fused as instance_norm. + All of the following must be satisty: + 1) Input is rank 4 tensor + 2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a + channel first to channel last transpose would be inserted in such case) + 3) Gamma and beta are both shape (C,) after squeeze, where C is number of channels + + |-----------| + | V + |------> mul_square1 -------------> sum1 -----> mul_mean1 + | | + | V + x --> main_reduce --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |------------------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] 
+ | ^ + |---------------------------| + """ + mul_square1 = mb.mul(x=x, y=x, name="mul_square1") + main_reduce = mb.reduce_sum(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + mul_mean = mb.mul(x=main_reduce, y=3.3333334e-05, name="mul_mean") # dummy value here + mul_square = mb.mul(x=mul_mean, y=mul_mean, name="mul_square") + sum1 = mb.reduce_sum(x=mul_square1, axes=[2, 3], keep_dims=True, name="sum1") + mul_mean1 = mb.mul(x=sum1, y=8.333333e-06, name="mul_mean1") # dummy value here + sub_variance = mb.sub(x=mul_mean1, y=mul_square, name="sub_variance") + add_epsilon = mb.add(x=sub_variance, y=1e-5, name="add_epsilon") # epsilon + rsqrt = mb.rsqrt(x=add_epsilon, name="rsqrt") + mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, 5, 1, 1), name="mul_gamma") + mul1 = mb.mul(x=mul_gamma, y=x, name="mul1") + mul2 = mb.mul(x=mul_mean, y=mul_gamma, name="mul2") + sub_beta = mb.sub(x=np.random.rand(1, 5, 1, 1), y=mul2, name="sub_beta") + add = mb.add(x=mul1, y=sub_beta, name="add") + return add + +def instancenorm_1_constraints(pattern): + passed = True + passed = passed and _common_pattern1_constraints(pattern) + passed = passed and _instancenorm_constraints(pattern) + return passed + + +def layernorm_1_constraints(pattern): + passed = True + passed = passed and _common_pattern1_constraints(pattern) + passed = passed and _layernorm_constraints(pattern) + return passed + + +def instancenorm_2_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.mean1) + gamma_var = _get_var(pattern.mul_gamma, pattern.real_div) + beta_var = _get_var(pattern.add_beta, pattern.mul_gamma) + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and pattern.sub0.x == pattern.root_var and pattern.sub0.y == pattern.main_reduce.outputs[0] + passed = passed and pattern.sub1.x == pattern.root_var and pattern.sub1.y == pattern.main_reduce.outputs[0] + passed = passed and _check_reduce_op(pattern.mean1) + passed = passed and pattern.pow.y.val is not None and np.isclose(pattern.pow.y.val, 0.5) + passed = passed and pattern.real_div.x == pattern.sub1.outputs[0] and pattern.real_div.y == pattern.pow.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def instancenorm_3_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.mean1) + + gamma_var = mb.const( + val=np.ones(shape=(1, pattern.root_var.shape[1], 1, 1)), name="gamma_var" + ) + beta_var = mb.const( + val=np.zeros(shape=(1, pattern.root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_beta", + ) + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and pattern.sub.x == pattern.root_var and pattern.sub.y == pattern.main_reduce.outputs[0] + passed = passed and _check_reduce_op(pattern.mean1) + passed = passed and pattern.mul_sub.y.val is not None and pattern.mul_sub.y.val == -1 + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def instancenorm_4_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.sub_variance) + gamma_var = _get_var(pattern.mul_gamma, pattern.rsqrt) + beta_var = pattern.sub_beta.x + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce, mode="reduce_sum") + passed = passed and pattern.mul_mean.y.shape == () + passed = passed and 
_check_reduce_op(pattern.sum1, "reduce_sum") + passed = passed and pattern.mul_mean1.y.shape == () + passed = passed and pattern.sub_variance.y == pattern.mul_square.outputs[0] + passed = passed and pattern.sub_beta.y == pattern.mul2.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def _general_constraints(pattern, epsilon_var, gamma_var, beta_var): + passed = True + passed = passed and pattern.root_var.shape is not None + passed = passed and epsilon_var.val is not None and len(epsilon_var.val.shape) == 0 + passed = passed and gamma_var.val is not None + passed = passed and beta_var.val is not None + + pattern.add_attribute("epsilon_var", epsilon_var) + pattern.add_attribute("gamma_var", gamma_var) + pattern.add_attribute("beta_var", beta_var) + return passed + + +def _common_pattern1_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.reduce_mean_2) + gamma_var = _get_var(pattern.mul_gamma, pattern.rsqrt) + beta_var = pattern.sub_beta.x + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and _check_reduce_op(pattern.reduce_mean_2) + passed = passed and pattern.sub.x == pattern.root_var and pattern.sub.y == pattern.main_reduce.outputs[0] + passed = passed and pattern.sub_beta.y == pattern.mul_3.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + + return passed + +def _layernorm_constraints(pattern): + rank, axes, negative_axes = _rank_and_axes(pattern) + + passed = True + passed = passed and len(pattern.gamma_var.val.shape) == len(axes) + passed = passed and len(pattern.beta_var.val.shape) == len(axes) + passed = passed and negative_axes == list(range(-len(negative_axes), 0)) + requires_rank4_transpose = False + + if rank == 4 and negative_axes == [-3, -2]: + requires_rank4_transpose = True + + pattern.add_attribute("requires_rank4_transpose", requires_rank4_transpose) + pattern.add_attribute("is_instancenorm", False) + return passed + + +def _instancenorm_constraints(pattern): + rank, axes, negative_axes = _rank_and_axes(pattern) + + passed = True + passed = passed and rank == 4 + passed = passed and _check_axes_and_var_shape(negative_axes, pattern.gamma_var.shape) + passed = passed and _check_axes_and_var_shape(negative_axes, pattern.beta_var.shape) + + requires_rank4_transpose = False + if negative_axes == [-3, -2]: + requires_rank4_transpose = True + pattern.add_attribute("requires_rank4_transpose", requires_rank4_transpose) + pattern.add_attribute("is_instancenorm", True) + return passed + + +def _rank_and_axes(pattern): + rank = len(pattern.root_var.shape) + axes = pattern.main_reduce.axes.val + negative_axes = [a - rank if a >= 0 else a for a in axes] + negative_axes.sort() + return rank, axes, negative_axes + + +def _get_var(operation1, operation2): + return operation1.y if operation1.x == operation2.outputs[0] else operation1.x + +def _check_axes_and_var_shape(negative_axes, shape): + if len(shape) == 1: + return True + if negative_axes == [-2, -1]: + return shape[0] == 1 and shape[2] == 1 and shape[3] == 1 + if negative_axes == [-3, -2]: + return shape[0] == 1 and shape[1] == 1 and shape[2] == 1 + return False + +def transform_pattern(pattern): + """ + Insert instance_norm / layer_norm and delete all ops. + :param pattern: A pattern object that contains all relevant information. 
+ """ + out_name = pattern.final_op.outputs[0].name + axes = pattern.main_reduce.axes.val + + if pattern.requires_rank4_transpose: + x = mb.transpose( + x=pattern.main_reduce.x, + perm=[0, 3, 1, 2], + name=out_name + "_transpose_nhwc_nchw", + before_op=pattern.final_op, + ) + if pattern.is_instancenorm: + x = mb.instance_norm( + x=x if pattern.requires_rank4_transpose else pattern.main_reduce.x, + gamma=np.squeeze(pattern.gamma_var.val), + beta=np.squeeze(pattern.beta_var.val), + epsilon=pattern.epsilon_var, + name=out_name + "_instancenorm" if pattern.requires_rank4_transpose else out_name, + before_op=pattern.final_op, + ) + else: # is_layernorm + x = mb.layer_norm( + x=x if pattern.requires_rank4_transpose else pattern.main_reduce.x, + axes=axes, + gamma=pattern.gamma_var, + beta=pattern.beta_var, + epsilon=pattern.epsilon_var, + name=out_name + "_layernorm" if pattern.requires_rank4_transpose else out_name, + before_op=pattern.final_op, + ) + if pattern.requires_rank4_transpose: + x = mb.transpose( + x=x, + perm=[0, 2, 3, 1], + name=out_name + "_transpose_nchw_nhwc", + before_op=pattern.final_op, + ) + + pattern.final_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.final_op, old_var=pattern.final_op.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + register_generic_pass( + ops_arrangement=instancenorm_or_layernorm, + var_constraints=layernorm_1_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_or_layernorm, + var_constraints=instancenorm_1_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_2, + var_constraints=instancenorm_2_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_3, + var_constraints=instancenorm_3_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_4, + var_constraints=instancenorm_4_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py new file mode 100644 index 00000000..3b12b5e6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py @@ -0,0 +1,133 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+
+import numpy as np
+
+from coremltools.converters.mil import Builder as mb
+from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \
+    register_generic_pass
+from coremltools.converters.mil.mil import get_new_symbol
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    arbitrary_shape = (get_new_symbol(), get_new_symbol())
+    np.random.seed()
+    arbitrary_weight = np.random.rand(4, 3)
+    arbitrary_bias = np.random.rand(4)
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+    def pattern_add(x):
+        """
+        Original:
+            %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                                   # %3 is a rank-1 const tensor (bias)
+            ...
+            %6 = add(x=%4, y=%5)  # %5 is a const tensor with the same shape as %3
+
+        Result:
+            %8 = linear(x=%1, weight=%2, bias=%7)  # where %7 is a new const tensor with value
+                                                   # %7 = %3 + %6
+        """
+        linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+        add_or_sub = mb.add(x=linear, y=arbitrary_bias, name="add_or_sub")
+        return add_or_sub
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+    def pattern_sub(x):
+        """
+        Original:
+            %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                                   # %3 is a rank-1 const tensor (bias)
+            ...
+            %6 = sub(x=%5, y=%4)  # %5 is a const tensor with a shape broadcastable to %3,
+                                  # i.e. if %3 has shape (Dout), %5 could be (1, Dout).
+
+        Result:
+            %9 = linear(x=%1, weight=%7, bias=%8)  # where %7 is a new const tensor with value %7 = -%2
+                                                   # %8 = %5 - %3
+        """
+        linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+        add_or_sub = mb.sub(x=linear, y=arbitrary_bias, name="add_or_sub")
+        return add_or_sub
+
+
+def var_constraints(pattern):
+    passed = True
+    passed = passed and (pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None)
+
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+
+    # check if the shape is broadcastable
+    passed = passed and np.prod(linear_bias.shape) == np.prod(bias.shape)
+    passed = passed and bias.shape[-1] == Dout
+    return passed
+
+
+def _get_is_sub_and_is_first_input(pattern):
+    is_sub = pattern.add_or_sub.op_type == "sub"
+    is_first_input = pattern.add_or_sub.x == pattern.linear.outputs[0]
+    return is_sub, is_first_input
+
+
+def _get_linear_bias_bias_Dout(pattern, is_first_input):
+    linear_bias = pattern.linear.bias.val
+    bias = pattern.add_or_sub.y.val if is_first_input else pattern.add_or_sub.x.val
+    Dout = linear_bias.shape[0]
+    return linear_bias, bias, Dout
+
+
+def transform_pattern(pattern):
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+    bias = np.reshape(bias, (Dout,))
+
+    if is_sub and is_first_input:
+        bias = -bias
+    if is_sub and not is_first_input:
+        linear_bias = -linear_bias
+
+    new_bias = linear_bias + bias
+
+    # compute the new weight
+    if is_sub and not is_first_input:
+        new_weight = -pattern.linear.weight.val
+    else:
+        new_weight = pattern.linear.weight.val
+
+    # create a new linear op with the new weight and bias values, copying the rest of the attributes
attributes
+    out_name = pattern.add_or_sub.outputs[0].name
+    linear_kwargs = {"weight": new_weight, "bias": new_bias, "name": out_name, "before_op": pattern.linear}
+
+    linear_kwargs.update({k: v for k, v in pattern.linear.inputs.items() if k not in ["weight", "bias"]})
+
+    x = mb.linear(**linear_kwargs)
+
+    pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x
+    )
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    register_generic_pass(
+        ops_arrangement=pattern_add,
+        var_constraints=var_constraints,
+        transform_pattern=transform_pattern,
+        pass_name="fuse_linear_bias",
+        namespace="common",
+    )
+
+    register_generic_pass(
+        ops_arrangement=pattern_sub,
+        var_constraints=var_constraints,
+        transform_pattern=transform_pattern,
+        pass_name="fuse_linear_bias",
+        namespace="common",
+    )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py
new file mode 100644
index 00000000..9ebb1b2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py
@@ -0,0 +1,221 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+from functools import partial
+
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+
+from ...mil.passes import pass_registry
+
+# IMPORTANT: assumptions we are making about the problem
+# 1) The user-defined pattern has exactly one root variable and one final output operation. As such, we search for a singular
+#    root variable in the larger program and use that root variable as the starting point for our pattern matching,
+#    and we only match one of the final operations of the larger program.
+# 2) The root variable in the larger program, where we start off the pattern matching, must have the same number of child ops as the
+#    root variable in the user-defined program.
+# 3) The outputs of an operation are stored in an identical, predictable order. The child operations of an operation are stored in a random order.
+
+
+class Pattern:
+    """This class holds references to all the ops that we have captured in the main, larger program.
+    Each captured op is an attribute of this class. The attribute name is the same name
+    that the user defined in their pattern. So, if the user defines a pattern add(name='add_1') -> sub(name='sub_1'),
+    the pattern object will have the fields pattern.add_1 and pattern.sub_1, which are references to the corresponding operations
+    in the larger program.
+
+
+    Minimum Attributes:
+    root_var: the root variable of the first operation of the captured pattern (corresponds to the user-defined pattern's root variable)
+    final_op: the operation in the larger machine learning model that corresponds to the last operation in the user defined pattern.
+    block: the block in the larger machine learning model where the pattern was found
+    op_set: a set of all the operations captured from the larger machine learning model
+    attribute_set: used for enforcing naming (i.e., so the user doesn't overwrite any of the variables mentioned above)
+
+    Setters
+    set_root_var(root_var): sets the root_var attribute of the Pattern with the given root_var
+    set_block(block): sets the block attribute of the Pattern with the given block
+    set_final_op(op_name, final_op): adds the operation in question to the pattern and also sets it as the final_op
+
+    Other Methods
+    add_attribute(attribute_name, attribute): adds an attribute to the pattern object. Can be useful for the user.
+                                              Verifies the name using the attribute set mentioned above
+    add_op(op_name, op): adds an operation to the pattern, both as an attribute that can be accessed and as part of the op_set
+    op_list(): converts the op_set to a list and returns it, to make things easier for the user
+
+    """
+
+    def __init__(self):
+        self.root_var = None
+        self.block = None
+        self.final_op = None
+        self.op_set = set()
+        self.attribute_set = set(["root_var", "block", "final_op", "op_set", "attribute_set"])
+
+    def set_root_var(self, root_var):
+        self.root_var = root_var
+
+    def set_block(self, block):
+        self.block = block
+
+    def set_final_op(self, op_name, final_op):
+        self.add_op(op_name, final_op)
+        self.final_op = final_op
+
+    def add_attribute(self, attribute_name, attribute):
+        if attribute_name in self.attribute_set:
+            raise NameError("Pattern attribute " + attribute_name + " is being overwritten. "
+                            "Make sure every operation in your MIL pattern to detect "
+                            "has a unique name, and that no operation in it, or attribute you are setting, is named "
+                            "root_var, block, final_op, op_set, or attribute_set.")
+        setattr(self, attribute_name, attribute)
+
+    def add_op(self, op_name, op):
+        self.add_attribute(op_name, op)
+        self.op_set.add(op)
+
+    def op_list(self):
+        return list(self.op_set)
+
+
+def _lists_op_equality(oplist1, oplist2):
+    if len(oplist1) != len(oplist2):
+        return False
+
+    for i in range(len(oplist1)):
+        if oplist1[i].op_type != oplist2[i].op_type:
+            return False
+
+    return True
+
+
+def _pattern_detected(pattern, program_op, pattern_op, program_root_var, pattern_root_var, block):
+    # If pattern_op is None, we are dealing with root_var checking (root_vars have no op_type or outputs)
+    if pattern_op is not None and program_op.op_type != pattern_op.op_type:
+        return False
+
+    if pattern_op is not None and len(program_op.outputs) != len(pattern_op.outputs):
+        return False
+
+    for i in range(len(program_op.outputs) if pattern_op is not None else 1):
+        output_same = False
+
+        # ASSUMPTION: assuming that the outputs of an operation are ordered in a particular way,
+        # so two identical operations will have the same ordering of outputs.
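+        # Child ops, by contrast, are unordered (assumption 3 at the top of this file),
+        # which is why the matcher below tries every permutation of the pattern's child
+        # ops until one lines up, op type for op type, with the program's child ops.
+        # For example (hypothetical): a pattern output feeding [mul, add] still matches
+        # a program output feeding [add, mul].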
+        program_child_op_list = list(program_op.outputs[i].child_ops) if pattern_op is not None else program_root_var.child_ops
+        pattern_child_op_list = list(pattern_op.outputs[i].child_ops) if pattern_op is not None else pattern_root_var.child_ops
+
+        # Last op in the pattern
+        if len(pattern_child_op_list) == 0:
+            if pattern.final_op is not None and pattern.final_op != program_op:
+                raise ValueError("User defined pattern has more than one final operation")
+            pattern.set_final_op(pattern_op.name, program_op)
+            return True
+
+        if len(program_child_op_list) != len(pattern_child_op_list):
+            return False
+
+        # Permute the pattern child operations so that at least one of the permutations is in
+        # the exact same order as the program child operations
+        op_combos = list(itertools.permutations(pattern_child_op_list))
+
+        for combo in op_combos:
+            if _lists_op_equality(combo, program_child_op_list):
+                truly_equal = True
+
+                for j in range(len(combo)):
+                    truly_equal = truly_equal and _pattern_detected(pattern, program_child_op_list[j], combo[j], program_root_var, pattern_root_var, block)
+
+                if truly_equal:
+                    # The operations in this sequence match perfectly with the pattern
+                    output_same = True
+                    break
+
+        if output_same is False:
+            return False
+
+    if pattern_op is not None:
+        pattern.add_op(pattern_op.name, program_op)
+    return True
+
+
+# This function finds the root variable in the program that matches the root variable in the pattern,
+# and then kicks off the pattern matching from there
+def _detect_pattern(program_op, ops_arrangement_root_var, block):
+    # The goal of this function is to find the root variable of both operations
+    program_op_inputs = program_op.get_flattened_inputs()
+
+    for potential_program_root_variable in program_op_inputs:
+        pattern = Pattern()
+        pattern.set_block(block)
+
+        if _pattern_detected(pattern, program_op, ops_arrangement_root_var.op, potential_program_root_variable, ops_arrangement_root_var, block):
+            pattern.set_root_var(potential_program_root_variable)
+
+            # check that none of the ops in this pattern (except the last one)
+            # is connected to the block output
+            for op in pattern.op_list():
+                if op is not pattern.final_op:
+                    for out in op.outputs:
+                        if out in pattern.block.outputs:
+                            return False, None
+
+            return True, pattern
+
+    return False, None
+
+
+@block_context_manager
+def _fuse_one_block(block, ops_arrangement, var_constraints, transform_pattern):
+    fusion_status = False
+    for op in list(block.operations):
+        for b in op.blocks:
+            block_changed = True
+            while block_changed:
+                block_changed = _fuse_one_block(b, ops_arrangement, var_constraints, transform_pattern)
+
+        ops_arrangement_root_var = list(ops_arrangement.functions.values())[0].function_inputs[0]
+        fusion_status, pattern = _detect_pattern(op, ops_arrangement_root_var, block)
+
+        if fusion_status:
+            fusion_status &= var_constraints(pattern)
+
+        if fusion_status:
+            transform_pattern(pattern)
+            return fusion_status
+
+    return fusion_status
+
+
+def fuse_all_blocks(ops_arrangement, var_constraints, transform_pattern, prog):
+    for f in prog.functions.values():
+        block_changed = True
+        while block_changed:
+            block_changed = _fuse_one_block(f, ops_arrangement, var_constraints, transform_pattern)
+
+
+class PassContainer:
+    def __init__(self, pass_name):
+        self.pass_name = pass_name
+        self.passes = []
+
+    def __call__(self, prog):
+        if len(self.passes) == 0:
+            raise ValueError("no pass functions associated with " + self.pass_name)
+
+        for one_pass in self.passes:
+            one_pass(prog)
+            prog.validate()
+
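+    # A minimal usage sketch (editorial illustration; `my_arrangement`, `my_constraints`
+    # and `my_transform` are hypothetical stand-ins):
+    #
+    #     container = PassContainer("fuse_linear_bias")
+    #     container.add(partial(fuse_all_blocks, my_arrangement, my_constraints, my_transform))
+    #     container(prog)  # runs each registered pass function, validating prog after each one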
+ def add(self, pass_function): + self.passes.append(pass_function) + +def register_generic_pass(ops_arrangement, var_constraints, transform_pattern, pass_name, namespace): + pass_function = partial(fuse_all_blocks, ops_arrangement, var_constraints, transform_pattern) + + pass_id = namespace + "::" + pass_name + if pass_id not in pass_registry.PASS_REGISTRY or not isinstance(pass_registry.PASS_REGISTRY[pass_id], PassContainer): + pass_registry.PASS_REGISTRY.passes[pass_id] = PassContainer(pass_name) + + pass_registry.PASS_REGISTRY[pass_id].add(pass_function) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py new file mode 100644 index 00000000..ee7e9ea0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import tensorflow, tensorflow2, torch diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py new file mode 100644 index 00000000..8e39fbe5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py @@ -0,0 +1,410 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from typing import List, Optional + +from coremltools.converters.mil.input_types import InputType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Var, types +from coremltools.converters.mil.mil.ops.defs._utils import parse_einsum_equation +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic + + +def value_at(x: Var, idx: int, name=None): + """ + input x: 1D tensor (vector). + return value at index idx. x[idx]. + Could specify the name of the returned MIL scalar tensor as well. 
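+
+    For example (editorial sketch): for a vector x of shape (3,), value_at(x, 1)
+    lowers to a single slice_by_index op with begin=[1], end=[0],
+    squeeze_mask=[True], and yields the scalar x[1].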
+ """ + assert x.rank == 1 + args = { + "x": x, + "begin": [idx], + "end": [0], + "squeeze_mask": [True], + } + if name is not None: + args["name"] = name + return mb.slice_by_index(**args) + + +def _reverse_input_einsum_eq(equation: str) -> str: + """ + Reverse the input order of the einsum eqaution + e.g.: + input : "nchw,nwhu->nchu" + returns : "nwhu,nchw->nchu" + """ + input_output_strings = equation.split('->') + assert len(input_output_strings) == 2, "invalid equation" + input_strings = input_output_strings[0].split(',') + assert len(input_strings) == 2, "invalid equation" + equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1] + return equation + + +def build_einsum_mil(a_var: Var, b_var: Var, equation: str, name: str) -> Var: + """ + Get MIL variables as input and build a variable using MIL builder, that + contains the output of the einsum equation + + :param a_var: + - var + - first input variable + :param b_var: + - var + - second input variable + :param equation: + - str + - the einsum equation + :param name: + - str + - name tp be assigned to the output var + + :return: + - var + - output var that contains the einsum result + """ + + ## TODO: rdar://73851694 (Update einsum op translation to support generic cases) + equation = equation.replace(" ", "") + parsed_vectors = parse_einsum_equation(equation) + equation_rev = _reverse_input_einsum_eq(equation) + parsed_vectors_rev = parse_einsum_equation(equation_rev) + + def _swap(a, b): + return b, a + + is_dynamic = any_symbolic(a_var.shape) or any_symbolic(b_var.shape) + # list of equations supported for explicit mil translations + vec_bnqd_bnkd_bnqk = ( + [0, 1, 2, 3], + [0, 1, 4, 3], + [0, 1, 2, 4], + ) # equation == "bnqd,bnkd->bnqk" + vec_bhcq_bhck_bhqk = ( + [0, 1, 2, 3], + [0, 1, 2, 4], + [0, 1, 3, 4], + ) # equation == "bhcq,bhck->bhqk" + vec_abc_cd_abd = ([0, 1, 2], [2, 3], [0, 1, 3]) # equation == "abc,cd->abd" + vec_abc_cde_abde = ( + [0, 1, 2], + [2, 3, 4], + [0, 1, 3, 4], + ) # equation == "abc,cde->abde" + vec_btnh_bfnh_bnft = ( + [0, 1, 2, 3], + [0, 4, 2, 3], + [0, 2, 4, 1], + ) # equation == "btnh,bfnh->bnft" + vec_bnft_btnh_bfnh = ( + [0, 1, 2, 3], + [0, 3, 1, 4], + [0, 2, 1, 4], + ) # equation == "bnft,btnh->bfnh" + vec_abcd_cde_abe = ( + [0, 1, 2, 3], + [2, 3, 4], + [0, 1, 4], + ) # equation == "abcd,cde->abe" + vec_nchw_nwhu_nchu = ( + [0, 1, 2, 3], + [0, 3, 2, 4], + [0, 1, 2, 4], + ) # equation == "nchw,nwhu->nchu" + vec_chw_whu_chu = ([0, 1, 2], [2, 1, 3], [0, 1, 3]) # equation == "chw,whu->chu" + + # add the op(s) corresponding to the equation + if vec_bnqd_bnkd_bnqk in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bnqd_bnkd_bnqk: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=True, name=name) + elif vec_bhcq_bhck_bhqk in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bhcq_bhck_bhqk: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=True, transpose_y=False, name=name) + elif vec_abc_cd_abd in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_abc_cd_abd: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False, name=name) + elif vec_abc_cde_abde in [parsed_vectors, parsed_vectors_rev] and not is_dynamic: + if parsed_vectors_rev == vec_abc_cde_abde: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0] * a_var.shape[1], a_var.shape[2]]) + x_2 = 
mb.reshape(x=b_var, shape=[b_var.shape[0], b_var.shape[1] * b_var.shape[2]]) + x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False) + x = mb.reshape( + x=x, shape=[a_var.shape[0], a_var.shape[1], b_var.shape[1], b_var.shape[2]], name=name + ) + elif vec_btnh_bfnh_bnft in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_btnh_bfnh_bnft: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.transpose(x=a_var, perm=[0, 2, 1, 3]) + x_2 = mb.transpose(x=b_var, perm=[0, 2, 1, 3]) + x = mb.matmul(x=x_2, y=x_1, transpose_x=False, transpose_y=True, name=name) + elif vec_bnft_btnh_bfnh in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bnft_btnh_bfnh: + a_var, b_var = _swap(a_var, b_var) + b_var = mb.transpose(x=b_var, perm=[0, 2, 1, 3]) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False) + x = mb.transpose(x=x, perm=[0, 2, 1, 3], name=name) + elif vec_abcd_cde_abe in [parsed_vectors, parsed_vectors_rev] and not is_dynamic: + if parsed_vectors_rev == vec_abcd_cde_abe: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0], a_var.shape[1], a_var.shape[2] * a_var.shape[3]]) + x_2 = mb.reshape(x=b_var, shape=[b_var.shape[0] * b_var.shape[1], b_var.shape[2]]) + x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False, name=name) + elif vec_nchw_nwhu_nchu in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors == vec_nchw_nwhu_nchu: + x = mb.einsum(values=(a_var, b_var), equation=equation, name=name) + else: + x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name) + elif vec_chw_whu_chu in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors == vec_chw_whu_chu: + x = mb.einsum(values=(a_var, b_var), equation=equation, name=name) + else: + x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name) + else: + x = solve_generic_einsum(parsed_vectors, a_var, b_var, name) + + return x + + +def is_symbolic_dim_in_prog(prog): + ''' + Takes in a MIL program object, checks if any of the tensors in it contain a symbolic dimension. + Returns true if it does. 
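+    For example (editorial note), a program whose input placeholder has shape
+    (get_new_symbol(), 3) contains a symbolic dimension, so this returns True.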
+ + :param prog: coremltools.converters.mil.Program + :return: bool + ''' + def _does_block_contain_symbolic_shape(block): + for op in block.operations: + for b in op.blocks: + if _does_block_contain_symbolic_shape(b): + return True + for out in op.outputs: + if types.is_tensor(out.sym_type): + shape = out.sym_type.get_shape() + if any_symbolic(shape): + return True + elif types.is_scalar(out.sym_type) or types.is_str(out.sym_type): + if is_symbolic(out.val): + return True + elif types.is_list(out.sym_type): + if types.is_tensor(out.elem_type): + if any_symbolic(out.elem_type.get_shape()): + return True + else: + raise NotImplementedError("\'{}\' type in a list not handled".format(out.elem_type)) + else: + raise NotImplementedError("\'{}\' type is not handled".format(out.sym_type)) + return False + + for f in prog.functions.values(): + if _does_block_contain_symbolic_shape(f): + return True + return False + + +def get_output_names(outputs) -> Optional[List[str]]: + """ + :param: list[ct.TensorType/ct.ImageType] + :return: list[str] or None + """ + output_names = None + if outputs is not None: + assert all([isinstance(t, InputType) for t in outputs]), \ + "outputs must be a list of ct.ImageType or ct.TensorType" + output_names = [t.name for t in outputs] + if all([name is None for name in output_names]): + output_names = None + return output_names + + +def solve_diagonal_einsum(parsed_vectors, vars): + def solve_diagonal_einsum_one_step(parsed_vector, x): + for i in range(len(parsed_vector)): + for j in range(i + 1, len(parsed_vector)): + if parsed_vector[i] != parsed_vector[j]: + continue + + perm = list(range(len(parsed_vector))) + duplicated_indices = [j for j in range(len(parsed_vector)) if parsed_vector[j] == parsed_vector[i]] + for i, j in enumerate(duplicated_indices): + perm[i], perm[j] = perm[j], perm[i] + parsed_vector[i], parsed_vector[j] = parsed_vector[j], parsed_vector[i] + + dims = mb.shape(x=x) + dim_length = value_at(dims, duplicated_indices[0]) + + indices = mb.range_1d(end=dim_length, start=0, step=1) + indices = mb.stack(values=[indices] * len(duplicated_indices), axis=1) + x = mb.transpose(x=x, perm=perm) + x = mb.gather_nd(x=x, indices=indices) + ret_parsed_vector = [parsed_vector[0]] + parsed_vector[len(duplicated_indices):] + return ret_parsed_vector, x + + parsed_vectors = list(parsed_vectors) + for i in range(len(vars)): + while len(parsed_vectors[i]) != len(set(parsed_vectors[i])): + parsed_vector, var = solve_diagonal_einsum_one_step(parsed_vectors[i], vars[i]) + parsed_vectors[i] = parsed_vector + vars[i] = var + return tuple(parsed_vectors), vars + + +def solve_sum_einsum(parsed_vectors, vars): + """ + Apply reduce_sum for axes before binary einsum calculation if enable. + + e.g.: + input : "abce,acd->ae" + returns : "ace,ac->ae" + + In this example, since each of those axes is only used by one var and does not appear in the output, + axes `b` and `d` can be reduced before binary einsum. 
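+
+    Concretely (editorial sketch), the first operand is reduce_sum'ed over axis `b`
+    and the second over axis `d` up front, so the binary einsum that follows only
+    has to handle "ace,ac->ae".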
+ """ + + def solve_sum_einsum_one_step(src_axes, used_by_other_axes, x): + dst_axes = [] + for axis in src_axes: + if axis not in used_by_other_axes: + continue + dst_axes.append(axis) + summed_axis_indices = [i for i in range(len(src_axes)) if src_axes[i] not in dst_axes] + if summed_axis_indices: + x = mb.reduce_sum(x=x, axes=summed_axis_indices) + return dst_axes, x + + ret_parsed_vectors = [] + parsed_vectors = list(parsed_vectors) + for i, var in enumerate(vars): + used_by_other_axes = [] + for j, parsed_vector in enumerate(parsed_vectors): + if i != j: + used_by_other_axes += parsed_vector + dst_axes, var = solve_sum_einsum_one_step(parsed_vectors[i], used_by_other_axes, vars[i]) + ret_parsed_vectors.append(dst_axes) + vars[i] = var + ret_parsed_vectors.append(parsed_vectors[-1]) + return ret_parsed_vectors, vars + + +def solve_generic_einsum(parsed_vectors, a_var, b_var, name): + """ + :param parsed_vectors: list[list[int]] + :param a_var: + - var + - first input variable + :param b_var: + - var + - second input variable + :param name: + - str + - name to be assigned to the output var + + :return: + - var + - output var that contains the einsum result + """ + + def _get_perm(src_axes, dst_axes): + """ + :param src_axes: list[int] + :param dst_axes: list[int] + :return: list[int] + """ + return [src_axes.index(s) for s in dst_axes] + + def _concat_dims(dims, none_if_empty=False): + if len(dims) == 0: + if none_if_empty: + return None + else: + return 1 + return mb.concat(values=dims, axis=0) + + parsed_vectors, vars = solve_diagonal_einsum(parsed_vectors, [a_var, b_var]) + parsed_vectors, vars = solve_sum_einsum(parsed_vectors, vars) + a_var, b_var = vars + a_axes, b_axes, out_axes = parsed_vectors + + a_dims = mb.shape(x=a_var) + b_dims = mb.shape(x=b_var) + + batched_axes = [] + reduced_axes = [] + a_unique_axes = [] + b_unique_axes = [] + + batch_dims = [] + reduce_dims = [] + a_unique_dims = [] + b_unique_dims = [] + + for i, a_axis in enumerate(a_axes): + a_dim = value_at(a_dims, i) + if a_axis in b_axes: + if a_axis in out_axes: + batched_axes.append(a_axis) + batch_dims.append(a_dim) + else: + reduced_axes.append(a_axis) + reduce_dims.append(a_dim) + else: + a_unique_axes.append(a_axis) + a_unique_dims.append(a_dim) + concat_batch_dims = _concat_dims(batch_dims, True) + # if there is no dim to reduce, then add a dummy dim, + # so mb.matmul will reduce the dummy dim to achieve outer product + concat_reduce_dims = _concat_dims(reduce_dims) + # if there is no dim of `a` remains, then add a dummy dim for `a` as a matrix dim, + # otherwise mb.matmul may mistake the batch dim of `a` as the matrix dim + concat_a_unique_dims = _concat_dims(a_unique_dims) + + for i, b_axis in enumerate(b_axes): + b_dim = value_at(b_dims, i) + if b_axis not in a_axes: + b_unique_axes.append(b_axis) + b_unique_dims.append(b_dim) + # if there is no dim of `b` remains, then add a dummy dim for `b`, + # otherwise mb.matmul may mistake the batch dim of `b` as a matrix dim + concat_b_unique_dims = _concat_dims(b_unique_dims) + + a_transpose_axes = batched_axes + a_unique_axes + reduced_axes + a = mb.transpose(x=a_var, perm=_get_perm(a_axes, a_transpose_axes)) + a_reshape_dims = _concat_dims( + [mb.reduce_prod(x=x) for x in [concat_batch_dims, concat_a_unique_dims, concat_reduce_dims] if x is not None]) + a = mb.reshape(x=a, shape=a_reshape_dims) + + b_transpose_axes = batched_axes + reduced_axes + b_unique_axes + b = mb.transpose(x=b_var, perm=_get_perm(b_axes, b_transpose_axes)) + b_reshape_dims = 
_concat_dims( + [mb.reduce_prod(x=x) for x in [concat_batch_dims, concat_reduce_dims, concat_b_unique_dims] if x is not None]) + b = mb.reshape(x=b, shape=b_reshape_dims) + + ab = mb.matmul(x=a, y=b) + concat_batch_dims = _concat_dims(batch_dims, True) + concat_a_unique_dims = _concat_dims(a_unique_dims, True) + concat_b_unique_dims = _concat_dims(b_unique_dims, True) + ab_reshaped_dims = _concat_dims( + [ + x + for x in [concat_batch_dims, concat_a_unique_dims, concat_b_unique_dims] + if x is not None + ], + True, + ) + # Removes excessive dimensions for scalar output + if ab_reshaped_dims is None: + return mb.squeeze(x=ab, name=name) + # Reshape tensor output to specified output shape + else: + ab = mb.reshape(x=ab, shape=ab_reshaped_dims) + ab_reshaped_axes = batched_axes + a_unique_axes + b_unique_axes + ab = mb.transpose(x=ab, perm=_get_perm(ab_reshaped_axes, out_axes), name=name) + return ab diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py new file mode 100644 index 00000000..34ab79f0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .load import load diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py new file mode 100644 index 00000000..b1fe7e6a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.program import get_new_symbol + + +def get_proto_dim(dim): + if dim.WhichOneof("dimension") == "constant": + return dim.constant.size + else: + if not dim.unknown.variadic: + return get_new_symbol() + raise NotImplementedError("Variadic dimensions not yet implemented.") + + +def proto_to_types(valuetype): + """ + A helper function that maps the proto value type to PyMIL types. 
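+
+    Editorial summary of the mapping implemented below:
+        tensorType     -> types.tensor(dtype, shape); rank-0 tensors become scalars
+        listType       -> types.list(elem_type, length, dynamic_length=True)
+        dictionaryType -> types.dict(key_type, value_type)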
+ """ + if valuetype.WhichOneof("type") == "tensorType": + tensortype = valuetype.tensorType + dtype = types.proto_to_builtin_types[tensortype.dataType] + + if tensortype.rank < 0: + raise ValueError("Negative or Dynamic ranks not supported") + if tensortype.rank != len(tensortype.dimensions): + raise ValueError("Rank doesn't match the number of dimensions") + if tensortype.attributes != {}: + raise ValueError("Attributes on tensorType not supported") + + shape = [] + for i in range(tensortype.rank): + shape.append(get_proto_dim(tensortype.dimensions[i])) + + # For the zero rank tensor, we always convert it back to scalar in PyMIL first + if tensortype.rank == 0: + return dtype + + return types.tensor(dtype, shape) + + elif valuetype.WhichOneof("type") == "listType": + listtype = valuetype.listType + elem_type = proto_to_types(listtype.type) + + if listtype.length.unknown: + init_length = None + else: + init_length = listtype.length.constant.size + + # In the MIL proto, there is no such thing of "dynamic_length", hence we set it to True when + # converting back to PyMIL + return types.list(elem_type, init_length, dynamic_length=True) + + elif valuetype.WhichOneof("type") == "dictionaryType": + dicttype = valuetype.dictionaryType + keytype = proto_to_types(dicttype.keyType) + valuetype = proto_to_types(dicttype.valueType) + + return types.dict(keytype, valuetype) + else: + raise NotImplementedError("Types {} not yet implemented".format(valuetype.WhichOneof("type"))) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py new file mode 100644 index 00000000..1761e31b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py @@ -0,0 +1,429 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as _target +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import (Function, ListVar, Placeholder, + Program, TupleInputType, Var, + mil_list, types) +from coremltools.converters.mil.mil.block import curr_block +from coremltools.converters.mil.mil.ops.registry import \ + SSAOpRegistry as _SSAOpRegistry +from coremltools.proto import MIL_pb2 as pm +from coremltools.proto import Model_pb2 as ml + +from .helper import proto_to_types + +try: + from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader +except: + BlobReader = None + + +class TranscriptionContext: + """ + Holds shared variables needed for transcription. + """ + + def __init__(self, weights_dir=""): + self.name_to_var = {} # mapping from name -> var object + self.blob_reader_from_filename = ( + {} + ) # mapping from filename -> BlobReader object + self.weights_dir = weights_dir + + def register_var_with_name(self, name, var): + var.name = name + if name in self.name_to_var: + # Overriding allow us to translate control flow blocks + msg = "Var %s is added again. 
Overriding previous value" + logger.info(msg % name) + self.name_to_var[name] = var + + def get_var_from_name(self, name): + if name not in self.name_to_var: + raise KeyError("Var {} not found".format(name)) + return self.name_to_var[name] + + +def _load_tensorvalue(tensorvalue_spec): + if not isinstance(tensorvalue_spec, pm.TensorValue): + raise TypeError("Invalid TensorValue spec object") + + if tensorvalue_spec.WhichOneof("value") == "floats": + return tensorvalue_spec.floats.values + elif tensorvalue_spec.WhichOneof("value") == "ints": + return tensorvalue_spec.ints.values + elif tensorvalue_spec.WhichOneof("value") == "bools": + return tensorvalue_spec.bools.values + elif tensorvalue_spec.WhichOneof("value") == "strings": + return tensorvalue_spec.strings.values + elif tensorvalue_spec.WhichOneof("value") == "longInts": + return tensorvalue_spec.longInts.values + elif tensorvalue_spec.WhichOneof("value") == "doubles": + return tensorvalue_spec.doubles.values + elif tensorvalue_spec.WhichOneof("value") == "bytes": + return tensorvalue_spec.bytes.values + else: + raise ValueError("Invalid dtype for TensorValue type") + + +def _load_immediate_value(immediatevalue_spec): + if not isinstance(immediatevalue_spec, pm.Value.ImmediateValue): + raise TypeError("Invalid ImmedidateValue spec object") + + if immediatevalue_spec.WhichOneof("value") == "tensor": + return _load_tensorvalue(immediatevalue_spec.tensor) + elif immediatevalue_spec.WhichOneof("value") == "list": + return immediatevalue_spec.list.values + else: + raise NotImplementedError( + "Immediate value type not supported yet." + ) + + +def _load_file_value(context, filevalue_spec, dtype): + if BlobReader is None: + raise RuntimeError("BlobReader not loaded") + if not isinstance(filevalue_spec, pm.Value.BlobFileValue): + raise TypeError("Invalid BlobFileValue spec object") + + filename = os.path.join(context.weights_dir, filevalue_spec.fileName.split("/")[-1]) + offset = filevalue_spec.offset + + if filename in context.blob_reader_from_filename: + blob_reader = context.blob_reader_from_filename[filename] + else: + blob_reader = BlobReader(filename) + context.blob_reader_from_filename[filename] = blob_reader + + if dtype == types.uint8: + np_value = np.array(blob_reader.read_uint8_data(offset), np.uint8) + elif dtype == types.int8: + np_value = np.array(blob_reader.read_int8_data(offset), np.int8) + elif dtype == types.fp16: + np_value_uint16 = np.array(blob_reader.read_fp16_data(offset), np.uint16) + np_value = np.frombuffer(np_value_uint16.tobytes(), np.float16) + elif dtype == types.fp32: + np_value = np.array(blob_reader.read_float_data(offset), np.float32) + else: + raise ValueError("Invalid dtype for blob file value type") + + return np_value + + +def _load_value(context, value_spec): + if not isinstance(value_spec, pm.Value): + raise TypeError("Invalid Value spec object") + + if value_spec.docString: + raise ValueError("Docstring would get lost in the process.") + + if value_spec.type.WhichOneof("type") == "tensorType": + valuetype = proto_to_types(value_spec.type) + + is_tensor = types.is_tensor(valuetype) + + dtype = valuetype if not is_tensor else valuetype.get_primitive() + shape = () if not is_tensor else valuetype.get_shape() + + if value_spec.WhichOneof("value") == "immediateValue": + value = _load_immediate_value(value_spec.immediateValue) + else: + value = _load_file_value(context, value_spec.blobFileValue, dtype) + + if dtype in (types.fp16, types.int8, types.uint8, types.uint32): + value = np.frombuffer(value, 
types.nptype_from_builtin(dtype)).reshape(
+                shape
+            )
+        elif dtype == types.str and shape == ():
+            value = str(value[0])
+        elif dtype in (types.fp32, types.str, types.bool, types.int32, types.int64):
+            value = (
+                np.array(value).astype(types.nptype_from_builtin(dtype)).reshape(shape)
+            )
+        else:
+            raise ValueError("Invalid dtype for tensor value")
+    else:
+        raise NotImplementedError("Only values of tensorType are implemented so far")
+
+    if not is_tensor and not isinstance(value, str):
+        value = types.nptype_from_builtin(dtype)(value.item())
+
+    return value
+
+
+def _create_var_from_spec(spec):
+    """
+    This helper function creates a PyMIL Var/ListVar from the proto spec.
+    Mainly used for the construction of the control flow ops.
+    """
+    assert isinstance(spec, pm.NamedValueType)
+    sym_type = proto_to_types(spec.type)
+    name = spec.name
+    if types.is_list(sym_type):
+        var = ListVar(
+            name,
+            elem_type=sym_type.T[0],
+            init_length=sym_type.T[1],
+            dynamic_length=sym_type.T[2])
+    else:
+        var = Var(name, sym_type, None, op=None, op_output_idx=None)
+    return var
+
+
+def _set_outer_op_for_nested_blocks(blocks, op):
+    """
+    A utility function that sets the outer_op of the blocks for control flow ops.
+    """
+    for block in blocks:
+        block.outer_op = op
+
+
+def _create_nested_blocks(context, op_spec):
+    """
+    A utility function that creates nested blocks for control flow ops.
+    """
+    if not op_spec.blocks:
+        return []
+
+    blocks = []
+
+    for block_spec in op_spec.blocks:
+        input_vars = [_create_var_from_spec(input) for input in block_spec.inputs]
+
+        # add block input vars to the context
+        for v in input_vars:
+            context.register_var_with_name(v.name, v)
+
+        # In PyMIL, the outer_op of a block can only be None if the block is a Function.
+        # As a result, we use a dummy outer_op here for block creation, and set it to
+        # the legitimate op later on in _set_outer_op_for_nested_blocks.
+        dummy = mb.const(val=0.)
+        with Block(block_inputs=input_vars, outer_op=dummy._op,
+                   name=Block._get_new_name()) as block:
+            _load_block(context, block_spec)
+
+        blocks.append(block)
+
+    return blocks
+
+
+def _set_inputs_for_control_flow_op(inputs, blocks, op_type):
+    """
+    A utility function that sets the dummy functional inputs and block inputs for
+    control flow ops.
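+
+    Editorial note: the original Python callables (_cond/_body for while_loop,
+    _true_fn/_false_fn for cond) cannot be recovered from the proto, so no-op
+    placeholders are supplied and the already-built blocks are passed through
+    via the "_existing_blocks" input.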
+ """ + if op_type == "while_loop": + def _dummy_cond(*loop_vars): + return None + + def _dummy_body(*loop_vars): + return None + + inputs["_existing_blocks"] = blocks + inputs["_cond"] = _dummy_cond + inputs["_body"] = _dummy_body + + elif op_type == "cond": + def _dummy_true_fn(*loop_vars): + return None + def _dummy_false_fn(*loop_vars): + return None + + inputs["_existing_blocks"] = blocks + inputs["_true_fn"] = _dummy_true_fn + inputs["_false_fn"] = _dummy_false_fn + + +def _load_operation(context, op_spec): + if not isinstance(op_spec, pm.Operation): + raise TypeError("Invalid Operation spec object") + + op_type = op_spec.type + if op_type == "const" or op_type.startswith("constexpr_"): + if op_spec.blocks: + raise ValueError("const / constexpr operation can't have any block") + if op_spec.inputs: + raise ValueError("const / constexpr operation can't have any input") + + inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()} + pymil_var = getattr(mb, op_type)(**inputs) + context.register_var_with_name(op_spec.outputs[0].name, pymil_var) + + else: + if op_type == "custom_layer": + raise NotImplementedError( + "Loading Custom Layer operation not yet implemented" + ) + + if op_spec.attributes: + raise ValueError("Attributes on operation not supported") + + # The conversion steps of an operation proto -> PyMIL operation are as following: + + # (i) Convert the input arguments: + # In most of the cases, the input variable is already created beforehand, hence we can + # directly access and get them through the TranscriptionContext. + # There are cases, though, the inputs are literal value. This could happens in the classify op spec. + # For that case, we directly create a constant variable. + + # (ii) Create nested blocks for control flow operations: + # The Python functinoal input arguments for control flow ops cannot be recovered from milproto -> pymil conversion, + # for instance, the _body, _cond for mb.while_loop and _true_fn, _false_fn for mb.cond are not invertible + # Hence, here we directly create the nested blocks from the proto, and set them to mb.while_loop.blocks / mb.cond.blocks. + # Note that, when creating a block, PyMIL required an outer_op, which should be the control flow operation itself. However, + # in this approach we take, the outer_op hasn't been created at the time when the blocks produced. Here, we make a "dummy outer_op", + # which could pass the check in PyMIL, also it could provide enough information (such as visible variables in the blocks etc.) + # for the creation of the block. + + # (iii) Create PyMIL operation using inputs / blocks + # Note that for the control flow cases, we create dummy functional inputs, and use the exisiting block to create the op. 
+ + # (iv) Set the outer_op for control flow + # Once the operation is created, we replace the dummy outer_op with the legit one, to make it a valid PyMIL program + + inputs = {} + for param_name, argument in op_spec.inputs.items(): + vars = [] + for binding in argument.arguments: + binding_type = binding.WhichOneof("binding") + if binding_type == "name": + vars.append(context.get_var_from_name(binding.name)) + elif binding_type == "value": + # We only support the list value for now (for the classifier use case) + value_spec = binding.value + assert value_spec.WhichOneof("value") == "immediateValue" + assert value_spec.immediateValue.WhichOneof("value") == "list" + list_value = _load_immediate_value(value_spec.immediateValue) + values = [] + for value_spec in list_value: + values.append(_load_value(context, value_spec)) + var = mb.const(val=mil_list(values)) + vars.append(var) + else: + raise NotImplementedError("Binding {} not yet implemented".format(binding_type)) + op_cls = _SSAOpRegistry._get_core_op_cls(op_type) + if len(vars) == 1 and not isinstance( + op_cls.input_spec.input_types[param_name], TupleInputType + ): + inputs[param_name] = vars[0] + else: + inputs[param_name] = vars + + blocks = _create_nested_blocks(context, op_spec) + _set_inputs_for_control_flow_op(inputs, blocks, op_type) + + output_var = getattr(mb, op_type)(**inputs) + if not isinstance(output_var, (tuple, list)): + output_var = [output_var] + + if len(output_var) != len(op_spec.outputs): + raise AssertionError( + "Mismatch between number of outputs in operation specification vs PyMIL outputs" + ) + + for spec, var in zip(op_spec.outputs, output_var): + context.register_var_with_name(spec.name, var) + + pymil_type = var.sym_type + proto_type = proto_to_types(spec.type) + if not types.is_compatible_type(pymil_type, proto_type): + # We allow a corner case where the pymil has an 0 rank tensor and the spec produces a scalar + if types.is_tensor(pymil_type) and types.is_scalar(proto_type): + if pymil_type.get_primitive() == proto_type: + continue + raise AssertionError( + "Mismatch between var types in specification vs PyMIL" + ) + + _set_outer_op_for_nested_blocks(blocks, output_var[0].op) + + +def _load_block(context, block_spec): + if not isinstance(block_spec, pm.Block): + raise TypeError("Invalid Block spec object") + + if block_spec.attributes: + raise ValueError("Attributes on block not supported") + + block_outputs = block_spec.outputs + output_vars = [] + for op_spec in block_spec.operations: + _load_operation(context, op_spec) + + for proto_output_name in block_outputs: + output_vars.append(context.get_var_from_name(proto_output_name)) + + pymil_block = curr_block() + pymil_block.set_outputs(output_vars) + return pymil_block + + +def _load_function(context, func_spec, spec_version): + if not isinstance(func_spec, pm.Function): + raise TypeError("Invalid Function spec object") + + if func_spec.attributes: + raise ValueError("Attributes on functions not supported") + + func_inputs = {} + for named_value_type in func_spec.inputs: + name = named_value_type.name + valuetype = proto_to_types(named_value_type.type) + + if not types.is_tensor(valuetype): + raise ValueError("Functions inputs can only be tensors") + func_inputs[name] = Placeholder( + sym_shape=valuetype.get_shape(), dtype=valuetype.get_primitive(), name=name + ) + context.register_var_with_name(name, func_inputs[name].outputs[0]) + + opset = func_spec.opset + if opset not in func_spec.block_specializations: + raise ValueError("Missing block 
specialization for opset {}".format(opset)) + + with Function(func_inputs, opset_version=_target(spec_version)) as pymil_func: + _load_block(context, func_spec.block_specializations[opset]) + + return pymil_func + + +def load(model_spec, specification_version, file_weights_dir="", **kwargs): + if not isinstance(model_spec, ml.Model): + raise TypeError("Invalid Model sepc object") + + if specification_version < model_spec.specificationVersion: + raise ValueError("specification_version must be greater or equal to the input model spec version") + + if model_spec.WhichOneof("Type") != "mlProgram": + raise ValueError("Only MIL proto based mlmodels can be loaded") + + program_spec = model_spec.mlProgram + if not isinstance(program_spec, pm.Program): + raise TypeError("Invalid Program spec object") + + if program_spec.docString: + raise NotImplementedError("Docstring would be lost in the process") + + if program_spec.version != 1: + raise ValueError("Invalid program version") + + context = TranscriptionContext(file_weights_dir) + pymil_program = Program() + for func_name, func_spec in program_spec.functions.items(): + pymil_program.add_function( + func_name, _load_function(context, func_spec, specification_version) + ) + + for attr_name, attr_spec in program_spec.attributes.items(): + if attr_name not in ("buildInfo",): + raise ValueError("Invalid attribute for program") + + return pymil_program diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py new file mode 100644 index 00000000..cb45d13b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py @@ -0,0 +1,199 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +import coremltools as ct +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TF_2, _HAS_TORCH +from coremltools.converters._converters_entry import _get_metadata_from_mlmodel +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.converter import mil_convert +from coremltools.converters.mil.frontend.milproto.load import \ + load as milproto_to_pymil +from coremltools.converters.mil.frontend.tensorflow.test.test_ops import \ + TestTensorArray +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + run_compare_tf +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + compare_backend +from coremltools.converters.mil.testing_utils import get_op_types_in_program + +if _HAS_TORCH: + import torch + from coremltools.converters.mil.frontend.torch.test.test_torch_ops import \ + TestScriptedModels + + +def get_pymil_prog_from_mlmodel(mlmodel): + model_spec = mlmodel.get_spec() + return milproto_to_pymil( + model_spec=model_spec, + specification_version=model_spec.specificationVersion, + file_weights_dir=mlmodel.weights_dir, + ) + +def get_roundtrip_mlmodel(mlmodel): + """ + This utility function does the following roundtrip conversion: + + mlprogram proto -> pymil program -> mlprogram model + """ + pymil_prog = get_pymil_prog_from_mlmodel(mlmodel) + + # convert the pymil program to mlmodel + model_spec = mlmodel.get_spec() + roundtripped_mlmodel = mil_convert( + pymil_prog, + convert_to="mlprogram", + convert_from="milinternal", + compute_units=mlmodel.compute_unit, + model_description=model_spec.description, + specification_version=model_spec.specificationVersion, + ) + + # set MIL program attributes + build_info = _get_metadata_from_mlmodel(mlmodel) + roundtripped_mlmodel._set_build_info_mil_attributes(build_info) + return roundtripped_mlmodel + +def roundtrip_and_compare_mlmodel(mlmodel, input_dict): + roundtripped_mlmodel = get_roundtrip_mlmodel(mlmodel) + expected_outputs = mlmodel.predict(input_dict) + compare_backend(roundtripped_mlmodel, input_dict, expected_outputs) + + +class TestLoadAPIUsage: + def test_mil_proto_to_pymil(self): + # Define a PyMIL program + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 100, 100)), ]) + def prog(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is optional. 
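+            # (Editorial note: the ops below are chosen so the round trip exercises several
+            # op types; the test then compares get_op_types_in_program before and after.)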
+ x = mb.relu(x=x, name='relu') + x = mb.conv(x=x, weight=np.random.rand(10, 3, 2, 2), name="conv") + x = mb.transpose(x=x, perm=[0, 3, 1, 2], name='transpose') + x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name='reduce') + x = mb.log(x=x, name='log') + return x + + # Convert it to MIL proto backed MLModel + mlmodel = ct.convert(prog, convert_to="mlprogram") + + # Load MLModel back to PyMIL + loaded_pymil_prog = get_pymil_prog_from_mlmodel(mlmodel) + + # Assert that loaded PyMIL prog matches with defined PyMIL prog + if get_op_types_in_program(loaded_pymil_prog) != get_op_types_in_program(prog): + raise AssertionError("Mismatch between defined PyMIL prog and loaded PyMIL prog") + + def test_mil_proto_to_pymil_with_version_handling(self): + # This test makes sure the correct version of the op is picked up during mil_proto -> pymil conversion + + # iOS15 version program with iOS13 version topk + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS15) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + + iOS15_mlmodel = ct.convert(prog, convert_to="mlprogram", minimum_deployment_target=ct.target.iOS15) + iOS15_pymil_prog = get_pymil_prog_from_mlmodel(iOS15_mlmodel) + topk_op = iOS15_pymil_prog.functions["main"].find_ops(op_type="topk")[0] + assert not hasattr(topk_op, "sort") + + # iOS16 version program with iOS16 version topk + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS16) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + + iOS16_mlmodel = ct.convert(prog, convert_to="mlprogram", minimum_deployment_target=ct.target.iOS16) + iOS16_pymil_prog = get_pymil_prog_from_mlmodel(iOS16_mlmodel) + topk_op = iOS16_pymil_prog.functions["main"].find_ops(op_type="topk")[0] + assert hasattr(topk_op, "sort") + +@pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+") +class TestE2ENumericalCorrectness: + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_elu(self): + inputs = [ct.TensorType(name="data", shape=(2, 3, 1))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace(torch.nn.ELU(inplace=False), input_data) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_linear(self): + inputs = [ct.TensorType(name="data", shape=(10, 2))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace( + torch.nn.Linear(in_features=2, out_features=3, bias=True), input_data + ) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_conv(self): + inputs = [ct.TensorType(name="data", shape=(5, 10, 4, 4))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace( + torch.nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4), input_data + ) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + 
compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_while_loop(self): + model = TestScriptedModels.get_while_loop_model() + model_spec = torch.jit.script(model) + mlmodel = ct.convert(model_spec, + inputs=[ct.TensorType(name="data", shape=model.input_size, dtype=np.float32)], + convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY + ) + input_values = {"data": np.array([10.])} + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_cond(self): + model = TestScriptedModels.get_cond_model() + model_spec = torch.jit.script(model) + mlmodel = ct.convert(model_spec, + inputs=[ct.TensorType(name="data", shape=(1,), dtype=np.float32)], + convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY + ) + roundtrip_and_compare_mlmodel(mlmodel, {"data": np.array([1.])}) + roundtrip_and_compare_mlmodel(mlmodel, {"data": np.array([11.])}) + + @pytest.mark.skipif(_HAS_TF_2, reason="Fix and re-enable this test: rdar://76293949 (TF2 unit test InvalidArgumentError)") + def test_list(self): + model, inputs, outputs = TestTensorArray.get_dynamic_elem_shape_model() + input_values = [np.random.rand(2, 3)] + input_dict = dict(zip(inputs, input_values)) + _, mlmodel, _, _ = run_compare_tf( + model, + input_dict, + outputs, + compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=("mlprogram", "fp16") + ) + roundtrip_and_compare_mlmodel(mlmodel, {"Placeholder": input_values[0]}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py new file mode 100644 index 00000000..92a4ecf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import logging +# suppress TensorFlow stdout prints +import os + +from coremltools._deps import _HAS_TF + +if os.getenv("TF_SUPPRESS_LOGS", "1") == "1": + os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # FATAL + logging.getLogger("tensorflow").setLevel(logging.FATAL) + +register_tf_op = None + +if _HAS_TF: + # Importing these causes them to register their ops + from . import ops + from .dialect_ops import (TfLSTMBase, tf_lstm_block, tf_lstm_block_cell, + tf_make_list) + from .tf_op_registry import register_tf_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py new file mode 100644 index 00000000..81d2f72e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py @@ -0,0 +1,356 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +def connect_edge(g, source, dest): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + source.outputs.append(dest.name) + dest.inputs.append(source.name) + + +def connect_edge_at_index(g, source, dest, idx): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + source.outputs.insert(idx, dest.name) + dest.inputs.insert(idx, source.name) + + +def replace_source(g, source, dest, new_source): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + if isinstance(new_source, str): + new_source = g[new_source] + dest_inputs = [] + for inp in dest.inputs: + if inp == source.name: + dest_inputs.append(new_source.name) + g[new_source.name].outputs.append(dest.name) + else: + dest_inputs.append(inp) + dest.inputs = dest_inputs + source.outputs = [i for i in g[source.name].outputs if i != dest.name] + + +def replace_control_source(g, source, dest, new_source): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + if isinstance(new_source, str): + new_source = g[new_source] + dest_inputs = [] + for inp in dest.control_inputs: + if inp == source.name: + dest_inputs.append(new_source.name) + g[new_source.name].control_outputs.append(dest.name) + else: + dest_inputs.append(inp) + dest.control_inputs = dest_inputs + source.control_outputs = [i for i in g[source.name].outputs if i != dest.name] + + +def replace_dest(g, source, dest, new_dest): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + if isinstance(new_dest, str): + new_dest = g[new_dest] + for idx, d in enumerate(source.outputs): + if d == dest.name: + source.outputs[idx] = new_dest.name + new_dest.inputs = new_dest.inputs[:] + [source.name] + + dest.inputs = [i for i in dest.inputs if i != source.name] + + +def replace_control_dest(g, source, dest, new_dest): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + if isinstance(new_dest, str): + new_dest = g[new_dest] + for idx, d in enumerate(source.control_outputs): + if d == dest.name: + source.control_outputs[idx] = new_dest.name + new_dest.control_inputs = new_dest.control_inputs[:] + [source.name] + + dest.control_inputs = [i for i in dest.control_inputs if i != source.name] + + +def connect_dests(g, source, dests): + for i in dests: + connect_edge(g, source, i) + + +def connect_sources(g, sources, dest): + for i in sources: + connect_edge(g, i, dest) + + +def disconnect_edge(g, source, dest): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + source.outputs = [i for i in source.outputs if i != dest.name] + + dest.inputs = [i for i in dest.inputs if i != source.name] + + +def disconnect_control_edge(g, source, dest): + if isinstance(source, str): + source = g[source] + if isinstance(dest, str): + dest = g[dest] + source.control_outputs = [i for i in source.control_outputs if i != dest.name] + + dest.control_inputs = [i for i in dest.control_inputs if i != source.name] + + +def disconnect_vertex_outs(g, source): + if isinstance(source, str): + source = g[source] + for out in source.outputs: + g[out].inputs = [i for i in g[out].inputs if i != source.name] + source.outputs = [] + + +def disconnect_vertex_ins(g, dest): + 
if isinstance(dest, str): + dest = g[dest] + for inp in dest.inputs: + if isinstance(inp, str): + innode = g[inp] + else: + innode = inp + innode.outputs = [i for i in innode.outputs if i != dest.name] + dest.inputs = [] + + +def disconnect_vertex_control_ins(g, dest): + if isinstance(dest, str): + dest = g[dest] + for inp in dest.control_inputs: + if isinstance(inp, str): + innode = g[inp] + else: + innode = inp + innode.control_outputs = [i for i in innode.control_outputs if i != dest.name] + dest.control_inputs = [] + + +def disconnect_vertex_control_outs(g, source): + if isinstance(source, str): + source = g[source] + for out in source.control_outputs: + g[out].control_inputs = [i for i in g[out].control_inputs if i != source.name] + source.control_outputs = [] + + +def delete_node(g, node): + if not isinstance(node, str): + node = node.name + disconnect_vertex_ins(g, node) + disconnect_vertex_outs(g, node) + disconnect_vertex_control_ins(g, node) + disconnect_vertex_control_outs(g, node) + del g[node] + + +def replace_node(g, original_node, new_node): + if isinstance(new_node, str): + new_node = g[new_node] + if not isinstance(original_node, str): + original_node = original_node.name + + for o in list(g[original_node].control_outputs): + replace_control_source(g, original_node, o, new_node) + for o in list(g[original_node].outputs): + replace_source(g, original_node, o, new_node) + for i in list(g[original_node].control_inputs): + replace_control_dest(g, i, original_node, new_node) + for i in list(g[original_node].inputs): + replace_dest(g, i, original_node, new_node) + + +def fill_outputs(gd): + """ + Fills the output lists of of a graph of ParsedNode + + Takes a graph in "dict{str, ParsedNode}" form, and returns a new graph. + """ + # fill outputs + for k, v in gd.items(): + for i in v.inputs: + gd[i].outputs.append(v.name) + for i in v.control_inputs: + gd[i].control_outputs.append(v.name) + get_tuple_ops = ["Split", "SplitV", "LSTMBlock", "NonMaxSuppressionV5"] + for k, v in gd.items(): + if v.op in get_tuple_ops: + outputs = [[out, int(gd[out].attr["index"])] for out in v.outputs] + outputs.sort(key=lambda x: x[1]) + gd[k].outputs = [out for [out, _] in outputs] + + return gd + + +def check_connections(gd): + """ + Given a graph, checks that all + - inputs/outputs are symmetric + - control_inputs/control_outputs are symmetric + - The graph does not reference vertices outside of the graph + + Takes a graph in "dict{str, ParsedNode}" form. Does not return, + asserts false on failure. + """ + # check that inputs and outputs line up + for k, v in gd.items(): + for i in v.inputs: + if isinstance(i, str): + assert k in gd[i].outputs + else: + assert k in gd[i.name].outputs + for i in v.outputs: + inputs = [ + inp if isinstance(inp, str) else inp.name + for inp in gd[i].inputs + ] + assert k in inputs + for i in v.control_inputs: + if isinstance(i, str): + assert k in gd[i].control_outputs + else: + assert k in gd[i.name].control_outputs + for i in v.control_outputs: + control_inputs = [ + inp if isinstance(inp, str) else inp.name + for inp in gd[i].control_inputs + ] + assert k in control_inputs + + +def const_determined_nodes(gd, assume_variable_nodes=None): + """ + Given a graph, extract all nodes that only depends on const nodes. + + # TODO: extract nodes that depends on the "const part" of placeholders. 
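+
+    Example (hypothetical two-node graph): if gd contains a "Const" node
+    "w" and an "Add" node "y" whose inputs are all "w", then
+    const_determined_nodes(gd) returns ["w", "y"], while passing
+    assume_variable_nodes=["y"] returns only ["w"].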
+ """ + if assume_variable_nodes is None: + assume_variable_nodes = [] + vis = {} + + def visit(node): + # make sure node is a ParsedNode + if isinstance(node, str): + node = gd[node] + if node.name in vis: + return + + if "Const" in node.op: + vis[node.name] = True + elif "Variable" in node.op: + vis[node.name] = False + elif "Placeholder" in node.op: + vis[node.name] = False + # TF1 uses TensorArray* while TF2 uses TensorList* ops + elif "TensorArray" in node.op or "TensorList" in node.op: + vis[node.name] = False + elif "function" in node.op: + vis[node.name] = False + elif "global" in node.op: + vis[node.name] = False + elif "FakeQuant" in node.op: + vis[node.name] = False + elif node.name in assume_variable_nodes: + vis[node.name] = False + else: + ret = True + vis[node.name] = False + for innode in node.inputs: + if isinstance(innode, str): + inname = innode + else: + inname = innode.name + if inname not in vis: + visit(innode) + if not vis[inname]: + ret = False + break + vis[node.name] = ret + + for k, v in gd.items(): + if k in vis: + continue + visit(k) + + ret = [] + for k, v in vis.items(): + if v: + ret.append(k) + return ret + + +def topsort(graph): + if len(graph) == 0: + return [] + inedge_count = {k: len(v.inputs) + len(v.control_inputs) for k, v in graph.items()} + ret = [] + curboundary = [k for k, v in inedge_count.items() if v == 0] + nextboundary = [] + if len(curboundary) == 0: + raise ValueError("Graph is not a DAG!") + + while len(curboundary) > 0: + ret.extend(curboundary) + for b in curboundary: + for o in graph[b].outputs + graph[b].control_outputs: + inedge_count[o] -= 1 + if inedge_count[o] == 0: + nextboundary.append(o) + curboundary = nextboundary + nextboundary = [] + if len(ret) != len(graph): + raise ValueError("Graph is not a DAG!") + return ret + + +def simple_topsort(inputs): + if len(inputs) == 0: + return [] + outputs = {k: [] for k in inputs} + for k in inputs: + for o in inputs[k]: + outputs[o].append(k) + + inedge_count = {k: len(v) for k, v in inputs.items()} + ret = [] + curboundary = [k for k, v in inedge_count.items() if v == 0] + nextboundary = [] + if len(curboundary) == 0: + raise ValueError("Graph is not a DAG!") + + while len(curboundary) > 0: + ret.extend(curboundary) + for b in curboundary: + for o in outputs[b]: + inedge_count[o] -= 1 + if inedge_count[o] == 0: + nextboundary.append(o) + curboundary = nextboundary + nextboundary = [] + if len(ret) != len(inputs): + raise ValueError("Graph is not a DAG!") + return ret diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py new file mode 100644 index 00000000..5e83f867 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py @@ -0,0 +1,211 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from collections import defaultdict
+
+from tqdm import tqdm as _tqdm
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.types.symbolic import (any_variadic,
+                                                           is_symbolic)
+from coremltools.converters.mil.mil.var import ListVar
+
+from .basic_graph_ops import topsort
+from .tf_op_registry import _TF_OPS_REGISTRY
+
+
+def compatible_shapes(tf_shape, inf_shape):
+    def compare_elem(dt, ds):
+        if dt is None or dt < 0:
+            return True
+        elif dt == ds:
+            return True
+        elif is_symbolic(ds):
+            if is_symbolic(dt) and dt != ds:
+                logger.warning("Symbolic dim {} and {}".format(ds, dt) +
+                               " assumed to be equal")
+            return True
+        else:
+            return False
+
+    if tf_shape is None or any_variadic(inf_shape):
+        return True
+    else:
+        return all(compare_elem(dt, ds) for dt, ds in zip(tf_shape, inf_shape))
+
+
+def check_output_shapes(x, node):
+    """
+    x: list[Var] or tuple[Var]
+    node: ParsedTFNode
+    """
+    if isinstance(x, ListVar):
+        # No check on list.
+        return
+    if not isinstance(x, (list, tuple)):
+        x = [x]
+    tf_shapes = node.attr.get("_output_shapes", None)
+    if tf_shapes is None:
+        return
+    inf_shapes = []
+    for y in x:
+        if y is None:
+            msg = "TF convert returns None type in TF node {}"
+            raise TypeError(msg.format(node.name))
+        if types.is_tensor(y.sym_type):
+            inf_shapes.append(list(y.shape))
+        elif types.is_scalar(y.sym_type):
+            inf_shapes.append([])
+        else:
+            msg = "Output type {} not understood"
+            raise ValueError(msg.format(y))
+
+    for t, s in zip(tf_shapes, inf_shapes):
+        if not compatible_shapes(t, s):
+            msg = (
+                "Op {} ({}) type inference ({}) and TF output shape "
+                "({}) mismatch"
+            )
+            raise ValueError(msg.format(node.name, node.op, s, t))
+
+
+def connect_global_initializer(graph):
+    # In TF1, variable initialization (from a frozen graph) is done by a
+    # DAG in the main function that is disconnected from the rest of the main
+    # function. For example:
+    #
+    # Initialization DAG (disconnected from Main DAG):
+    #   Const -> set_global(variable='v1')
+    #
+    # Main DAG:
+    #   Placeholder ---
+    #                 |
+    #   get_global(variable='v1') ----> some_output
+    #
+    # (Note that in this example there's no loop or other function.)
+    #
+    # If the variable does not cross a block boundary, we can always represent
+    # `get_global` by the input to `set_global`, which may or may not be
+    # Const, following the control dependency.
+    #
+    # Note that this is incorrect if a global variable crosses, say, a
+    # while_loop block boundary, which needs a more complex resource inference
+    # to support and is not supported in this function.
+    #
+    # Due to the lack of a control dependency between the two DAGs, we could be
+    # converting `set_global` after `get_global`, which makes it impossible to
+    # perform eager type inference, as type information (e.g., tensor shape)
+    # is only provided by `set_global` (whether setting it to a const or a
+    # non-const).
+    #
+    # Here we remedy the simpler case: when `set_global` takes in a Const,
+    # we assume it is initialization and thus must
+    # run before get_global, i.e. all get_global(variable='v1') must be a
+    # control_output of set_global(variable='v1') where set_global's input is
+    # Const (with control_outputs and control_inputs set symmetrically).
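+    #
+    # Schematically (hypothetical node names), the pass turns
+    #
+    #   Const('c') -> set_global(variable='v1')     (initializer DAG)
+    #   get_global(variable='v1') -> some_output    (main DAG)
+    #
+    # into the same graph plus the control edge
+    # set_global -> get_global, so that topsort emits set_global first.
+    #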
Note that multiple + # `get_global(variable='v1')` might have dependences among themselves, but + # they should all take the constant `set_global(variable='v1')` as control + # dependency. + + # Phase 1: Collect get_global nodes for each variable. + # variable name to list[ParsedTFNode] + var_to_get_global_nodes = defaultdict(list) + for node in graph.values(): + if node.op == "get_global": + variable_name = node.attr["variable"] + var_to_get_global_nodes[variable_name].append(node) + + # Phase 2: Find set_global with compile time values + for node_name, node in graph.items(): + if node.op != "set_global": + continue + input_name = node.inputs[0] + input_node = graph[input_name] + if input_node.op != "Const": + continue + variable_name = node.attr["variable"] + for get_node in var_to_get_global_nodes[variable_name]: + logger.info( + "add {} as control inputs of {}".format(node_name, get_node.name) + ) + get_node.control_inputs.append(node_name) + node.control_outputs.append(get_node.name) + + +def convert_graph(context, graph, outputs=None): + """ + Construct Core ML ops corresponding to `graph`. + + Inputs: + + - context (TranscriptContext) + + - graph (dict of str -> ParsedTFNode): op name --> ParsedTFNode + + - outputs (list[str]): List of output names. If outputs is None, the last + node graph (after topsort) must have op type return. + + Returns: + + list[Var]: the output Vars of the constructed Block. + """ + connect_global_initializer(graph) + nodes = topsort(graph) + + if outputs is None: + # infer outputs from return + last_node = graph[nodes[-1]] + if last_node.op != "return": + msg = "Expect the last node in graph to be 'return'; Got {}" + raise ValueError(msg.format(last_node.op)) + second_last_node = graph[last_node.inputs[0]] + if second_last_node.op == "make_tuple": + outputs = second_last_node.inputs + else: + # single output function + outputs = second_last_node.name + + # Translate the non-placeholder ops. + num_nodes = len(nodes) + for i, node_name in enumerate( + _tqdm(nodes, desc="Converting TF Frontend ==> MIL Ops", unit=" ops") + ): + node = graph[node_name] + if node.op == "return": + continue + logger.info( + "[{}/{}] Converting {} op '{}'".format(i + 1, num_nodes, node.op, node.name) + ) + + if node.op in ("NoOp", "Assert"): + continue + + add_op = _TF_OPS_REGISTRY.get(node.op, None) + if add_op is None: + msg = "Conversion for TF op '{0}' not implemented.\n \n{1}".format( + node.op, node.original_node + ) + raise NotImplementedError(msg) + add_op(context, node) + + if len(node.outputs) > 0: + # set_global / get_global / NoOp has no direct consumer / outputs + x = context[node.name] + check_output_shapes(x, node) + + output_is_list = isinstance(outputs, (tuple, list)) + if not output_is_list: + outputs = [outputs] + + output_vars = [] + for output in outputs: + x = context[output.split(":")[0]] + if isinstance(x, (tuple, list)): + idx = int(output.split(":")[1]) + output_vars.append(x[idx]) + else: + output_vars.append(x) + + return output_vars if output_is_list else output_vars[0] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py new file mode 100644 index 00000000..98dd468c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py @@ -0,0 +1,466 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+from coremltools.converters._profile_utils import _profile
+from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target
+from coremltools.converters.mil.input_types import ImageType, InputType, RangeDim
+from coremltools.converters.mil.input_types import Shape as InputShape
+from coremltools.converters.mil.input_types import TensorType, _get_shaping_class
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, Program, get_new_symbol, types
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.mil.var import Var
+
+from .._utils import get_output_names
+from .basic_graph_ops import simple_topsort
+from .convert_utils import convert_graph
+
+
+# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
+# to the current TF --> tfssa transcription.
+class TranscriptionContext:
+    def __init__(self, name=None):
+        self.name = name if name is not None else ""
+        self.context = {}
+        self.graphs = {}
+
+        # TF loops are represented as functions, so nested loops become
+        # stacked functions. Stacked functions are translated to nested
+        # blocks in Program, like
+        #
+        # while_loop(loop_vars=(%a, %b))
+        #   cond_block1(%a.x, %b.x) {
+        #     ...some ops
+        #   } -> (%bool_var1)
+        #   body_block1(%a.x, %b.x) {
+        #     %ret_axx = while_loop(loop_vars=(%a.x,))
+        #       cond_block2(%a.x.x) {
+        #         ...some ops
+        #       } -> (%bool_var2)
+        #       body_block2(%a.x.x) {
+        #         ...some ops
+        #       } -> (%new_a.x.x)
+        #     } -> (%ret_axx)
+        #     ....some ops using %ret_a
+        #   } -> (%ret_ax, %ret_bx)
+        #
+        # During the translation of cond_block2, we'd have func_input_stack
+        #
+        # (%a.x.x,)
+        # (%a.x, %b.x)
+        #
+        # where (%a.x.x,) would be unstacked once cond_block2 is done.
+        self.func_input_stack = []  # list of tuple[Var]
+
+    def add(self, tf_name, ssa_vars, is_new_var=True):
+        """
+        ssa_vars: list[Var] / tuple[Var] (multiple outputs) or
+                  Var (single output)
+        is_new_var: True if ssa_vars are newly created for tf_name.
+        """
+        if tf_name in self.context:
+            # Overriding allows us to translate a while_loop body twice (which
+            # is needed to figure out shape changes across iterations).
+            msg = "TF var %s is added again. Overriding previous value"
+            logger.info(msg % tf_name)
+        if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:
+            msg = (
+                "MIL op's name ({}) does not match TensorFlow's node name ({})."
+                " A node added to the context must have the same name as the name passed to the context."
+ ) + raise ValueError(msg.format(tf_name, ssa_vars.name)) + self.context[tf_name] = ssa_vars + + def add_graph(self, graph_name, graph): + self.graphs[graph_name] = graph + + def get_graph(self, graph_name): + if graph_name not in self.graphs: + msg = "Graph '{}' not found in: {}" + raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) + return self.graphs[graph_name] + + def stack_func_inputs(self, inputs): + self.func_input_stack.append(inputs) + + def unstack_func_inputs(self): + if len(self.func_input_stack) == 0: + raise ValueError("No func input available") + self.func_input_stack.pop() + + def get_func_inputs(self): + if len(self.func_input_stack) == 0: + raise ValueError("No func input available") + return self.func_input_stack[-1] + + def __getitem__(self, tf_name): + if tf_name not in self.context: + msg = "TF var {} not found in context {}" + raise KeyError(msg.format(tf_name, self.name)) + return self.context[tf_name] + + def __contains__(self, tf_name): + return tf_name in self.context + + +class TFConverter: + def __init__(self, tfssa, inputs=None, outputs=None, opset_version=None): + """ + tfssa: TensorFlow IR. + inputs: list of TensorType or ImageType, optional, defaults to None. + outputs: list[ct.InputType] or None + list of either ct.TensorTypes or ct.ImageTypes (both of which are child classes of InputType) + This is the value of the "outputs" argument, passed on by the user in "coremltools.convert" API. + """ + self.tfssa = tfssa + self.global_type = {} + self.inputs = None + self.main_output_types = outputs + self.opset_version = _target(opset_version) if opset_version is not None else None + output_names = get_output_names(outputs) + + main_func = tfssa.functions["main"] + graph = main_func.graph + + # Filter the inputs to only Placeholder names + tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"] + placeholder_names = [] + if inputs is not None: + # Check inputs format + if not isinstance(inputs, (list, tuple)): + raise ValueError( + "Type of inputs should be list or tuple, got {} instead.".format( + type(inputs) + ) + ) + if not all([isinstance(i, InputType) for i in inputs]): + raise ValueError( + "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format( + [type(i) for i in inputs] + ) + ) + + # Special case: if there's only 1 input and 1 placeholder, we match them. + if len(tf_placeholder_names) == 1 and len(inputs) == 1: + if inputs[0].name is None: + inputs[0].name = tf_placeholder_names[0] + + # We fill in shapes for user-specified input that doesn't have shape + for inp in inputs: + # Check inputs existence + if inp.name is None: + raise ValueError( + "Multiple inputs are found in graph, but no input name was provided" + ) + if inp.name not in tf_placeholder_names: + raise ValueError( + "Input ({}) provided is not found in given tensorflow graph. Placeholders in graph are: {}".format( + inp.name, tf_placeholder_names + ) + ) + if inp.shape is None: + shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) + # _get_shaping_class does not accept -1 or None dimension. + shape = [get_new_symbol() if s is None or s == -1 else s \ + for s in shape] + inp.shape = _get_shaping_class(shape) + + # Extract placeholders that users didn't specify. 
+ user_input_names = [inp.name for inp in inputs] + for name in tf_placeholder_names: + if name not in user_input_names: + placeholder_names.append(name) + else: + inputs = [] + placeholder_names = tf_placeholder_names + + # name -> (shape, mil_type) mapping. shape has type list[int] + added_inputs = {} + for inp in main_func.inputs: + if inp not in placeholder_names: + continue + node = graph[inp] + dtype = node.attr['dtype'] + shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) + shape = [get_new_symbol() if s is None or s == -1 else s \ + for s in shape] + inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) + added_inputs[inp] = (shape, dtype) + + if len(added_inputs) > 0: + logger.info( + "Adding Input not specified by users: '{}'".format( + added_inputs) + ) + + for idx, inp in enumerate(inputs): + # We set the default image format in TF as NHWC, since NHWC is used + # for TF unless GPU is specified as device. + if isinstance(inp, ImageType) and inputs[idx].channel_first is None: + inputs[idx].channel_first = False + self.inputs = tuple(inputs) + + for inputtype in self.inputs: + if not isinstance(inputtype.shape, InputShape): + continue + if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): + continue + if inputtype.name not in graph: + raise ValueError( + f"The input {inputtype.name} provided is not in graph." + ) + node = graph[inputtype.name] + shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape] + node.attr["_output_shapes"] = [shape] # list of length 1 + + # infer outputs if not provided + self._validate_outputs(tfssa, output_names) + output_names = main_func.outputs if output_names is None else output_names + output_names = output_names if isinstance(output_names, (tuple, list)) else [output_names] + output_names = [x if isinstance(x, str) else x.name for x in output_names] + self.output_names = output_names + + # We would like a stack so that we run conversion sequentially. + self.graph_stack = self._get_stack(tfssa, root="main") + self.context = TranscriptionContext() + + def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): + + error_message = "Unable to determine the shape of input: {}." \ + " Please provide its shape during conversion, using \n" \ + "'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name) + + if tfgraph[name].attr.get("shape", None) is not None: + shape = tfgraph[name].attr["shape"] + + elif tfgraph[name].attr.get("_output_shapes", None) is not None: + shape = tfgraph[name].attr["_output_shapes"][0] + if shape is None: + raise ValueError(error_message) + else: + raise ValueError(error_message) + + return shape + + def _get_stack(self, tfssa, root="main"): + # We're trying to get a order of how to loop through the graphs. + # This is NOT necessarily a DAG. 
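+        # For example (hypothetical function names): a 'main' graph containing
+        # a single while op with body 'body_f' and cond 'cond_f' yields
+        # dep == {'main': [], 'body_f': ['main'], 'cond_f': ['main']},
+        # and simple_topsort then places 'main' before both children.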
+        dep = {x: [] for x in tfssa.functions}
+        for fname in tfssa.functions:
+            for node in tfssa.functions[fname].graph.values():
+                func_x, func_y = None, None
+
+                if node.op == "while":
+                    func_x = node.attr["body_function"]
+                    func_y = node.attr["cond_function"]
+
+                if func_x and fname not in dep[func_x]:
+                    dep[func_x].append(fname)
+                if func_y and fname not in dep[func_y]:
+                    dep[func_y].append(fname)
+
+        assert len(dep[root]) == 0
+        graph_stack = simple_topsort(dep)
+
+        return graph_stack
+
+    @staticmethod
+    def _get_tensor_name(tensor):
+        ret = None
+        if isinstance(tensor, str):
+            ret = tensor
+        else:
+            ret = tensor.name
+        return ret.split(":")[0]
+
+    def _validate_outputs(self, tfssa, outputs):
+        if outputs is None:
+            return
+        outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
+        output_nodes = []
+        for f in tfssa.functions.values():
+            output_nodes += list(f.outputs)
+        all_nodes = []
+        for f in tfssa.functions.values():
+            all_nodes += list(f.graph.keys())
+        for n in outputs:
+            if self._get_tensor_name(n) not in output_nodes + all_nodes:
+                raise KeyError('Output node name "{}" does not exist.'.format(n))
+
+    def _validate_and_update_main_output_types(self, prog):
+        assert isinstance(self.main_output_types, list)
+        assert len(self.main_output_types) > 0
+        output_vars = prog.functions["main"].outputs
+        output_vars_names = set([var.name for var in output_vars])
+
+        # validation
+        if get_output_names(self.main_output_types) is None:
+            # This is the case where the user did not provide names for the
+            # outputs; the outputs were inferred from the TF graph automatically.
+            # There are two scenarios here: the number of inferred outputs is
+            # either equal to 1 or greater than 1.
+            if len(output_vars) == 1:
+                if len(self.main_output_types) > 1:
+                    msg = "The list of ct.TensorType()/ct.ImageType() provided in the 'outputs' argument does not " \
+                          "have names. When more than 1 output is provided for tensorflow conversion, " \
+                          "each entry in the outputs list must have the name specified as well, " \
+                          "via the 'name' argument in ct.TensorType/ct.ImageType"
+                    raise ValueError(msg)
+            else:  # len(output_vars) > 1
+                # If there are more than 1 sink nodes (i.e. inferred outputs), the user must provide names
+                # so that the output types can be correctly mapped.
+                msg = "The list of ct.TensorType()/ct.ImageType() provided in the 'outputs' argument does not " \
+                      "have names. When names are not provided, the outputs are automatically inferred " \
+                      "from the TF graph. There are {} outputs detected, which is more than 1. " \
+                      "In this case, to map the output types correctly, " \
+                      "please provide names for each of the " \
+                      "outputs. The output names inferred from the TF graph are: {} "
+                raise ValueError(msg.format(
+                    len(output_vars),
+                    output_vars_names,
+                ))
+        else:
+            # The user provided output names. In this case, the appropriate tensors must have
+            # been selected from the TF graph based on the output names.
+            # Verify that the names present in self.main_output_types match output_vars_names
+            # (they should match). Also, reconstruct the self.main_output_types list in the
+            # same order as the outputs in output_vars.
+            assert len(output_vars) == len(self.main_output_types), \
+                "this should match if the outputs were picked correctly from the TF graph"
+            for out in self.main_output_types:
+                if out.name not in output_vars_names:
+                    msg = "output name, '{}', not found in Tensorflow Graph.
Available output names are: {}" + raise KeyError(msg.format(out.name, output_vars_names)) + name_to_input_type_map = {} + for out in self.main_output_types: + name_to_input_type_map[out.name] = out + main_output_types = [] + for out_var in output_vars: + main_output_types.append(name_to_input_type_map[out_var.name]) + self.main_output_types = main_output_types + + def check_placeholder_output(self, prog, outputs_name): + """ + Handle the cases where placeholder is output. + There is a case where the program is like + main(%Placeholder: (5,fp32)) { + block3() { + } -> (%Placeholder) + } + But self.output_names = ["Placeholder:0"] + We need to change the block output to Placeholder:0 by inserting an identity + """ + block = prog["main"] + input_name = [x.name for x in list(block.inputs.values())] + with block: + new_outputs = [] + for output, output_name in zip(block.outputs, outputs_name): + if output.name not in input_name or output.name == output_name: + new_output = output + else: + new_output = mb.identity(x=output, name=output_name) + new_outputs.append(new_output) + block.set_outputs(new_outputs) + + def convert_main_graph(self, prog, graph): + func_inputs = {} + for input_type in self.inputs: + func_inputs[input_type.name] = mb.placeholder( + input_type.shape.symbolic_shape, dtype=input_type.dtype) + prog.set_main_input_types(self.inputs) + + with Function(func_inputs, opset_version=self.opset_version) as ssa_func: + # Get the input Var + for name in func_inputs.keys(): + input_var = ssa_func.inputs[name] + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64): + # cast the input var to float32 + # We need to do this because the type inference is very buggy when started from + # float16/float64 typed inputs. Until that is fixed in the following radar + # we cast all inputs of type float16/float64 to float32 as the first step. + # These casts will later get removed, if compute_precision=Float16 is + # provided, which will cause the FP16ComputePrecision pass to run. + # TODO: remove this when this radar is fixed: rdar://93731970 + input_var = mb.cast(x=input_var, dtype="fp32", name=name) + self.context.add(name, input_var) + outputs = convert_graph(self.context, graph, self.output_names) + ssa_func.set_outputs(outputs) + prog.add_function("main", ssa_func) + # check duplicate output + # Note: sometimes two outputs are pointing to the same Var, we should + # create mb.identity for those cases + block = prog["main"] + with block: + name_counts = {} + new_outputs = [output for output in block.outputs] + for i, v_o in enumerate(block.outputs): + if v_o.name not in name_counts: + name_counts[v_o.name] = 1 + else: + name_counts[v_o.name] += 1 + new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name]) + x = mb.identity(x=v_o, name=new_name) + new_outputs[i] = x + block.set_outputs(new_outputs) + + # Rename outputs to TF's name. This is needed when the last op doesn't + # generate a new Var (e.g., get_tuple, Identity etc.), and thus the + # last Var would have a different name than the last TF op's name. 
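+        # Renaming the Var here keeps the user-visible output names identical
+        # to the names of the corresponding TF ops.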
+ # + # Example: + # + # TF code: + # x = tf.placeholder(tf.float32, shape=(1,)) + # y = tf.placeholder(tf.float32, shape=(1,)) + # c = lambda i, j: \ + # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + # b = lambda i, j: (tf.add(i, 1), j) + # res = tf.while_loop(c, b, [x, y]) + # + # Resulting nodes (excluding the nodes in while loop cond & body): + # + # node name: Placeholder op type: Placeholder inputs: [] + # node name: Placeholder_1 op type: Placeholder inputs: [] + # node name: make_input_0 op type: make_tuple inputs: ['Placeholder', + # 'Placeholder_1'] + # node name: while_0 op type: while inputs: ['make_input_0'] + # node name: while/Exit op type: get_tuple inputs: ['while_0'] + # node name: while/Exit_1 op type: get_tuple inputs: ['while_0'] + # + # Observe that return node `while/Exit` is an output from get_tuple, + # which in our translation simply unpack a python tuple of Vars + # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to + # rename `while_0:0` to `while/Exit` in order for users to find the + # output. + # Note: only rename the output if the output is not Placeholder. + + input_names = [x.name for x in self.inputs] + for v_o, out_name in zip(prog["main"].outputs, self.output_names): + if v_o.name != out_name and v_o.name not in input_names: + logger.info( + "Renaming output var: '{}' -> '{}'".format(v_o.name, out_name) + ) + v_o.name = out_name + self.check_placeholder_output(prog, self.output_names) + + # verify that if model output dtypes / names are provided by the user, they are valid + if self.main_output_types is not None: + self._validate_and_update_main_output_types(prog) + prog.set_main_output_types(self.main_output_types) + + @_profile + def convert(self): + prog = Program() + if len(self.graph_stack) == 0: + raise ValueError("At least one TF function must be present") + if self.graph_stack[0] != "main": + msg = "TF root graph must be named 'main'. Got {}" + raise ValueError(msg.format(self.graph_stack[0])) + graph = self.tfssa.functions["main"].graph + for g_name in self.graph_stack[1:]: + self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) + self.convert_main_graph(prog, graph) + return prog diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py new file mode 100644 index 00000000..1bc96bb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py @@ -0,0 +1,173 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry + +register_op = SSAOpRegistry.register_op + + +# This file contains the TF dialect of SSA. Briefly, these ops are only +# understandable in the TF frontend and not acceptable in the standard op set. +# No backend would support any of the op here. These ops exist to facilitate +# frontend SSA passes, but must be replaced with standard ops during SSA +# passes. + +# All tf op must start with 'tf_' prefix. +# +# tf_make_list allows elem_shape to be unspecified. 
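+# (For example, TF list-creating ops whose element shape is not known at
+# conversion time are expected to be lowered to this op.)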
core op make_list does +# not allow that. +@register_op(namespace="tf") +class tf_make_list(Operation): + input_spec = InputSpec( + init_length=TensorInputType(optional=True, type_domain=types.int32), + dynamic_length=TensorInputType(optional=True, type_domain=types.bool), + elem_shape=TensorInputType(const=True, optional=True, type_domain=types.int32), + dtype=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + def default_inputs(self): + return DefaultInputs( + init_length=1, + dynamic_length=True, + dtype="fp32", + ) + + def type_inference(self): + init_length = self.init_length.val + if self.elem_shape is None or self.elem_shape.sym_val is None: + return types.list( + types.unknown, + init_length=init_length, + dynamic_length=self.dynamic_length.val, + ) + builtin_dtype = types.string_to_builtin(self.dtype.val) + if builtin_dtype is None: + raise ValueError("Unsupported dtype {}".format(self.dtype.val)) + elem_type = types.tensor(builtin_dtype, self.elem_shape.sym_val) + return types.list( + elem_type, init_length=init_length, dynamic_length=self.dynamic_length.val + ) + + +class TfLSTMBase(Operation): + """ + Common LSTM inputs for BlockLSTMCell and BlockLSTM. + """ + + input_spec = InputSpec( + c_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim] + h_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim] + # weight: [input_dim + hidden_dim, 4*hidden_dim] (icfo layout) + weight=TensorInputType(const=True, type_domain="T"), + forget_bias=TensorInputType(const=True, optional=True, type_domain="T"), + # cell_clip == None implies not using cell clip + cell_clip=TensorInputType(const=True, optional=True, type_domain="T"), + # If use_peephole == False, weight_peep_* is ignored + use_peephole=TensorInputType(const=True, optional=True, type_domain=types.bool), + weight_peep_i=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + weight_peep_f=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + weight_peep_o=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + bias=TensorInputType(const=True, type_domain="T"), # [4*hidden_dim] (icfo layout) + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + forget_bias=1., + use_peephole=False, + ) + + def _check_peephole_weights(self): + # Check weight_peep_* + if self.use_peephole.val: + if ( + self.weight_peep_i is None + or self.weight_peep_f is None + or self.weight_peep_o is None + ): + raise ValueError( + "weight_peep_* cannot be None when use_peephole is True" + ) + + +@register_op(namespace="tf") +class tf_lstm_block_cell(TfLSTMBase): + """ + xh = [x, h_prev] + [i, ci, f, o] = xh * w + b + f = f + forget_bias + + if not use_peephole: + wci = wcf = wco = 0 + i = sigmoid(cs_prev .* wci + i) + f = sigmoid(cs_prev .* wcf + f) + ci = tanh(ci) + cs = ci .* i + cs_prev .* f + cs = clip(cs, cell_clip) + o = sigmoid(cs * wco + o) + co = tanh(cs) + h = co .* o + """ + input_spec = ( + InputSpec(x=TensorInputType(type_domain="T"),) + TfLSTMBase.input_spec # [batch, input_dim] + ) + + def __init__(self, **kwargs): + super(tf_lstm_block_cell, self).__init__(**kwargs) + + def type_inference(self): + self._check_peephole_weights() + # all return shapes are [batch, hidden_dim] + ret_shape = self.c_prev.shape + dtype = self.x.dtype + # See + # https://www.tensorflow.org/api_docs/python/tf/raw_ops/LSTMBlockCell + # All returned shapes are [batch, hidden_dim] + return ( + 
types.tensor(dtype, ret_shape), # i + types.tensor(dtype, ret_shape), # cs + types.tensor(dtype, ret_shape), # f + types.tensor(dtype, ret_shape), # o + types.tensor(dtype, ret_shape), # ci + types.tensor(dtype, ret_shape), # co + types.tensor(dtype, ret_shape), + ) # h + + +@register_op(namespace="tf") +class tf_lstm_block(TfLSTMBase): + """ + Apply LSTM to an input sequence + """ + input_spec = ( + InputSpec( + seq_len=TensorInputType(type_domain=types.int32), # int + x=TensorInputType(type_domain="T"), # [padded_len, batch, input_dim] + ) + + TfLSTMBase.input_spec + ) + + def type_inference(self): + self._check_peephole_weights() + padded_len = self.x.shape[0] + ret_shape = [padded_len] + list(self.c_prev.shape) + dtype = self.x.dtype + # All returned shapes are [padded_len, b, hidden_dim] + return ( + types.tensor(dtype, ret_shape), # i + types.tensor(dtype, ret_shape), # cs + types.tensor(dtype, ret_shape), # f + types.tensor(dtype, ret_shape), # o + types.tensor(dtype, ret_shape), # ci + types.tensor(dtype, ret_shape), # co + types.tensor(dtype, ret_shape), + ) # h diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py new file mode 100644 index 00000000..2e4ba504 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py @@ -0,0 +1,149 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import types + + +class DotVisitor: + """ + Generates a dot description of a graph in dictionary form. 
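+
+    Typical usage (sketch, assuming `graph` is a dict{str: ParsedNode}):
+
+        dot_string = DotVisitor().visit_all(graph).get_result()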
+ """ + + def __init__(self, annotation=None): + self.result = [] + self.visited_memo = {} + self.highlights = {} + self.alternate_labeller = None + self.annotation = annotation + + def labeller(self, labeller): + self.alternate_labeller = labeller + return self + + def highlight_nodes(self, nodeset, color="yellow"): + for i in nodeset: + self.highlights[i] = color + return self + + def visit(self, graph, node, nodename_prefix=""): + if node.name in self.visited_memo: + return self + + # For printing datatype, breaks type + if node.attr.get("symbolic_datatype", None) is not None: + dtype = str(types.get_type_info(node.attr["symbolic_datatype"])) + elif node.datatype is not None: + dtype = str(types.get_type_info(node.datatype)) + else: + dtype = "Unknown" + + label = "" + if self.alternate_labeller is not None: + label = self.alternate_labeller(node) + else: + if len(node.outputs) == 0: + label = "\\n{" + node.name + "}" + if "Placeholder" in node.op: + label = "\\n{" + node.name + "}" + if node.op == "while": + label = ( + "\\n{body: " + + node.attr["body_function"] + + " cond:" + + node.attr["cond_function"] + + "}" + ) + if node.op == "function": + label = "\\n{body: " + node.attr["function_name"] + "}" + if node.op == "function_entry": + label = "\\n{" + node.name + "}" + label = node.op + ":" + dtype + label + + if node.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + node.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % ( + self.highlights[node.name], + "violetred" if node.attr.get(self.annotation, False) else "black", + ) + ) + else: + self.result.append( + '"' + + nodename_prefix + + node.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' + % ("violetred" if node.attr.get(self.annotation, False) else "black") + ) + + for i in node.inputs: + input_name = i + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + node.name + + '"' + ) + self.result.append(edge) + + for i in node.control_inputs: + input_name = i + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + node.name + + '"' + ) + edge = edge + " [style=dotted]" + self.result.append(edge) + + self.visited_memo[node.name] = 1 + + for i in node.inputs: + input_name = i + if input_name[0] == "^": + input_name = input_name[1:] + assert input_name in graph + self.visit(graph, graph[input_name], nodename_prefix) + return self + + def visit_all(self, graph, nodename_prefix=""): + for i in graph: + self.visit(graph, graph[i], nodename_prefix) + return self + + def get_result(self, graphtype="digraph", graph_name="g"): + return ( + graphtype + + " " + + graph_name + + " {\n\t" + + "\n\t".join(str(i) for i in self.result) + + ';\n\tlabel="' + + graph_name[8:] + + '";\n\tfontsize=96;\n}' + ) + + def __str__(self): + return self.get_result() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py new file mode 100644 index 00000000..bd77337b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py @@ -0,0 +1,316 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import gc +import os +from distutils.version import StrictVersion as _StrictVersion +from tempfile import mktemp + +import tensorflow as tf +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools._deps import _get_version +from coremltools.converters._profile_utils import _profile + +from .._utils import get_output_names +from .basic_graph_ops import fill_outputs +from .converter import TFConverter +from .parsed_tf_node import ParsedTFNode +from .tf_graph_pass import (cond_to_where, constant_propagation, + delete_asserts, delete_disconnected_nodes, + delete_unnecessary_constant_nodes, + functionalize_loops, fuse_dilation_conv, + insert_get_tuple, quantization_pass, + remove_variable_nodes, + tensor_array_resource_removal) +from .tfssa import NetworkEnsemble, SSAFunction + + +class TFLoader: + """Abstract class for TensorFlow model loader.""" + + def __init__(self, model, debug=False, **kwargs): + """ + TensorFlow model loader. + + Parameters + ---------- + model: TensorFlow model + Model generated using TensorFlow. + debug: bool, optional, defaults to False + If true, display verbose logging and visualizations. + kwargs: dict(str, Any), optional, defaults to None + Dictionary of additional arguments. + """ + self.model = model + self.debug = debug + self.kwargs = kwargs + self._graph_def = None + self._tf_ssa = None + + @_profile + def load(self): + """Load TensorFlow model into MIL program.""" + + logger.info("Loading TensorFlow model '{}'".format(self.model)) + outputs = self.kwargs.get("outputs", None) + output_names = get_output_names(outputs) + self._graph_def = self._graph_def_from_model(output_names) + + if self._graph_def is not None and len(self._graph_def.node) == 0: + msg = "tf.Graph should have at least 1 node, Got empty graph." + raise ValueError(msg) + + self._tf_ssa = self._tf_ssa_from_graph_def() + + del self._graph_def + gc.collect() + + if self.debug: + import graphviz + + dot_string = self._tf_ssa.get_dot_string( + annotation=True, name_and_op_style=True, highlight_debug_nodes=[] + ) + graphviz.Source(dot_string).view( + filename="/tmp/ssa_before_tf_passes", cleanup=True + ) + + program = self._program_from_tf_ssa() + logger.debug("program:\n{}".format(program)) + return program + + # @abstractmethod + def _graph_def_from_model(self, output_names=None): + """Load TensorFlow model into GraphDef. 
Overwrite for different TF versions.""" + pass + + # @abstractmethod + def _tf_ssa_from_graph_def(self, fn_name="main"): + """Load GraphDef and parse into NetworkEnsemble (TFSSA).""" + pass + + # @abstractmethod + def _program_from_tf_ssa(self): + """Load NetworkEnsemble (TFSSA) and parse into MIL program.""" + pass + + @staticmethod + def extract_sub_graph(graph_def, outputs=None): + """Extract sub-graph based on user-provided outputs.""" + if outputs is None or len(outputs) == 0: + return graph_def + msg = "Extracting sub-graph based on outputs '{}' from the full model" + logger.debug(msg.format(outputs)) + outputs = outputs if isinstance(outputs, list) else [outputs] + outputs = [i.split(":")[0] for i in outputs] + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + return tf.graph_util.extract_sub_graph(graph_def, outputs) + else: + return tf.compat.v1.graph_util.extract_sub_graph(graph_def, outputs) + + +class TF1Loader(TFLoader): + def __init__(self, model, debug=False, **kwargs): + """ + TensorFlow 1.x model loader. + + Parameters + ---------- + model: Model created with TensorFlow 1.x + One of the following model format: + - TensorFlow tf.Graph object or frozen graph (.pb) file path + - TensorFlow tf.keras.Model object or HDF5 (.h5) file path + - TensorFlow SavedModel directory path + debug: bool, optional. Defaults to False. + This flag should generally be False except for debugging purposes + for diagnosing conversion errors. Setting this flag to True will + cause graph pass errors to be ignored, forcefully returning a + NetworkEnsemble object. + kwargs: dict(str, Any), optional + Dictionary of additional arguments. + """ + TFLoader.__init__(self, model, debug, **kwargs) + + def _graph_def_from_model(self, output_names=None): + """Overwrites TFLoader._graph_def_from_model()""" + msg = "Expected model format: [tf.Graph | .pb | SavedModel | tf.keras.Model | .h5], got {}" + if isinstance(self.model, tf.Graph) and hasattr(self.model, "as_graph_def"): + graph_def = self.model.as_graph_def(add_shapes=True) + return self.extract_sub_graph(graph_def, output_names) + elif isinstance(self.model, tf.keras.Model): + graph_def = self._from_tf_keras_model(self.model) + return self.extract_sub_graph(graph_def, output_names) + elif isinstance(self.model, str): + if not os.path.exists(str(self.model)): + raise ValueError('Input model "{}" does not exist'.format(self.model)) + elif os.path.isfile(str(self.model)) and self.model.endswith(".pb"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + with open(self.model, "rb") as f: + gd = tf.GraphDef() + gd.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(gd, name="") + else: + with tf.io.gfile.GFile(self.model, "rb") as f: + gd = tf.compat.v1.GraphDef() + gd.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.graph_util.import_graph_def(gd, name="") + graph_def = graph.as_graph_def(add_shapes=True) + return self.extract_sub_graph(graph_def, output_names) + elif os.path.isfile(str(self.model)) and self.model.endswith(".h5"): + graph_def = self._from_tf_keras_model(self.model) + return self.extract_sub_graph(graph_def, output_names) + elif os.path.isdir(str(self.model)): + graph_def = self._from_saved_model(self.model) + return self.extract_sub_graph(graph_def, output_names) + else: + raise NotImplementedError(msg.format(self.model)) + else: + raise NotImplementedError(msg.format(self.model)) + + def _tf_ssa_from_graph_def(self, fn_name="main"): + """Overwrites 
TFLoader._tf_ssa_from_graph_def()""" + graph_dict = {} + for node in self._graph_def.node: + graph_dict[node.name] = ParsedTFNode(node) + + tensor_array_resource_removal(graph_dict) + graph = insert_get_tuple(graph_dict) + graph = fill_outputs(graph) + delete_disconnected_nodes(graph) + + tf_ssa = NetworkEnsemble() + tf_ssa.functions[fn_name] = SSAFunction(graph) + return tf_ssa + + def _program_from_tf_ssa(self): + """Overwrites TFLoader._mil_program_from_tf_ssa()""" + # Applying frontend passes on TFSSA. Note that these are different from + # passes applied to MIL in TF frontend. + tf_passes = [ + delete_asserts, + functionalize_loops, + constant_propagation, + delete_unnecessary_constant_nodes, # must come after constant_propagation + quantization_pass, + cond_to_where, + remove_variable_nodes, + fuse_dilation_conv, + ] + + if self.debug: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + try: + tf_pass(self._tf_ssa) + except Exception as e: + logger.exception('Exception in pass "{}": {}'.format(tf_pass, e)) + logger.info("Ignoring exception and continuing to next pass") + else: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + tf_pass(self._tf_ssa) + + if self.debug: + import graphviz + + dot_string = self._tf_ssa.get_dot_string( + annotation=True, name_and_op_style=True, highlight_debug_nodes=[] + ) + graphviz.Source(dot_string).view( + filename="/tmp/ssa_after_tf_passes", cleanup=True + ) + + converter = TFConverter( + tfssa=self._tf_ssa, + inputs=self.kwargs["inputs"], + outputs=self.kwargs["outputs"], + opset_version=self.kwargs["specification_version"], + ) + return converter.convert() + + @staticmethod + def _from_saved_model(saved_model_dir): + # must import here as tf.contrib is only available on TF 1.x + from tensorflow.contrib.saved_model.python.saved_model import reader + from tensorflow.python.tools import freeze_graph + + saved_model_tags = reader.get_saved_model_tag_sets(saved_model_dir)[0] + if not saved_model_tags: + msg = "Unsupported SavedModel directory format: no tag_sets available" + raise NotImplementedError(msg) + + # get model outputs + output_node_names = [] + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + sess = tf.Session() + else: + sess = tf.compat.v1.Session() + metagraph = tf.saved_model.loader.load( + sess, saved_model_tags, saved_model_dir + ) + for sd in metagraph.signature_def.values(): + output_node_names += [o.name.split(":")[0] for o in sd.outputs.values()] + + sess.close() + + # get frozen graph + output_graph = mktemp() + tf.compat.v1.reset_default_graph() if _get_version(tf.__version__) >= _StrictVersion("1.13.1") else tf.reset_default_graph() + freeze_graph.freeze_graph( + input_graph=None, + input_saver=None, + input_binary=None, + input_checkpoint=None, + output_node_names=",".join(output_node_names), + restore_op_name=None, + filename_tensor_name=None, + output_graph=output_graph, + clear_devices=True, + initializer_nodes="", + variable_names_whitelist="", + variable_names_blacklist="", + input_meta_graph=None, + input_saved_model_dir=saved_model_dir, + saved_model_tags=",".join(saved_model_tags), + ) + + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + graph_def = tf.GraphDef() + with open(output_graph, "rb") as f: + graph_def.ParseFromString(f.read()) + graph_def = tf.graph_util.remove_training_nodes(graph_def) + else: + graph_def = tf.compat.v1.GraphDef() + with open(output_graph, "rb") as f: + 
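+                # Re-read the frozen graph that freeze_graph wrote above.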
graph_def.ParseFromString(f.read()) + graph_def = tf.compat.v1.graph_util.remove_training_nodes(graph_def) + with tf.Graph().as_default() as graph: + tf.graph_util.import_graph_def(graph_def, name="") + return graph.as_graph_def(add_shapes=True) + + @staticmethod + def _from_tf_keras_model(keras_model): + from tensorflow.python.framework.convert_to_constants import \ + convert_variables_to_constants_v2 + from tensorflow.python.keras.saving import saving_utils + + if not isinstance(keras_model, tf.keras.Model): + keras_model = tf.keras.models.load_model(keras_model, None) + + tf.keras.backend.clear_session() + tf.keras.backend.set_learning_phase(False) + fn = saving_utils.trace_model_call(keras_model) + cf = fn.get_concrete_function() + try: + frozen_fn = convert_variables_to_constants_v2(cf) + return frozen_fn.graph.as_graph_def(add_shapes=True) + except Exception: + raise NotImplementedError("Unhandled tf.keras model format") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py new file mode 100644 index 00000000..ebb94bc3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py @@ -0,0 +1,35 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +_varname_charset = set( + [chr(i) for i in range(ord("A"), ord("Z") + 1)] + + [chr(i) for i in range(ord("a"), ord("z") + 1)] + + [chr(i) for i in range(ord("0"), ord("9") + 1)] + + ["_"] +) + + +def escape_name(name): + ret = "".join([i if i in _varname_charset else "_" for i in name]) + if ret.endswith("_"): + return ret + else: + return ret + "_" + + +def escape_fn_name(name): + ret = "".join([i if i in _varname_charset else "_" for i in name]) + ret = escape_name(name) + if ret.startswith("f_"): + return ret + else: + return "f_" + ret + + +def normalize_names(names): + if isinstance(names, str): + return names.replace(":", "__").replace("/", "__") + return [i.replace(":", "__").replace("/", "__") for i in names] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py new file mode 100644 index 00000000..856ade4f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py @@ -0,0 +1,3546 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.block import \ + is_current_opset_version_compatible_with +from coremltools.converters.mil.mil.ops.defs._utils import ( + broadcast_shapes, promote_input_dtypes) +from coremltools.converters.mil.mil.types import builtin_to_string +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + is_symbolic) + +from .._utils import build_einsum_mil +from .convert_utils import convert_graph +from .tf_op_registry import register_tf_op + + +def _adjust_min_max(min, max, num_bits=8): + if (min <= max) and (max <= 0): + min = (min - max) * 1.0 + max = 0.0 + elif (min >= 0) and (max >= min): + max = (max - min) * 1.0 + min = 0.0 + else: + scale = (max - min) / (2 ** num_bits - 1) + min_adj = scale * round(min / scale) + max_adj = max + min_adj - min + min = min_adj + max = max_adj + return min, max + + +def _is_scalar(type_): + if type_ is None: + return False + result = types.is_int(type_) or types.is_float(type_) or types.is_bool(type_) + if types.is_tensor(type_) and (len(type_.get_shape()) == 0): + result = True + return result + + +def _transpose_NHWC_to_NCHW(x): + return mb.transpose(x=x, perm=[0, 3, 1, 2]) + + +def _transpose_NCHW_to_NHWC(x, node_name): + return mb.transpose(x=x, perm=[0, 2, 3, 1], name=node_name) + + +def _transpose_NDHWC_to_NCDHW(x): + return mb.transpose(x=x, perm=[0, 4, 1, 2, 3]) + + +def _transpose_NCDHW_to_NDHWC(x, node_name): + return mb.transpose(x=x, perm=[0, 2, 3, 4, 1], name=node_name) + + +def _check_axes_type(x): + if x is None or x.val is None: + return None + if isinstance(x.val, _np.int32): + return _np.array([x.val]) + return x.val + + +def _value_at(x, idx): + """ + input x: 1D tensor (vector). + return value at index idx. x[idx]. 
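+    e.g. _value_at(shape_var, 2) lowers to
+    slice_by_index(x=shape_var, begin=[2], end=[0], squeeze_mask=[True]),
+    i.e. the scalar shape_var[2].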
+ """ + assert x.rank == 1 + return mb.slice_by_index(x=x, begin=[idx], end=[0], squeeze_mask=[True]) + + +def _freq_to_mel(freq): + return 1127.0 * _np.log(1 + freq / 700.0) + + +def _get_MFCC_constants(spectrogram_N, + sample_rate, + upper_frequency_limit, + lower_frequency_limit, + filterbank_channel_count, + dct_coefficient_count): + + """ + params: + spectrogram_N : int + sample_rate: int + upper_frequency_limit : int + filterbank_channel_count : int + dct_coefficient_count : int + + returns: + array(shape: (spectrogram_N,)) + array(shape: (spectrogram_N, filterbank_channel_count)) + array(shape: (spectrogram_N, filterbank_channel_count)) + array(shape: (filterbank_channel_count, dct_coefficient_count)) + + reference: + https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/core/kernels/mfcc_mel_filterbank.cc + """ + + center_frequencies = _np.zeros((filterbank_channel_count + 1)) + mel_low = _freq_to_mel(lower_frequency_limit) + mel_hi = _freq_to_mel(upper_frequency_limit) + mel_span = mel_hi - mel_low + mel_spacing = mel_span / (filterbank_channel_count + 1) + for i in range(filterbank_channel_count + 1): + center_frequencies[i] = mel_low + (mel_spacing * (i + 1)) + + hz_per_sbin = 0.5 * sample_rate / (spectrogram_N - 1) + start_index = int(1.5 + (lower_frequency_limit / hz_per_sbin)) + end_index = int(upper_frequency_limit / hz_per_sbin) + + band_mapper = _np.zeros((spectrogram_N)) + channel = 0 + for i in range(spectrogram_N): + melf = _freq_to_mel(i * hz_per_sbin) + if (i < start_index) or (i > end_index): + band_mapper[i] = -2 + else: + while channel < filterbank_channel_count and center_frequencies[channel] < melf: + channel += 1 + band_mapper[i] = channel - 1 # Can be == -1 + + weights = _np.zeros((spectrogram_N)) + for i in range(spectrogram_N): + channel = int(band_mapper[i]) + if (i < start_index) or (i > end_index): + weights[i] = 0 + else: + if channel >= 0: + weights[i] = (center_frequencies[channel + 1] - _freq_to_mel(i * hz_per_sbin)) / ( + center_frequencies[channel + 1] - center_frequencies[channel]) + else: + weights[i] = (center_frequencies[0] - _freq_to_mel(i * hz_per_sbin)) / (center_frequencies[0] - mel_low) + + mat_spec_val = _np.zeros((spectrogram_N, filterbank_channel_count)) + mat_weighted = _np.zeros((spectrogram_N, filterbank_channel_count)) + for i in range(start_index, end_index + 1): # For each FFT bin + channel = int(band_mapper[i]) + if channel >= 0: + mat_weighted[i, channel] = 1 # Right side of triangle, downward slope + channel += 1 + if channel < filterbank_channel_count: + mat_weighted[i, channel] = -1 # Left side of triangle + mat_spec_val[i, channel] = 1 # Left side of triangle + + # compute the dct matrix + cosines = _np.zeros((filterbank_channel_count, dct_coefficient_count)) + fnorm = _np.sqrt(2.0 / filterbank_channel_count) + arg = _np.pi / filterbank_channel_count + for i in range(filterbank_channel_count): + for j in range(dct_coefficient_count): + cosines[i, j] = fnorm * _np.cos(j * arg * (i + 0.5)) + + return weights, mat_weighted, mat_spec_val, cosines + + +def _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank): + # An utility function that reshape a tensor with shape [batch, spatial_dims, remaining_dim_1, ..., remaining_dim_N] + # to [batch, spatial_dims, remaining_dim_1 * ... 
+    # For the special case where there are no remaining dimensions, we expand the last axis
+    assert remaining_rank != 1
+    if remaining_rank == 0:
+        return mb.expand_dims(x=x, axes=[-1])
+    else:
+        x_shape = mb.shape(x=x)
+        batch_and_spatial_shape = mb.slice_by_size(x=x_shape, begin=[0], size=[x.rank-remaining_rank])
+        reshape_shape = mb.concat(values=[batch_and_spatial_shape, [-1]], axis=0)
+        return mb.reshape(x=x, shape=reshape_shape)
+
+
+def _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank):
+    # A utility function that reshapes the tensor with shape [batch_new, spatial_dims_new, remaining_dims] to the original
+    # form, which is [batch_new, spatial_dims_new, remaining_dim_1, ..., remaining_dim_N]
+    assert remaining_rank != 1
+    if remaining_rank == 0:
+        return mb.squeeze(x=x, axes=[-1])
+    else:
+        x_shape = mb.shape(x=x)
+        spatial_rank = original_shape.shape[0] - remaining_rank - 1
+        batch_and_spatial_shape = mb.slice_by_size(x=x_shape, begin=[0], size=[1+spatial_rank])
+        remaining_shape = mb.slice_by_size(x=original_shape, begin=[1+spatial_rank], size=[-1])
+        reshape_shape = mb.concat(values=[batch_and_spatial_shape, remaining_shape], axis=0)
+        return mb.reshape(x=x, shape=reshape_shape)
+
+
+@register_tf_op(tf_alias=["BiasAdd", "AddV2"])
+def Add(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x, y = promote_input_dtypes([x, y])
+
+    if "data_format" in node.attr and node.attr["data_format"] == "NCHW":
+        if x.rank != 1 and y.rank != 1:
+            raise AssertionError("Bias must have rank 1")
+
+        bias, data = (y, x) if y.rank == 1 else (x, y)
+
+        if not data.rank >= 3:
+            raise AssertionError("Data must have rank of at least 3")
+
+        axes = [-(i + 1) for i in range(data.rank - 2)]
+
+        x = data
+        y = mb.expand_dims(x=bias, axes=axes, name=node.name + "_expanded_bias")
+
+    x = mb.add(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AddN(context, node):
+    values = [context[name] for name in node.inputs]
+    if len(values) == 1:
+        Identity(context, node)
+        return
+    prev_var = values[0]
+    for idx, var in enumerate(values[1:]):
+        if var == values[-1]:
+            x = mb.add(x=prev_var, y=var, name=node.name)
+        else:
+            prev_var = mb.add(x=prev_var, y=var, name=node.name + "_tmpAddN_" + str(idx))
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Abs(context, node):
+    x = context[node.inputs[0]]
+    x = mb.abs(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Acos(context, node):
+    x = context[node.inputs[0]]
+    x = mb.acos(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def All(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.cast(x=x, dtype="int32")
+    x = mb.reduce_prod(x=x, axes=axes, keep_dims=keep_dims)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Any(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.cast(x=x, dtype="int32")
+    x = mb.reduce_sum(x=x, axes=axes, keep_dims=keep_dims)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ArgMax(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    x = mb.reduce_argmax(x=x, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ArgMin(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    x = mb.reduce_argmin(x=x, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Asin(context, node):
+    x = context[node.inputs[0]]
+    x = mb.asin(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Atan(context, node):
+    x = context[node.inputs[0]]
+    x = mb.atan(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Atanh(context, node):
+    x = context[node.inputs[0]]
+    x = mb.atanh(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AvgPool(context, node):
+    x = context[node.inputs[0]]
+    in_shape = x.sym_type.get_shape()
+    d_rank = len(in_shape) - 2
+    data_format = node.attr.get("data_format", "NHWC")
+    ksize = node.attr.get("ksize", None)
+    kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank)
+    strides = node.attr.get("strides", None)
+    if strides is not None:
+        strides = _pool_pads_or_strides(strides, data_format, d_rank)
+    pad_type = node.attr["padding"].lower()
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+        )
+        x = _transpose_NCHW_to_NHWC(x, node.name)
+    else:
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+            name=node.name,
+        )
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AvgPool3D(context, node):
+    x = context[node.inputs[0]]
+    d_rank = x.rank - 2
+    data_format = node.attr.get("data_format", "NDHWC")
+    ksize = node.attr.get("ksize", None)
+    kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank)
+    strides = node.attr.get("strides", None)
+    if strides is not None:
+        strides = _pool_pads_or_strides(strides, data_format, d_rank)
+    pad_type = node.attr["padding"].lower()
+    if data_format == "NDHWC":
+        x = _transpose_NDHWC_to_NCDHW(x)
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+        )
+        x = _transpose_NCDHW_to_NDHWC(x, node.name)
+    else:
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+            name=node.name,
+        )
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def BatchToSpaceND(context, node):
+    # In tensorflow, the input tensor has the shape of (batch,) + spatial_shape + remaining_shape.
+    # The shape is treated as a combination of 3 components:
+    # 1. A single batch dimension
+    # 2. Spatial dimensions, with a length spatial_rank, which could be either 1 or 2. Also, spatial_rank
+    #    is equal to the length of block_shape
+    # 3. Remaining dimensions, with a length remaining_rank
+
+    # The logic of translating this op is as follows:
+    # 1. We first reshape the input to a canonical shape (rolling the remaining shape dimensions into a
+    #    single dimension): (batch,) + spatial_shape + (R), where R = remaining_dim_1 * ... * remaining_dim_n
+    # 2. We support rank 1 and rank 2 spatial shape:
+    #    (i) rank 1: We decompose the BatchToSpace into small basic ops.
+    #    (ii) rank 2: We directly use the built in batch_to_space op.
+    #    The output would have shape (batch_new,) + spatial_shape_new + (R)
+    # 3. We transform the tensor back, by unrolling the remaining shape: (B_new,) + spatial_shape_new + remaining_shape
+
+    x = context[node.inputs[0]]
+    block_shape = context[node.inputs[1]].val
+    crops = context[node.inputs[2]].val
+    original_shape = mb.shape(x=x)
+
+    if block_shape is None or crops is None:
+        raise NotImplementedError(
+            "Dynamic block_shape and crops are not supported for BatchToSpaceND!"
+        )
+
+    input_rank = x.rank
+    spatial_rank = len(block_shape)
+    remaining_rank = x.rank - 1 - spatial_rank
+    has_non_unity_remaining_dims = remaining_rank != 1
+
+    if has_non_unity_remaining_dims:
+        # Reshape the input tensor to shape [batch, spatial_shape, remaining_dim_1 * ... * remaining_dim_N]
+        x = _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank)
+
+    if spatial_rank >= 3:
+        raise NotImplementedError("Rank of spatial shape > 2 is not supported.")
+
+    if spatial_rank == 2:
+        # Tensor has shape [B, H, W, C], we can directly use the batch_to_space op by doing
+        # [B, H, W, C] -> transpose -> [B, C, H, W] -> batch_to_space -> [B_new, C, H_new, W_new] ->
+        # transpose -> [B_new, H_new, W_new, C]
+        x = mb.transpose(x=x, perm=[0, 3, 1, 2])
+        x = mb.batch_to_space(x=x, block_shape=block_shape, crops=_np.zeros((2, 2), _np.int32), name=node.name)
+        if tuple(crops[0]) != (0, 0) or tuple(crops[1]) != (0, 0):
+            x = mb.crop(x=x, crop_height=crops[0], crop_width=crops[1])
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+
+    if spatial_rank == 1:
+        # In this case, we decompose batch_to_space into small basic ops
+        # [B, H, C] -> decomposed ops -> [B_new, H_new, C]
+
+        # reshape input to [block_shape, B/block_shape, H, C]
+        input_shape = mb.shape(x=x)
+        block_shape = block_shape[0]
+        batch_size = _value_at(input_shape, 0)
+        spatial_size = _value_at(input_shape, 1)
+        channel_size = _value_at(input_shape, 2)
+        new_batch_size = mb.cast(x=mb.real_div(x=batch_size, y=block_shape), dtype="int32")
+        reshape_values = [block_shape, new_batch_size, spatial_size, channel_size]
+        reshape_shape = mb.concat(values=reshape_values, axis=0)
+        x = mb.reshape(x=x, shape=reshape_shape, name=node.name)
+
+        # permute the tensor to [B/block_shape, H, block_shape, C]
+        x = mb.transpose(x=x, perm=[1, 2, 0, 3])
+
+        # reshape the tensor to [B/block_shape, H*block_shape, C]
+        new_spatial_size = mb.cast(x=mb.mul(x=spatial_size, y=block_shape), dtype="int32")
+        reshape_values = [new_batch_size, new_spatial_size, channel_size]
+        reshape_shape = mb.concat(values=reshape_values, axis=0)
+        x = mb.reshape(x=x, shape=reshape_shape)
+
+        # crop the tensor to [B/block_shape, H - crops[0][0] - crops[0][1], C]
+        x = mb.crop(x=x, crop_height=crops[0], crop_width=[0, 0])
+
+    if has_non_unity_remaining_dims:
+        # Reshape the tensor from shape [batch_new, spatial_shape_new, remaining_dim_1 * ... * remaining_dim_N] back to
+        # shape [batch_new, spatial_shape_new, remaining_shape]
+        x = _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank)
+
+    context.add(node.name, mb.identity(x=x, name=node.name))
+
+
+@register_tf_op
+def Ceil(context, node):
+    x = context[node.inputs[0]]
+    x = mb.ceil(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Const(context, node):
+    if node.value is None:
+        raise ValueError("Const node '{}' must have a value".format(node.name))
+    x = mb.const(val=node.value.val, name=node.name)
+    context.add(node.name, x)
+
+
+def _conv2d3d_strides_or_dilations(name, value, data_format, default_value=1):
+    """Compute strides or dilation values for 2D and 3D convolutions."""
+    if value is None:
+        value = default_value
+    if not isinstance(value, (int, list)):
+        raise ValueError("{} must be an int or list".format(name))
+
+    # Parse number of spatial dimensions from `data_format`, assuming N (batch) and C
+    # (input channels) are present
+    n_dims = len(data_format) - 2
+
+    if isinstance(value, int):
+        return [value] * n_dims
+
+    if len(value) == 1:
+        return value * n_dims
+    if len(value) == n_dims:
+        return value
+    if len(value) != n_dims + 2:
+        raise ValueError(
+            "{} must have length 1, {}, or {}".format(name, n_dims, n_dims + 2)
+        )
+
+    if data_format == "NHWC":
+        # Only support stride/dilation along N, C == 1
+        if not (value[0] == value[3] == 1):
+            raise ValueError(
+                "{} along N and C other than 1 not implemented".format(name)
+            )
+        return value[1:3]
+    elif data_format == "NCHW" or data_format == "NCDHW":
+        if not (value[0] == value[1] == 1):
+            raise ValueError(
+                "{} along N and C other than 1 not implemented".format(name)
+            )
+        return value[2:]
+    # "NDHWC"
+    if not (value[0] == value[4] == 1):
+        raise ValueError("{} along N and C other than 1 not implemented".format(name))
+    return value[1:4]
+
+
+@register_tf_op
+def Cos(context, node):
+    x = context[node.inputs[0]]
+    x = mb.cos(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Cosh(context, node):
+    x = context[node.inputs[0]]
+    x = mb.cosh(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Cross(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    # last dim must be 3; other dims must match
+    assert x.shape[1:] == y.shape[1:]
+    assert x.shape[-1] == 3
+    x1 = mb.gather(x=x, indices=[1, 2, 0], axis=-1)
+    x2 = mb.gather(x=x, indices=[2, 0, 1], axis=-1)
+    y1 = mb.gather(x=y, indices=[1, 2, 0], axis=-1)
+    y2 = mb.gather(x=y, indices=[2, 0, 1], axis=-1)
+    z = mb.sub(x=mb.mul(x=x1, y=y2), y=mb.mul(x=x2, y=y1), name=node.name)
+    context.add(node.name, z)
+
+
+@register_tf_op
+def Einsum(context, node):
+    equation = node.attr["equation"]
+    a = context[node.inputs[0]]
+    b = context[node.inputs[1]]
+    x = build_einsum_mil(a, b, equation, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Equal(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.equal(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ExtractImagePatches(context, node):
+    x = context[node.inputs[0]]
+    sizes = node.attr.get("ksizes")
+    strides = node.attr.get("strides")
+    rates = node.attr.get("rates")
+    padding = node.attr.get("padding")
+    if x.rank != 4:
+        raise ValueError("input for ExtractImagePatches should be a 4D tensor.")
+    if not all([rate == 1 for rate in rates]):
+        raise NotImplementedError(
+            "only rates with all 1s are implemented for ExtractImagePatches."
ExtractImagePatches." + ) + if len(sizes) != 4 or sizes[0] != 1 or sizes[3] != 1: + raise ValueError( + "ExtractImagePatches only supports sizes (4D tensor) with 1s for batch and channel dimensions." + ) + if len(sizes) != 4 or strides[0] != 1 or strides[3] != 1: + raise ValueError( + "ExtractImagePatches only supports strides (4D tensor) with 1s for batch and channel dimensions." + ) + if not padding in ["VALID", "SAME"]: + raise ValueError("non-supported padding for ExtractImagePatches.") + h, w = x.shape[1], x.shape[2] + + # padding for SAME mode + if padding == "SAME": + delta_h = h % strides[1] if h % strides[1] != 0 else strides[1] + delta_w = w % strides[2] if w % strides[2] != 0 else strides[2] + last_h = h - delta_h + 1 + last_w = w - delta_w + 1 + pad_h = max(0, last_h + sizes[1] - 1 - h) + pad_w = max(0, last_w + sizes[2] - 1 - w) + pad_h = [pad_h // 2, pad_h // 2 if pad_h % 2 == 0 else pad_h // 2 + 1] + pad_w = [pad_w // 2, pad_w // 2 if pad_w % 2 == 0 else pad_w // 2 + 1] + pad = _np.array([[0, 0], pad_h, pad_w, [0, 0]]).astype(_np.int32) + pad = pad.reshape(-1) + if not all(pad == 0): + x = mb.pad(x=x, pad=pad, mode="constant", constant_val=0.0) + h, w = x.shape[1], x.shape[2] + + # compute boxes + batch = x.shape[0] + boxes = [] + h_index = list(range(0, h - sizes[1] + 1, strides[1])) + w_index = list(range(0, w - sizes[2] + 1, strides[2])) + for hi in h_index: + for wi in w_index: + boxes.append((hi, wi, hi + sizes[1] - 1, wi + sizes[2] - 1)) + + boxes = _np.array(boxes, dtype=_np.float32) + box_indices = _np.arange(batch) + box_indices = _np.tile(box_indices, (len(boxes), 1)) + box_indices = _np.transpose(box_indices) + box_indices = box_indices.reshape(-1, 1) + boxes = _np.tile(boxes, (batch, 1)) + boxes = _np.concatenate([box_indices, boxes], axis=1) + boxes = boxes.reshape(boxes.shape[0], 1, boxes.shape[1], 1, 1) + + # use crop_and_resize + x = _transpose_NHWC_to_NCHW(x) + x = mb.crop_resize( + x=x, + roi=boxes, + target_height=sizes[1], + target_width=sizes[2], + normalized_coordinates=False, + spatial_scale=1.0, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + x = mb.squeeze(x=x, axes=[1]) + x = _transpose_NCHW_to_NHWC(x, node_name=node.name + "_transpose_to_nhwc") + x = mb.reshape(x=x, shape=(batch, len(h_index), len(w_index), -1), name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Exp(context, node): + x = context[node.inputs[0]] + x = mb.exp(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Floor(context, node): + x = context[node.inputs[0]] + x = mb.floor(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def FloorDiv(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.floor_div(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Greater(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.greater(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def GreaterEqual(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.greater_equal(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Less(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.less(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LessEqual(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.less_equal(x=x, y=y, name=node.name) + 
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Log(context, node):
+    x = context[node.inputs[0]]
+    x = mb.log(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Log1p(context, node):
+    x = context[node.inputs[0]]
+    x = mb.log(x=x, epsilon=1., name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def LogicalAnd(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.logical_and(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def LogicalNot(context, node):
+    x = context[node.inputs[0]]
+    x = mb.logical_not(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def LogicalOr(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.logical_or(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def LogicalXor(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.logical_xor(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def LRN(context, node):
+    x = context[node.inputs[0]]
+    depth_radius = node.attr.get("depth_radius")
+    size = (depth_radius * 2) + 1
+    alpha = node.attr.get("alpha") * size
+    beta = node.attr.get("beta")
+    bias = node.attr.get("bias")
+    x = _transpose_NHWC_to_NCHW(x)
+    x = mb.local_response_norm(x=x, size=size, alpha=alpha, beta=beta, k=bias)
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Maximum(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.maximum(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Minimum(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.minimum(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def FloorMod(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    floor = mb.floor_div(x=x, y=y, name=node.name + "_floor_div")
+    floor_multiply = mb.mul(x=floor, y=y, name=node.name + "_multiply")
+    x = mb.sub(x=x, y=floor_multiply, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Mul(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.mul(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Neg(context, node):
+    x = context[node.inputs[0]]
+    x, y = promote_input_dtypes([x, -1])
+    x = mb.mul(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def NotEqual(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.not_equal(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Pow(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.pow(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def DepthwiseConv2dNative(context, node):
+    # [kH, kW, C_in, multiplier]
+    W_hwim = context[node.inputs[1]]  # m = multiplier
+    # [kH, kW, 1, C_in * multiplier]
+    shape_hw1o = list(W_hwim.shape[:2]) + [1, W_hwim.shape[2] * W_hwim.shape[3]]
+    W_hw1o = mb.reshape(x=W_hwim, shape=shape_hw1o)
+    # [C_in * multiplier, 1, kH, kW]. Note that C_in * multiplier = C_out in
+    # MIL. C_in / groups = 1 in depthwise conv.
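+    # A worked example with hypothetical shapes: W_hwim of shape (3, 3, 16, 2)
+    # is reshaped to W_hw1o of shape (3, 3, 1, 32) and then transposed to
+    # W_o1hw of shape (32, 1, 3, 3); the conv below then runs with
+    # groups = C_in = 16, so C_out = C_in * multiplier = 32.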
+ W_o1hw = mb.transpose(x=W_hw1o, perm=[3, 2, 0, 1]) + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + if pad_type not in ["VALID", "SAME"]: + raise ValueError("Invalid padding type for tf.nn.depthwise_conv2d") + + pad_type = pad_type.lower() + x = context[node.inputs[0]] + C_in = x.shape[-1] + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NHWC" else node.name + x = mb.conv( + x=x, + weight=W_o1hw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + groups=C_in, + name=conv_name, + ) + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def FakeQuantWithMinMaxVars(context, node): + w = context[node.inputs[0]] + min = context[node.inputs[1]].val + max = context[node.inputs[2]].val + num_bits = node.attr['num_bits'] + narrow_range = node.attr['narrow_range'] + + min, max = _adjust_min_max(min, max, num_bits) + + if narrow_range: + scale = (max-min) / (2 ** (num_bits) - 2) + bias = min - scale + else: + scale = (max-min) / (2 ** (num_bits) - 1) + bias = min + + w = mb.clip(x=w, alpha=min, beta=max) + w = mb.sub(x=w, y=bias) + x = mb.real_div(x=w, y=scale) + x = mb.round(x=x) + x = mb.mul(x=x, y=scale) + x = mb.add(x=x, y=bias, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv2D(context, node): + if "quantize" in node.attr: + quantization_type = "linear" + min = node.attr['quantize_min'] + max = node.attr['quantize_max'] + nbits = node.attr['num_bits'] + narrow_range = node.attr['narrow_range'] + + w = context[node.inputs[1]].sym_val + + min, max = _adjust_min_max(min, max, nbits) + + if narrow_range: + quant_scale = (max - min) / (2 ** (nbits) - 2) + quant_bias = (min-quant_scale) + else: + quant_scale = (max - min) / (2 ** (nbits) - 1) + quant_bias = (min) + + w_clip = _np.clip(w, min, max) + w_round = _np.round((w_clip-quant_bias)/quant_scale) + W_hwio = w_round.astype(_np.uint8) + + if not isinstance(quant_scale, list) and not isinstance(quant_scale, tuple): + quant_bias = [quant_bias] + quant_scale = [quant_scale] + else: + quantization_type = None + nbits = None + quant_scale = None + quant_bias = None + W_hwio = context[node.inputs[1]] + + if quantization_type is not None: + W_oihw = _np.transpose(W_hwio, axes=[3, 2, 0, 1]) + else: + W_oihw = mb.transpose(x=W_hwio, perm=[3, 2, 0, 1]) + + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + pad_type = pad_type.lower() + pad_type = "custom" if pad_type == "explicit" else pad_type + assert pad_type in {"same", "valid", "custom"} + x = context[node.inputs[0]] + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + if pad_type == "custom": + pad_val = node.attr["explicit_paddings"] + pad_val = pad_val[2:-2] + elif data_format == "NCHW" and pad_type == "custom": + pad_val = node.attr["explicit_paddings"] + pad_val = pad_val[4:] + # Only the last op should have the same name as node.name + conv_name = node.name 
+ "x" if data_format == "NHWC" else node.name + + # get the groups from the weighs shape and the input shape + _, in_channel, _, _ = x.shape + _, weight_in_channel, _, _ = W_oihw.shape + if in_channel % weight_in_channel != 0: + raise ValueError("input channel should be divided by the weight channel.") + groups = int(in_channel / weight_in_channel) + + if quantization_type is not None: + x = mb.conv_quantized( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + name=conv_name, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + groups=groups, + ) + elif pad_type == "custom": + x = mb.conv( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + pad=pad_val, + groups=groups, + name=conv_name, + ) + else: + x = mb.conv( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + groups=groups, + name=conv_name, + ) + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv3D(context, node): + W_dhwio = context[node.inputs[1]] + W_oidhw = mb.transpose(x=W_dhwio, perm=[4, 3, 0, 1, 2]) + data_format = node.attr.get("data_format", "NDHWC") + DHW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + DHW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + pad_type = pad_type.lower() + x = context[node.inputs[0]] + if data_format == "NDHWC": + # Convert input to NCDHW + x = _transpose_NDHWC_to_NCDHW(x) + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NDHWC" else node.name + _, in_channel, _, _, _ = x.shape + _, weight_in_channel, _, _, _ = W_oidhw.shape + if in_channel % weight_in_channel != 0: + raise ValueError("input channel should be divided by the weight channel.") + groups = int(in_channel / weight_in_channel) + + x = mb.conv( + x=x, + weight=W_oidhw, + pad_type=pad_type, + strides=DHW_strides, + dilations=DHW_dilations, + groups=groups, + name=conv_name, + ) + if data_format == "NDHWC": + # Convert input back to NDHWC (from NCDHW) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv3DBackpropInputV2(context, node): + # Output shape: [N, D_out, H_out, W_out, C_out] + output_shape = context[node.inputs[0]].val + # Weight shape: [D, H, W, C_out, C_in] + W_dhwoi = context[node.inputs[1]] + W_iodhw = mb.transpose(x=W_dhwoi, perm=[4, 3, 0, 1, 2]) + # Input shape: [N, D_in, H_in, W_in, C_in] + x = context[node.inputs[2]] + + data_format = node.attr.get("data_format", "NDHWC") + DHW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + DHW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + pad_type = node.attr.get("padding", None) + + if pad_type is None: + raise ValueError("Padding type not specified for op: {}".format(node.name)) + + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + pad_type = pad_type.lower() + + if data_format == "NDHWC": + # Convert input to NCDHW + x = _transpose_NDHWC_to_NCDHW(x) + if 
output_shape is not None: + output_shape = [output_shape[0], output_shape[4], + output_shape[1], output_shape[2], output_shape[3]] + + # Only the last op should have the same name as node.name + conv_name = node.name + "_x" if data_format == "NDHWC" else node.name + # Pass output shape provided above + x = mb.conv_transpose( + x=x, + weight=W_iodhw, + pad_type=pad_type, + strides=DHW_strides, + output_shape=output_shape, + dilations=DHW_dilations, + name=conv_name, + ) + if data_format == "NDHWC": + # Convert input back to NDHWC (from NCDHW) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def DepthToSpace(context, node): + x = context[node.inputs[0]] + block_size = node.attr.get("block_size") + data_format = node.attr.get("data_format", "NHWC") + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + x = mb.depth_to_space(x=x, block_size=block_size) + x = _transpose_NCHW_to_NHWC(x, node.name) + else: + x = mb.depth_to_space(x=x, block_size=block_size, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def EuclideanNorm(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_l2_norm(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + +@register_tf_op +def IdentityN(context, node): + res = [mb.identity(x=context[x]) for x in node.inputs] + context.add(node.name, res) + + +@register_tf_op +def ExpandDims(context, node): + x = context[node.inputs[0]] + axis = context[node.inputs[1]] + if axis.op.op_type == "const" and (axis.val is not None and axis.val.size == 1): + axis = axis.val[0] if axis.shape == (1,) else axis.val + else: + raise ValueError("Expand Dims: Invalid value for parameter axis") + x = mb.expand_dims(x=x, axes=[axis], name=node.name) + context.add(node.name, x) + + +@register_tf_op(tf_alias=["FusedBatchNormV2", "FusedBatchNormV3"]) +def FusedBatchNorm(context, node): + # Get attributes + data_format = node.attr.get("data_format", "NHWC") + epsilon = node.attr.get("epsilon", None) + + # Get inputs + x = context[node.inputs[0]] + scale = context[node.inputs[1]] + offset = context[node.inputs[2]] + mean = context[node.inputs[3]] + variance = context[node.inputs[4]] + if data_format == "NHWC": + # TF's FusedBatchNorm is only for 4D inputs + x = _transpose_NHWC_to_NCHW(x) + x = mb.batch_norm( + x=x, mean=mean, variance=variance, gamma=scale, beta=offset, epsilon=epsilon + ) + x = _transpose_NCHW_to_NHWC(x, node.name + ":0") + else: + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=scale, + beta=offset, + epsilon=epsilon, + name=node.name + ":0", + ) + # Inference only batch norm does not have meaningful outputs for + # batch_mean, batch_variance etc. 
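+    # As a result we simply pass the input moving mean/variance through
+    # unchanged as the extra outputs (see the context.add below).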
+ context.add(node.name, [x, mean, variance]) + + +@register_tf_op +def Fill(context, node): + shape = context[node.inputs[0]] + value = context[node.inputs[1]] + x = mb.fill(shape=shape, value=value, name=node.name) + context.add(node.name, x) + + +@register_tf_op(tf_alias=["ImageProjectiveTransformV3"]) +def ImageProjectiveTransformV2(context, node): + # Data shape format: [batch, height, width, channels] + x = context[node.inputs[0]] + # Transforms shape format: [batch, 8] or [1, 8] matrix, [a0, a1, a2, b0, b1, b2, c0, c1] + transforms = context[node.inputs[1]] + # 1-D Tensor [new_height, new_width] + output_shape = context[node.inputs[2]] + + # For V3, there is an additional fill_value input + if len(node.inputs) == 4: + fill_value = context[node.inputs[3]].val + if fill_value != 0.0: + msg = ("fill_value {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only fill_value = 0.0 is supported.").format(fill_value, node.name) + raise ValueError(msg) + + interpolation = node.attr.get("interpolation") + if interpolation != "BILINEAR": + msg = ("interpolation {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only interpolation = BILINEAR is supported.").format(interpolation, node.name) + raise ValueError(msg) + + fill_mode = node.attr.get("fill_mode") + if fill_mode != "CONSTANT": + msg = ("fill_mode {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only fill_mode = CONSTANT is supported.").format(fill_mode, node.name) + raise ValueError(msg) + + h_out = output_shape.val[0] + w_out = output_shape.val[1] + h_in = x.shape[1] + w_in = x.shape[2] + + # Don't allow non-zero c0 or c1, check for each batch + n_batch = transforms.val.shape[0] + transform_matrix = [] + for batch in range(n_batch): + c0 = transforms.val[batch][6] + c1 = transforms.val[batch][7] + if not (c0 == c1 == 0.0): + raise NotImplementedError( + "'affine' op with 'transforms' contains non-zero " + + "c0 or c1 is not supported, Got: {}".format( + transforms + ) + ) + # In the tensorflow affine transform function, the coordinate is in the original image size range, + # i.e., for the input image, x is in range [0, W_in), and y is in range [0, H_in) + # For the output image, x is in range [0, W_out), and y is in range [0, H_out) + # However, the MIL affine op is in the normalized coordinate, in which x and y are both in range [-1, 1] + # So we need to update the affine transformation matrix. 
+        # We have the following four equations:
+        # (1) x_original_in = (x_normalized_in + 1) * (W_in - 1) / 2
+        # (2) y_original_in = (y_normalized_in + 1) * (H_in - 1) / 2
+        # (3) x_original_out = (x_normalized_out + 1) * (W_out - 1) / 2
+        # (4) y_original_out = (y_normalized_out + 1) * (H_out - 1) / 2
+        # The original transforms matrix is in the original coordinate:
+        # (i) x_original_in = a * x_original_out + b * y_original_out + c
+        # (ii) y_original_in = d * x_original_out + e * y_original_out + f
+        # After plugging (1) - (4) into (i) (ii), we obtain the new transformation matrix in the normalized coordinate
+        a, b, c, d, e, f = transforms.val[batch].tolist()[:6]
+        new_a = a * (w_out - 1) / (w_in - 1)
+        new_b = b * (h_out - 1) / (w_in - 1)
+        new_c = (2 * c + a * (w_out - 1) + b * (h_out - 1)) / (w_in - 1) - 1
+        new_d = d * (w_out - 1) / (h_in - 1)
+        new_e = e * (h_out - 1) / (h_in - 1)
+        new_f = (2 * f + d * (w_out - 1) + e * (h_out - 1)) / (h_in - 1) - 1
+        transform_matrix.append([new_a, new_b, new_c, new_d, new_e, new_f])
+
+    transform_matrix = _np.array(transform_matrix)
+
+    x = _transpose_NHWC_to_NCHW(x)
+    x = mb.affine(
+        x=x,
+        transform_matrix=transform_matrix,
+        output_height=output_shape.val[0],
+        output_width=output_shape.val[1],
+        sampling_mode="bilinear",
+        padding_mode="constant",
+        padding_value=0.0,
+        coordinates_mode="normalized_minus_one_to_one",
+        align_corners=True,
+        name=node.name + "_affine",
+    )
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["DivNoNan"])
+def RealDiv(context, node):
+    x = mb.cast(x=context[node.inputs[0]], dtype="fp32")
+    y = mb.cast(x=context[node.inputs[1]], dtype="fp32")
+    x = mb.real_div(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Addons>Resampler"])
+def Resampler(context, node):
+    # Data shape format: (Batch, Hin, Win, C)
+    x = context[node.inputs[0]]
+    # Warp shape format: (Batch, Hout, Wout, 2)
+    warp = context[node.inputs[1]]
+
+    # Handle rank-3 warp tensor
+    is_rank3_warp = warp.rank == 3
+    if is_rank3_warp:  # expand spatial dimension
+        warp = mb.expand_dims(x=warp, axes=[1], name=warp.name + "_expand_dims")
+
+    x = _transpose_NHWC_to_NCHW(x)
+    x = mb.resample(
+        x=x,
+        coordinates=warp,
+        sampling_mode="bilinear",
+        padding_mode="constant",
+        padding_value=0.0,
+        coordinates_mode="unnormalized",
+        align_corners=True,
+        name=node.name + "_resample",
+    )
+    x = _transpose_NCHW_to_NHWC(
+        x, node.name + "_transpose" if is_rank3_warp else node.name
+    )
+    if is_rank3_warp:  # squeeze spatial dimension
+        x = mb.squeeze(x=x, axes=[1], name=node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Rsqrt(context, node):
+    x = context[node.inputs[0]]
+    x = mb.rsqrt(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sub(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.sub(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def StopGradient(context, node):
+    Identity(context, node)
+
+
+@register_tf_op
+def Identity(context, node):
+    x = context[node.inputs[0]]
+    # In many cases we can skip and just make downstream ops reference the
+    # pre-identity op. However, when identity is an output or the pre-identity
+    # op is a placeholder, an explicit op (here mb.mul(x, 1.0)) is required.
+ if len(node.outputs) != 0 or x.op is not None: + context.add(node.name, x, is_new_var=False) + else: + x = mb.mul(x=x, y=1.0, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Print(context, node): + Identity(context, node) + + +@register_tf_op +def Placeholder(context, node): + # no-op as we add Placeholder separately. + pass + + +def _pool_pads_or_strides(tf_spec, data_format, d_rank): + if tf_spec is None: + d_spec = [1] * d_rank + elif not isinstance(tf_spec, list): + d_spec = [tf_spec] * d_rank + elif len(tf_spec) == 2: + d_spec = tf_spec + elif len(tf_spec) == 4: + if data_format == "NHWC": + d_spec = tf_spec[1:3] + else: + d_spec = tf_spec[2:] + elif len(tf_spec) == 5: + if data_format == "NDHWC": + d_spec = tf_spec[1:4] + else: + # NCDHW + d_spec = tf_spec[2:] + else: + raise ValueError("Unsupported tf_spec: %s" % tf_spec) + return d_spec + + +@register_tf_op(tf_alias=["BatchMatMul", "BatchMatMulV2"]) +def MatMul(context, node): + a = context[node.inputs[0]] + b = context[node.inputs[1]] + transpose_a = node.attr.get("adj_x", False) or node.attr.get("transpose_a", False) + transpose_b = node.attr.get("adj_y", False) or node.attr.get("transpose_b", False) + a, b = promote_input_dtypes([a, b]) + x = mb.matmul( + x=a, y=b, transpose_x=transpose_a, transpose_y=transpose_b, name=node.name + ) + context.add(node.name, x) + + +@register_tf_op +def MaxPool(context, node): + x = context[node.inputs[0]] + in_shape = x.sym_type.get_shape() + d_rank = len(in_shape) - 2 + data_format = node.attr.get("data_format", "NHWC") + ksize = node.attr.get("ksize", None) + kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank) + strides = node.attr.get("strides", None) + if strides is not None: + strides = _pool_pads_or_strides(strides, data_format, d_rank) + pad_type = node.attr["padding"].lower() + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + x = mb.max_pool( + x=x, kernel_sizes=kernel_sizes, strides=strides, pad_type=pad_type + ) + x = _transpose_NCHW_to_NHWC(x, node.name) + else: + x = mb.max_pool( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type=pad_type, + name=node.name, + ) + context.add(node.name, x) + + +@register_tf_op +def MaxPool3D(context, node): + x = context[node.inputs[0]] + d_rank = x.rank - 2 + data_format = node.attr.get("data_format", "NDHWC") + ksize = node.attr.get("ksize", None) + kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank) + strides = node.attr.get("strides", None) + if strides is not None: + strides = _pool_pads_or_strides(strides, data_format, d_rank) + pad_type = node.attr["padding"].lower() + if data_format == "NDHWC": + x = _transpose_NDHWC_to_NCDHW(x) + x = mb.max_pool( + x=x, kernel_sizes=kernel_sizes, strides=strides, pad_type=pad_type + ) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + else: + x = mb.max_pool( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type=pad_type, + name=node.name, + ) + + context.add(node.name, x) + + +@register_tf_op +def MatrixBandPart(context, node): + x = context[node.inputs[0]] + lower = context[node.inputs[1]] + upper = context[node.inputs[2]] + x = mb.band_part(x=x, lower=lower, upper=upper, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Max(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_max(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def 
Min(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_min(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Prod(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_prod(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Cast(context, node): + type_map = { + types.fp16: "fp16", + types.float: "fp32", + types.double: "fp32", + types.int32: "int32", + types.int64: "int32", + } + if node.attr["DstT"] not in type_map.keys(): + raise NotImplementedError( + "Cast: Provided destination type {} not " + "supported.".format(types.get_type_info(node.attr["DstT"])) + ) + x = context[node.inputs[0]] + dtype = type_map[node.attr["DstT"]] + x = mb.cast(x=x, dtype=dtype, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Round(context, node): + x = context[node.inputs[0]] + x = mb.round(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sign(context, node): + x = context[node.inputs[0]] + x = mb.sign(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sin(context, node): + x = context[node.inputs[0]] + x = mb.sin(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sinh(context, node): + x = context[node.inputs[0]] + x = mb.sinh(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Slice(context, node): + x = context[node.inputs[0]] + begin = context[node.inputs[1]] + size = context[node.inputs[2]] + res = mb.slice_by_size(x=x, begin=begin, size=size, name=node.name) + context.add(node.name, res) + + +@register_tf_op +def Sqrt(context, node): + x = context[node.inputs[0]] + x = mb.sqrt(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Square(context, node): + x = context[node.inputs[0]] + x = mb.mul(x=x, y=x, name=node.name) + context.add(node.name, x) + + +def _softmax_cross_entropy_with_logits(feats, labels, name): + # compute the log softmax + y = mb.reduce_log_sum_exp(x=feats, axes=[-1], keep_dims=True) + log_softmax = mb.sub(x=feats, y=y) + loss = mb.mul(x=labels, y=log_softmax) + loss = mb.mul(x=loss, y=-1.) 
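+    # Putting it together (a standard identity, noted here for clarity):
+    #   loss = -sum(labels * (feats - log_sum_exp(feats))) over the class axis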
+    loss = mb.reduce_sum(x=loss, axes=[-1], name=name)
+    return loss
+
+
+@register_tf_op
+def SparseSoftmaxCrossEntropyWithLogits(context, node):
+    feats = context[node.inputs[0]]
+    labels = context[node.inputs[1]]
+    class_nums = feats.shape[1]
+    labels = mb.one_hot(
+        indices=labels,
+        one_hot_vector_size=class_nums,
+    )
+    labels = mb.cast(x=labels, dtype="fp32")
+    loss = _softmax_cross_entropy_with_logits(feats, labels, node.name)
+    context.add(node.name, loss)
+
+
+@register_tf_op
+def SoftmaxCrossEntropyWithLogits(context, node):
+    feats = context[node.inputs[0]]
+    labels = context[node.inputs[1]]
+    loss = _softmax_cross_entropy_with_logits(feats, labels, node.name)
+    context.add(node.name, loss)
+
+
+@register_tf_op
+def StridedSlice(context, node):
+    x = context[node.inputs[0]]
+    begin = context[node.inputs[1]]
+    end = context[node.inputs[2]]
+    stride = context[node.inputs[3]]
+
+    def bitmask_to_array(bit):
+        if bit < 0:
+            arr = _np.binary_repr(bit, width=8)[::-1]
+            arr = [bool(int(x)) for x in list(arr)]
+            if node.attr.get("ellipsis_mask", 0) != 0:
+                # In case of non-zero ellipsis_mask, we compute the output rank to be the
+                # max rank of all the masks. This doesn't work if we computed a mask of constant
+                # width 8 here (since the max rank is then taken to be 8 wrongly).
+                raise ValueError("Cannot figure out slice rank with negative mask values and "
+                                 "non-zero ellipsis_mask")
+        else:
+            # This method prevents unnecessary padding of the bitmask when it is not negative.
+            # It can be padded with any extra False values later, based on output rank.
+            arr = []
+            while bit > 0:
+                if bit & 1:
+                    arr.append(True)
+                else:
+                    arr.append(False)
+                bit >>= 1
+
+        return arr
+
+    begin_mask = bitmask_to_array(node.attr.get("begin_mask", 0))
+    end_mask = bitmask_to_array(node.attr.get("end_mask", 0))
+    squeeze_mask = bitmask_to_array(node.attr.get("shrink_axis_mask", 0))
+    ellipsis_mask = bitmask_to_array(node.attr.get("ellipsis_mask", 0))
+    new_axis_mask = bitmask_to_array(node.attr.get("new_axis_mask", 0))
+
+    def _pad_mask(
+        x,
+        begin,
+        end,
+        stride,
+        begin_mask,
+        end_mask,
+        squeeze_mask,
+        ellipsis_mask,
+        new_axis_mask,
+    ):
+        # This function pads the masks, stride, begin and end to the same rank as the input tensor.
+        if begin.rank != 1:
+            raise ValueError(
+                "begin should be a 1-D tensor, got a {}-D tensor instead".format(begin.rank)
+            )
+        if end.rank != 1:
+            raise ValueError(
+                "end should be a 1-D tensor, got a {}-D tensor instead".format(end.rank)
+            )
+
+        # check if inputs can be determined
+        begin_cache = begin
+        end_cache = end
+        begin = [] if begin.val is None else begin.val.tolist()
+        end = [] if end.val is None else end.val.tolist()
+        stride = [] if stride is None else stride.val.tolist()
+
+        # pad masks function
+        new_dims = sum(i is True for i in new_axis_mask)
+        if new_dims > 0:
+            x_rank = x.rank + new_dims
+        else:
+            x_rank = x.rank
+
+        def pad_array(arr, max_rank, idx, default_value):
+            """
+            This function pads the arr to x_rank with default_value.
+            idx is the index where ellipsis_mask = True.
+            max_rank is the maximum rank of the masks, stride, begin and end.
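+
+            Example (assuming x_rank == 4 in the enclosing scope):
+            pad_array([True], 4, -1, False) -> [True, False, False, False]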
+ """ + mask = arr[:] + mask += [default_value] * (x_rank - len(mask)) + new_mask = [] + + for i in range(max_rank): + num = 1 if i != idx else x_rank - max_rank + 1 + new_mask += [mask[i]] * num + return new_mask + + mask_list = [ + begin_mask, + end_mask, + squeeze_mask, + ellipsis_mask, + new_axis_mask, + stride, + begin, + end, + ] + max_rank = max([len(arr) for arr in mask_list]) + + # If ellipsis_mask is given, the last element of it would be True + # Otherwise, we simply pad each mask by appending default value + if ellipsis_mask != []: + rank = max_rank + idx = len(ellipsis_mask) - 1 + else: + rank = x_rank + idx = -1 + + begin_mask = pad_array(begin_mask, rank, idx, False) + end_mask = pad_array(end_mask, rank, idx, False) + squeeze_mask = pad_array(squeeze_mask, rank, idx, False) + ellipsis_mask = pad_array(ellipsis_mask, rank, idx, False) + new_axis_mask = pad_array(new_axis_mask, rank, idx, False) + stride = pad_array(stride, rank, idx, 1) + + # pad begin and end if they are determined during compile time + if begin != []: + begin = pad_array(begin, rank, idx, 0) + if end != []: + end = pad_array(end, rank, idx, 0) + + # make sure begin_mask, end_mask, and stride are consistent with ellipsis mask + # begin_mask and end_mask should be True, and stride should be 1. + for i, mask in enumerate(ellipsis_mask): + if mask: + begin_mask[i] = True + end_mask[i] = True + stride[i] = 1 + + # make sure begin_mask, end_mask, and stride are consistent with new axis mask + # begin_mask and end_mask should be True, and stride should be 1. + for i, mask in enumerate(new_axis_mask): + if mask: + begin_mask[i] = True + end_mask[i] = True + stride[i] = 1 + + # convert begin and end back to cache value if they are run-time determined + if begin == []: + begin = begin_cache + + if end == []: + end = end_cache + + # check which mask is adding by our default value + # This happens when the given index is less than the tensor rank, + # for instance, indexing a 3D tensor A with A[:1, :1] is equivalent to + # A[:1, :1, :]. 
+        # A[:1, :1, :] does. In this case we should append True to begin_mask and end_mask.
+        if ellipsis_mask == [False] * x_rank:
+            for i in range(max_rank, x_rank):
+                begin_mask[i] = True
+                end_mask[i] = True
+
+        return begin, end, stride, begin_mask, end_mask, squeeze_mask, new_axis_mask
+
+    begin, end, stride, begin_mask, end_mask, squeeze_mask, new_axis_mask = _pad_mask(
+        x,
+        begin,
+        end,
+        stride,
+        begin_mask,
+        end_mask,
+        squeeze_mask,
+        ellipsis_mask,
+        new_axis_mask,
+    )
+
+    if sum(i is True for i in new_axis_mask) > 0:
+        axes = [i for i, val in enumerate(new_axis_mask) if val is True]
+        x = mb.expand_dims(x=x, axes=axes, name=node.name + "_new_axes")
+
+    x = mb.slice_by_index(
+        x=x,
+        name=node.name,
+        begin=begin,
+        end=end,
+        stride=stride,
+        begin_mask=begin_mask,
+        end_mask=end_mask,
+        squeeze_mask=squeeze_mask,
+    )
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sum(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    input_type = x.sym_type
+    if _is_scalar(input_type):
+        context.add(node.name, x, is_new_var=False)
+    else:
+        x = mb.reduce_sum(x=x, axes=axes, keep_dims=keep_dims, name=node.name)
+        context.add(node.name, x)
+
+
+@register_tf_op
+def Tan(context, node):
+    x = context[node.inputs[0]]
+    x = mb.tan(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def get_tuple(context, node):
+    x = context[node.inputs[0]]
+    if not isinstance(x, (list, tuple)):
+        # In some rare cases, the upstream op produces a single output
+        x = [x]
+    idx = node.attr["index"]
+    if idx >= len(x):
+        msg = "Index {} out of range, op '{}' only has {} outputs: {}"
+        raise IndexError(msg.format(idx, node.inputs[0], len(x), [v.name for v in x]))
+    context.add(node.name, x[idx], is_new_var=False)
+
+
+@register_tf_op
+def Mean(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.reduce_mean(x=x, axes=axes, keep_dims=keep_dims, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def MatrixDiag(context, node):
+    x = context[node.inputs[0]]
+    if x.rank != 1:
+        raise NotImplementedError('Only MatrixDiag ops with input rank 1 are supported.')
+    length = mb.shape(x=x)
+    x = mb.expand_dims(x=x, axes=[0])
+    reps = mb.concat(values=[length, [1]], axis=0)
+    x = mb.tile(x=x, reps=reps)
+    x = mb.band_part(x=x, lower=0, upper=0, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def MirrorPad(context, node):
+    x = context[node.inputs[0]]
+    pad = context[node.inputs[1]]
+    constant_val = node.attr.get("constant_val", 0.0)
+
+    if pad is None:
+        raise ValueError("TF `paddings` in Pad op must be const.")
+
+    mode = node.attr.get("mode", "reflect").lower()
+    if mode == "symmetric":
+        mode = "reflect"
+    in_rank = len(x.sym_type.get_shape())
+
+    if in_rank > 5 or in_rank < 2:
+        raise ValueError(
+            "Unsupported Pad configuration with input rank {}!".format(str(in_rank))
+        )
+
+    if pad.val.shape != (in_rank, 2):
+        raise ValueError("Padding must have the same length as the input tensor rank.")
+
+    pad = pad.val
+
+    # get the axes that are non-zero
+    non_zero_axis = []
+    for i in range(len(pad)):
+        if not all(pad[i] == 0):
+            non_zero_axis.append(i)
+
+    if len(non_zero_axis) > 2:
+        raise ValueError("Unsupported configuration for Pad layer!")
+
+    # make padding a 2 x 2 tensor if len(non_zero_axis) < 2
+    if len(non_zero_axis) == 0:
+        non_zero_axis = [0, 1]
+
+    if len(non_zero_axis) == 1:
+        if non_zero_axis[0] != len(pad) - 1:
+            non_zero_axis.append(len(pad) - 1)
+        else:
+            non_zero_axis = [0, non_zero_axis[0]]
+
+    # transpose the input such that the padding dims are the last two
+    perm = [i for i in range(in_rank) if i not in non_zero_axis] + non_zero_axis
+    x = mb.transpose(x=x, perm=perm, name=node.name + "_transpose_1")
+    pad = pad[non_zero_axis, :]
+    pad = pad.reshape(-1)
+    x = mb.pad(
+        x=x, pad=pad, name=node.name + "_pad", constant_val=constant_val, mode=mode
+    )
+    inverse_perm = [-1] * len(perm)
+    for i, index in enumerate(perm):
+        inverse_perm[index] = i
+    x = mb.transpose(x=x, perm=inverse_perm, name=node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Pad(context, node):
+    x = context[node.inputs[0]]
+    pad = context[node.inputs[1]]
+    input_dtype = x.dtype
+
+    mode = node.attr.get("mode", "constant").lower()
+    if mode == "symmetric":
+        mode = "reflect"
+    constant_val = node.attr.get("constant_val", 0.0)
+    constant_val = mb.const(val=constant_val)
+    in_rank = len(x.sym_type.get_shape())
+
+    if in_rank > 5:
+        raise ValueError("Unsupported Pad configuration!")
+
+    if pad.val is None:
+        pad = mb.reshape(x=pad, shape=[-1])
+    else:
+        pad = pad.val.reshape(-1)
+
+    x = mb.cast(x=x, dtype=builtin_to_string(constant_val.dtype))
+    x = mb.pad(x=x, pad=pad, mode=mode, constant_val=constant_val)
+    x = mb.cast(x=x, dtype=builtin_to_string(input_dtype), name=node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def PadV2(context, node):
+    # Compared to tf.raw_ops.Pad, tf.raw_ops.PadV2 allows constant values other than 0.
+    x = context[node.inputs[0]]
+    pad = context[node.inputs[1]]
+    constant_val = context[node.inputs[2]]
+
+    if constant_val.shape != ():
+        raise NotImplementedError(
+            "TF `constant_values` in PadV2 op must be const scalar."
+        )
+    in_rank = x.rank
+    if in_rank > 5:
+        raise ValueError("Unsupported Pad configuration!")
+
+    if pad.val is None:
+        pad = mb.reshape(x=pad, shape=[-1])
+    else:
+        pad = pad.val.reshape(-1)
+
+    constant_val = constant_val.val
+    if constant_val == -_np.inf:
+        INT_MIN = -_np.iinfo(_np.int64).max - 1
+        constant_val = float(INT_MIN)
+
+    if constant_val == _np.inf:
+        INT_MAX = _np.iinfo(_np.int64).max
+        constant_val = float(INT_MAX)
+
+    x = mb.pad(x=x, pad=pad, name=node.name, mode="constant", constant_val=constant_val)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Relu(context, node):
+    x = context[node.inputs[0]]
+    x = mb.relu(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Reciprocal(context, node):
+    x = context[node.inputs[0]]
+    x = mb.inverse(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Relu6(context, node):
+    x = context[node.inputs[0]]
+    x = mb.relu6(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Reshape(context, node):
+    x = context[node.inputs[0]]
+    new_shape = context[node.inputs[1]]
+    x = mb.reshape(x=x, shape=new_shape, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["ReverseV2"])
+def Reverse(context, node):
+    x = context[node.inputs[0]]
+    axes = context[node.inputs[1]]
+    x = mb.reverse(x=x, axes=axes, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ReverseSequence(context, node):
+    x = context[node.inputs[0]]
+    lengths = context[node.inputs[1]]
+    seq_axis = node.attr.get("seq_dim")
+    batch_axis = node.attr.get("batch_dim")
+    x = mb.reverse_sequence(
+        x=x, lengths=lengths, seq_axis=seq_axis, batch_axis=batch_axis, name=node.name
+    )
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Transpose(context, node):
+    x = context[node.inputs[0]]
+    perm = context[node.inputs[1]]
+    x = mb.transpose(x=x, perm=perm, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Squeeze(context, node):
+    x = context[node.inputs[0]]
+    axes = node.attr.get("squeeze_dims", [])
+    if axes == []:
+        axes = None
+    x = mb.squeeze(x=x, axes=axes, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Multinomial(context, node):
+    x = context[node.inputs[0]]
+    size = context[node.inputs[1]]
+    x = mb.random_categorical(x=x, size=size, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Elu"])
+def ELU(context, node):
+    x = context[node.inputs[0]]
+    x = mb.elu(x=x, alpha=1.0, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Erf"])
+def ERF(context, node):
+    x = context[node.inputs[0]]
+    x = mb.erf(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["LeakyRelu"])
+def LeakyReLU(context, node):
+    x = context[node.inputs[0]]
+    alpha = node.attr["alpha"]
+    x = mb.leaky_relu(x=x, alpha=alpha, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Selu(context, node):
+    x = context[node.inputs[0]]
+    x = mb.elu(x=x, alpha=1.6732632423543772)
+    x = mb.mul(x=x, y=1.0507009873554805, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["SelectV2"])
+def Select(context, node):
+    cond = context[node.inputs[0]]
+    a = context[node.inputs[1]]
+    b = context[node.inputs[2]]
+
+    # broadcast vector type cond
+    rank_cond = cond.rank
+    rank_a = a.rank
+    if rank_cond == 1 and rank_a > 1:
+        axes = [-i - 1 for i in range(rank_a - rank_cond)]
+        cond = mb.expand_dims(x=cond, axes=axes)
+
+    if not types.is_bool(cond.dtype):
+        # cond must be bool type
+        cond = mb.cast(x=cond, dtype="bool")
+
+    x = mb.select(cond=cond, a=a, b=b, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sigmoid(context, node):
+    x = context[node.inputs[0]]
+    x = mb.sigmoid(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softplus(context, node):
+    x = context[node.inputs[0]]
+    x = mb.softplus(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softsign(context, node):
+    x = context[node.inputs[0]]
+    x = mb.softsign(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softmax(context, node):
+    logit = context[node.inputs[0]]
+    axis = node.attr.get("axis")
+    x = mb.softmax(x=logit, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def SpaceToBatchND(context, node):
+    # In tensorflow, the input tensor has the shape of (batch,) + spatial_shape + remaining_shape.
+    # The shape is treated as a combination of 3 components:
+    # 1. A single batch dimension
+    # 2. Spatial dimensions, with a length spatial_rank, which could be either 1 or 2. Also, spatial_rank
+    #    is equal to the length of block_shape
+    # 3. Remaining dimensions, with a length remaining_rank
+
+    # The logic of translating this op is as follows:
+    # 1. We first reshape the input to a canonical shape (rolling the remaining shape dimensions into a
+    #    single dimension): (batch,) + spatial_shape + (R), where R = remaining_dim_1 * ... * remaining_dim_n
+    # 2. We support rank 1 and rank 2 spatial shape:
+    #    (i) rank 1: We decompose the SpaceToBatch into small basic ops.
+    #    (ii) rank 2: We directly use the built in space_to_batch op.
+    #    The output would have shape (batch_new,) + spatial_shape_new + (R)
+    # 3. We transform the tensor back, by unrolling the remaining shape:
+    #    (B_new,) + spatial_shape_new + remaining_shape
+
+    x = context[node.inputs[0]]
+    block_shape = context[node.inputs[1]].val
+    paddings = context[node.inputs[2]].val
+
+    # This check must run before block_shape is used; otherwise a dynamic
+    # block_shape would raise a TypeError on len(block_shape) below.
+    if block_shape is None or paddings is None:
+        raise NotImplementedError(
+            "Dynamic block_shape and paddings are not supported for SpaceToBatchND!"
+        )
+
+    original_shape = mb.shape(x=x)
+
+    input_rank = x.rank
+    spatial_rank = len(block_shape)
+    remaining_rank = x.rank - 1 - spatial_rank
+    has_non_unity_remaining_dims = remaining_rank != 1
+
+    if has_non_unity_remaining_dims:
+        # Reshape the input tensor to shape [batch, spatial_shape, remaining_dim_1 * ... * remaining_dim_N]
+        x = _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank)
+
+    if spatial_rank >= 3:
+        raise NotImplementedError("Rank of spatial shape > 2 is not supported.")
+
+    if spatial_rank == 2:
+        # Tensor has shape [B, H, W, C], we can directly use the space_to_batch op by doing
+        # [B, H, W, C] -> transpose -> [B, C, H, W] -> space_to_batch -> [B_new, C, H_new, W_new] ->
+        # transpose -> [B_new, H_new, W_new, C]
+        x = mb.transpose(x=x, perm=[0, 3, 1, 2])
+        if tuple(paddings[0]) != (0, 0) or tuple(paddings[1]) != (0, 0):
+            x = mb.pad(x=x, pad=paddings.flatten(), mode="constant")
+        x = mb.space_to_batch(x=x, block_shape=block_shape, paddings=_np.zeros((2, 2), _np.int32))
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+
+    if spatial_rank == 1:
+        # In this case, we decompose space_to_batch into small basic ops
+        # [B, H, C] -> decomposed ops -> [B_new, H_new, C]
+
+        # expand padding to shape [3, 2]
+        new_paddings = _np.zeros(shape=(3, 2), dtype=_np.int32)
+        new_paddings[1] = paddings
+        paddings = new_paddings
+        needs_paddings = any(paddings.flatten())
+        if needs_paddings:
+            padded = mb.pad(x=x, pad=paddings.flatten(), mode="constant")
+        else:
+            padded = x
+
+        # padded_shape = [B, H_padded, C]
+        padded_shape = mb.shape(x=padded)
+
+        # reshape to [B, H_padded/block_shape, block_shape, C]
+        block_shape = block_shape[0]
+        batch_size = _value_at(padded_shape, 0)
+        spatial_dim = mb.real_div(x=_value_at(padded_shape, 1), y=block_shape)
+        spatial_dim = mb.cast(x=spatial_dim, dtype="int32")
+        remain_dim = _value_at(padded_shape, 2)
+        reshape_shape = mb.concat(values=[batch_size, spatial_dim, block_shape, remain_dim], axis=0)
+        reshaped_padded = mb.reshape(x=padded, shape=reshape_shape)
+
+        # permute the shape to: [block_shape, B, H_padded/block_shape, C]
+        permuted_reshaped_padded = mb.transpose(x=reshaped_padded, perm=[2, 0, 1, 3])
+
+        # reshape the tensor to [block_shape * B, H_padded/block_shape, C]
+        final_reshape_values = [mb.mul(x=batch_size, y=block_shape), spatial_dim, remain_dim]
+        final_shape = mb.concat(values=final_reshape_values, axis=0)
+        x = mb.reshape(x=permuted_reshaped_padded, shape=final_shape)
+
+    if has_non_unity_remaining_dims:
+        # Reshape the tensor from shape [batch_new, spatial_shape_new, remaining_dim_1 * ... * remaining_dim_N]
+        # back to shape [batch_new, spatial_shape_new, remaining_shape]
+        x = _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank)
+
+    context.add(node.name, mb.identity(x=x, name=node.name))
+
+
+@register_tf_op
+def SpaceToDepth(context, node):
+    x = context[node.inputs[0]]
+    block_size = node.attr.get("block_size")
+    data_format = node.attr.get("data_format", "NHWC")
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+        x = mb.space_to_depth(x=x, block_size=block_size)
+        x = _transpose_NCHW_to_NHWC(x, node.name)
+    else:
+        x = mb.space_to_depth(x=x, block_size=block_size, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Tanh(context, node):
+    x = context[node.inputs[0]]
+    x = mb.tanh(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["TopKV2"])
+def TopK(context, node):
+    x = context[node.inputs[0]]
+    k = context[node.inputs[1]].val
+    sort = node.attr["sorted"]
+
+    kwargs = {
+        "x": x,
+        "k": k,
+        "axis": -1,
+        "name": node.name
+    }
+
+    if is_current_opset_version_compatible_with(target.iOS16):
+        kwargs["sort"] = sort
+    elif not sort:
+        raise ValueError("For opset < iOS16, only sorted=True is supported for topk")
+
+    context.add(node.name, mb.topk(**kwargs))
+
+@register_tf_op(tf_alias=["InTopKV2"])
+def InTopK(context, node):
+    x = context[node.inputs[0]]
+    target = context[node.inputs[1]]
+    k = context[node.inputs[2]].val
+
+    _, class_num = x.shape
+    if not is_symbolic(class_num):
+        k = min(k, class_num)
+
+    _, indices = mb.topk(x=x, k=k, axis=-1)
+    target = mb.expand_dims(x=target, axes=[-1])
+    x = mb.equal(x=target, y=indices)
+    x = mb.cast(x=x, dtype="fp32")
+    x = mb.reduce_sum(x=x, axes=[-1], keep_dims=False)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Cumsum(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    exclusive = node.attr.get("exclusive", False)
+    reverse = node.attr.get("reverse", False)
+    x = mb.cumsum(x=x, axis=axis, exclusive=exclusive, reverse=reverse, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Gather(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    axis = 0
+    x = mb.gather(x=x, indices=indices, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+def _perform_gather_with_batch_dims(x, indices, batch_dims, gather_func, func_args, name):
+    """
+    A utility function to compute gather and gather_nd with batch_dims
+    """
+    # (Step 1)
+    # Reshape x, indices with shape
+    # x: [batch_1, ..., batch_n, *remaining_x_shape]
+    # indices: [batch_1, ..., batch_n, *remaining_indices_shape]
+    # into shape
+    # x_reshape: [prod(batch_1, ..., batch_n), *remaining_x_shape]
+    # indices_reshape: [prod(batch_1, ..., batch_n), *remaining_indices_shape]
+    msg = ("The implementation of gather/gather_nd for iOS15 and older is not efficient. We highly recommend "
+           "setting minimum_deployment_target=coremltools.target.iOS16 in the coremltools.convert() function."
+    )
+    logger.warning(msg)
+    x_shape = mb.shape(x=x)
+    indices_shape = mb.shape(x=indices)
+    batch_shape = mb.gather(x=x_shape, indices=_np.array(range(batch_dims)), axis=0)
+    batch_prod = mb.reduce_prod(x=batch_shape, axes=[0], keep_dims=True)
+    x_remaining_shape = mb.gather(x=x_shape, indices=_np.array(range(batch_dims, x.rank)), axis=0)
+    indices_remaining_shape = mb.gather(x=indices_shape, indices=_np.array(range(batch_dims, indices.rank)), axis=0)
+    new_x_shape = mb.concat(values=[batch_prod, x_remaining_shape], axis=0)
+    new_indices_shape = mb.concat(values=[batch_prod, indices_remaining_shape], axis=0)
+    x_reshape = mb.reshape(x=x, shape=new_x_shape)
+    indices_reshape = mb.reshape(x=indices, shape=new_indices_shape)
+
+    # (Step 2)
+    # We iterate through the batch dimension, and compute the gather individually for each batch.
+    # All results are stacked into a tensor with shape [prod(batch_1, ..., batch_n), *remaining_result_shape]
+    res = []
+    if batch_prod.val is None:
+        raise ValueError("batch dimension must be known at compile time")
+    for i in range(batch_prod.val[0]):
+        temp_x = mb.gather(x=x_reshape, indices=[i], axis=0)
+        temp_indices = mb.gather(x=indices_reshape, indices=[i], axis=0)
+        temp_x = mb.squeeze(x=temp_x, axes=[0])
+        temp_indices = mb.squeeze(x=temp_indices, axes=[0])
+        func_args.update({"x": temp_x, "indices": temp_indices})
+        temp = gather_func(**func_args)
+        res.append(temp)
+    res = mb.stack(values=res, axis=0)
+
+    # (Step 3)
+    # Finally, we reshape the result to shape [batch_1, ..., batch_n, *remaining_result_shape]
+    res_shape = mb.shape(x=res)
+    res_remaining_shape = mb.gather(x=res_shape, indices=_np.array(range(1, res_shape.shape[0])), axis=0)
+    res_new_shape = mb.concat(values=[batch_shape, res_remaining_shape], axis=0)
+    return mb.reshape(x=res, shape=res_new_shape, name=name)
+
+
+@register_tf_op
+def GatherV2(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    axis = context[node.inputs[2]].val
+    batch_dims = node.attr.get("batch_dims", 0)
+    if is_current_opset_version_compatible_with(target.iOS16):
+        # For iOS16 and above, we can directly use the batch_dims argument
+        x = mb.gather(x=x, indices=indices, axis=axis, batch_dims=batch_dims, name=node.name)
+    else:
+        # For iOS15 or below, we have to manually compute it
+        if batch_dims == 0:
+            x = mb.gather(x=x, indices=indices, axis=axis, name=node.name)
+        else:
+            func_args = {"axis": axis - batch_dims}
+            x = _perform_gather_with_batch_dims(x, indices, batch_dims, mb.gather, func_args, node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def GatherNd(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    batch_dims = node.attr.get("batch_dims", 0)
+    if is_current_opset_version_compatible_with(target.iOS16):
+        # For iOS16 and above, we can directly use the batch_dims argument
+        x = mb.gather_nd(x=x, indices=indices, batch_dims=batch_dims, name=node.name)
+    else:
+        if batch_dims == 0:
+            x = mb.gather_nd(x=x, indices=indices, name=node.name)
+        else:
+            x = _perform_gather_with_batch_dims(x, indices, batch_dims, mb.gather_nd, {}, node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Tile(context, node):
+    x = context[node.inputs[0]]
+    reps = context[node.inputs[1]]
+    x = mb.tile(x=x, reps=reps, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Where(context, node):
+    if len(node.inputs) > 1:
+        raise NotImplementedError('tf.where with x,y will be supported by '
+                                  'MIL::select in the future')
+    x =
context[node.inputs[0]] + x = mb.non_zero(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def SquaredDifference(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.sub(x=x, y=y, name=node.name + '_sub') + x = mb.square(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv2DBackpropInput(context, node): + # Output shape: [N, H_out, W_out, C_out] + output_shape = context[node.inputs[0]].val + # Weight shape: [H, W, C_out, C_in] + W_hwoi = context[node.inputs[1]] + W_iohw = mb.transpose(x=W_hwoi, perm=[3, 2, 0, 1]) + # Input shape: [N, H_in, W_in, C_in] + x = context[node.inputs[2]] + + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + pad_type = node.attr.get("padding") + + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + + pad_type = pad_type.lower() + # CoreML expects input to be in NCHW format + # Transpose input to NCHW format + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + if output_shape is not None: + output_shape = [output_shape[0], output_shape[3], + output_shape[1], output_shape[2]] + + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NHWC" else node.name + # Pass output shape provided above + x = mb.conv_transpose( + x=x, + weight=W_iohw, + pad_type=pad_type, + output_shape=output_shape, + strides=HW_strides, + dilations=HW_dilations, + name=conv_name, + ) + + # Convert NCHW output back to NHWC format + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Range(context, node): + start = context[node.inputs[0]] + end = context[node.inputs[1]] + step = context[node.inputs[2]] + x = mb.range_1d(start=start, end=end, step=step, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def RandomUniform(context, node): + shape = context[node.inputs[0]] + seed = node.attr["seed"] + x = mb.random_uniform(shape=shape, seed=seed, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def RandomStandardNormal(context, node): + shape = context[node.inputs[0]] + seed = node.attr["seed"] + x = mb.random_normal(shape=shape, seed=seed, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def OneHot(context, node): + indices = context[node.inputs[0]] + depth = context[node.inputs[1]] + on_value = context[node.inputs[2]] + off_value = context[node.inputs[3]] + axis = node.attr.get("axis", -1) + x = mb.one_hot( + indices=indices, + one_hot_vector_size=depth, + axis=axis, + on_value=on_value, + off_value=off_value, + name=node.name, + ) + context.add(node.name, x) + + +def _get_non_maximum_supression(context, node, iou_threshold_override=None, score_threshold_override=None): + """ + The helper function returns the outputs from mb.non_maximum_suppression, + along with the number of boxes and the maximum number of boxes. 
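+
+    Added note (not in the original docstring): TF supplies each box as a corner
+    pair (y1, x1, y2, x2), while MIL NMS expects CENTER_SIZE_WIDTH_FIRST. A
+    worked example of the conversion performed below, for a single box:
+
+        (y1, x1, y2, x2) = (0.2, 0.1, 0.6, 0.5)
+        width    = |x2 - x1| = 0.4
+        height   = |y2 - y1| = 0.4
+        center_x = (x1 + x2) / 2 = 0.3
+        center_y = (y1 + y2) / 2 = 0.4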
+ """ + boxes = context[node.inputs[0]] + scores = context[node.inputs[1]] + max_boxes = context[node.inputs[2]] + iou_threshold = iou_threshold_override or context[node.inputs[3]] + score_threshold = score_threshold_override or context[node.inputs[4]] + + # The boxes' coordinates in Tensorflow is (y1, x1, y2, x2) where (y1, x1) and (y2, x2) are the + # coordinates of diagonal pair of box corners. However, MIL NMS expects CENTER_SIZE_WIDTH_FIRST + # format, which is (x, y, width, height) where (x, y) is the center coordinate. + y1, x1, y2, x2 = mb.split(x=boxes, num_splits=4, axis=-1) + # As the input coordinates could be any diagonal pair of box corners, it's not guaranteed that + # x2 > x1 nor y2 > y1. So we need to use abs to get width/height, and (x1+x2)/2 to get center. + width = mb.abs(x=mb.sub(x=x2, y=x1)) + height = mb.abs(x=mb.sub(x=y2, y=y1)) + center_x = mb.real_div(x=mb.add(x=x1, y=x2), y=2.0) + center_y = mb.real_div(x=mb.add(x=y1, y=y2), y=2.0) + boxes = mb.concat(values=[center_x, center_y, width, height], axis=-1) + + if score_threshold.val == float("-inf"): + # TensorFlow's default value for score_threshold, Core ML does not + # have float('-inf') support, converted to minimum float32 instead + score_threshold = -3.4e38 + + boxes = mb.expand_dims(x=boxes, axes=[0]) + scores = mb.expand_dims(x=scores, axes=[0, -1]) + coordinates, scores, indices, valid_outputs = mb.non_maximum_suppression( + boxes=boxes, + scores=scores, + max_boxes=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + + # The results from MIL NMS op are padded to max_boxes. We need to extract the valid part for TF. + # Notice that the batch dim and class num dim also need to be squeezed. + valid_outputs = mb.squeeze(x=valid_outputs, axes=[0]) + range = mb.range_1d(end=valid_outputs, start=0, step=1) + coordinates = mb.squeeze(x=coordinates, axes=[0]) + valid_coordinates = mb.gather(x=coordinates, indices=range, axis=0) + scores = mb.squeeze(x=scores, axes=[0, -1]) + valid_scores = mb.gather(x=scores, indices=range, axis=0) + indices = mb.squeeze(x=indices, axes=[0]) + valid_indices = mb.cast( + x=mb.gather(x=mb.cast(x=indices, dtype="fp32"), indices=range, axis=0), + dtype="int32", + name=node.name, + ) + + return valid_coordinates, valid_scores, valid_indices, valid_outputs + + +@register_tf_op(tf_alias=["NonMaxSuppressionV3"]) +def NonMaxSuppression(context, node): + _, _, valid_indices, valid_outputs = _get_non_maximum_supression(context, node) + context.add(node.name, valid_indices) + + +@register_tf_op +def NonMaxSuppressionV5(context, node): + """ + Different from NonMaxSuppression/NonMaxSuppressionV3, which only returns the indices of the selected boxes, + NonMaxSuppressionV5 returns all indices, scores and number of the selected boxes. + """ + soft_nms_sigma = context[node.inputs[5]].val + iou_threshold_override = None + score_threshold_override = None + if soft_nms_sigma != 0: + # fallback to "hard" NMS with sensible defaults + iou_threshold_override = types.fp32(0.5) + score_threshold_override = types.fp32(float("-inf")) + logger.warning("NonMaxSuppressionV5 with soft_nms_sigma != 0 not supported. 
" + "Setting soft_nms_sigma to zero.") + + _, valid_scores, valid_indices, valid_outputs = _get_non_maximum_supression( + context, node, iou_threshold_override=iou_threshold_override, score_threshold_override=score_threshold_override + ) + res = [valid_indices, valid_scores, valid_outputs] + context.add(node.name, res) + + +@register_tf_op +def Shape(context, node): + x = context[node.inputs[0]] + if types.is_complex(x.dtype): + x = mb.complex_shape(x=x, name=node.name) + else: + x = mb.shape(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def ResizeNearestNeighbor(context, node): + # "ResizeNearestNeighbor" op in TF is always in the channel last mode + # instead of upsample factor, it uses output size, which is the second input + x = context[node.inputs[0]] + + input_shape = x.shape # (N,Hin,Win,C) + if len(input_shape) != 4: + raise ValueError('"ResizeNearestNeighbor" op: input rank is not 4') + + if len(context[node.inputs[1]].shape) != 1: + raise ValueError('"ResizeNearestNeighbor" op: the second input, must have rank 1') + + if context[node.inputs[1]].shape[0] != 2: + raise ValueError( + '"ResizeNearestNeighbor" op: the second input, which is the output size, must have 2 elements' + ) + Hout, Wout = None, None + if context[node.inputs[1]].val is None: + # for the dynamic input shape case, + # context[node.inputs[1]] is a mul(x=input_shape, y=scaling_factor) op. + scaling_factor_h = context[node.inputs[1]].op.y.val[0] + scaling_factor_w = context[node.inputs[1]].op.y.val[1] + else: + Hin, Win = input_shape[1], input_shape[2] + Hout, Wout = context[node.inputs[1]].val + scaling_factor_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin + scaling_factor_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win + + if scaling_factor_h < 1 and scaling_factor_w < 1: + ResizeBilinear(context, node) + return + + # first transpose to from channel last to channel first format for coreml + x = _transpose_NHWC_to_NCHW(x) + + align_corners = node.attr.get("align_corners", False) + half_pixel_centers = node.attr.get("half_pixel_centers", False) + + # add either the resize or the upsample layer + if align_corners is False and half_pixel_centers is False: + x = mb.upsample_nearest_neighbor( + x=x, + scale_factor_height=scaling_factor_h, + scale_factor_width=scaling_factor_w, + name=node.name + "_channel_first_upsample", + ) + elif align_corners is False and half_pixel_centers is True: + # if output size can be determined at compile time, + # we call the core op resize_nearest_neighbor, + # otherwise we use upsample_nearest_neighbor for approximation. 
+        # rdar://75204549 (resize_nearest_neighbor needs to support dynamic input shape)
+        if Hout is not None and Wout is not None:
+            x = mb.resize_nearest_neighbor(
+                x=x,
+                target_size_height=Hout,
+                target_size_width=Wout,
+                name=node.name + "_channel_first_resize",
+            )
+        else:
+            logger.warning('Using upsample_nearest_neighbor to approximate resize_nearest_neighbor.')
+            x = mb.upsample_nearest_neighbor(
+                x=x,
+                scale_factor_height=scaling_factor_h,
+                scale_factor_width=scaling_factor_w,
+                name=node.name + "_channel_first_upsample",
+            )
+
+    else:
+        raise NotImplementedError(
+            "ResizeNearestNeighbor op with align_corners={} and half_pixel_centers={} not supported".format(
+                align_corners, half_pixel_centers
+            )
+        )
+
+    # transpose again
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ResizeBilinear(context, node):
+    # "ResizeBilinear" op in TF is always in the channel-last mode.
+    # The second input is the output size.
+
+    x = context[node.inputs[0]]
+    input_shape = x.shape  # (N,Hin,Win,C)
+    if len(input_shape) != 4:
+        raise ValueError('"ResizeBilinear" op: input rank is not 4')
+
+    if len(context[node.inputs[1]].shape) != 1:
+        raise ValueError('"ResizeBilinear" op: the second input must have rank 1')
+
+    if context[node.inputs[1]].shape[0] != 2:
+        raise ValueError(
+            '"ResizeBilinear" op: the second input, which is the output size, must have 2 elements'
+        )
+
+    align_corners = node.attr.get("align_corners", False)
+    half_pixel_centers = node.attr.get("half_pixel_centers", False)
+
+    if align_corners and half_pixel_centers:
+        # we should not come here since TF does not support align_corners=True and half_pixel_centers=True
+        raise ValueError(
+            '"ResizeBilinear" op: "align_corners" and "half_pixel_centers" are both True and this mode is not supported'
+        )
+
+    # In iOS16, we can support dynamic shape + any combination of align_corners and half_pixel_centers,
+    # if the output_shape comes from a pattern of input_shape * (h_scale, w_scale)
+    if is_current_opset_version_compatible_with(target.iOS16) and context[node.inputs[1]].val is None:
+        output_shape = context[node.inputs[1]]
+        if output_shape.op.op_type == "mul":
+            scale_factor_height = context[node.inputs[1]].op.y.val[0]
+            scale_factor_width = context[node.inputs[1]].op.y.val[1]
+            x = _transpose_NHWC_to_NCHW(x)
+            x = mb.upsample_bilinear(
+                x=x,
+                scale_factor_height=scale_factor_height,
+                scale_factor_width=scale_factor_width,
+                align_corners=align_corners,
+                half_pixel_centers=half_pixel_centers,
+            )
+            x = _transpose_NCHW_to_NHWC(x, node.name)
+            context.add(node.name, x)
+            return
+
+    if (align_corners and not half_pixel_centers) or \
+        (not align_corners and not half_pixel_centers):
+        # the output shape needs to be known at compile time
+        if context[node.inputs[1]].val is None:
+            raise ValueError(
+                '"ResizeBilinear" op: the second input, which is the output size, must be known statically'
+            )
+
+        Hout, Wout = context[node.inputs[1]].val
+
+        if not (isinstance(Hout, (_np.int32, _np.int64)) and isinstance(Wout, (_np.int32, _np.int64))):
+            raise ValueError(
+                '"ResizeBilinear" op: the second input, which is the output size, must have elements of type int32 or int64'
+            )
+
+    # first transpose from channel-last to channel-first format for Core ML
+    x = _transpose_NHWC_to_NCHW(x)
+
+    # add either the resize_bilinear layer or the upsample layer
+
+    # [align_corners = True, half_pixel_centers = False]
+    if align_corners and not half_pixel_centers:
+        x = mb.resize_bilinear(
+            x=x,
target_size_height=Hout, + target_size_width=Wout, + sampling_mode="STRICT_ALIGN_CORNERS", + name=node.name + "_channel_first_resize_bilinear", + ) + + # [align_corners = False, half_pixel_centers = False] + elif not align_corners and not half_pixel_centers: + x = mb.resize_bilinear( + x=x, + target_size_height=Hout, + target_size_width=Wout, + sampling_mode="DEFAULT", + name=node.name + "_channel_first_resize_bilinear", + ) + + # [align_corners = False, half_pixel_centers = True] + elif not align_corners and half_pixel_centers: + if context[node.inputs[1]].val is None: + # for the dynamic input shape case, + # context[node.inputs[1]] is a mul(x=input_shape, y=scaling_factor) op. + if context[node.inputs[1]].op.op_type != "mul": + raise NotImplementedError("Cannot determine the scale factor for the bilinear resize layer.") + scale_factor_height = context[node.inputs[1]].op.y.val[0] + scale_factor_width = context[node.inputs[1]].op.y.val[1] + else: + Hin, Win = input_shape[1], input_shape[2] + Hout, Wout = context[node.inputs[1]].val + # check if the output size divide the input size, + # if not, then cast the scale factor to float type. + scale_factor_height = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin + scale_factor_width = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win + + x = mb.upsample_bilinear( + x=x, + scale_factor_height=scale_factor_height, + scale_factor_width=scale_factor_width, + align_corners=False, + name=node.name + "_channel_first_upsample_bilinear", + ) + + # transpose again + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def make_tuple(context, node): + res = tuple([context[in_name] for in_name in node.inputs]) + context.add(node.name, res) + + +@register_tf_op +def function_entry(context, node): + if context.get_func_inputs() is None: + msg = ( + "function_entry requires function inputs stored in " + + "context.curr_func_inputs" + ) + raise ValueError(msg) + context.add(node.name, context.get_func_inputs()) + + +@register_tf_op(tf_alias=["while"]) +def While(context, node): + # TF while will never have break statement, because break can always be + # transformed into while and condition. Example: + # + # while pred: + # a = op1(...) + # if a == 0: + # break + # b = op2(...) + # + # is equivalent to + # + # while pred and not break_a: + # a = op1(...) + # break_a = a == 0 + # if not break_a: + # b = op2(...) + + # node.inputs[0] == 'make_tuple_X' (always a make_tuple) + loop_vars = context[node.inputs[0]] # python tuple of Vars + cond_graph = context.get_graph(node.attr["cond_function"]) + body_graph = context.get_graph(node.attr["body_function"]) + + def cond(*loop_vars): + context.stack_func_inputs(loop_vars) + + # convert_graph uses context to convert cond_graph. During conversion + # it constructs operations (mb.some_op). Note that cond(*loop_vars) is + # only evaluated inside while_loop's type_inference(), not here. In + # other words, we use python's deferred function evaluation to defer + # the SSA block construction until inside while_loop Operation. 
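+        # A rough sketch of the deferred-evaluation pattern (hypothetical loop,
+        # added commentary):
+        #
+        #     def cond(i):
+        #         return mb.less(x=i, y=10.0)
+        #
+        #     def body(i):
+        #         return mb.add(x=i, y=1.0)
+        #
+        #     out = mb.while_loop(_cond=cond, _body=body, loop_vars=(i0,))
+        #
+        # Neither callable runs at definition time; while_loop invokes them while
+        # building its SSA blocks, which is when convert_graph below executes.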
+ res = convert_graph(context, cond_graph) + # Done with translating the function + context.unstack_func_inputs() + return res + + def body(*loop_vars): + context.stack_func_inputs(loop_vars) + res = convert_graph(context, body_graph) + # Done with translating the function + context.unstack_func_inputs() + return res + + x = mb.while_loop(_cond=cond, _body=body, loop_vars=loop_vars, name=node.name) + # wraps x as tuple for get_tuple that always follow the while node. + if not isinstance(x, (tuple, list)): + x = (x,) + context.add(node.name, x) + + +@register_tf_op +def iff(context, node): + pred = context[node.inputs[0]] + + # this is always a tensor, as TF uses one iff op for each returned value. + # + # Example TF program: + # + # x = tf.placeholder(tf.float32, shape=(1,)) + # y = tf.placeholder(tf.float32, shape=(1,)) + # z = tf.multiply(x, y) + # pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + # def true_fn(): return tf.add(x, z), x + # def false_fn(): return tf.square(y), z + # res = tf.cond(pred, true_fn, false_fn) + # + # There will be 2 iffs: + # + # iff('cond/pred_id', 'cond/Add', 'cond/Square') + # iff('cond/pred_id', 'cond/Add/Switch', 'cond/Switch_1') + # + # where + # 'cond/pred_id': pred + # 'cond/Add': tf.add(x, z) + # 'cond/Square': tf.square(y) + # 'cond/Add/Switch': x + # 'cond/Switch_1': z + # + # And both branches are executed, and one of the results will be + # discarded at iff nodes. + # + # Note that the above program would translate to two cond ops, each with + # two blocks. + true_output_var = context[node.inputs[1]] + false_output_var = context[node.inputs[2]] + + def true_fn(): + return mb.identity(x=true_output_var) + + def false_fn(): + return mb.identity(x=false_output_var) + + x = mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Concat(context, node): + values = [context[input] for input in node.inputs[1:]] + axis = context[node.inputs[0]] + x = mb.concat(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def ConcatV2(context, node): + values = [context[input] for input in node.inputs[:-1]] + axis = context[node.inputs[-1]] + x = mb.concat(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Pack(context, node): + values = [context[name] for name in node.inputs] + axis = node.attr["axis"] + if axis < 0: + # TF axis = -1 creates new dim at the end + axis += values[0].rank + 1 + if len(values) == 1: + # for example: + # y = tf.raw_ops.Pack(values=[2], axis=0). 
+ # or y = tf.raw_ops.Pack(values=[tf.constant([1,2])], axis=0) + input_type = values[0].sym_type + if _is_scalar(input_type): + x = mb.mul(x=_np.array([1], dtype=_np.int32), y=values[0], name=node.name) + else: + x = mb.expand_dims(x=values[0], axes=[axis], name=node.name) + else: + if all([_is_scalar(input.sym_type) for input in values]): + x = mb.concat(values=values, axis=axis, name=node.name) + else: + x = mb.stack(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Unpack(context, node): + x = context[node.inputs[0]] + axis = int(node.attr["axis"]) + num_splits = node.attr.get("num", None) + if num_splits is None: + num_splits = x.shape[axis] + if num_splits == 1: + y = [x] + else: + y = mb.split(x=x, num_splits=num_splits, axis=axis, name=node.name + "_unsqueezed") + output_vars = [] + for i in range(num_splits): + output_vars.append( + mb.squeeze(x=y[i], axes=[axis], name=node.name + ":{}".format(i)) + ) + + context.add(node.name, output_vars) + + +@register_tf_op +def Split(context, node): + axis = context[node.inputs[0]] + x = context[node.inputs[1]] + if "num_split" not in node.attr: + raise ValueError("num_splits not found in TF op {}".format(node.name)) + num_splits = node.attr["num_split"] + if num_splits == 1: + if len(node.outputs) == 0: + x = mb.mul(x=x, y=1.0, name=node.name) + context.add(node.name, x) + else: + # Don't change tfssa. Just make downstream ops reference the pre-identity op. + context.add(node.name, [x], is_new_var=False) + else: + x = mb.split(x=x, num_splits=num_splits, axis=axis, name=node.name) + context.add(node.name, x) + # TODO : If tf.split output is returned, there's no + # get_tuple nodes. Some graph pass is needed. Example: + # + # x = tf.placeholder(tf.float32, shape=input_shape1) + # res = tf.split(x, 3, axis=0) + # + # res are ['split:0', 'split:1', 'split'] + # + # but node.outputs == ['gto_1', 'gto_2', 'gto_3'] + + +@register_tf_op +def SplitV(context, node): + x = context[node.inputs[0]] + split_sizes = context[node.inputs[1]] + axis = context[node.inputs[2]] + if "num_split" not in node.attr: + raise ValueError("num_splits not found in TF op {}".format(node.name)) + num_splits = node.attr["num_split"] + if num_splits == 1: + Identity(context, node) + else: + x = mb.split( + x=x, + num_splits=num_splits, + split_sizes=split_sizes, + axis=axis, + name=node.name, + ) + context.add(node.name, x) + + +@register_tf_op +def ScatterNd(context, node): + indices = context[node.inputs[0]] + updates = context[node.inputs[1]] + shape = context[node.inputs[2]] + x = mb.fill(shape=shape, value=types.nptype_from_builtin(updates.dtype)(0)) + x = mb.scatter_nd(data=x, indices=indices, updates=updates, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def TensorScatterAdd(context, node): + tensor, indices, updates, = [context[name] for name in node.inputs] + output = mb.scatter_nd(data=tensor, indices=indices, updates=updates, mode="add", name=node.name) + context.add(node.name, output) + + +@register_tf_op +def ZerosLike(context, node): + x = context[node.inputs[0]] + if x.rank == 0: + np_type = types.nptype_from_builtin(x.sym_type) + x = mb.const(val=np_type(0), name=node.name) + else: + np_type = types.nptype_from_builtin(x.sym_type.get_primitive()) + x = mb.fill(shape=mb.shape(x=x), value=np_type(0), name=node.name) + context.add(node.name, x) + + +@register_tf_op +def IsFinite(context, node): + x = context[node.inputs[0]] + if any_symbolic(x.shape): + x_shape = mb.shape(x=x) + else: + x_shape = 
[1] if x.shape == () else x.shape
+    max_tensor = mb.fill(shape=x_shape, value=_np.finfo(_np.float32).max)
+    min_tensor = mb.fill(shape=x_shape, value=_np.finfo(_np.float32).min)
+    less_than = mb.less_equal(x=x, y=max_tensor)
+    greater_than = mb.greater_equal(x=x, y=min_tensor)
+    x = mb.logical_and(x=less_than, y=greater_than, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def CropAndResize(context, node):
+    x = context[node.inputs[0]]
+    input_shape = x.shape  # (B, h_in, w_in, C)
+    if len(input_shape) != 4:
+        raise ValueError(
+            '"CropResize" op: expected input rank 4, got {}'.format(x.rank)
+        )
+    Hin, Win = input_shape[1:3]
+
+    const_box_info = True
+    if context[node.inputs[1]].val is None or context[node.inputs[2]].val is None:
+        const_box_info = False
+
+    crop_size = context[node.inputs[3]].val
+    method = node.attr.get("method", "bilinear")
+    pad_value = node.attr.get("extrapolation_value", 0.0)
+
+    # Core ML expects the batch index to be packed together with each box
+    if const_box_info:
+        boxes = context[node.inputs[1]].val
+        box_indices = context[node.inputs[2]].val
+        box_indices = _np.expand_dims(box_indices, axis=1)
+        boxes = _np.concatenate([box_indices, boxes], axis=1)
+        # CoreML expects boxes/ROI in
+        # [N, 1, 5, 1, 1] format
+        boxes = boxes.reshape(boxes.shape[0], 1, boxes.shape[1], 1, 1)
+    else:
+        box_indices = context[node.inputs[2]]
+        boxes = context[node.inputs[1]]
+        box_indices = mb.expand_dims(x=box_indices, axes=[1])
+        if box_indices.dtype != boxes.dtype:
+            box_indices = mb.cast(x=box_indices, dtype=types.builtin_to_string(boxes.dtype))
+        boxes = mb.concat(values=(box_indices, boxes), axis=1)
+        # TODO: Dynamic rank: Use GetShape and select indices dynamically
+        boxes = mb.reshape(x=boxes, shape=[boxes.shape[0], 1, boxes.shape[1], 1, 1])
+
+    # Get Height and Width of crop
+    h_out, w_out = crop_size[0], crop_size[1]
+
+    # TF `nearest` mode not supported
+    method_map = {"bilinear": "ALIGN_CORNERS"}
+    if method not in method_map:
+        raise ValueError(
+            "CropResize op: Unsupported method {}. Supports {}".format(
+                method, method_map.keys()
+            )
+        )
+    method = method_map[method]
+
+    # TF input format: [B, h_in, w_in, C]
+    # CoreML input format: [B, C, h_in, w_in]
+    x = _transpose_NHWC_to_NCHW(x)
+
+    # Crop Resize
+    args = {
+        "x": x,
+        "roi": boxes,
+        "target_height": h_out,
+        "target_width": w_out,
+        "normalized_coordinates": True,
+        "spatial_scale": 1.0,
+        "box_coordinate_mode": "CORNERS_HEIGHT_FIRST",
+        "sampling_mode": method,
+    }
+    if is_current_opset_version_compatible_with(target.iOS16):
+        args["pad_value"] = pad_value
+    else:
+        if pad_value != 0.0:
+            msg = (
+                "For iOS15 or older, only extrapolation_value=0.0 is supported for the TF CropAndResize op. "
+                "Got {}"
+            ).format(pad_value)
+            raise ValueError(msg)
+    x = mb.crop_resize(**args)
+
+    # CoreML output format: [N, 1, C, h_out, w_out]
+    # TF output format: [N, h_out, w_out, C]
+    x = mb.squeeze(x=x, axes=[1])
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def TensorArrayV3(context, node):
+    if "infer_shape" in node.attr:
+        if not node.attr["infer_shape"]:
+            raise ValueError("Only fixed size TensorArray is supported")
+
+    dynamic_length = node.attr.get("dynamic_size", True)
+    elem_shape = node.attr.get("element_shape", None)
+    size = node.attr.get("size", None)
+    if size is None:
+        size = context[node.inputs[0]]
+
+    if size.val is None:
+        init_length = size
+    else:
+        init_length = size.val
+        if init_length == 0:
+            # Dynamic list.
Use 1 as init_length + init_length = 1 + + builtin_dtype = node.attr["dtype"] + dtype_str = types.builtin_to_string(builtin_dtype) + if elem_shape is not None and not -1 in elem_shape: + ls = mb.make_list( + init_length=init_length, + dtype=dtype_str, + elem_shape=elem_shape, + dynamic_length=dynamic_length, + name=node.name, + ) + else: + ls = mb.tf_make_list( + init_length=init_length, + dtype=dtype_str, + dynamic_length=dynamic_length, + name=node.name, + ) + context.add(node.name, ls) + + +@register_tf_op +def TensorArrayWriteV3(context, node): + index = context[node.inputs[0]] + new_val = context[node.inputs[1]] + ls = context[node.inputs[2]] + new_list = mb.list_write(ls=ls, index=index, value=new_val, name=node.name) + context.add(node.name, new_list) + + +@register_tf_op +def TensorArraySizeV3(context, node): + ls = context[node.inputs[0]] + length = mb.list_length(ls=ls, name=node.name) + context.add(node.name, length) + + +@register_tf_op +def TensorArrayGatherV3(context, node): + indices = context[node.inputs[0]] + ls = context[node.inputs[1]] + tensor = mb.list_gather(ls=ls, indices=indices, name=node.name) + context.add(node.name, tensor) + + +@register_tf_op +def TensorArrayReadV3(context, node): + idx = context[node.inputs[0]] + ls = context[node.inputs[1]] + ls = mb.list_read(ls=ls, index=idx, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorArrayScatterV3(context, node): + indices = context[node.inputs[0]] + value = context[node.inputs[1]] + ls = context[node.inputs[2]] + ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def BroadcastTo(context, node): + x = context[node.inputs[0]] + shape = context[node.inputs[1]] + if shape.val is None: # dynamic shape + raise NotImplementedError("dynamic shape not yet supported") + else: # static shape + target_shape = tuple(shape.val) + broadcast_shape = broadcast_shapes(x.shape, target_shape) + if target_shape != broadcast_shape: + msg = "shapes are not broadcastable: {} vs. {}" + raise ValueError(msg.format(x.shape, target_shape)) + target_rank = len(target_shape) + if x.rank != target_rank: + axes = [i for i in range(target_rank - x.rank)] + x = mb.expand_dims(x=x, axes=axes) + reps = [1] * target_rank + for i in range(target_rank): + reps[i] = target_shape[i] // x.shape[i] + + x = mb.tile(x=x, reps=reps, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def get_global(context, node): + # Design comment: This is only works if variable doesn't cross block + # boundary (e.g. 
while_loop, cond, function) + variable_name = node.attr["variable"] + x = context[variable_name] # This must've been set by set_global + context.add(node.name, x, is_new_var=False) + + +@register_tf_op +def set_global(context, node): + x = context[node.inputs[0]] + variable_name = node.attr["variable"] + context.add(variable_name, x, is_new_var=False) + + +def _get_const_or_raise(variable): + if variable.val is None: + raise ValueError("Var {} must be const".format(variable.name)) + return variable.val + + +@register_tf_op +def LSTMBlockCell(context, node): + x = context[node.inputs[0]] # [batch, input_dim] + c_prev = context[node.inputs[1]] # [b, hidden_dim] + h_prev = context[node.inputs[2]] # [b, hidden_dim] + # W layout is ifco + W = context[node.inputs[3]] # [input_dim + hidden_dim, 4*hidden_dim] + + kwargs = {} + use_peephole = node.attr["use_peephole"] + if use_peephole: + peep_i = context[node.inputs[4]] # [hidden_dim,] + peep_f = context[node.inputs[5]] # [hidden_dim,] + peep_o = context[node.inputs[6]] # [hidden_dim,] + kwargs["weight_peep_i"] = peep_i + kwargs["weight_peep_f"] = peep_f + kwargs["weight_peep_o"] = peep_o + + bias = context[node.inputs[7]] # [4*hidden_dim,] + + forget_bias = node.attr["forget_bias"] + cell_clip = None + if node.attr["cell_clip"] is not None and node.attr["cell_clip"] > 0: + cell_clip = node.attr["cell_clip"] + + res = mb.tf_lstm_block_cell( + x=x, + c_prev=c_prev, + h_prev=h_prev, + weight=W, + bias=bias, + forget_bias=forget_bias, + cell_clip=cell_clip, + use_peephole=use_peephole, + name=node.name, + **kwargs + ) + context.add(node.name, res) + +@register_tf_op(tf_alias=["BlockLSTMV2"]) +def BlockLSTM(context, node): + # BlockLSTM: https://www.tensorflow.org/api_docs/python/tf/raw_ops/BlockLSTM + # BlockLSTMV2: https://www.tensorflow.org/api_docs/python/tf/raw_ops/BlockLSTMV2 + seq_len = context[node.inputs[0]] # int + x = context[node.inputs[1]] # [padded_len, batch, input_dim] + init_c = context[node.inputs[2]] # [1, hidden_dim] + init_h = context[node.inputs[3]] # [1, hidden_dim] + # BlockLSTM: icfo format, BlockLSTMV2: ifco format + weight = context[node.inputs[4]] # [input_dim + hidden_dim, 4*hidden_dim] + + kwargs = {} + use_peephole = node.attr["use_peephole"] + if use_peephole: + peep_i = context[node.inputs[5]] # [hidden_dim,] + peep_f = context[node.inputs[6]] # [hidden_dim,] + peep_o = context[node.inputs[7]] # [hidden_dim,] + kwargs["weight_peep_i"] = peep_i + kwargs["weight_peep_f"] = peep_f + kwargs["weight_peep_o"] = peep_o + + # BlockLSTM: icfo format, BlockLSTMV2: ifco format + bias = context[node.inputs[8]] # [4*hidden_dim,] + + # forget bias is always 0 for BlockLSTMV2 + forget_bias = 0.0 if node.op == "BlockLSTMV2" else node.attr["forget_bias"] + cell_clip = None + if node.attr["cell_clip"] is not None and node.attr["cell_clip"] > 0: + cell_clip = node.attr["cell_clip"] + + if node.op == "BlockLSTMV2": + # mb.tf_lstm_block takes weights and bias in icfo format + # BlockLSTMV2's weights and bias are in ifco format + # convert from ifco to icfo format + w_i, w_f, w_c, w_o = mb.split(x=weight, num_splits=4, axis=-1) + weight = mb.concat(values=(w_i, w_c, w_f, w_o), axis=1, name=weight.name) + b_i, b_f, b_c, b_o = mb.split(x=bias, num_splits=4, axis=-1) + bias = mb.concat(values=(b_i, b_c, b_f, b_o), axis=0, name=bias.name) + + res = mb.tf_lstm_block( + seq_len=seq_len, + x=x, + c_prev=init_c, + h_prev=init_h, + weight=weight, + bias=bias, + forget_bias=forget_bias, + cell_clip=cell_clip, + use_peephole=use_peephole, + 
name=node.name, + **kwargs + ) + context.add(node.name, res) + +@register_tf_op +def ClipByValue(context, node): + x = context[node.inputs[0]] + min_value = context[node.inputs[1]] + max_value = context[node.inputs[2]] + if min_value.val < max_value.val: + x = mb.clip(x=x, alpha=min_value, beta=max_value, name=node.name) + else: + # When min >= max, TensorFlow sets all values to min. + x = mb.fill(shape=mb.shape(x=x), value=min_value, name=node.name) + context.add(node.name, x) + +@register_tf_op +def Size(context, node): + x = context[node.inputs[0]] + x = mb.shape(x=x) + x = mb.reduce_prod(x=x, axes=[0], name=node.name) + context.add(node.name, x) + +@register_tf_op +def LogSoftmax(context, node): + x = context[node.inputs[0]] + axis = node.attr.get('axis', -1) + x_max = mb.reduce_max(x=x, axes=[axis], keep_dims=True) + x_off = mb.sub(x=x, y=x_max) + y = mb.reduce_log_sum_exp(x=x_off, axes=[axis], keep_dims=True) + res = mb.sub(x=x_off, y=y, name=node.name) + context.add(node.name, res) + +@register_tf_op +def AudioSpectrogram(context, node): + """ + input shape: (Tin, channels) + attributes: stride (int), window_size (int), magnitude_squared (bool) + + output shape : (channels, Tout, fout) + where, + Tout = floor((Tin - window_size)/stride + 1) + fout = N / 2 + 1 + where N = next_power_of_2(window_size) = 2 ^ ceil(log2(window_size)) + + reference: + https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/core/kernels/spectrogram_op.cc + """ + + x = context[node.inputs[0]] # (Tin, channels) + if x.rank != 2: + raise NotImplementedError("AudioSpectrogram op: rank of the input must be 2") + + if "magnitude_squared" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'magnitude_squared'") + if "stride" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'stride'") + if "window_size" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'window_size'") + + magnitude_squared = node.attr["magnitude_squared"] + stride = node.attr["stride"] + window_size = node.attr["window_size"] + + N = 2 ** _np.ceil(_np.log2(window_size)) + N = N.astype(_np.int32) + fout = N / 2 + 1 + fout = fout.astype(_np.int32) + + # construct constant for hann window tensor, of shape (window_size,) + h = _np.arange(window_size) * ((2 * _np.pi) / window_size) + h = 0.5 - 0.5 * _np.cos(h) + + # construct the constant DFT matrices + k = _np.arange(fout).reshape(1, fout) # (1, fout) + n = _np.arange(N).reshape(N, 1) # (N, 1) + kn = _np.matmul(n, k) * (2 * _np.pi / N) # (N, fout) + Re_DFT_matrix_const = _np.cos(kn) # (N, fout) + Im_DFT_matrix_const = -_np.sin(kn) # (N, fout) + + # transpose input + x = mb.transpose(x=x, perm=[1,0]) # (channels, Tin) + # extract slices from the input + x = mb.sliding_windows(x=x, axis=1, size=window_size, stride=stride) # (channels, Tout, window_size) + # multiply with hann window + x = mb.mul(x=x, y=h) + # pad the last dimension to size N + x = mb.pad(x=x, pad=[0,0,0,0,0,N - window_size], mode="constant", constant_val=0.0) # (channels, Tout, N) + # multiply by DFT matrices + re = mb.matmul(x=x, y=Re_DFT_matrix_const) # (channels, Tout, fout) + im = mb.matmul(x=x, y=Im_DFT_matrix_const) # (channels, Tout, fout) + + # compute spectrogram + re = mb.mul(x=re, y=re) + im = mb.mul(x=im, y=im) + if not magnitude_squared: + y = mb.add(x=re, y=im) + y = mb.sqrt(x=y, name=node.name) + else: + y = mb.add(x=re, y=im, name=node.name) + context.add(node.name, y) + +@register_tf_op 
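+# Added commentary (not in the original sources): Mfcc consumes the squared
+# magnitude spectrogram produced by AudioSpectrogram above. As a rough example,
+# window_size=400 gives N = 2 ** ceil(log2(400)) = 512 and fout = 512 / 2 + 1 = 257,
+# so Mfcc would receive a (channels, T, 257) tensor and reduce the last axis to
+# dct_coefficient_count values (13 by default).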
+def Mfcc(context, node): + """ + inputs: + - x : (channels, T, N) + - sampling rate: int + + attributes: + - upper_frequency_limit : int + - lower_frequency_limit : int + - filterbank_channel_count : int + - dct_coefficient_count : int + + output shape: (channels, T, dct_coefficient_count) + """ + x = context[node.inputs[0]] # (channels, T, F) + if x.rank != 3: + raise NotImplementedError("Mfcc op: rank of the input must be 3") + sampling_rate_var = context[node.inputs[1]] + if sampling_rate_var.val is None: + raise NotImplementedError("Mfcc op: dynamic sampling rate not supported") + sample_rate = sampling_rate_var.val + if is_symbolic(x.shape[2]): + raise NotImplementedError("Mfcc op: the last dimension, i.e. spectrogram size of the input must be known") + + spectrogram_N = x.shape[2] + upper_frequency_limit = node.attr.get("upper_frequency_limit", 4000) + lower_frequency_limit = node.attr.get("lower_frequency_limit", 20) + filterbank_channel_count = node.attr.get("filterbank_channel_count", 40) + dct_coefficient_count = node.attr.get("dct_coefficient_count", 13) + + # get the constant weights, matrices for MFCC filterbank and for DCT + # weights: (N,) + # mat_weighted, mat_spec_val : (N, filterbank_channel_count) + # cosines : (filterbank_channel_count, dct_coefficient_count) + weights, mat_weighted, mat_spec_val, cosines = _get_MFCC_constants(spectrogram_N, + sample_rate, + upper_frequency_limit, + lower_frequency_limit, + filterbank_channel_count, + dct_coefficient_count) + + spectogram_value = mb.sqrt(x=x) # (channels, T, N) + weighted_spectogram_value = mb.mul(x=spectogram_value, y=weights) # (channels, T, N) + x1 = mb.matmul(x=weighted_spectogram_value, y=mat_weighted) # (channels, T, filterbank_channel_count) + x2 = mb.matmul(x=spectogram_value, y=mat_spec_val) # (channels, T, filterbank_channel_count) + y = mb.add(x=x1, y=x2) # (channels, T, filterbank_channel_count) + y = mb.log(x=y, epsilon=1e-12) + y = mb.matmul(x=y, y=cosines, name=node.name) # (channels, T, dct_coefficient_count) + context.add(node.name, y) + + +@register_tf_op +def Complex(context, node): + real_part = context[node.inputs[0]] + imag_part = context[node.inputs[1]] + result = mb.complex(real_data=real_part, imag_data=imag_part, name=node.name) + context.add(node.name, result) + + +@register_tf_op +def Real(context, node): + input_data = context[node.inputs[0]] + if types.is_complex(input_data.dtype): + real_part = mb.complex_real(data=input_data, name=node.name) + else: + real_part = input_data + context.add(node.name, real_part) + + +@register_tf_op +def Imag(context, node): + input_data = context[node.inputs[0]] + if types.is_complex(input_data.dtype): + imag_part = mb.complex_imag(data=input_data, name=node.name) + else: + # According to the doc of tf.math.imag, it returns a tensor of all zeros if input is real. + np_type = types.nptype_from_builtin(input_data.sym_type.get_primitive()) + imag_part = mb.fill( + shape=mb.shape(x=input_data), value=np_type(0), name=node.name + ) + context.add(node.name, imag_part) + + +@register_tf_op +def FFT(context, node): + input_data = context[node.inputs[0]] + fft_res = mb.complex_fft(data=input_data, name=node.name) + context.add(node.name, fft_res) + + +@register_tf_op +def RFFT(context, node): + input_data = context[node.inputs[0]] + fft_length = context[node.inputs[1]] + # The fft_length is an int32 tensor of shape [1] instead of an integer. To make it compatible + # to complex_rfft (which use PyTorch's params as reference), we extract the value from tensor. 
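+    # For instance (hypothetical shapes, added commentary): a length-16 fp32
+    # signal with fft_length == [8] lowers to complex_rfft(data, n=8), mirroring
+    # the integer `n` parameter of torch.fft.rfft.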
+ rfft_res = mb.complex_rfft( + data=input_data, n=mb.const(val=fft_length.val[0]), name=node.name + ) + context.add(node.name, rfft_res) + + +@register_tf_op +def IFFT(context, node): + input_data = context[node.inputs[0]] + ifft_res = mb.complex_ifft(data=input_data, name=node.name) + context.add(node.name, ifft_res) + + +@register_tf_op +def IRFFT(context, node): + input_data = context[node.inputs[0]] + fft_length = context[node.inputs[1]] + # The fft_length is an int32 tensor of shape [1] instead of an integer. To make it compatible + # to complex_rfft (which use PyTorch's params as reference), we extract the value from tensor. + irfft_res = mb.complex_irfft( + data=input_data, n=mb.const(val=fft_length.val[0]), name=node.name + ) + context.add(node.name, irfft_res) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py new file mode 100644 index 00000000..9247885b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py @@ -0,0 +1,138 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +from tensorflow.core.framework.types_pb2 import DataType +from tensorflow.python.framework.dtypes import _TF_TO_NP + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types + + +def parse_type(t): + mapping = { + # bool + DataType.DT_BOOL: types.bool, + # floating point + DataType.DT_HALF: types.fp16, + DataType.DT_FLOAT: types.float, + DataType.DT_DOUBLE: types.double, + # int + DataType.DT_INT8: types.int8, + DataType.DT_INT16: types.int16, + DataType.DT_INT32: types.int32, + DataType.DT_INT64: types.int32, + + # unsigned int + DataType.DT_UINT8: types.uint8, + DataType.DT_UINT16: types.uint16, + DataType.DT_UINT32: types.uint32, + DataType.DT_UINT64: types.uint64, + # string + DataType.DT_STRING: types.str, + } + t = int(t) + if t in mapping: + return mapping[t] + else: + logger.info("Type %d cannot be mapped", t) + return None + + +def parse_shape(t): + if t.unknown_rank: + return None + ret = [d.size for d in t.dim] + return ret + + +def parse_tensor(t): + typ = parse_type(t.dtype) + shape = parse_shape(t.tensor_shape) + + retval = None + if len(t.half_val) > 0: + retval = _np.array(t.half_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.float_val) > 0: + retval = _np.array(t.float_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.double_val) > 0: + retval = _np.array(t.double_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.int_val) > 0: + retval = _np.array(t.int_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.int64_val) > 0: + retval = _np.array(t.int64_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.bool_val) > 0: + retval = _np.array(t.bool_val, dtype=_TF_TO_NP[t.dtype]) + elif hasattr(t, "uint32_val") and len(t.uint32_val) > 0: + retval = _np.array(t.uint32_val, dtype=_TF_TO_NP[t.dtype]) + elif hasattr(t, "uint64_val") and len(t.uint64_val) > 0: + retval = _np.array(t.uint64_val, dtype=_TF_TO_NP[t.dtype]) + + if not t.tensor_shape.unknown_rank and len(shape) == 0: + retobj = typ() + if retval is not None: + retobj.val = retval[0] + else: + rettype = types.tensor(typ, tuple(shape)) + retobj = rettype() + retobj.shape = shape + if retval is not None: + retobj.val = retval + + 
return retobj + + +def parse_string(s): + if isinstance(s, bytes): + return s.decode("utf-8", errors="ignore") + else: + return s + + +def parse_list(t): + if len(t.s) > 0: + return list(parse_string(s) for s in t.s) + elif len(t.i) > 0: + return list(t.i) + elif len(t.f) > 0: + return list(t.f) + elif len(t.b) > 0: + return list(t.b) + elif len(t.type) > 0: + return list(parse_type(z) for z in t.type) + elif len(t.shape) > 0: + return list(parse_shape(z) for z in t.shape) + elif len(t.tensor) > 0: + return list(parse_tensor(z) for z in t.tensor) + else: + return [] + + +def parse_func(f): + return f.name + + +def parse_attr(attr): + if attr.HasField("s"): + return parse_string(attr.s) + elif attr.HasField("i"): + return attr.i + elif attr.HasField("f"): + return attr.f + elif attr.HasField("b"): + return attr.b + elif attr.HasField("type"): + return parse_type(attr.type) + elif attr.HasField("shape"): + return parse_shape(attr.shape) + elif attr.HasField("tensor"): + return parse_tensor(attr.tensor) + elif attr.HasField("list"): + return parse_list(attr.list) + elif attr.HasField("func"): + return parse_func(attr.func) + elif attr.HasField("placeholder"): + raise NotImplementedError("placeholder not yet implemented") + raise ValueError("unintelligible TFNode attributes") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py new file mode 100644 index 00000000..10253d89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py @@ -0,0 +1,80 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import types + +from .tfssa import ParsedNode + + +class ParsedTFNode(ParsedNode): + """ + A parsed TensorFlow Node. + + name: The name of the node (str) + op: The operation represented by the node (str) + datatype: The type of the node. 
(type)
+    value: The value of the node if available
+    inputs: The list of nodes which are inputs to this node (list[str])
+    control_inputs: The list of nodes which have to be executed before this node (list[str])
+    attr: The attributes of the node
+    outputs: The list of nodes which consume the result of this node (list[str])
+    control_outputs: The list of nodes which have to be executed after this node (list[str])
+    """
+
+    def __init__(self, tfnode=None):
+        super(ParsedTFNode, self).__init__()
+        self.original_node = tfnode
+
+        if tfnode is not None:
+            from .parse import parse_attr
+
+            self.name = tfnode.name
+            if tfnode.op == "PlaceholderWithDefault":
+                self.op = "Placeholder"
+            else:
+                self.op = tfnode.op
+            self.inputs = [x for x in tfnode.input if not x.startswith("^")]
+            self.control_inputs = [x[1:] for x in tfnode.input if x.startswith("^")]
+            self.attr = {k: parse_attr(v) for k, v in tfnode.attr.items()}
+
+    def parse_from_attr(self):
+        if "value" in self.attr:
+            self.datatype = self.attr["value"].__class__
+        elif "_output_shapes" in self.attr:
+            output_shapes = self.attr["_output_shapes"]
+            if output_shapes[0] is not None and len(output_shapes[0]) > 0:
+                if "dtype" in self.attr:
+                    rettype = types.tensor(self.attr["dtype"], tuple(output_shapes[0]))
+                elif "T" in self.attr:
+                    rettype = types.tensor(self.attr["T"], tuple(output_shapes[0]))
+                elif "Tparams" in self.attr:
+                    rettype = types.tensor(
+                        self.attr["Tparams"], tuple(output_shapes[0])
+                    )
+                else:
+                    # Use a single format string; the original concatenated the
+                    # attribute string before applying the % operator, which
+                    # raised a formatting error instead of this message.
+                    raise NotImplementedError(
+                        "Op-(%s) %s not implemented\nWith attribute: %s"
+                        % (self.op, self.name, str(self.attr))
+                    )
+                self.datatype = rettype
+        elif "dtype" in self.attr:
+            self.datatype = self.attr["dtype"]
+        elif "shape" in self.attr:
+            shape = self.attr["shape"]
+            assert "dtype" in self.attr
+            if len(shape) == 0:
+                self.datatype = self.attr["dtype"]
+            else:
+                self.datatype = types.tensor(self.attr["dtype"], shape)
+        elif "dtype" in self.attr:
+            self.datatype = self.attr["dtype"]
+
+    def _copy_impl(self, dest):
+        dest = super(ParsedTFNode, self)._copy_impl(dest)
+        dest.original_node = self.original_node
+        return dest
+
+    def __copy__(self):
+        return self._copy_impl(ParsedTFNode())
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py
new file mode 100644
index 00000000..2dc81438
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import backfill_make_list_elem_type, expand_tf_lstm, tf_lstm_to_core_lstm
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py
new file mode 100644
index 00000000..81d8423e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.mil.var import ListVar
+
+
+@register_pass(namespace="tensorflow")
+class backfill_make_list_elem_type(AbstractGraphPass):
+    """
+    TF's TensorArrayV3 (represented as make_list in MIL) doesn't necessarily
+    carry the element shape/type, which only becomes known once a write is
+    performed. This pass backfills the element type info onto make_list.
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _backfill_make_list_elem_type_block(f)
+
+@block_context_manager
+def _backfill_make_list_elem_type_block(block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in list(block.operations):
+        for b in op.blocks:
+            _backfill_make_list_elem_type_block(b)
+
+        if op.op_type != "tf_make_list":
+            continue
+
+        if op.outputs[0].elem_type != types.unknown:
+            # elem_type of the list is known
+            continue
+
+        list_var = op.outputs[0]
+        elem_type = _infer_elem_type(list_var)  # types.tensor
+        if elem_type is None:
+            msg = (
+                "No list_write or list_scatter op to infer make_list "
+                + "'{}' element type. Block:\n{}"
+            )
+            raise ValueError(msg.format(op.name, op.enclosing_block))
+
+        # elem_shape can be runtime-determined, which cannot be inferred at this point,
+        # so we add an internal _const_symbolic node to cover both static and dynamic cases.
+        elem_shape = [dim.name if is_symbolic(dim) else dim for dim in elem_type.get_shape()]
+        new_list = mb.make_list(
+            init_length=op.init_length,
+            dynamic_length=op.dynamic_length,
+            elem_shape=tuple(elem_shape),
+            dtype=op.inputs["dtype"],
+            before_op=op,
+            name=op.name,
+        )
+
+        block.replace_uses_of_var_after_op(
+            anchor_op=op, old_var=op.outputs[0], new_var=new_list
+        )
+        block.remove_ops([op])
+
+
+def _infer_elem_type(list_var):
+    """
+    Returns types.tensor, or None if the element type could not be inferred.
+    Example:
+
+    Given:
+
+    main(%update: (2,fp32)) {
+      block0() {
+        %list: List[unknown] = tf_make_list(...) # unknown elem type
+        %while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
+          while_loop_0_body(...) {
+            %list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
+          } -> (%add_0, %list_write_0)
+
+    Result:
+
+    main(%update: (2,fp32)) {
+      block0() {
+        %list: List[(2,fp32)] = tf_make_list(...) # Get the elem type from list_write
+        %while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
+          while_loop_0_body(...) {
+            %list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
+          } -> (%add_0, %list_write_0)
+    """
+    # Search for child ops that have informative element types
+    for o in list_var.child_ops:
+        if o.op_type in ["list_write", "list_scatter"]:
+            return o.outputs[0].elem_type
+        if o.op_type == "while_loop":
+            idx = list(o.loop_vars).index(list_var)
+            block = o.blocks[0]
+            # the corresponding Var in the body block
+            block_var = block.inputs[idx]
+            elem_type = _infer_elem_type(block_var)
+            if elem_type is not None:
+
+                def _set_types_for_block_inputs(block):
+                    block_var = block.inputs[idx]
+                    new_block_var = ListVar(name=block_var.name, elem_type=elem_type,
+                                            init_length=block_var.sym_type.T[1],
+                                            dynamic_length=block_var.sym_type.T[2])
+                    block._replace_var(block_var, new_block_var)
+
+                _set_types_for_block_inputs(o.blocks[0])  # condition block
+                _set_types_for_block_inputs(o.blocks[1])  # body block
+
+                return elem_type
+            # otherwise continue to the other block_var (a list_var can be
+            # passed into while_loop twice).
+    return None
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py
new file mode 100644
index 00000000..1f28bfad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py
@@ -0,0 +1,225 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="tensorflow")
+class expand_tf_lstm(AbstractGraphPass):
+    """
+    Expand tf_lstm_block_cell into fine-grained SSA ops, following:
+
+        xh = [x, h_prev]
+        [i, ci, f, o] = xh * w + b
+        f = f + forget_bias
+        if not use_peephole:
+            wci = wcf = wco = 0
+        i = sigmoid(cs_prev .* wci + i)
+        f = sigmoid(cs_prev .* wcf + f)
+        ci = tanh(ci)
+        cs = ci .* i + cs_prev .* f
+        cs = clip(cs, cell_clip)
+        o = sigmoid(cs * wco + o)
+        co = tanh(cs)
+        h = co .* o
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _expand_tf_lstm_helper(f)
+
+
+def _expand_tf_lstm_helper(block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in block.operations[:]:
+        for b in op.blocks:
+            _expand_tf_lstm_helper(b)
+
+        if op.op_type == "tf_lstm_block_cell":
+            _expand_tf_lstm_block_cell(op)
+            logger.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
+
+        if op.op_type == "tf_lstm_block":
+            # only cs, h are supported for now. Can be easily extended to other outputs at a performance hit.
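+            # The expansion below drops the five auxiliary outputs (i, f, o, ci, co),
+            # so it is only applied when none of them is consumed by a child op or
+            # an enclosing block; otherwise the tf_lstm_block op is left untouched.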
+            i, cs, f, o, ci, co, h = op.outputs
+            if all(
+                map(lambda x: len(x.child_ops) == 0 and len(x.consuming_blocks) == 0,
+                    (i, f, o, ci, co)
+                )
+            ):
+                _expand_tf_lstm_block(op)
+                logger.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
+
+
+def _lstm_cell_builder(op, x, h_prev, cs_prev, before_op=None):
+    b = op.bias  # [4*hidden_dim]
+    forget_bias = op.forget_bias.val  # python:float
+
+    # xh = [x, h_prev]
+    # xh shape: [b, input_dim+hidden_dim]
+    xh = mb.concat(values=[x, h_prev], axis=-1, before_op=before_op)
+
+    # w: [4*hidden_dim, input_dim + hidden_dim] (icfo layout)
+    w = np.transpose(op.weight.val)
+    # [i, ci, f, o] = xh * w + b. Shape is [b, 4*hidden_dim]
+    icfo = mb.linear(x=xh, weight=w, bias=b, before_op=before_op)
+
+    # i, ci, f, o shape: [b, hidden_dim]
+    i, ci, f, o = mb.split(x=icfo, num_splits=4, axis=-1, before_op=before_op)
+    if op.forget_bias.val != 0:
+        f = mb.add(x=f, y=forget_bias, before_op=before_op)
+
+    # note that .* means Hadamard product
+    # i = sigmoid(cs_prev .* wci + i)
+    # f = sigmoid(cs_prev .* wcf + f)
+    if op.use_peephole.val:
+        wci = op.weight_peep_i.val  # [hidden_dim]
+        wcf = op.weight_peep_f.val  # [hidden_dim]
+
+        x = mb.mul(x=cs_prev, y=wci, before_op=before_op)
+        pre_i = mb.add(x=x, y=i, before_op=before_op)
+
+        x = mb.mul(x=cs_prev, y=wcf, before_op=before_op)
+        pre_f = mb.add(x=x, y=f, before_op=before_op)
+    else:
+        pre_i = i
+        pre_f = f
+
+    i = mb.sigmoid(x=pre_i, before_op=before_op)
+    f = mb.sigmoid(x=pre_f, before_op=before_op)
+    ci = mb.tanh(x=ci, before_op=before_op)
+
+    # cs = ci .* i + cs_prev .* f
+    x = mb.mul(x=ci, y=i, before_op=before_op)
+    y = mb.mul(x=cs_prev, y=f, before_op=before_op)
+    cs = mb.add(x=x, y=y, before_op=before_op)
+
+    # cs = clip(cs, cell_clip)
+    if op.cell_clip is not None:
+        clip_val = op.cell_clip.val
+        cs = mb.clip(x=cs, alpha=-clip_val, beta=clip_val, before_op=before_op)
+
+    # o = sigmoid(cs * wco + o)
+    if op.use_peephole.val:
+        wco = op.weight_peep_o.val
+        x = mb.mul(x=cs, y=wco, before_op=before_op)
+        pre_o = mb.add(x=x, y=o, before_op=before_op)
+    else:
+        pre_o = o
+    o = mb.sigmoid(x=pre_o, before_op=before_op)
+    co = mb.tanh(x=cs, before_op=before_op)
+
+    # h = co .* o
+    h = mb.mul(x=co, y=o, before_op=before_op)
+
+    return [i, cs, f, o, ci, co, h]
+
+
+def _expand_tf_lstm_block_cell(op):
+    if op.op_type != "tf_lstm_block_cell":
+        raise ValueError()
+
+    with op.enclosing_block as block:
+        x = op.x  # [b, input_dim]
+        h_prev = op.h_prev  # [b, hidden_dim]
+        cs_prev = op.c_prev  # [b, hidden_dim]
+
+        i, cs, f, o, ci, co, h = _lstm_cell_builder(
+            op, x, h_prev, cs_prev, before_op=op
+        )
+
+        # Replace all outputs
+        new_outputs = [i, cs, f, o, ci, co, h]
+        for old_v, new_v in zip(op.outputs, new_outputs):
+            block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=old_v, new_var=new_v
+            )
+        block.remove_ops([op])
+
+
+def _expand_tf_lstm_block(op):
+    if op.op_type != "tf_lstm_block":
+        raise ValueError()
+
+    with op.enclosing_block as block:
+        x = op.x  # [s, b, input_dim]
+        h_prev = op.h_prev  # [b, hidden_dim]
+        cs_prev = op.c_prev  # [b, hidden_dim]
+
+        # cond and body functions for the while_loop
+        def cond(i, cs_list, h_list):
+            return mb.less(x=i, y=length)
+
+        def body(i, cs_list, h_list):
+            xi = mb.gather(x=x, indices=i, axis=0)
+            h_prev = mb.gather(x=h_list, indices=i, axis=0)
+            cs_prev = mb.gather(x=cs_list, indices=i, axis=0)
+
+            ig, cs, fg, og, ci, co, h = _lstm_cell_builder(op, xi, h_prev, cs_prev)
+
+            counter = mb.add(x=i, y=1)
+
+            return (
+                counter,
+                mb.scatter(data=cs_list, indices=counter, updates=cs),
+                mb.scatter(data=h_list, indices=counter, updates=h),
+            )
+
+        # Allocate two lists: cs & h
+        x_shape = mb.shape(x=x, before_op=op)
+        length = mb.slice_by_index(x=x_shape, begin=[0], end=[1], before_op=op)
+        h_shape = mb.shape(x=h_prev, before_op=op)
+        list_shape = mb.concat(values=[length, h_shape], axis=0, before_op=op)
+        cs_list = mb.fill(shape=list_shape, before_op=op)
+        h_list = mb.fill(shape=list_shape, before_op=op)
+
+        # append initial state at index 0
+        cs_prev = mb.expand_dims(x=cs_prev, axes=[0], before_op=op)
+        cs_list = mb.concat(values=[cs_prev, cs_list], axis=0, before_op=op)
+        h_prev = mb.expand_dims(x=h_prev, axes=[0], before_op=op)
+        h_list = mb.concat(values=[h_prev, h_list], axis=0, before_op=op)
+
+        _, cs_list, h_list = mb.while_loop(
+            _cond=cond, _body=body, loop_vars=([0], cs_list, h_list), before_op=op
+        )
+
+        # strip the initial state (the element at index 0)
+        begin, end = [1, 0, 0], [0, 0, 0]
+        begin_mask = [False, True, True]
+        end_mask = [True, True, True]
+        cs = mb.slice_by_index(
+            x=cs_list,
+            begin=begin,
+            end=end,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+            before_op=op,
+        )
+        h = mb.slice_by_index(
+            x=h_list,
+            begin=begin,
+            end=end,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+            before_op=op,
+        )
+
+        # Replace all outputs
+        new_outputs = [cs, h]
+        for old_v, new_v in zip(
+            [ov for index, ov in enumerate(op.outputs) if index in [1, 6]], new_outputs
+        ):
+            block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=old_v, new_var=new_v
+            )
+        block.remove_ops([op])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py
new file mode 100644
index 00000000..5b5d53cc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+
+import pytest
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.testing_utils import (assert_model_is_valid,
+                                                      assert_same_output_names)
+
+pytest.importorskip("tensorflow", minversion="1.15.0")
+
+
+def test_backfill_make_list_elem_type():
+    # The while_loop writes the rank-1 tensor `update` to `ls` once per
+    # iteration, for i = 0, ..., num_iters-1.
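+    # Before the pass, the tf_make_list op reports an unknown elem_type; after
+    # tensorflow::backfill_make_list_elem_type runs, it should be replaced by a
+    # make_list op whose elem_type carries the (2,) element shape, which the
+    # assertions below verify.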
+
+    elem_shape = (2,)
+
+    @mb.program(
+        input_specs=[mb.TensorSpec(shape=elem_shape),]
+    )
+    def prog(update):
+        def body(i, ls):
+            return mb.add(x=i, y=1), mb.list_write(ls=ls, index=i, value=update)
+
+        def cond(i, ls):
+            return mb.less(x=i, y=num_iters)
+
+        i = 0
+        ls = mb.tf_make_list(init_length=1)
+        num_iters = 3
+        _, final_tensor_list = mb.while_loop(_cond=cond, _body=body, loop_vars=(i, ls))
+        list_len = mb.list_length(ls=final_tensor_list)
+        indices = mb.range_1d(start=0, end=list_len, step=1)
+        return mb.list_gather(ls=final_tensor_list, indices=indices)
+
+    # tf_make_list has no elem_type info
+    make_list_op = prog.find_ops(op_type="tf_make_list", exactly_one=True)[0]
+    assert make_list_op.outputs[0].elem_type == types.unknown
+
+    prev_prog = copy.deepcopy(prog)
+    PASS_REGISTRY["tensorflow::backfill_make_list_elem_type"](prog)
+    assert_same_output_names(prev_prog, prog)
+    prog.validate()
+
+    # tf_make_list is replaced with make_list and should have elem_type now
+    make_list_op = prog.find_ops(op_type="make_list", exactly_one=True)[0]
+    assert make_list_op.outputs[0].elem_type.get_shape() == elem_shape
+
+    assert_model_is_valid(prog, {"update": elem_shape})
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py
new file mode 100644
index 00000000..18819629
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py
@@ -0,0 +1,308 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import (
+    Block,
+    Builder as mb,
+    Operation,
+    Var,
+)
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+
+SUPPORTED_TF_LSTM_OPS = ["tf_lstm_block_cell", "tf_lstm_block"]
+
+@register_pass(namespace="tensorflow")
+class tf_lstm_to_core_lstm(AbstractGraphPass):
+    """
+    Try to map TF dialect ops `tf_lstm_block` and `tf_lstm_block_cell` to
+    `lstm` in the core op set if compatible. They are compatible if all of the
+    following are satisfied:
+
+    - If tf_lstm_block: only the h output is consumed. tf_lstm_block has 7
+      sequence outputs: [i, cs, f, o, ci, co, h]. Each of them (e.g., i) has
+      shape [seq_len, batch, hidden_dim] (see the tf_lstm_block op doc string).
+      core lstm only supports sequence output for hidden state h, and thus if
+      any output other than `h` is consumed, we cannot convert to lstm in the
+      core op set.
+
+    - If tf_lstm_block_cell: only the cs and h outputs (outputs[1], outputs[6])
+      are consumed. Similar to above.
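+
+    As a schematic illustration (`some_op` here stands for any consuming op,
+    not a specific op in the op set), the following usage remains convertible,
+    since only the hidden state sequence `h` is consumed:
+
+        _, _, _, _, _, _, h = tf_lstm_block(...)
+        out = some_op(h)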
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _tf_lstm_to_core_lstm_block(f)
+
+@block_context_manager
+def _tf_lstm_to_core_lstm_block(block: Block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in list(block.operations):
+        for b in op.blocks:
+            _tf_lstm_to_core_lstm_block(b)
+
+        if op.op_type in SUPPORTED_TF_LSTM_OPS:
+            if _try_replace_with_core_lstm(op):
+                logger.info("Successfully map {} to lstm".format(op.op_type))
+            else:
+                logger.info("Unable to map {} to lstm".format(op.op_type))
+
+def _try_get_last_cell_state_in_tf_lstm_block(op: Operation) -> Var:
+    """
+    Parameters
+    ----------
+    op: Operation
+        Must have op type "tf_lstm_block"
+
+    Returns
+    -------
+    Var, a var representing the last cell state in the lstm. None if the check fails.
+
+    One of the outputs of the op "tf_lstm_block" is the cell state (cs) which has shape [seq_len, batch, feat].
+    That is, it is the cell state tensor of the lstm, which includes all the time steps.
+    This normally cannot be mapped to the MIL lstm op's cell state output, since that op only
+    returns the last time step of the cell state, which is a tensor of shape [batch, feat].
+    However, if the cell state output of "tf_lstm_block" is being sliced, before being used anywhere else,
+    and sliced in such a way that it extracts just the last time step of the seq dimension, then
+    it can indeed be mapped to MIL's lstm op.
+    This utility function detects this condition. If true, it returns the var
+    that corresponds to the rank 2 sliced cell state.
+
+    In particular, the following pattern is detected:
+
+    Input pattern:
+    ..., cs, ... = tf_lstm_block(...) # [seq_len, batch, feat]
+    extracted_cell_state = slice_by_index(x=cs, ...) # [batch, feat] or [1, batch, feat], such that seq dim. is sliced at the last time step
+    out = op(extracted_cell_state)
+
+    The "cs" var can be feeding into multiple "slice_by_index" ops, some of which slice it into [batch, feat] and
+    some into [1, batch, feat] shaped tensors. This scenario is handled in the following manner:
+
+    step 1: verify that the output "cs" only feeds into slice_by_index ops
+    step 2: add a slice_by_index op to the graph, which slices the last time step and creates a
+            tensor, "last_cs", of shape [batch, feat]
+    step 3: add an expand_dims op to the graph which takes in "last_cs" and expands it to create
+            a tensor, "expanded_last_cs", of shape [1, batch, feat]
+    step 4: now, iterate over all the child ops of "cs". Each one of these will be of type "slice_by_index".
+            Verify that they are slicing only the last time step. If not, exit out of the function by returning None.
+            Once verified, replace its output var with either "last_cs" or "expanded_last_cs", depending on its shape.
+    step 5: remove all the child ops of "cs". Return "last_cs"
+    """
+    if op.op_type != "tf_lstm_block":
+        raise ValueError("op must have type 'tf_lstm_block'. Got {}".format(op.op_type))
Got {}".format(op.op_type)) + + cs = op.outputs[1] + if len(cs.child_ops) == 0 and len(cs.consuming_blocks) == 0: + return cs + if len(cs.consuming_blocks) > 1: + return None + if not all([child_op.op_type == "slice_by_index" for child_op in cs.child_ops]): + return None + child_ops = cs.child_ops[:] + block = op.enclosing_block + + # extract the last time step of the cell states + last_cs = mb.slice_by_index( + x=cs, + begin=[-1, 0, 0], + end=[-1, 0, 0], + begin_mask=[False, True, True], + end_mask=[False, True, True], + squeeze_mask=[True, False, False], + before_op=child_ops[0], + ) # this is of shape [batch, feat] + expanded_last_cs = mb.expand_dims( + x=last_cs, axes=[0], before_op=child_ops[0] + ) # shape: [1, batch, feat] + + # for each child op, which is a "slice_by_index" op, verify the following conditions: + # - input is a rank 3 tensor, of shape [seq_len, batch, feat] + # - output is either a rank 2 tensor of shape [batch, feat] or rank 3 of shape [1, batch, feat] + # - the first dimension is sliced with an index that is the last index, + # so if its positive it should be of value, seq-1, or if negative, it should be -1 + for slice_op in child_ops: + # if any of the input arguments of the slice op is not compile time known, the check fails early + for input in slice_op.inputs.values(): + if input == slice_op.x: + continue + if input is None or input.val is None: + return None + + x = slice_op.x + out = slice_op.outputs[0] + # check input rank + if x.rank != 3: + return None + # check output rank and shape + if out.rank not in (2, 3): + return None + if out.shape[-2:] != x.shape[-2:]: + return None + if out.rank == 3 and out.shape[0] != 1: + return None + + # check that only the last time step is being extracted + begin = slice_op.begin.val.tolist() + end = slice_op.end.val.tolist() + stride = slice_op.stride.val.tolist() + begin_mask = slice_op.begin_mask.val.tolist() + end_mask = slice_op.end_mask.val.tolist() + squeeze_mask = slice_op.squeeze_mask.val.tolist() + + # the stride for the first dimension must be 1 + if stride[0] != 1: + return None + + # check if the first dimension is sliced exactly for the last time step + if is_symbolic(x.shape[0]): + """ + When the first dimension is symbolic, we check for the following condition to be true: + - begin[0] == -1 and begin_mask[0] == False + If this condition is not met, we return None and exit + """ + if begin[0] != -1 or begin_mask[0]: + return None + else: + time = x.shape[0] + begin = [i + time if i < 0 else i for i in begin] + end = [i + time if i < 0 else i for i in end] + + begin_time = 0 if begin_mask[0] else begin[0] + end_time = time if end_mask[0] else end[0] + if squeeze_mask[0]: + if begin_time != time - 1: + return None + else: + if end_time - begin_time != 1: + return None + if begin_time != time - 1: + return None + + block.replace_uses_of_var_after_op( + anchor_op=slice_op, + old_var=slice_op.outputs[0], + new_var=last_cs if len(out.shape) == 2 else expanded_last_cs, + ) + + block.remove_ops(child_ops) + return last_cs + + +def _try_replace_with_core_lstm(op: Operation) -> bool: + """ + Inputs: + + op (Operation): op.op_type must be 'tf_lstm_block_cell' or `tf_lstm_block` + + Returns: + + True if op can be represented by mb.lstm op in SSA. 
+    """
+    def _check_unsupported_outputs(unsupported_outputs):
+        for ov in unsupported_outputs:
+            if len(ov.child_ops) > 0 or len(ov.consuming_blocks) > 0:
+                return False
+        return True
+
+    # Check for an unsupported configuration: when peephole is present
+    if op.use_peephole.val:
+        return False
+
+    # Check if the tf lstm op can be replaced with the Core ML lstm op.
+    # We check the following two conditions:
+    # (1) The outputs must not include (i, f, o, ci, co), since there are no corresponding outputs in Core ML's LSTM
+    # (2) For the tf_lstm_block op, only the last time step of the cell state can be used
+    # Here is an example of a valid supported configuration:
+    #     _, cell_states, _, _, _, _, _ = tf_lstm_block.outputs
+    #     output = cell_states[-1, 1:2, :]
+    # And here is an example that coreml cannot handle currently:
+    #     _, cell_states, _, _, _, _, _ = tf_lstm_block.outputs
+    #     output = cell_states[:2, :, :]
+    i, cs, f, o, ci, co, h = op.outputs
+    unsupported_outputs = [i, f, o, ci, co]
+    if not _check_unsupported_outputs(unsupported_outputs):
+        return False
+
+    if op.op_type == "tf_lstm_block":
+        cs = _try_get_last_cell_state_in_tf_lstm_block(op)
+        if cs is None:
+            return False
+
+    # op is compatible with lstm
+
+    mb_peep = None
+    if op.use_peephole.val:
+        mb_peep = np.stack(
+            [op.weight_peep_i.val, op.weight_peep_f.val, op.weight_peep_o.val]
+        )
+
+    # Set weights. The layout of the weight in TF1 is icfo (input, cell, forget, output gate).
+    # It needs to be converted to ifoc for Core ML.
+    tf_w = op.weight.val  # [input_dim+hidden_dim, 4*hidden_dim] in icfo layout
+    tf_w_i, tf_w_c, tf_w_f, tf_w_o = np.split(tf_w, 4, axis=1)
+    w = np.concatenate([tf_w_i, tf_w_f, tf_w_o, tf_w_c], axis=1)
+    w = np.transpose(w, [1, 0])
+    hidden_dim = w.shape[0] // 4
+    input_dim = w.shape[1] - hidden_dim
+    # Split input and hidden weights
+    w_ih, w_hh = np.split(w, [input_dim], axis=1)
+
+    # Bias is icfo. Convert to ssa LSTM's ifoc layout
+    tf_b = op.bias.val
+    tf_b_i, tf_b_c, tf_b_f, tf_b_o = np.split(tf_b, 4, axis=0)
+    tf_b_f += op.forget_bias.val  # add forget bias to bias
+    bias = np.concatenate([tf_b_i, tf_b_f, tf_b_o, tf_b_c], axis=0)
+
+    cell_clip = None if op.cell_clip is None else op.cell_clip.val
+
+    output_sequence = op.op_type == "tf_lstm_block"
+
+    block = op.enclosing_block
+    # x: [seq_len, batch, input_dim]
+    if op.op_type == "tf_lstm_block_cell":
+        x = mb.expand_dims(x=op.x, axes=[0], before_op=op)
+    elif op.op_type == "tf_lstm_block":
+        x = op.x
+    else:
+        raise ValueError("tf lstm op {} not supported. Only {} supported".format(op.op_type, SUPPORTED_TF_LSTM_OPS))
+
+    new_h_all, new_h, new_cs = mb.lstm(
+        x=x,
+        initial_c=op.c_prev,
+        initial_h=op.h_prev,
+        weight_ih=w_ih,
+        weight_hh=w_hh,
+        bias=bias,
+        recurrent_activation="sigmoid",
+        cell_activation="tanh",
+        activation="tanh",
+        peephole=mb_peep,
+        clip=cell_clip,
+        output_sequence=output_sequence,
+        name=op.name,
+        before_op=op,
+    )
+
+    ops_to_remove = [op]
+    block.replace_uses_of_var_after_op(anchor_op=op, old_var=cs, new_var=new_cs)
+    if op.op_type == "tf_lstm_block_cell":
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=h, new_var=new_h)
+    elif op.op_type == "tf_lstm_block":
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=h, new_var=new_h_all)
+        if cs.op != op:
+            ops_to_remove.append(cs.op)
+    else:
+        raise ValueError("tf lstm op {} not supported. Only {} supported".format(op.op_type, SUPPORTED_TF_LSTM_OPS))
+
+    block.remove_ops(ops_to_remove)
+    return True
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py
new file mode 100644
index 00000000..6d813d6f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+# Importing _TF_OPS_REGISTRY to ensure that overriding an existing TF op does not
+# break testing of the default op.
+# pytest imports all the tests, so an override would invoke the custom op where it
+# is not expected. In real use cases, importing the following is not recommended!
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import (
+    _TF_OPS_REGISTRY, register_tf_op)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import random_gen
+
+tf = pytest.importorskip("tensorflow")
+
+class TestCompositeOp(TensorFlowBaseTest):
+    @pytest.fixture(scope="class")
+    def create_custom_selu(self):
+        default_selu = _TF_OPS_REGISTRY.get("Selu", None)
+
+        @register_tf_op(tf_alias=[], override=True)
+        def Selu(context, node):
+            x = context[node.inputs[0]]
+            alpha = 1.6732631921768188
+            lamda = 1.0507010221481323
+            out_elu = mb.elu(x=x, alpha=alpha)
+            out = mb.mul(x=out_elu, y=lamda, name=node.name)
+            context.add(node.name, out)
+
+        yield
+
+        _TF_OPS_REGISTRY["Selu"] = default_selu
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank",
+        itertools.product(
+            compute_units,
+            backends,
+            list(range(1, 5))
+        ),
+    )
+    @pytest.mark.usefixtures("create_custom_selu")
+    def test_selu(self, compute_unit, backend, rank):
+        input_shape = np.random.randint(low=1, high=6, size=rank)
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            return tf.keras.activations.selu(x)
+
+        model, inputs, outputs = build_model
+
+        input_values = [random_gen(input_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py
new file mode 100644
index 00000000..6edcec0e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+# Importing _TF_OPS_REGISTRY to ensure that overriding an existing TF op does not
+# break testing of the default op.
+# pytest imports all the tests, so an override would invoke the custom op where it
+# is not expected. In real use cases, importing the following is not recommended!
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import (
+    _TF_OPS_REGISTRY, register_tf_op)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import random_gen
+
+tf = pytest.importorskip("tensorflow")
+
+
+class TestCustomMatMul:
+    # Define an SSA custom op for sparse MatMul.
+    # This will map to `custom_op` in SSA with binding information
+    # to bind the input spec to the custom implementation.
+    @register_op(is_custom_op=True)
+    class custom_sparse_matmul(Operation):
+        # Define the input spec for the current op
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain="T"),
+            y=TensorInputType(type_domain="T"),
+            transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            x_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            y_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        )
+
+        type_domains = {
+            "T": (types.fp16, types.fp32),
+        }
+
+        # Specify bindings for the custom op: the inputs and parameters
+        # required for creating the custom op, to be synced with the Swift API
+        bindings = {
+            "class_name": "SparseMatMul",
+            "input_order": ["x", "y"],
+            "parameters": ["transpose_x", "transpose_y", "x_is_sparse", "y_is_sparse"],
+            "description": "Custom Sparse MatMul Layer",
+        }
+
+        def default_inputs(self):
+            return DefaultInputs(
+                transpose_x=False,
+                transpose_y=False,
+                x_is_sparse=False,
+                y_is_sparse=False,
+            )
+
+        def type_inference(self):
+            x_type = self.x.dtype
+            x_shape = self.x.shape
+            y_shape = self.y.shape
+            # For illustration purposes, assume the shapes are valid.
+            # Ideally, the transpose_? and ?_is_sparse parameters should be taken
+            # into consideration when computing the output shape.
+            return types.tensor(x_type, [x_shape[0], y_shape[1]])
+
+    # TensorFlow Sparse Matmul Op
+    @register_tf_op
+    def SparseMatMul(context, node):
+        a = context[node.inputs[0]]
+        b = context[node.inputs[1]]
+        transpose_a = node.attr.get("transpose_a", False)
+        transpose_b = node.attr.get("transpose_b", False)
+        a_is_sparse = node.attr.get("a_is_sparse", False)
+        b_is_sparse = node.attr.get("b_is_sparse", False)
+
+        x = mb.custom_sparse_matmul(
+            x=a,
+            y=b,
+            transpose_x=transpose_a,
+            transpose_y=transpose_b,
+            x_is_sparse=a_is_sparse,
+            y_is_sparse=b_is_sparse,
+            name=node.name,
+        )
+        context.add(node.name, x)
+
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, transpose_a, transpose_b," "a_is_sparse, b_is_sparse, b_is_const",
+        itertools.product(
+            compute_units,
+            backends,
+            [True, False],
+            [True, False],
+            [True, False],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_tf(
+        self, compute_unit, backend, transpose_a, transpose_b, a_is_sparse, b_is_sparse, b_is_const,
+    ):
+        if backend[0] == 'mlprogram':
+            pytest.skip("Custom layer not supported with ML Program backend")
+
+        rank = 2
+        input_shape = list(np.random.randint(low=3, high=100, size=1)) * rank
+        if b_is_const:
+            @make_tf_graph([input_shape])
+            def build_model(x):
+                ref = tf.compat.v1.sparse_matmul(
+                    x,
+                    random_gen(input_shape),
+                    transpose_a=transpose_a,
+                    transpose_b=transpose_b,
+                    a_is_sparse=a_is_sparse,
+                    b_is_sparse=b_is_sparse,
+                )
+                return ref
+            input_values = [random_gen(input_shape, -1.0, 1.0)]
+        else:
+            @make_tf_graph([input_shape, input_shape])
+            def build_model(x, y):
+                ref = tf.compat.v1.sparse_matmul(
+                    x,
+                    y,
+                    transpose_a=transpose_a,
+                    transpose_b=transpose_b,
+                    a_is_sparse=a_is_sparse,
+                    b_is_sparse=b_is_sparse,
+                )
+                return ref
+            input_values = [random_gen(input_shape, -1.0, 1.0), random_gen(input_shape, -1.0, 1.0)]
+
+        model, inputs, outputs = build_model
+        input_dict = dict(zip(inputs, input_values))
+        spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            frontend_only=True,
+            backend=backend,
+        )
+
+        layers = spec.neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "SparseMatMul" == layers[-1].custom.className
+        ), "Custom layer class name mismatch"
+        assert (
+            transpose_a == layers[-1].custom.parameters["transpose_x"].boolValue
+        ), "Incorrect parameter value for transpose_x"
+        assert (
+            transpose_b == layers[-1].custom.parameters["transpose_y"].boolValue
+        ), "Incorrect parameter value for transpose_y"
+        assert (
+            a_is_sparse == layers[-1].custom.parameters["x_is_sparse"].boolValue
+        ), "Incorrect parameter value for x_is_sparse"
+        assert (
+            b_is_sparse == layers[-1].custom.parameters["y_is_sparse"].boolValue
+        ), "Incorrect parameter value for y_is_sparse"
+
+        assert len(layers) == 2 if b_is_const else len(layers) == 1
+
+
+class TestCustomTopK:
+    @pytest.fixture(scope="class")
+    def create_custom_TopK(self):
+        # Defining SSA TopK Op
+        @register_op(is_custom_op=True)
+        class custom_topk(Operation):
+            input_spec = InputSpec(
+                x=TensorInputType(type_domain="T"),
+                k=TensorInputType(const=True, optional=True, type_domain=types.int32),
+                axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+                sorted=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            )
+
+            type_domains = {
+                "T": (types.fp16, types.fp32),
+            }
+
+            bindings = {
+                "class_name": "TopK",
+                "input_order": ["x"],
+                "parameters": ["k", "axis", "sorted"],
+                "description": "Top K Custom layer",
+            }
+
+            def default_inputs(self):
+                return DefaultInputs(
+                    k=1,
+                    axis=-1,
+                    sorted=False,
+                )
+
+            def __init__(self, **kwargs):
+                super(custom_topk, self).__init__(**kwargs)
+
+            def type_inference(self):
+                x_type = self.x.dtype
+                x_shape = self.x.shape
+                k = self.k.val
+                axis = self.axis.val
+
+                if not is_symbolic(x_shape[axis]) and k > x_shape[axis]:
+                    msg = "K={} is greater than size of the given axis={}"
+                    raise ValueError(msg.format(k, axis))
+
+                ret_shape = list(x_shape)
+                ret_shape[axis] = k
+                return types.tensor(x_type, ret_shape), types.tensor(types.int32, ret_shape)
+
+        # The following ensures the TopK implemented in the TF converter is tested;
+        # the default path is tested with the appropriate conversion function.
+        # Save the default TF TopK so it can be restored after the test.
+        default_tf_topk = _TF_OPS_REGISTRY.get("TopKV2", None)
+
+        # Override TopK op with override=True flag
+        @register_tf_op(tf_alias=["TopKV2"], override=True)
+        def CustomTopK(context, node):
+            x = context[node.inputs[0]]
+            k = context[node.inputs[1]]
+            sorted = node.attr.get("sorted", False)
+            x = mb.custom_topk(x=x, k=k.val, axis=-1, sorted=sorted, name=node.name)
+            context.add(node.name, x)
+
+        yield
+
+        _TF_OPS_REGISTRY["TopKV2"] = default_tf_topk
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, k",
+        itertools.product(
+            compute_units,
+            backends,
+            [rank for rank in range(1, 4)],
+            [1, 2],
+        ),
+    )
+    @pytest.mark.usefixtures("create_custom_TopK")
+    def test_tf(self, compute_unit, backend, rank, k):
+        if backend[0] == 'mlprogram':
+            pytest.skip("Custom layer not supported with ML Program backend")
+
+        input_shape = np.random.randint(low=3, high=6, size=rank)
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            ref = tf.math.top_k(x, k=k, sorted=True)
+            return ref[1], ref[0]
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(input_shape, -1.0, 1.0)]
+        input_dict = dict(zip(inputs, input_values))
+        spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            frontend_only=True,
+            backend=backend,
+        )
+
+        layers = spec.neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "TopK" == layers[-1].custom.className
+        ), "Custom layer class name mismatch"
+        assert (
+            k == layers[-1].custom.parameters["k"].intValue
+        ), "Incorrect parameter value k"
+        assert (
+            layers[-1].custom.parameters["sorted"].boolValue is True
+        ), "Incorrect parameter value for sorted"
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py
new file mode 100644
index 00000000..381c2d62
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+
+tf = pytest.importorskip("tensorflow")
+
+
+# TODO (rdar://103050703): Move it to test_ops because it only tests TF ops instead of graphs.
+class TestTFGraphs(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_masked_input(self, compute_unit, backend):
+
+        input_shape = [4, 10, 8]
+        val = np.random.rand(*input_shape).astype(np.float32)
+
+        @make_tf_graph([input_shape])
+        def build_model(input):
+            sliced_input = input[..., 4]
+            mask = tf.where(sliced_input > 0)
+            masked_input = tf.gather_nd(input, mask)
+            return masked_input
+
+        model, inputs, outputs = build_model
+
+        input_values = [val]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py
new file mode 100644
index 00000000..175aaac0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py
@@ -0,0 +1,435 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile + +import numpy as np +import pytest + +import coremltools as ct +import coremltools.converters as converter +import coremltools.proto.FeatureTypes_pb2 as ft +from coremltools import EnumeratedShapes, ImageType, RangeDim, TensorType +from coremltools._deps import _HAS_TF_1, _IS_MACOS, MSG_TF1_NOT_FOUND +from coremltools.converters.mil.frontend.tensorflow.converter import \ + TFConverter +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import ( + TensorFlowBaseTest, get_tf_keras_io_names, make_tf_graph) +from coremltools.converters.mil.testing_utils import random_gen + +tf = pytest.importorskip("tensorflow") +frontend = "tensorflow" + +class TestTfModelInputsOutputs(TensorFlowBaseTest): + def setup(self): + self.saved_model_dir = tempfile.mkdtemp() + _, self.model_path_h5 = tempfile.mkstemp( + suffix=".h5", prefix=self.saved_model_dir + ) + _, self.model_path_pb = tempfile.mkstemp( + suffix=".pb", prefix=self.saved_model_dir + ) + + def teardown(self): + if os.path.exists(self.saved_model_dir): + shutil.rmtree(self.saved_model_dir) + + def test_infer_inputs(self): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + if not isinstance(outputs, (tuple, list)): + outputs = [outputs] + + output_names = [ + j if isinstance(j, str) else j.op.name for j in outputs + ] + mlmodel = converter.convert(model, outputs=output_names) + assert mlmodel is not None + + input_values = [random_gen(x_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs) + + def test_infer_outputs(self): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_name = ( + inputs[0] if isinstance(inputs[0], str) else inputs[0].op.name + ) + mlmodel = converter.convert(model, inputs=[TensorType(input_name, (3, 4, 5))]) + assert mlmodel is not None + + input_values = [random_gen(x_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs) + + def test_infer_inputs_and_outputs(self): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + mlmodel = converter.convert(model) + assert mlmodel is not None + + input_values = [random_gen(x_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs) + + def test_extract_sub_model(self): + x_shape = (3, 4, 5) + y_shape = (3, 4, 5) + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.nn.relu(x), tf.math.add(x, y) + + model, inputs, outputs = build_model + if isinstance(outputs[0], str): + first_output_name = outputs[0] + else: + first_output_name = outputs[0].name.split(":")[0] + mlmodel = converter.convert(model, outputs=[first_output_name]) + assert mlmodel is not None + + def test_auto_image_nhwc_input_names(self): + x_shape = (4, 5, 3) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + + mlmodel = converter.convert(model, inputs=[ImageType()]) + assert mlmodel is not None + + def 
test_auto_image_nchw_input_names(self): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + + mlmodel = converter.convert(model, inputs=[ImageType(channel_first=True)]) + assert mlmodel is not None + + @pytest.mark.parametrize( + "target", + [ct.target.iOS13, ct.target.macOS15, ct.target.watchOS6, ct.target.tvOS13], + ) + def test_invalid_deployment_target_cumsum(self, target): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.math.cumsum(x, axis=-1, reverse=False, exclusive=False) + + model, inputs, outputs = build_model + + with pytest.raises(ValueError) as e: + converter.convert(model, minimum_deployment_target=target) + e.match( + r"Provided minimum deployment target requires model to be of version 4 but converted model " + r"uses following features which are available from version 5 onwards. " + r"Please use a higher minimum deployment target to convert. \n 1. Cumsum operation\n" + ) + + @pytest.mark.parametrize( + "target", + [ct.target.iOS14, ct.target.macOS16, ct.target.watchOS7, ct.target.tvOS14], + ) + def test_valid_deployment_target_cumsum(self, target): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.math.cumsum(x, axis=-1, reverse=False, exclusive=False) + + model, inputs, outputs = build_model + + # successful conversion + converter.convert(model, minimum_deployment_target=target) + + def test_invalid_output_names(self): + x_shape = (3, 4, 5) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + with pytest.raises(AssertionError) as e: + converter.convert(model, source=frontend, outputs=["invalid_name"]) + e.match(r".* is not in graph") + + def test_missing_placeholder_shape(self): + x_shape = None # Missing Placeholder shape + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + with pytest.raises(ValueError) as e: + converter.convert(model, source=frontend) + e.match(r"Unable to determine the shape of input .*") + + mlmodel = converter.convert(model, source=frontend, + inputs=[ct.TensorType(shape=(1,))]) + assert mlmodel is not None + + @pytest.mark.skip(reason="Rank-0 input is not supported") + def test_scalar_placeholder_shape(self): + x_shape = () # Scalar Placeholder Shape + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + mlmodel = converter.convert(model, source=frontend) + assert mlmodel is not None + + input_values = [random_gen(x_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs) + + def test_shaping_utils(self): + @make_tf_graph([(None, 4, 5)]) + def build_flexible_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_flexible_model + input_name = TFConverter._get_tensor_name(inputs[0]) + output_name = TFConverter._get_tensor_name(outputs[0]) + + # static-Flexible shape + mlmodel = converter.convert( + model, + inputs=[ + # Use TF's input shapes (None, 4, 5) + TensorType(name=input_name) + ], + outputs=[output_name] + ) + assert mlmodel is not None + input_values = [random_gen((3, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + if _IS_MACOS: + ret = mlmodel.predict(input_dict) + np.allclose(ret[output_name], np.maximum(input_values[0], 0.0)) + + # Enumerate shape + inputs_shape = [ + 
TensorType(input_name, EnumeratedShapes(shapes=[(3, 4, 5), (4, 4, 5)])) + ] + mlmodel = converter.convert(model, inputs=inputs_shape, outputs=[output_name]) + assert mlmodel is not None + input_values = [random_gen((3, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + if _IS_MACOS: + ret = mlmodel.predict(input_dict) + np.allclose(ret[output_name], np.maximum(input_values[0], 0.0)) + + input_values = [random_gen((4, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + if _IS_MACOS: + ret = mlmodel.predict(input_dict) + np.allclose(ret[output_name], np.maximum(input_values[0], 0.0)) + + if _IS_MACOS: + with pytest.raises(RuntimeError): + input_values = [random_gen((5, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + ret = mlmodel.predict(input_dict) + + # Ranged shape + inputs_shape = [TensorType(input_name, [RangeDim(3, 5), 4, 5])] + mlmodel = converter.convert(model, inputs=inputs_shape, outputs=[output_name]) + assert mlmodel is not None + input_values = [random_gen((3, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + if _IS_MACOS: + ret = mlmodel.predict(input_dict) + np.allclose(ret[output_name], np.maximum(input_values[0], 0.0)) + + input_values = [random_gen((4, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + if _IS_MACOS: + ret = mlmodel.predict(input_dict) + np.allclose(ret[output_name], np.maximum(input_values[0], 0.0)) + + if _IS_MACOS: + with pytest.raises(RuntimeError): + input_values = [random_gen((2, 4, 5), -10.0, 10.0)] + input_dict = {input_name: input_values[0]} + ret = mlmodel.predict(input_dict) + + def test_default_data_types(self): + @make_tf_graph([(2, 2)]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + mlmodel = converter.convert(model) + assert mlmodel is not None + spec = mlmodel.get_spec() + + # Defaults should be FLOAT32 instead of DOUBLE + it = spec.description.input[0].type.multiArrayType.dataType + assert it == ft.ArrayFeatureType.ArrayDataType.Value("FLOAT32") + ot = spec.description.output[0].type.multiArrayType.dataType + assert ot == ft.ArrayFeatureType.ArrayDataType.Value("FLOAT32") + + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestTf1ModelFormats: + def setup(self): + self.saved_model_dir = tempfile.mkdtemp() + _, self.model_path_h5 = tempfile.mkstemp( + suffix=".h5", prefix=self.saved_model_dir + ) + _, self.model_path_pb = tempfile.mkstemp( + suffix=".pb", prefix=self.saved_model_dir + ) + + def teardown(self): + if os.path.exists(self.saved_model_dir): + shutil.rmtree(self.saved_model_dir) + + def test_graph_def(self): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(3, 4, 5)) + out = tf.nn.relu(x) + mlmodel = converter.convert( + graph, inputs=[TensorType(x.op.name, (3, 4, 5))], outputs=[out.op.name] + ) + assert mlmodel is not None + + def test_graph_def_file(self): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(3, 4, 5)) + out = tf.nn.relu(x) + tf.io.write_graph( + graph, self.saved_model_dir, self.model_path_pb, as_text=False + ) + mlmodel = converter.convert( + self.model_path_pb, + inputs=[TensorType(x.op.name, (3, 4, 5))], + outputs=[out.op.name], + ) + assert mlmodel is not None + + def test_saved_model_from_simple_save(self): + with tf.compat.v1.Session() as sess: + x = tf.placeholder(shape=(1, 3, 5), dtype=tf.float32) + y = tf.nn.relu(x) + inputs = {"x": x} + outputs = {"y": y} + tf.compat.v1.saved_model.simple_save( + 
sess, self.saved_model_dir, inputs, outputs + ) + mlmodel = converter.convert(self.saved_model_dir) + assert mlmodel is not None + + def test_tf_keras(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + keras_model, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_tf_keras_hdf5_file(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + keras_model.save(self.model_path_h5) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + self.model_path_h5, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_model_metadata(self): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(3, 4, 5)) + out = tf.nn.relu(x) + mlmodel = converter.convert( + graph, inputs=[TensorType(x.op.name, (3, 4, 5))], outputs=[out.op.name] + ) + metadata_keys = mlmodel.get_spec().description.metadata.userDefined + assert "com.github.apple.coremltools.version" in metadata_keys + assert "com.github.apple.coremltools.source" in metadata_keys + assert "tensorflow==1." in metadata_keys["com.github.apple.coremltools.source"] + + def test_invalid_format_none(self): + with pytest.raises(NotImplementedError) as e: + converter.convert(None, source="tensorflow") + e.match(r"Expected model format: .* .pb") + + def test_invalid_format_invalid_extension(self): + _, invalid_filename = tempfile.mkstemp( + suffix=".invalid", prefix=self.saved_model_dir + ) + with pytest.raises(NotImplementedError) as e: + converter.convert(invalid_filename, source="tensorflow") + e.match(r"Expected model format: .* .pb") + + def test_invalid_converter_source(self): + with pytest.raises(ValueError) as e: + converter.convert(None, source="invalid") + expected_msg = r'Unrecognized value of argument "source": .*' + e.match(expected_msg) + + def test_invalid_converter_minimum_deployment_flag(self): + with pytest.raises(TypeError) as e: + converter.convert( + None, source="tensorflow", minimum_deployment_target="iOs14" + ) + expected_msg = ( + "Unrecognized value of argument 'minimum_deployment_target': iOs14. " + "It needs to be a member of 'coremltools.target' enumeration" + ) + + e.match(expected_msg) + + def test_invalid_converter_target(self): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(3, 4, 5)) + with pytest.raises(NotImplementedError) as e: + converter.convert(graph, convert_to="invalid", source="tensorflow") + e.match(r"Backend converter .* not implemented") + + def test_invalid_format_non_exist(self): + non_exist_filename = self.model_path_pb.replace(".pb", "_non_exist.pb") + with pytest.raises(ValueError) as e: + converter.convert(non_exist_filename, source="tensorflow") + e.match(r"Input model .* does not exist") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py new file mode 100644 index 00000000..1cdcfbdd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py @@ -0,0 +1,7370 @@ +# Copyright (c) 2020, Apple Inc. 
All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import math
+import os
+import shutil
+import tempfile
+from distutils.version import StrictVersion
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+from coremltools import RangeDim, TensorType
+from coremltools._deps import _HAS_TF_1, _HAS_TF_2, MSG_TF1_NOT_FOUND, _get_version
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest,
+    freeze_g,
+    layer_counts,
+    load_tf_pb,
+    make_tf_graph,
+)
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import (
+    einsum_equations,
+    gen_input_shapes_einsum,
+    random_gen,
+)
+from coremltools.models.utils import _is_macos, _macos_version
+
+tf = pytest.importorskip("tensorflow")
+
+PREBUILT_TF1_WHEEL_VERSION = "1.15.5"
+
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+class TestContribResampler(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, data_warp_shapes",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                # Data shape format: (Batch, Hin, Win, C)
+                # Warp shape format: (Batch, Hout, Wout, 2)
+                [(1, 3, 3, 1), (1, 3, 3, 2)],  # no size change
+                [(2, 5, 5, 3), (2, 3, 3, 2)],  # down-sampling
+                [(3, 6, 6, 1), (3, 8, 8, 2)],  # up-sampling
+                [(1, 3, 9, 1), (1, 19, 2)],  # rank-3 warp tensor
+            ],
+        ),
+    )
+    def test(
+        self, compute_unit, backend, data_warp_shapes,
+    ):
+        if backend[0] == "neuralnetwork":
+            pytest.skip("nn backend not supported")
+
+        data_shape, warp_shape = data_warp_shapes
+
+        @make_tf_graph([data_shape, warp_shape])
+        def build_model(x, warp):
+            return tf.contrib.resampler.resampler(data=x, warp=warp)
+
+        model, inputs, outputs = build_model
+        # warp exceeding input sizes in order to test more padding modes
+        input_values = [
+            random_gen(data_shape, -100, 100),
+            random_gen(warp_shape, -15, 15),
+        ]
+        input_dict = dict(zip(inputs, input_values))
+        self.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestDebugging(TensorFlowBaseTest):
+    """
+    The TF converter does not handle debugging nodes; they are expected to be
+    deleted before op conversion by the Grappler graph pass debug_stripper.
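+
+    For example, the tf.debugging.Assert, tf.debugging.check_numerics, and
+    tf.raw_ops.Print nodes constructed in the tests below should all be
+    stripped, leaving only the relu op in the converted model.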
+ """ + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_assert(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.debugging.Assert(True, [x]) + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_check_numerics(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.debugging.check_numerics(x, 'check') + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_print(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.raw_ops.Print(input=x, data=[x], message='[x]') + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPlaceholderAsOutput(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape, input_shape]) + def build_model(x, y): + return x, y, x + 1, x + y + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestDuplicateOutputs(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + b = tf.identity(x) + c = tf.identity(x) + d = b + c + return b, c, d + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIdentity(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not 
supported by CoreML runtime') + + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return x + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationElu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.elu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestAddN(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_inputs", + itertools.product( + compute_units, + backends, + list(range(6)), + [1, 3, 9], + ), + ) + def test(self, compute_unit, backend, rank, num_inputs): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=1, high=4, size=rank) + input_shapes = [input_shape[:] for _ in range(num_inputs)] + + @make_tf_graph(input_shapes) + def build_model(*inputs): + return tf.raw_ops.AddN(inputs=inputs) + + model, inputs, outputs = build_model + input_values = [random_gen(shape, -1, 1) for shape in input_shapes] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestAddOrdering(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test(self, compute_unit, backend): + @make_tf_graph([(2, 3, 4), (2, 3, 4)]) + def build_model(x, y): + return tf.math.add(x, y) + + model, inputs, outputs = build_model + input_values = [random_gen((2, 3, 4), -1, 1)] * 2 + input_dict = dict(zip(inputs, input_values)) + + spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + if backend[0] == "neuralnetwork": + nn_spec = spec.neuralNetwork + if _HAS_TF_1: + input_names = ["Placeholder", "Placeholder_1"] + elif _HAS_TF_2: + input_names = ["args_0", "args_1"] + + assert nn_spec.layers[0].input[0] == input_names[0] + assert nn_spec.layers[0].input[1] == input_names[1] + + +class TestActivationLeakyReLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.leaky_relu(x, 0.2) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationReLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, 
backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -10.0, 10)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationReLU6(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.relu6(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGelu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 3)], + ("tanh_approx", "exact_1", "exact_2", "exact_3") + ), + ) + def test(self, compute_unit, backend, rank, mode): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model_tanh_approx(x): + a = 0.5 * ( + 1.0 + tf.tanh((math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))) + ) + return a * x + + @make_tf_graph([input_shape]) + def build_model_exact_1(x): + return x * (0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))) + + @make_tf_graph([input_shape]) + def build_model_exact_2(x): + return 0.5 * (x * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))) + + @make_tf_graph([input_shape]) + def build_model_exact_3(x): + return (x * 0.5) * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0))) + + if mode == "tanh_approx": + build_model = build_model_tanh_approx + elif mode == "exact_1": + build_model = build_model_exact_1 + elif mode == "exact_2": + build_model = build_model_exact_2 + elif mode == "exact_3": + build_model = build_model_exact_3 + else: + raise ValueError("Unexpected mode for Gelu layer") + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -5, 5)] + input_dict = dict(zip(inputs, input_values)) + spec, mlmodel, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + assert TestGelu._op_count_in_mil_program(mlmodel, "gelu") == 1 + assert TestGelu._op_count_in_mil_program(mlmodel, "erf") == 0 + assert TestGelu._op_count_in_mil_program(mlmodel, "pow") == 0 + assert TestGelu._op_count_in_mil_program(mlmodel, "tanh") == 0 + + +class TestActivationSigmoid(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.sigmoid(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + 
TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftPlus(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.softplus(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftmax(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 6) for axis in range(-1, rank)], + ), + ) + def test(self, compute_unit, backend, rank_and_axes): + rank, axis = rank_and_axes + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.softmax(x, axis=axis) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftSign(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.softsign(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSelu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.selu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1.0, 1.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class Testlog1p(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3, 5] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log1p(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, 0.0, 2.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSelect(TensorFlowBaseTest): + 
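+    """
+    Editorial note: tf.raw_ops.Select accepts a condition that either matches
+    the shape of x and y or is a rank-1 vector whose length equals their
+    leading dimension; the `broadcast` cases below exercise that vector form,
+    which selects whole leading slices at a time.
+    """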
@pytest.mark.parametrize( + "compute_unit, backend, rank, broadcast, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + [True, False], + ), + ) + def test_select(self, compute_unit, backend, rank, broadcast, dynamic): + shape = np.random.randint(low=1, high=4, size=rank) + cond_shape = np.array([shape[0]]) if broadcast else shape + + cond_val = np.random.randint(low=0, high=2, size=cond_shape).astype(bool) + a_val = random_gen(shape=shape, rand_min=-1962.0, rand_max=0.0) + b_val = random_gen(shape=shape, rand_min=0.0, rand_max=1964.0) + + if dynamic: + cond_shape = [None] * len(cond_shape) + [tf.bool] + a_shape = [None] * len(shape) + [tf.float32] + b_shape = [None] * len(shape) + [tf.float32] + else: + cond_shape = cond_shape.tolist() + [tf.bool] + a_shape = shape.tolist() + [tf.float32] + b_shape = shape.tolist() + [tf.float32] + + @make_tf_graph([cond_shape, a_shape, b_shape]) + def build_model_select(cond, a, b): + return tf.raw_ops.Select(condition=cond, x=a, y=b) + + model, inputs, outputs = build_model_select + inputs_dic = dict(zip(inputs, [cond_val, a_val, b_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestWhere(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test_where_1_input(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + cond_val = np.random.randint(low=-1, high=2, size=shape).astype(np.float32) + + @make_tf_graph([shape]) + def build_model(condition): + return tf.where(condition=condition) + + model, inputs, outputs = build_model + inputs_dic = dict(zip(inputs, [cond_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test_where(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + cond_val = np.random.randint(low=0, high=2, size=shape).astype(bool) + x_val = random_gen(shape=shape, rand_min=-1962.0, rand_max=0.0) + y_val = random_gen(shape=shape, rand_min=0.0, rand_max=1964.0) + + @make_tf_graph([[*shape, tf.bool], shape, shape]) + def build_model(condition, x, y): + return tf.where(condition=condition, x=x, y=y) + + model, inputs, outputs = build_model + inputs_dic = dict(zip(inputs, [cond_val, x_val, y_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCast(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dtype", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + ['int32', 'float64'] + ), + ) + def test(self, compute_unit, backend, rank, dtype): + shape = np.random.randint(low=1, high=3, size=rank) + + if backend[0] == "mlprogram" and dtype == "int32": + pytest.xfail("rdar://78630549") + + @make_tf_graph([shape]) + def build_model(x): + y = tf.cast(x, dtype=dtype) + y = tf.square(y) + return y + + model, inputs, outputs = build_model + min_range, max_range = -100, 100 + input_values = [random_gen(shape, min_range, max_range)] + + # When using GPU with neuralnetwork backend, that uses FP16 precision, we make sure that + # the 
input is not too close to its ceiling / floor, + # for instance, 24.993 or -13.985 will not be allowed. + if compute_unit != ct.ComputeUnit.CPU_ONLY and dtype == "int32": + TOR_THRESHOLD = 0.03 + value = input_values[0].flatten() + for i, v in enumerate(value): + while abs(math.ceil(v) - v) < TOR_THRESHOLD or abs(math.floor(v) - v) < TOR_THRESHOLD: + v = random_gen((1,), min_range, max_range)[0] + value[i] = v + value = np.reshape(value, shape) + input_values = [value] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestCond(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_naive(self, compute_unit, backend): + if (backend[0] == "mlprogram" and backend[1] == "fp16"): + pytest.xfail("rdar://96627246 (ConsTest unittest is failing)") + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + return tf.cond(tf.constant(True), lambda: x + y, lambda: x * y) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([6], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + return tf.cond(pred, lambda: tf.add(x, z), lambda: tf.square(y)) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_multi_returns(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + + def true_fn(): + return tf.add(x, z), tf.math.multiply(x, z) + + def false_fn(): + return tf.square(y), tf.sqrt(z) + + return tf.cond(pred, true_fn, false_fn) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_with_identity(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + return tf.cond(pred, lambda: z, lambda: tf.square(y)) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend", itertools.product(compute_units, backends,) + ) + def test_cond_multi_returns_with_identity(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + + def true_fn(): + return tf.add(x, z), x + + def false_fn(): + return tf.square(y), z + + return tf.cond(pred, true_fn, false_fn) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_nested_0(self, compute_unit, backend): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660074 (Cond mlprogram FP16 tests falling in TF1 converter with numerical errors)") + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + t = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + f = tf.less(tf.math.reduce_mean(z), tf.math.reduce_mean(y)) + inner_cond = tf.cond( + f, lambda: tf.pow(x, y), lambda: tf.math.subtract(x, y) + ) + return tf.cond(t, lambda: inner_cond, lambda: tf.square(y)) + + model, inputs, outputs = build_model + + input_values = [ + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_nested_1(self, compute_unit, backend): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660074 (Cond mlprogram FP16 tests falling in TF1 converter with numerical errors)") + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + t = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + f = tf.less(tf.math.reduce_mean(z), tf.math.reduce_mean(y)) + cond_1 = tf.cond(f, lambda: tf.pow(x, y), lambda: tf.math.subtract(x, y)) + cond_2 = tf.cond(t, lambda: tf.multiply(x, y), lambda: tf.math.mod(x, y)) + cond_3 = tf.cond(f, lambda: tf.math.divide(x, y), lambda: cond_2) + return tf.cond(t, lambda: cond_1, lambda: cond_3) + + model, inputs, outputs = build_model + + input_values = [ + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestWhileLoop(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_with_changing_shape(self, compute_unit, backend): + @make_tf_graph([(2, 1), (2, 1)]) + def build_model(x, y): + c = lambda i, j: tf.less(tf.shape(j)[1], 5) + b = lambda i, j: (i, tf.concat([i, j], axis=1)) + return tf.while_loop(c, b, [x, y], shape_invariants=[x.get_shape(), tf.TensorShape([2, None])]) + + model, inputs, outputs = build_model + input_values = [np.array([[1], [2]], dtype=np.float32), np.array([[1], [2]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) 
+ + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_no_entry(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + c = lambda i: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i: i - 1 + return tf.while_loop(c, b, [x]) + + model, inputs, outputs = build_model + input_values = [np.array([5], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_0(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + c = lambda i: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i: i - 1 + return tf.while_loop(c, b, [x]) + + model, inputs, outputs = build_model + input_values = [np.array([10], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_1(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + c = lambda i, j: tf.greater(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + b = lambda i, j: (tf.add(i, 1), tf.square(j)) + return tf.while_loop(c, b, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_2(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2)]) + def build_model(x, y): + c = lambda i, j: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i, j: (i - 3, j * 2) + return tf.while_loop(c, b, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_3(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2), (1,)]) + def build_model(x, y, z): + c = lambda i, j, k: tf.greater( + tf.math.reduce_mean(i), tf.math.reduce_mean(j) + ) + b = lambda i, j, k: (i / 3, j ** 2, k - 2) + return tf.while_loop(c, b, [x, y, z]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + np.array([5], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_4(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2), (1,), (2, 1)]) + def build_model(x, y, z, m): + c = lambda i, j, k, l: tf.greater( + tf.math.reduce_mean(i), 
tf.math.reduce_mean(j) + ) + b = lambda i, j, k, l: (i / 3, j ** 2, k - 2, l % 2) + return tf.while_loop(c, b, [x, y, z, m]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + np.array([5], dtype=np.float32), + np.array([[2], [3]], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.skipif(_HAS_TF_2, reason="tf.function() error in TF2") + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_nested_while_body(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + # The following while loop: + # + # i, j = 0, 10 + # while i < j: + # while 2*i < i+2: + # i += 1 + # i += 2 + + def cond2(i): + return tf.less(2 * tf.math.reduce_mean(i), tf.math.reduce_mean(i + 2)) + + def body2(i): + return i + 1 + + def cond1(i, j): + return tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + + def body1(i, j): + new_i = tf.while_loop(cond2, body2, [i]) + return new_i + 2, j + + return tf.while_loop(cond1, body1, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([0], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_nested_while_cond(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + # The following while loop: + # + # def cond(i, j): + # while 2*i < i+2: + # i += 1 + # return i < j + # + # i, j = 0, 10 + # while cond(i, j): + # i += 2 + # j += 1 + + def cond2(i): + return tf.less(2 * tf.math.reduce_mean(i), tf.math.reduce_mean(i + 2)) + + def body2(i): + return i + 1 + + def cond1(i, j): + new_i = tf.while_loop(cond2, body2, [i]) + return tf.less(tf.squeeze(new_i), tf.squeeze(j)) + + def body1(i, j): + return i + 2, j + 1 + + return tf.while_loop(cond1, body1, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([0], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestConv(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", # 1d or 2d conv + "padding", + "data_format", + "HWkHkW", + "strides", + "dilations", + "dynamic_weights", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d"], + ["SAME", "VALID", [[2, 3], [3, 2]]], + ["NHWC"], # NCHW not supported by TF. 
+ [(11, 12, 3, 2), (12, 11, 2, 3)], + [(1, 1), (2, 3)], + [(1, 1), (2, 3)], + [True, False], + [1, 3], + ), + ) + def test( + self, + compute_unit, + backend, + conv_dim, + padding, + data_format, + HWkHkW, + strides, + dilations, + dynamic_weights, + batch_size, + ): + H, W, kH, kW = HWkHkW + N, C_in, C_out = batch_size, 2, 3 + if data_format == "NHWC": + input_shape = (N, W, C_in) if conv_dim == "conv1d" else (N, H, W, C_in) + if isinstance(padding, list): + padding = [[0, 0]] + padding + [[0, 0]] + if conv_dim == "conv1d": + data_format = "NWC" + if isinstance(padding, list): + # No explicit padding for conv1d in TF + return + else: # 'NCHW' + input_shape = (N, C_in, W) if conv_dim == "conv1d" else (N, C_in, H, W) + if isinstance(padding, list): + padding = [[0, 0], [0, 0]] + padding + if conv_dim == "conv1d": + data_format = "NCW" + if isinstance(padding, list): + # No explicit padding for conv1d in TF + return + W_shape = (kW, C_in, C_out) if conv_dim == "conv1d" else (kH, kW, C_in, C_out) + dilations = dilations[1] if conv_dim == "conv1d" else dilations + strides = strides[1] if conv_dim == "conv1d" else strides + + # We do not support dynamic weight when dilations != 1. + if dynamic_weights and dilations == (1, 1): + + @make_tf_graph([input_shape, W_shape]) + def build_model_dynamic_weights(x, W): + if conv_dim == "conv1d": + conv = tf.nn.conv1d( + x, + W, + stride=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + else: + conv = tf.nn.conv2d( + x, + W, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + return conv + + model, inputs, outputs = build_model_dynamic_weights + input_values = [ + random_gen(input_shape, -10.0, 10.0), + random_gen(W_shape, -1.0, 1.0), + ] + input_dict = dict(zip(inputs, input_values)) + + else: + + @make_tf_graph([input_shape]) + def build_model_static_weights(x): + W = tf.constant(np.random.rand(*W_shape), tf.float32) + if conv_dim == "conv1d": + conv = tf.nn.conv1d( + x, + W, + stride=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + else: + conv = tf.nn.conv2d( + x, + W, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + return conv + + model, inputs, outputs = build_model_static_weights + input_values = [random_gen(input_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConv3d(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "data_format", + "input_size", + "kernel_size", + "strides", + "dilations", + "padding_type", + "batch_size", + ] + ), + itertools.product( + compute_units, # compute_unit + backends, + ["NDHWC"], # NCDHW not supported by TF. 
+            [(7, 11, 13), (32, 16, 8)],  # input_size
+            [(1, 1, 1), (3, 3, 3), (1, 2, 3)],  # kernel_size
+            [(1, 1, 1), (2, 2, 2), (3, 2, 1)],  # strides
+            [
+                (1, 1, 1)
+            ],  # , (2, 2, 2), (2, 3, 1)], # dilations: dilations greater than 1 not supported on CPU
+            ["SAME", "VALID"],  # padding_type
+            [1, 3],  # batch_size
+        ),
+    )
+    def test_tf(
+        self,
+        compute_unit,
+        backend,
+        data_format,
+        input_size,
+        kernel_size,
+        strides,
+        dilations,
+        padding_type,
+        batch_size,
+    ):
+        C_in = np.random.randint(low=1, high=4)
+        C_out = np.random.randint(low=1, high=(C_in + 1))
+        input_shape = [batch_size] + list(input_size) + [C_in]
+        weights_shape = list(kernel_size) + [C_in, C_out]
+        # TF1 and TF2 tf.nn.conv3d require dilations and strides to have length 5 or greater, with values of 1 for
+        # indices 0 and 4 (batch and channel in NDHWC format)
+        tf_strides = [1] + list(strides) + [1]
+        tf_dilations = [1] + list(dilations) + [1]
+
+        @make_tf_graph([input_shape])
+        def build_model_static_weights(x):
+            W = tf.constant(np.random.rand(*weights_shape), tf.float32)
+            return tf.nn.conv3d(
+                x,
+                W,
+                strides=tf_strides,
+                padding=padding_type,
+                data_format=data_format,
+                dilations=tf_dilations,
+            )
+
+        model, inputs, outputs = build_model_static_weights
+        input_values = [random_gen(input_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+            atol=1e-03,  # default 1e-04
+            rtol=2e-03,  # default 1e-05
+        )
+
+
+class TestDepthwiseConv(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "padding",
+                "HWkHkW",
+                "strides",
+                "dilations",
+                "dynamic_weights",
+                "batch_size",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            ["SAME", "VALID"],
+            [(11, 12, 3, 2), (12, 11, 2, 3)],
+            # TF doesn't support non-square strides for depthwise
+            # https://github.com/tensorflow/tensorflow/issues/33005
+            [(1, 1, 1, 1), (1, 2, 2, 1)],
+            [
+                (1, 1),
+                (2, 2),
+            ],
+            [True, False],
+            [1, 3],
+        ),
+    )
+    def test_depthwise_conv(
+        self,
+        compute_unit,
+        backend,
+        padding,
+        HWkHkW,
+        strides,
+        dilations,
+        dynamic_weights,
+        batch_size,
+    ):
+        if backend[0] == "mlprogram" and dilations == (1, 1) and dynamic_weights and compute_unit != ct.ComputeUnit.CPU_ONLY:
+            # In this case, there is a numerical mismatch on the GPU MIL backend. The GPU runtime tests are
+            # tracked separately.
+            return
+
+        if np.sum(strides) != len(strides) and np.sum(dilations) != len(dilations):
+            # TF doesn't compute correct output for non-one stride+dilation
+            return
+
+        H, W, kH, kW = HWkHkW
+        N, C_in, C_out = batch_size, 2, 6
+        input_shape = (N, H, W, C_in)
+        data_format = "NHWC"
+        assert C_out % C_in == 0
+        multiplier = int(C_out / C_in)
+        W_shape = (kH, kW, C_in, multiplier)
+
+        def test_static_W():
+            W = np.random.rand(*W_shape).astype(np.float32)
+
+            @make_tf_graph([input_shape])
+            def build_model_static_weights(x):
+                return tf.nn.depthwise_conv2d(
+                    x,
+                    W,
+                    strides=strides,
+                    padding=padding,
+                    dilations=dilations,
+                    data_format=data_format,
+                )
+
+            model, inputs, outputs = build_model_static_weights
+
+            input_values = [(np.random.rand(*input_shape).astype(np.float32))]
+            input_dict = dict(zip(inputs, input_values))
+
+            proto, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+            if backend[0] == 'neuralnetwork':
+                assert layer_counts(proto, "reorganizeData") == 0
+
+        def test_dynamic_W():
+            @make_tf_graph([input_shape, W_shape])
+            def build_model_dynamic_weights(x, W):
+                return tf.nn.depthwise_conv2d(
+                    x,
+                    W,
+                    strides=strides,
+                    padding=padding,
+                    dilations=dilations,
+                    data_format=data_format,
+                )
+
+            model, inputs, outputs = build_model_dynamic_weights
+
+            input_values = [
+                (np.random.rand(*input_shape).astype(np.float32)),
+                (np.random.rand(*W_shape).astype(np.float32)),
+            ]
+            input_dict = dict(zip(inputs, input_values))
+
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+        if backend[0] == "neuralnetwork" and dynamic_weights:
+            pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend")
+
+        # Dynamic weights are not supported when dilations != 1.
+        if dynamic_weights and dilations == (1, 1):
+            test_dynamic_W()
+        else:
+            test_static_W()
+
+
+class TestSeparableConv(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "padding",
+                "HWkHkW",
+                "strides",
+                "dilations",
+                "dynamic_weights",
+                "batch_size",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            ["SAME", "VALID"],
+            [(11, 12, 3, 2), (12, 11, 2, 3)],
+            [(1, 1, 1, 1), (1, 2, 2, 1)],
+            [(1, 1), (2, 2)],
+            [True, False],
+            [1, 3],
+        ),
+    )
+    def test_separable_conv(
+        self,
+        compute_unit,
+        backend,
+        padding,
+        HWkHkW,
+        strides,
+        dilations,
+        dynamic_weights,
+        batch_size,
+    ):
+        if backend[0] == "mlprogram" and dilations == (1, 1) and compute_unit != ct.ComputeUnit.CPU_ONLY:
+            msg = "In this case, there is a numerical mismatch on the GPU MIL backend. The GPU runtime tests are tracked separately."
+            pytest.skip(msg)
+
+        H, W, kH, kW = HWkHkW
+        N, C_in, C_out = batch_size, 2, 6
+        input_shape = (N, H, W, C_in)
+        data_format = "NHWC"
+        assert C_out % C_in == 0
+        multiplier = int(C_out / C_in)
+        depthwise_filter_shape = (kH, kW, C_in, multiplier)
+        pointwise_filter_shape = [1, 1, multiplier * C_in, C_out]
+        if dilations != (1, 1):
+            strides = (1, 1, 1, 1)
+
+        def test_dynamic_W():
+            @make_tf_graph(
+                [input_shape, depthwise_filter_shape, pointwise_filter_shape]
+            )
+            def build_model_dynamic_weights(x, depthwise_filter, pointwise_filter):
+                return tf.nn.separable_conv2d(
+                    x,
+                    depthwise_filter,
+                    pointwise_filter,
+                    strides=strides,
+                    padding=padding,
+                    dilations=dilations,
+                    data_format=data_format,
+                )
+
+            model, inputs, outputs = build_model_dynamic_weights
+
+            input_values = [
+                (np.random.rand(*input_shape).astype(np.float32)),
+                (np.random.rand(*depthwise_filter_shape).astype(np.float32)),
+                (np.random.rand(*pointwise_filter_shape).astype(np.float32)),
+            ]
+            input_dict = dict(zip(inputs, input_values))
+
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+        def test_static_W():
+            depthwise_filter = np.random.rand(*depthwise_filter_shape).astype(
+                np.float32
+            )
+            pointwise_filter = np.random.rand(*pointwise_filter_shape).astype(
+                np.float32
+            )
+
+            @make_tf_graph([input_shape])
+            def build_model_static_weights(x):
+                return tf.nn.separable_conv2d(
+                    x,
+                    depthwise_filter,
+                    pointwise_filter,
+                    strides=strides,
+                    padding=padding,
+                    dilations=dilations,
+                    data_format=data_format,
+                )
+
+            model, inputs, outputs = build_model_static_weights
+
+            input_values = [(np.random.rand(*input_shape).astype(np.float32))]
+            input_dict = dict(zip(inputs, input_values))
+
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+        test_static_W()
+        if not any(d > 1 for d in dilations):
+            if backend[0] == "neuralnetwork":
+                pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend")
+            test_dynamic_W()
+
+
+class TestConvTranspose(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "conv_dim",  # 1d or 2d conv
+                "padding",
+                "data_format",
+                "HWkHkW",
+                "strides",
+                "dilations",
+                "dynamic",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            ["conv1d", "conv2d"],
+            ["SAME", "VALID"],
+            ["NHWC"],  # NCHW not supported by TF
+            [(12, 10, 2, 2), (4, 2, 2, 3), (7, 5, 3, 3)],
+            [(1, 1), (1, 2)],
+            [(1, 1)],  # Dilation > 1 not supported by TF
+            [True, False],
+        ),
+    )
+    def test_conv_transpose(
+        self,
+        compute_unit,
+        backend,
+        conv_dim,
+        padding,
+        data_format,
+        HWkHkW,
+        strides,
+        dilations,
+        dynamic,
+    ):
+        H, W, kH, kW = HWkHkW
+        N, C_in, C_out = 1, 1, 2
+
+        if data_format == "NHWC":
+            input_shape = (N, W, C_in) if conv_dim == "conv1d" else (N, H, W, C_in)
+            if conv_dim == "conv1d":
+                data_format = "NWC"
+        else:  # 'NCHW'
+            pass
+
+        w_shape = (kW, C_out, C_in) if conv_dim == "conv1d" else (kH, kW, C_out, C_in)
+
+        # dynamic input shape
+        tf_input_shape = list(input_shape)
+        if dynamic:
+            if data_format == "NHWC":
+                tf_input_shape[1] = None
+                tf_input_shape[2] = None
+            elif data_format == "NWC":
+                tf_input_shape[1] = None
+
+        @make_tf_graph([tf_input_shape])
+        def build_model(x):
+            Weight = tf.constant(np.random.rand(*w_shape), tf.float32)
+
+            # get the dynamic height and width
+            if dynamic:
+                shape = tf.shape(x)
+                if data_format
== "NHWC": + H, W = shape[1], shape[2] + elif data_format == "NWC": + W = shape[1] + else: + H, W = HWkHkW[:2] + + kH, kW = HWkHkW[2:] + + is_conv_2d = conv_dim == "conv2d" + + # compute the output shape, in both static / dynamic cases + if padding == "SAME": + oW = W * strides[1] + if is_conv_2d: + oH = H * strides[0] + elif padding == "VALID": + oW = (W - 1) * strides[1] + (kW - 1) * dilations[1] + 1 + if is_conv_2d: + oH = (H - 1) * strides[0] + (kH - 1) * dilations[0] + 1 + + if data_format == "NHWC": + output_shape = [N, oH, oW, C_out] + elif data_format == "NWC": + output_shape = [N, oW, C_out] + + if conv_dim == "conv1d": + return tf.nn.conv1d_transpose( + x, + Weight, + output_shape=output_shape, + strides=strides[1], + padding=padding, + dilations=dilations[1], + data_format=data_format, + ) + elif conv_dim == "conv2d": + return tf.nn.conv2d_transpose( + x, + Weight, + output_shape=output_shape, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + model, inputs, outputs = build_model + + input_values = [(np.random.rand(*input_shape).astype(np.float32))] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "data_format", + "DHWkDkHkW", + "strides", + "dilations", + "dynamic", + ] + ), + itertools.product( + compute_units, + backends, + [ + "SAME", "VALID" + ], + ["NDHWC"], + [ + (10, 12, 14, 2, 3, 5), + (4, 6, 8, 2, 3, 1), + (6, 8, 10, 3, 3, 3), + (5, 7, 9, 2, 4, 2), + ], + [(1, 1, 1), (1, 2, 3)], + [(1, 1, 1)], # Dilation > 1 not supported by TF + [True, False], + ), + ) + def test_conv3d_transpose( + self, compute_unit, backend, padding, data_format, DHWkDkHkW, strides, dilations, dynamic, + ): + if _macos_version() < (12, 0) and strides == (1, 2, 3) and padding == "VALID": + # Behavior changed in macOS 12 + return + + D, H, W, kD, kH, kW = DHWkDkHkW + N, C_in, C_out = 2, 1, 2 + + if data_format == "NDHWC": + input_shape = (N, D, H, W, C_in) + else: # 'NCDHW' + pass + + tf_input_shape = list(input_shape) + if dynamic: + if data_format == "NDHWC": + tf_input_shape[1] = None + tf_input_shape[2] = None + tf_input_shape[3] = None + else: + pass + + w_shape = (kD, kH, kW, C_out, C_in) + + @make_tf_graph([tf_input_shape]) + def build_model(x): + weight = tf.constant(np.random.rand(*w_shape), tf.float32) + + # get the depth, height and width + if dynamic: + shape = tf.shape(x) + if data_format == "NDHWC": + D, H, W = shape[1], shape[2], shape[3] + else: + pass + else: + D, H, W = DHWkDkHkW[:3] + + kD, kH, kW = DHWkDkHkW[3:] + + # compute the output shape + if padding == "SAME": + oD = D * strides[0] + oH = H * strides[1] + oW = W * strides[2] + else: + oD = (D - 1) * strides[0] + (kD - 1) * dilations[0] + 1 + oH = (H - 1) * strides[1] + (kH - 1) * dilations[1] + 1 + oW = (W - 1) * strides[2] + (kW - 1) * dilations[2] + 1 + + if data_format == "NDHWC": + output_shape = [N, oD, oH, oW, C_out] + else: + pass + + return tf.nn.conv3d_transpose( + x, + weight, + output_shape=output_shape, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + + model, inputs, outputs = build_model + + input_values = [(np.random.rand(*input_shape).astype(np.float32))] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + 
backend=backend, + ) + + +class TestElementWiseBinary(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, tf_op, broadcast_case", + itertools.product( + compute_units, + backends, + [0, 1, 2, 3, 4], + [ + tf.math.add, + tf.math.floordiv, + tf.math.floormod, + tf.math.maximum, + tf.math.minimum, + tf.math.mod, + tf.math.multiply, + tf.math.pow, + tf.math.truediv, + tf.math.subtract, + tf.math.squared_difference, + ], + [0, 1, 2, 3] + ), + ) + def test_binary_math(self, compute_unit, backend, rank, tf_op, + broadcast_case): + if rank == 0 or broadcast_case == 0: + pytest.skip("Rank-0 input is not supported") + + x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank)) + + # test broadcasting + # 0 -> broadcast with one of the inputs is a 0-D tensor (scalar) + # 1 -> broadcast with same rank, some of dimensions are size 1 + # 2 -> broadcast with different rank, extra dimension with size 1 + # 3 -> no broadcast, same type for both inputs + if broadcast_case == 0: + y_shape = [] + elif broadcast_case == 1: + y_shape = [1 if np.random.randint(2) == 0 else d for d in y_shape] + elif broadcast_case == 2: + y_shape = [1] + y_shape + + # randomly swap x and y + if np.random.randint(2) == 0: + x_shape, y_shape = y_shape, x_shape + + # lower precision input data for non-CPU tests + dtype = np.float32 if compute_unit == ct.ComputeUnit.CPU_ONLY else np.float16 + + if tf_op in {tf.math.add, tf.math.subtract, tf.math.multiply}: + x_val = random_gen(x_shape, -100, 100, dtype=dtype).astype(np.float32) + y_val = random_gen(y_shape, -100, 100, dtype=dtype).astype(np.float32) + elif tf_op in {tf.math.truediv, tf.math.floordiv, tf.math.floormod, tf.math.mod}: + x_val = random_gen(x_shape, -100, 100, dtype=dtype).astype(np.float32) + y_val = random_gen(y_shape, 1, 20, dtype=dtype).astype(np.float32) + elif tf_op in {tf.math.maximum, tf.math.minimum}: + x_val = random_gen(x_shape, -10, 10, dtype=dtype).astype(np.float32) + y_val = random_gen(y_shape, -10, 10, dtype=dtype).astype(np.float32) + elif tf_op in {tf.math.pow, tf.math.squared_difference}: + x_val = random_gen(x_shape, -5, 5, dtype=np.int32).astype(np.float32) + y_val = random_gen(y_shape, -5, 5, dtype=np.int32).astype(np.float32) + else: + raise NotImplementedError("input values needs to be defined") + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf_op(x, y) + + model, inputs, outputs = build_model + input_values = [x_val, y_val] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, tf_op, broadcast_case", + itertools.product( + compute_units, + backends, + [0, 1, 2, 3, 4], + [ + tf.equal, + tf.not_equal, + tf.greater, + tf.greater_equal, + tf.less, + tf.less_equal, + ], + [0, 1, 2, 3], + ), + ) + def test_binary_compare(self, compute_unit, backend, rank, tf_op, + broadcast_case): + if rank == 0 or broadcast_case == 0: + pytest.skip("Rank-0 input is not supported") + + x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank)) + + # test broadcasting + # 0 -> broadcast with one of the inputs is a 0-D tensor (scalar) + # 1 -> broadcast with same rank, some of dimensions are size 1 + # 2 -> broadcast with different rank, extra dimension with size 1 + # 3 -> no broadcast, same type for both inputs + if broadcast_case == 0: + y_shape = [] + elif broadcast_case == 1: + y_shape = [1 if np.random.randint(2) 
== 0 else d for d in y_shape] + elif broadcast_case == 2: + y_shape = [1] + y_shape + + # randomly swap x and y + if np.random.randint(2) == 0: + x_shape, y_shape = y_shape, x_shape + + # lower precision input data for non-CPU tests + dtype = np.float32 if compute_unit == ct.ComputeUnit.CPU_ONLY else np.float16 + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf_op(x, y) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -5, 3, dtype=dtype).astype(np.float32), + random_gen(y_shape, -5, 3, dtype=dtype).astype(np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, tf_op, broadcast_case", + itertools.product( + compute_units, + backends, + [0, 1, 2, 3, 4], + [ + tf.math.logical_and, + tf.math.logical_or, + tf.math.logical_xor, + ], + [0, 1, 2, 3], + ), + ) + def test_binary_logical(self, compute_unit, backend, rank, tf_op, + broadcast_case): + if rank == 0 or broadcast_case == 0: + pytest.skip("Rank-0 input is not supported") + + x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank)) + + # test broadcasting + # 0 -> broadcast with one of the inputs is a 0-D tensor (scalar) + # 1 -> broadcast with same rank, some of dimensions are size 1 + # 2 -> broadcast with different rank, extra dimension with size 1 + # 3 -> no broadcast, same type for both inputs + if broadcast_case == 0: + y_shape = [] + elif broadcast_case == 1: + y_shape = [1 if np.random.randint(2) == 0 else d for d in y_shape] + elif broadcast_case == 2: + y_shape = [1] + y_shape + + # randomly swap x and y + if np.random.randint(2) == 0: + x_shape, y_shape = y_shape, x_shape + + @make_tf_graph([x_shape + [tf.bool], y_shape + [tf.bool]]) + def build_model(x, y): + return tf_op(x, y) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, 0, 2, dtype=np.int32).astype(bool), + random_gen(y_shape, 0, 2, dtype=np.int32).astype(bool), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCross(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [2, 3, 4], + ) + ) + def test(self, compute_unit, backend, rank): + input_shape = list(np.random.randint(low=2, high=4, size=rank)) + [3] + input_shapes = [input_shape, input_shape] + + @make_tf_graph(input_shapes) + def build_model(x, y): + return tf.linalg.cross(x, y) + + model, inputs, outputs = build_model + + input_values = [random_gen(shape, -1, 1) for shape in input_shapes] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEinsum(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, equation, reverse_input_order", + itertools.product( + compute_units, + backends, + einsum_equations, + [False, True], + ) + ) + def test(self, compute_unit, backend, equation, reverse_input_order): + input_shapes, _ = gen_input_shapes_einsum(equation, False) + if _HAS_TF_1: + if len(set(input_shapes[0])) < len(input_shapes[0]) or len(set(input_shapes[1])) < len(input_shapes[1]): + pytest.skip("tf1 does not support diagonal cases") + + if reverse_input_order: 
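+            # For example (illustrative): with equation "ab,bc->ac", the block
+            # below rewrites it to "bc,ab->ac" and swaps the input shapes to
+            # match, so the converter is exercised on both operand orders.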
+            input_output_strings = equation.split('->')
+            input_strings = input_output_strings[0].split(',')
+            equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1]
+            input_shapes = [input_shapes[1], input_shapes[0]]
+
+        @make_tf_graph(input_shapes)
+        def build_model(x, y):
+            return tf.einsum(equation, x, y)
+
+        model, inputs, outputs = build_model
+
+        input_values = [
+            random_gen(input_shapes[0], -1, 1),
+            random_gen(input_shapes[1], -1, 1),
+        ]
+
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestElementWiseUnary(TensorFlowBaseTest):
+    _FP16_UNSUPPORTED = {'acos', 'asin', 'atan', 'atanh', 'cosh', 'sinh'}
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, mode",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 2, 5],
+            [
+                "abs",
+                "acos",
+                "asin",
+                "atan",
+                "atanh",
+                "cast",
+                "ceil",
+                "clip",
+                "cos",
+                "cosh",
+                "erf",
+                "exp",
+                "floor",
+                "inverse",
+                "log",
+                "negative",
+                "round",
+                "rsqrt",
+                "sign",
+                "sin",
+                "sinh",
+                "sqrt",
+                "square",
+                "tan",
+                "tanh",
+            ],
+        ),
+    )
+    def test_unary(self, compute_unit, backend, rank, mode):
+        _PREBUILD_WHEEL_SEGFAULTING_MODE = ["acos", "asin", "atan", "atanh", "cosh", "sinh"]
+
+        if compute_unit != ct.ComputeUnit.CPU_ONLY and mode in self._FP16_UNSUPPORTED:
+            return
+
+        if _get_version(tf.__version__) == StrictVersion(PREBUILT_TF1_WHEEL_VERSION):
+            if mode in _PREBUILD_WHEEL_SEGFAULTING_MODE:
+                # We should re-enable these tests once rdar://100735561 ([CI] Build a
+                # more stable TF1 Rosetta wheel for the lightning CI) is fixed.
+                pytest.skip("Prebuilt wheel segfaulting on several functions.")
+
+        if _macos_version() < (13, 0):
+            if backend == ("mlprogram", "fp16") and _is_macos():
+                pytest.skip("Requires macOS13 or greater")
+            elif compute_unit != ct.ComputeUnit.CPU_ONLY:
+                pytest.skip("GPU issue fixed in iOS16/macOS13")
+        else:
+            dtype = np.float32
+            tf_dtype = tf.float32
+
+        atol, rtol = 1e-4, 1e-5
+        input_shape = np.random.randint(low=2, high=4, size=rank)
+
+        if backend == ("mlprogram", "fp16") and mode != "clip":
+            # For the clip mode with tf.float16 as input, it seems like the TF graph
+            # is producing wrong results. It looks like a TensorFlow bug, tracked by this radar:
+            # rdar://96850184 (Tensor clip_by_value is producing wrong numerical outputs with tf.float16 type input)
+            dtype = np.float16
+            tf_dtype = tf.float16
+        else:
+            dtype = np.float32
+            tf_dtype = tf.float32
+
+        def cast_func(x):
+            return tf.cast(x, dtype=tf.int32)
+
+        def clip_func(x):
+            return tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=5.0)
+
+        def _get_test(test_mode):
+            if test_mode == "abs":
+                res = tf.abs
+                val = random_gen(input_shape, rand_min=-1, rand_max=1)
+            elif test_mode == "acos":
+                res = tf.acos
+                val = random_gen(input_shape, rand_min=-1, rand_max=1)
+            elif test_mode == "asin":
+                res = tf.asin
+                val = random_gen(input_shape, rand_min=-1, rand_max=1)
+            elif test_mode == "atan":
+                res = tf.atan
+                val = random_gen(input_shape, rand_min=-100, rand_max=100)
+            elif test_mode == "atanh":
+                res = tf.atanh
+                val = random_gen(input_shape, rand_min=-0.9, rand_max=0.9)
+            elif test_mode == "cast":
+                eps_from_int = 0.0
+                if compute_unit != ct.ComputeUnit.CPU_ONLY:
+                    eps_from_int = 0.1
+                res = cast_func
+                val = random_gen(
+                    input_shape,
+                    rand_min=-10,
+                    rand_max=10,
+                    eps_from_int=eps_from_int,
+                    dtype=dtype,
+                )
+            elif test_mode == "ceil":
+                res = tf.math.ceil
eps_from_int = 0.0 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + eps_from_int = 0.1 + val = random_gen( + input_shape, + rand_min=-100, + rand_max=100, + eps_from_int=eps_from_int, + dtype=dtype, + ) + elif test_mode == "clip": + if compute_unit != ct.ComputeUnit.CPU_ONLY: + return None, None # clip does not support float16 + res = clip_func + val = random_gen(input_shape, rand_min=-5, rand_max=10) + elif test_mode == "cos": + res = tf.cos + rand_range = 1000 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + rand_range = 10 + val = random_gen(input_shape, rand_min=-rand_range, rand_max=rand_range) + elif test_mode == "cosh": + res = tf.cosh + val = random_gen(input_shape, rand_min=-4, rand_max=4) + elif test_mode == "erf": + res = tf.math.erf + val = random_gen(input_shape, rand_min=1, rand_max=6) + elif test_mode == "exp": + if compute_unit != ct.ComputeUnit.CPU_ONLY: + # We skip GPU here, since exp(1) already differs in backend. + return None, None + res = tf.exp + val = random_gen(input_shape, rand_min=-4, rand_max=4) + elif test_mode == "floor": + res = tf.floor + eps_from_int = 0.0 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + eps_from_int = 0.1 + val = random_gen( + input_shape, + rand_min=-100, + rand_max=100, + eps_from_int=eps_from_int, + dtype=dtype, + ) + elif test_mode == "inverse": + res = tf.math.reciprocal + val = random_gen(input_shape, rand_min=0.1, rand_max=10) + elif test_mode == "log": + res = tf.math.log + val = random_gen(input_shape, rand_min=0.2, rand_max=1000) + elif test_mode == "negative": + res = tf.math.negative + val = random_gen(input_shape, rand_min=-100.0, rand_max=100.0) + elif test_mode == "round": + res = tf.round + val = random_gen( + input_shape, rand_min=-1000, rand_max=1000, dtype=dtype + ) + elif test_mode == "rsqrt": + res = tf.math.rsqrt + val = random_gen(input_shape, rand_min=0.5, rand_max=1000) + elif test_mode == "sign": + res = tf.sign + val = random_gen(input_shape, rand_min=-5, rand_max=5) + elif test_mode == "sin": + res = tf.sin + rand_range = 1000 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + rand_range = 10 + val = random_gen(input_shape, rand_min=-rand_range, rand_max=rand_range) + elif test_mode == "sinh": + res = tf.sinh + val = random_gen(input_shape, rand_min=-10, rand_max=10) + elif test_mode == "sqrt": + res = tf.sqrt + val = random_gen(input_shape, rand_min=0.5, rand_max=1000) + elif test_mode == "square": + res = tf.math.square + val = random_gen(input_shape, rand_min=-5, rand_max=5) + elif test_mode == "tan": + res = tf.tan + val = random_gen(input_shape, rand_min=-1000, rand_max=1000) + elif test_mode == "tanh": + res = tf.tanh + val = random_gen(input_shape, rand_min=-1000, rand_max=1000) + + return res, val + + func, input_val = _get_test(mode) + if func is None: + return + + input_type = list(input_shape) + [tf_dtype] + @make_tf_graph([input_type]) + def build_model(x): + return func(x) + + model, inputs, outputs = build_model + + input_dict = dict(zip(inputs, [input_val.astype(dtype)])) + + if mode == "inverse" or mode == "rsqrt": + atol, rtol = 1e-2, 1e-3 + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=atol, + rtol=rtol, + minimum_deployment_target=ct.target.iOS16 if backend == ("mlprogram", "fp16") else None, + ) + + +class TestImageResizing(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, target_shape, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [(1, 10, 
20, 1), (2, 5, 1, 3)],
+            [(25, 30), (2, 20)],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_resize_bilinear(
+        self,
+        compute_unit,
+        backend,
+        input_shape,
+        target_shape,
+        align_corners,
+        half_pixel_centers,
+    ):
+        if half_pixel_centers and align_corners:
+            return
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            return tf.raw_ops.ResizeBilinear(
+                images=x,
+                size=target_shape,
+                half_pixel_centers=half_pixel_centers,
+                align_corners=align_corners,
+            )
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(input_shape, -100, 100)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, input_shape, scale_factor, align_corners, half_pixel_centers",
+        itertools.product(
+            compute_units,
+            backends,
+            [(1, 10, 20, 1), (2, 5, 2, 3)],
+            [(2, 3),],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_resize_bilinear_dynamic_shape(
+        self,
+        compute_unit,
+        backend,
+        input_shape,
+        scale_factor,
+        align_corners,
+        half_pixel_centers,
+    ):
+        if backend[0] == "neuralnetwork" or ct.utils._macos_version() < (13, 0):
+            pytest.skip("half_pixel_centers is only supported for the iOS16 upsample_bilinear layer")
+
+        if half_pixel_centers and align_corners:
+            pytest.skip("half_pixel_centers and align_corners cannot both be True")
+
+        batch_dim, _, _, channel = input_shape
+        h_factor, w_factor = scale_factor
+
+        @make_tf_graph([(batch_dim, None, None, channel, tf.float32)])
+        def build_model(x):
+            input_shape = tf.shape(x)
+            target_shape = tf.math.multiply(input_shape[1:3], (h_factor, w_factor))
+            return tf.raw_ops.ResizeBilinear(
+                images=x,
+                size=target_shape,
+                half_pixel_centers=half_pixel_centers,
+                align_corners=align_corners,
+            )
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(input_shape, -1, 1)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+            minimum_deployment_target=ct.target.iOS16,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, input_shape, upsample_factor, data_format",
+        itertools.product(
+            compute_units,
+            backends,
+            [(1, 1, 1, 3), (1, 10, 5, 3)],
+            [(1, 2), (4, 3)],
+            ["channels_last", "channels_first"],
+        ),
+    )
+    def test_upsampling_2d(
+        self, compute_unit, backend, input_shape, upsample_factor, data_format
+    ):
+        if data_format == "channels_last":
+            input_shape = (
+                input_shape[0],
+                input_shape[2],
+                input_shape[3],
+                input_shape[1],
+            )
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            return tf.keras.layers.UpSampling2D(
+                size=upsample_factor, data_format=data_format, interpolation="nearest"
+            )(x)
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(input_shape, -100, 100)]
+        input_dict = dict(zip(inputs, input_values))
+        spec = TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )[0]
+        # Also check that the scaling factors are integers
+        if backend[0] == 'neuralnetwork':
+            for layer in spec.neuralNetwork.layers:
+                if layer.WhichOneof('layer') == "upsample":
+                    assert len(layer.upsample.fractionalScalingFactor) == 0
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, input_shape, num_of_crops, crop_size, method, dynamic, extrapolation_value",
+        itertools.product(
+            compute_units,
+            backends,
+            [(1, 64, 64, 1)],
+            [1, 3, 5],
+            [(2, 2), (1, 1), (4, 4), (128, 128)],
+            ["bilinear"],
+            [False, True],
+            [0.0, 1.0],
+        ),
+    )
+    def test_crop_and_resize(
+        self,
+        compute_unit,
+        backend,
+        input_shape,
+        num_of_crops,
+        crop_size,
+        method,
+        dynamic,
+        extrapolation_value,
+    ):
+        if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY and crop_size == (1, 1):
+            # In this case there is a numerical mismatch on the GPU MIL backend. The GPU
+            # runtime tests are tracked separately.
+            return
+
+        if extrapolation_value != 0.0:
+            if backend[0] == "neuralnetwork":
+                pytest.xfail("pad_value not available in the neuralnetwork backend.")
+            if ct.utils._macos_version() < (13, 0):
+                pytest.skip("pad_value not supported on macOS 12 or older.")
+            minimum_deployment_target = ct.target.iOS16
+        else:
+            minimum_deployment_target = None
+
+        # rdar://98749492 (crop_resize is unstable for the cropping-out-of-bounds setting in fp16)
+        if backend[0] == "mlprogram":
+            backend = ("mlprogram", "fp32")
+
+        # TODO(rdar://98749492): Once resolved, set crop_bias = 0.5 in order to test crops outside the image
+        crop_bias = 0.0
+
+        input = np.random.randn(*input_shape).astype(np.float32)
+        boxes = np.random.uniform(size=(num_of_crops, 4)).astype(np.float32) + crop_bias
+        box_indices = np.random.randint(
+            size=(num_of_crops,), low=0, high=input_shape[0]
+        ).astype(np.int32)
+
+        def test_static():
+            @make_tf_graph([input_shape])
+            def build_model(x):
+                return tf.raw_ops.CropAndResize(
+                    image=x,
+                    boxes=boxes,
+                    box_ind=box_indices,
+                    crop_size=crop_size,
+                    method=method,
+                    extrapolation_value=extrapolation_value,
+                )
+
+            model, inputs, outputs = build_model
+            input_values = [input]
+            input_dict = dict(zip(inputs, input_values))
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+                minimum_deployment_target=minimum_deployment_target,
+            )
+
+        def test_dynamic():
+            @make_tf_graph([input_shape, boxes.shape, list(box_indices.shape) + [tf.int32]])
+            def build_model(x, boxes_pl, box_indices_pl):
+                return tf.raw_ops.CropAndResize(
+                    image=x,
+                    boxes=boxes_pl,
+                    box_ind=box_indices_pl,
+                    crop_size=crop_size,
+                    method=method,
+                    extrapolation_value=extrapolation_value
+                )
+            model, inputs, outputs = build_model
+            input_values = [input, boxes, box_indices]
+            input_dict = dict(zip(inputs, input_values))
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+                minimum_deployment_target=minimum_deployment_target,
+            )
+
+        test_dynamic() if dynamic else test_static()
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, width, height, strides, sizes, padding,",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 3, 5],
+            [2, 7, 12],
+            [(1, 1), (2, 1), (3, 5)],
+            [(1, 1), (1, 2), (5, 4)],
+            ["VALID", "SAME"],
+        ),
+    )
+    def test_extract_patches(
+        self, compute_unit, backend, width, height, strides, sizes, padding
+    ):
+        # TODO: theoretically, the current extract_patches code can handle batch sizes other than 1,
+        # but there seems to be a bug in crop_resize when using the GPU with batch_size > 1.
+        # We should test batch_size > 1 after the issue is fixed.
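+        # Note: with "VALID" padding the patch size must not exceed the image size
+        # (otherwise no patches can be extracted), so size_h/size_w are clamped to
+        # the image dimensions below.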
+ # + input = np.random.rand(1, height, width, 128).astype(np.float32) + if padding == "VALID": + size_h = min(sizes[0], height) + size_w = min(sizes[1], width) + else: + size_h = sizes[0] + size_w = sizes[1] + + @make_tf_graph([input.shape]) + def build_model(x): + return tf.compat.v1.image.extract_image_patches( + images=x, + ksizes=[1, size_h, size_w, 1], + strides=[1, strides[0], strides[1], 1], + rates=[1, 1, 1, 1], + padding=padding, + ) + model, inputs, outputs = build_model + input_values = [input] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLinear(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim, transpose_a, transpose_b, use_constant", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [True, False], + [True, False], + [True, False], + ), + ) + def test_matmul( + self, compute_unit, backend, dim, transpose_a, transpose_b, use_constant + ): + shape_x = np.array([dim, dim * 2, dim * 4]) + shape_y = np.array([dim * 4, dim * 2]) + + flip = (not transpose_a and transpose_b) or (transpose_a and not transpose_b) + shape_y = np.flip(shape_y) if flip else shape_y + + if not use_constant: + + @make_tf_graph([shape_x, shape_y]) + def build_model(x, y): + return tf.linalg.matmul( + x, y, transpose_a=transpose_a, transpose_b=transpose_b + ) + + input_values = [ + random_gen(shape=shape_x, rand_min=-100, rand_max=100), + random_gen(shape=shape_y, rand_min=-1.0, rand_max=1.0), + ] + else: + y = random_gen(shape=shape_y, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([shape_x]) + def build_model(x): + return tf.linalg.matmul( + x, y, transpose_a=transpose_a, transpose_b=transpose_b + ) + + input_values = [random_gen(shape=shape_x, rand_min=-100, rand_max=100)] + + model, inputs, outputs = build_model + + input_dict = dict(zip(inputs, input_values)) + + proto, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + for layer in proto.neuralNetwork.layers: + if layer.WhichOneof("layer") == "batchedMatmul": + wp = layer.batchedMatmul.weights + if use_constant: + assert len(wp.floatValue) != 0 + else: + assert len(wp.floatValue) == 0 + + +class TestBatchNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, shape_mode, epsilon", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [True, False], + [1e-1, 1e-10], + ), + ) + def test_batch_norm(self, compute_unit, backend, rank, shape_mode, epsilon): + input_shape = np.random.randint(low=1, high=4, size=rank) + if shape_mode: + # same shape with 1 for being normalized over + attr_shape = list(input_shape) + attr_shape[1] = 1 + attr_shape[2] = 1 + else: + # 1D tensor of the same size as channel dimension + attr_shape = [list(input_shape)[-1]] + + @make_tf_graph([input_shape, attr_shape, attr_shape, attr_shape, attr_shape]) + def build_model(x, m, v, o, s): + return tf.nn.batch_normalization( + x, mean=m, variance=v, offset=o, scale=s, variance_epsilon=epsilon + ) + + model, inputs, outputs = build_model + + input_values = [ + random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0), + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0), + random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=-1.0, 
rand_max=1.0), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=.2, + rtol=1e-4, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, shape_mode, epsilon, scale_after_normalization", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [True, False], + [1e-1, 1e-10], + [True, False], + ), + ) + def test_batch_norm_with_global_normalization( + self, + compute_unit, + backend, + rank, + shape_mode, + epsilon, + scale_after_normalization, + ): + input_shape = np.random.randint(low=1, high=4, size=rank) + if shape_mode: + # same shape with 1 for being normalized over + attr_shape = list(input_shape) + attr_shape[1] = 1 + attr_shape[2] = 1 + else: + # 1D tensor of the same size as channel dimension + attr_shape = [list(input_shape)[-1]] + + if scale_after_normalization: + + @make_tf_graph( + [input_shape, attr_shape, attr_shape, attr_shape, attr_shape] + ) + def build_model(x, m, v, b, g): + return tf.nn.batch_norm_with_global_normalization( + x, + mean=m, + variance=v, + beta=b, + gamma=g, + variance_epsilon=epsilon, + scale_after_normalization=scale_after_normalization, + ) + + else: + + @make_tf_graph([input_shape, attr_shape, attr_shape, attr_shape]) + def build_model(x, m, v, b): + return tf.nn.batch_norm_with_global_normalization( + x, + mean=m, + variance=v, + beta=b, + gamma=None, + variance_epsilon=epsilon, + scale_after_normalization=scale_after_normalization, + ) + + model, inputs, outputs = build_model + + input_values = [ + random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0), + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0), + random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0), + ] + if scale_after_normalization: + input_values.append( + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + ) + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=0.2, + rtol=1e-4, + ) + + +class TestNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-1, 1e-10] + ), + ) + def test_fused_batch_norm(self, compute_unit, backend, epsilon): + # TensorFlow's FusedBatchNorm is only for 4D inputs + input_shape = np.random.randint(low=1, high=4, size=4) + attr_shape = [list(input_shape)[-1]] + + m = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + v = random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0) + o = random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0) + s = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.compat.v1.nn.fused_batch_norm( + x, + mean=m, + variance=v, + offset=o, + scale=s, + epsilon=epsilon, + is_training=False, + )[0] + + model, inputs, outputs = build_model + + input_values = [random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + +class TestL2Normalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axes, epsilon", + itertools.product( + compute_units, + 
backends, + [rank for rank in range(3, 6)], + [(-1,), (-2,), (0, 1)], + [1e-5, 1e-10], + ), + ) + def test_l2_normalize(self, compute_unit, backend, rank, axes, epsilon): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.l2_normalize(x, axis=axes, epsilon=epsilon) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=-10, rand_max=10)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=0.05, + rtol=1e-4, + ) + +class TestLocalResponseNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, size, alpha, beta, k", + itertools.product( + compute_units, + backends, + [1, 2, 3], + [0.0001, 0.01], + [0.75, 1.0], + [1.0, 2.0], + ), + ) + def test_local_response_normalization( + self, compute_unit, backend, size, alpha, beta, k + ): + # TensorFlow's local_response_normalization only supports rank 4 + input_shape = np.random.randint(low=3, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.local_response_normalization( + x, depth_radius=size, bias=k, alpha=alpha, beta=beta + ) + + model, inputs, outputs = build_model + + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestPool1d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,)], + [(1,), (2,)], + ["same", "valid"], + ), + ) + def test_avg_pool_1d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=3) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool1d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,)], + [(1,), (2,)], + ["same", "valid"], + ), + ) + def test_max_pool_1d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=3) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool1d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPool2d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + ["same", "valid"], + ), + ) + def 
test_avg_pool_2d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + ["same", "valid"], + ), + ) + def test_max_pool_2d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPool3d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + ["same", "valid"], + ), + ) + def test_avg_pool_3d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=3, high=4, size=5) + + if kernel_sizes[0] == 1 and pad_type == "same": + pytest.xfail("rdar://81630684 (Pool3d with pad type == same fails from TF2.5 onwards)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool3d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + ["same", "valid"], + ), + ) + def test_max_pool_3d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=3, high=4, size=5) + + if kernel_sizes[0] == 1 and pad_type == "same": + pytest.xfail("rdar://81630684 (Pool3d with pad type == same fails from TF2.5 onwards)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool3d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class 
TestPrint(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank",
+        itertools.product(
+            compute_units,
+            backends,
+            [size for size in range(1, 5)],
+        ),
+    )
+    def test_print(self, compute_unit, backend, rank):
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+
+        @make_tf_graph([shape])
+        def build_model(x):
+            print_layer = tf.raw_ops.Print(input=x, data=[])
+            res = print_layer + 1
+            return res
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape, rand_min=-100, rand_max=100)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+
+class TestRandom(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, size, rank, constant",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 4],
+            [1, 5],
+            [True, False],
+        ),
+    )
+    def test_random_binomial(self, compute_unit, backend, size, rank, constant):
+        if not constant and backend[0] != "neuralnetwork":
+            return  # dynamic input is only supported in the neuralnetwork backend
+
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+        @make_tf_graph([shape])
+        def build_model(x):
+            if constant:
+                ref = tf.add(x, tf.keras.backend.random_binomial(shape=shape, p=1.0))
+            else:
+                ref = tf.add(
+                    x,
+                    tf.keras.backend.random_binomial(
+                        shape=tf.raw_ops.Shape(input=x), p=1.0
+                    ),
+                )
+            return ref
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, size",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 4]
+        ),
+    )
+    def test_random_categorical(self, compute_unit, backend, size):
+        # TensorFlow's input is a 2-D tensor with shape [batch_size, num_classes].
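+        # The sampled class indices are random, so direct comparison against TF is
+        # not meaningful. The test therefore feeds all-zero logits and multiplies
+        # the (cast) samples by an all-zero second input, making the final output
+        # deterministically zero on every backend.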
+        shape = np.random.randint(low=1, high=4, size=2)
+        y_shape = (1,)
+        @make_tf_graph([shape, y_shape])
+        def build_model(x, y):
+            x = tf.random.categorical(x, size)
+            x = tf.cast(x, dtype=tf.float32)
+            return x * y
+
+        model, inputs, outputs = build_model
+        input_value = [np.zeros(shape).astype(np.float32), np.zeros(y_shape).astype(np.float32)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, mean, rank, constant",
+        itertools.product(
+            compute_units,
+            backends,
+            [0.0],
+            [1, 5],
+            [True, False],
+        ),
+    )
+    def test_random_normal(self, compute_unit, backend, mean, rank, constant):
+        if not constant and backend[0] != "neuralnetwork":
+            return  # dynamic input is only supported in the neuralnetwork backend
+
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+        @make_tf_graph([shape])
+        def build_model(x):
+            if constant:
+                ref = tf.add(x, tf.random.normal(shape=shape, mean=mean, stddev=0.0))
+            else:
+                ref = tf.add(
+                    x,
+                    tf.random.normal(
+                        shape=tf.raw_ops.Shape(input=x), mean=mean, stddev=0.0
+                    ),
+                )
+            return ref
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, mean, rank, constant",
+        itertools.product(
+            compute_units,
+            backends,
+            [0.0],
+            [1, 5],
+            [True, False],
+        ),
+    )
+    def test_keras_random_normal(self, compute_unit, backend, mean, rank, constant):
+        if not constant and backend[0] != "neuralnetwork":
+            return  # dynamic input is only supported in the neuralnetwork backend
+
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+        @make_tf_graph([shape])
+        def build_model(x):
+            if constant:
+                ref = tf.add(x, tf.keras.backend.random_normal(shape=shape, mean=mean, stddev=0.0))
+            else:
+                ref = tf.add(
+                    x,
+                    tf.keras.backend.random_normal(
+                        shape=tf.raw_ops.Shape(input=x), mean=mean, stddev=0.0
+                    ),
+                )
+            return ref
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, low, high, rank, constant",
+        itertools.product(
+            compute_units,
+            backends,
+            [0.0],
+            [0.0],
+            [1],
+            [True, False],
+        ),
+    )
+    def test_random_uniform(self, compute_unit, backend, low, high, rank, constant):
+        if not constant and backend[0] != "neuralnetwork":
+            return  # dynamic input is only supported in the neuralnetwork backend
+
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+        @make_tf_graph([shape])
+        def build_model(x):
+            if constant:
+                ref = tf.add(x, tf.random.uniform(shape=shape, minval=low, maxval=high))
+            else:
+                ref = tf.add(
+                    x,
+                    tf.random.uniform(
+                        shape=tf.raw_ops.Shape(input=x), minval=low, maxval=high
+                    ),
+                )
+            return ref
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, low, high, rank, constant",
+        itertools.product(
+            compute_units,
+            backends,
+            [1.0],
+            [1.0],
+            [rank for rank in range(1, 6)],
+            [True, False],
+        ),
+    )
+    def test_keras_random_uniform(
+        self, compute_unit, backend, low, high, rank, constant
+    ):
+        if not constant and backend[0] != "neuralnetwork":
+            return  # dynamic input is only supported in the neuralnetwork backend
+        shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32)
+
+        @make_tf_graph([shape])
+        def build_model(x):
+            if constant:
+                ref = tf.add(x, tf.keras.backend.random_uniform(shape=shape, minval=low, maxval=high))
+            else:
+                ref = tf.add(
+                    x,
+                    tf.keras.backend.random_uniform(
+                        shape=tf.raw_ops.Shape(input=x), minval=low, maxval=high
+                    ),
+                )
+            return ref
+
+        model, inputs, outputs = build_model
+        input_value = [random_gen(shape=shape)]
+        input_dict = dict(zip(inputs, input_value))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+@pytest.mark.skipif(_macos_version() < (10, 16),
+                    reason="This only works for 'neuralnetwork' on macOS 11")
+class TestReduction(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank_and_axes, keep_dims, tf_op",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                (1, (-1,)),
+                (2, (0,)),
+                (2, (-1, 0)),
+                (3, (1, -3)),
+                (3, (-2,)),
+                (3, (-3, -2, -1)),
+                (4, (0, 1, 2)),
+                (4, (-2, -1, 0)),
+                (4, (1, -2)),
+                (5, (-3, -1)),
+                (5, (-2, -1)),
+                (5, (-3, -2, -1)),
+                (5, (0, -1, 1, -2)),
+                (3, None),
+                (5, None),
+                (3, 1),
+            ],
+            [True, False],
+            [
+                tf.reduce_all,
+                tf.math.reduce_euclidean_norm,
+                tf.reduce_max,
+                tf.reduce_mean,
+                tf.reduce_min,
+                tf.reduce_prod,
+                tf.reduce_sum,
+                tf.reduce_any,
+                tf.reduce_logsumexp,
+                tf.math.argmax,
+                tf.math.argmin,
+            ],
+        ),
+    )
+    def test_reduction(self, compute_unit, backend, rank_and_axes, keep_dims, tf_op):
+        rank, axes = rank_and_axes
+        shape = np.random.randint(low=1, high=3, size=rank)
+
+        def parse_axes(axes):
+            if axes is None:
+                axes = 0
+            elif isinstance(axes, (tuple, list)):
+                axes = axes[0]
+            return axes
+
+        def test_tf_argmax():
+            @make_tf_graph([shape])
+            def build_model(x):
+                return tf.math.argmax(x, axis=parse_axes(axes))
+
+            model, inputs, outputs = build_model
+            input_values = [random_gen(shape, rand_min=-5.0, rand_max=5.0)]
+            input_dict = dict(zip(inputs, input_values))
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+        def test_tf_argmin():
+            @make_tf_graph([shape])
+            def build_model(x):
+                return tf.math.argmin(x, axis=parse_axes(axes))
+
+            model, inputs, outputs = build_model
+            input_values = [random_gen(shape, rand_min=-5.0, rand_max=5.0)]
+            input_dict = dict(zip(inputs, input_values))
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend,
+            )
+
+        def test_tf_reduction():
+            if isinstance(axes, list) and axes and len(axes) == rank and not keep_dims:
+                return
+
+            input_type = list(shape)
+            x_val = random_gen(shape=shape, rand_min=-5.0, rand_max=5.0)
+            if tf_op in {tf.reduce_all, tf.reduce_any}:
+                input_type += [tf.bool]
+                x_val = np.random.randint(low=0, high=2, size=shape).astype(bool)
+            elif tf_op in {tf.math.reduce_euclidean_norm}:
+                x_val = random_gen(shape=shape, rand_min=0.0, rand_max=10.0)
+            elif tf_op in {tf.reduce_prod}:
+                x_val = random_gen(shape=shape, rand_min=1.0, rand_max=1.3)
+            elif tf_op in {tf.reduce_logsumexp}:
+                x_val = random_gen(shape=shape, rand_min=-5, rand_max=5)
+
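+            # The op-specific ranges above are presumably chosen for numerical
+            # stability: reduce_prod draws from [1.0, 1.3] so the product stays
+            # small, reduce_euclidean_norm uses non-negative values, and the
+            # boolean reductions take random 0/1 inputs.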
@make_tf_graph([input_type]) + def build_model(x): + ref = tf_op(x, axis=axes, keepdims=keep_dims) + if tf_op == tf.reduce_any: + ref = tf.cast(ref, tf.float32) + return ref + + model, inputs, outputs = build_model + input_dict = dict(zip(inputs, [x_val])) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + if tf_op in {tf.math.argmax}: + test_tf_argmax() + elif tf_op in {tf.math.argmin}: + test_tf_argmin() + else: + test_tf_reduction() + +class TestGather(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_axis, mode", + itertools.product( + compute_units, + backends, + [ + (1, 2, -1), + (2, 1, 0), + (3, 2, -2), + (2, 3, 1), + (2, 2, 1), + (1, 1, 0), + (3, 3, -2), + (3, 3, 2), + (3, 3, 0), + (1, 3, -1), + (3, 1, 2), + (3, 1, -1), + ], + ["Gather", "GatherV2", "gather"], + ), + ) + def test_gather_function(self, compute_unit, backend, rankX_rankIndices_axis, mode): + x_rank, indices_rank, axis = rankX_rankIndices_axis + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + + @make_tf_graph([x_shape, list(indices_shape) + [tf.int32]]) + def build_model(x, indices): + if mode == "Gather": + res = tf.raw_ops.Gather(params=x, indices=indices) + elif mode == "GatherV2": + res = tf.raw_ops.GatherV2(params=x, indices=indices, axis=axis) + elif mode == "gather": + res = tf.gather(x, indices, axis=axis) + + return res + + model, inputs, outputs = build_model + + axis = 0 if mode == "Gather" else axis + input_dict = {inputs[0]: np.random.rand(*x_shape).astype(np.float32), + inputs[1]: np.random.randint(0, x_shape[axis], size=indices_shape, dtype=np.int32)} + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_axis_batchdims, mode", + itertools.product( + compute_units, + backends, + [ + (2, 2, 1, 0), + (3, 2, 1, 1), + (3, 3, 2, 0), + (3, 3, 2, 1), + (3, 3, 2, 2), + ], + ["GatherV2", "gather"], + ), + ) + def test_gather_with_batch_dims(self, compute_unit, backend, rankX_rankIndices_axis_batchdims, mode): + if _macos_version() < (13, 0) and backend[0] == 'mlprogram': + pytest.skip("Requires macOS 13 or higher") + + x_rank, indices_rank, axis, batch_dims = rankX_rankIndices_axis_batchdims + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[:batch_dims] = x_shape[:batch_dims] + + @make_tf_graph([x_shape, list(indices_shape) + [tf.int32]]) + def build_model(x, indices): + if mode == "GatherV2": + res = tf.raw_ops.GatherV2(params=x, indices=indices, axis=axis, batch_dims=batch_dims) + elif mode == "gather": + res = tf.gather(x, indices, axis=axis, batch_dims=batch_dims) + else: + raise ValueError("Unsupported tf op {}".format(mode)) + return res + + model, inputs, outputs = build_model + + axis = 0 if mode == "Gather" else axis + input_dict = {inputs[0]: np.random.rand(*x_shape).astype(np.float32), + inputs[1]: np.random.randint(0, x_shape[axis], size=indices_shape, dtype=np.int32)} + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 if backend[0] == "mlprogram" else None + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices", + itertools.product( + 
compute_units, + backends, + [ + (1, 2), + (2, 2), + (3, 2), + (2, 3), + (1, 4), + (5, 2), + (2, 5), + (4, 3), + (3, 4), + (2, 4), + (4, 2), + (1, 5), + ], + ), + ) + def test_gather_nd(self, compute_unit, backend, rankX_rankIndices): + x_rank, indices_rank = rankX_rankIndices + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=x_rank + 1) + + @make_tf_graph([x_shape, list(indices_shape) +[tf.int32]]) + def build_model(x, indices): + return tf.gather_nd(x, indices) + + model, inputs, outputs = build_model + + a = np.random.rand(*x_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, x_shape[i], size=indices_shape[:-1]) + ) + + input_dict = { + inputs[0]: a, + inputs[1]: np.stack(indices_list, axis=-1).astype(np.int32), + } + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_batchdims", + itertools.product( + compute_units, + backends, + [ + (1, 2, 0), + (2, 2, 1), + (3, 5, 2), + (5, 5, 3), + ], + ), + ) + def test_gather_nd_with_batch_dims(self, compute_unit, backend, rankX_rankIndices_batchdims): + if _macos_version() < (13, 0) and backend[0] == 'mlprogram': + pytest.skip("Requires macOS 13 or higher") + + x_rank, indices_rank, batch_dims = rankX_rankIndices_batchdims + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + x_shape[:batch_dims] = indices_shape[:batch_dims] + indices_shape[-1] = np.random.randint(low=1, high=x_rank + 1 - batch_dims) + + @make_tf_graph([x_shape, list(indices_shape) +[tf.int32]]) + def build_model(x, indices): + return tf.gather_nd(x, indices, batch_dims=batch_dims) + + model, inputs, outputs = build_model + + a = np.random.rand(*x_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, x_shape[i+batch_dims], size=indices_shape[:-1]) + ) + + input_dict = { + inputs[0]: a, + inputs[1]: np.stack(indices_list, axis=-1).astype(np.int32), + } + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 if backend[0] == "mlprogram" else None + ) + + +class TestScatter(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, data_rank, indices_rank", + itertools.product( + compute_units, + backends, + list(range(1, 4)), + list(range(2, 4)), + ), + ) + def test_scatter_nd_with_zeros( + self, compute_unit, backend, data_rank, indices_rank + ): + + shape = np.random.randint(low=2, high=4, size=data_rank).astype(np.int32) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=data_rank + 1) + updates_shape = list(indices_shape[:-1]) + list(shape[indices_shape[-1] :]) + + updates = np.random.rand(*updates_shape).astype(np.int32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append(np.random.randint(0, shape[i], size=indices_shape[:-1])) + + indices = np.stack(indices_list, axis=-1).astype(np.int32) + + @make_tf_graph( + [list(indices.shape) + [tf.int32], updates_shape + [tf.int32], [data_rank, tf.int32]] + ) + def build_model(indices, updates, shape): + return 
tf.raw_ops.ScatterNd(indices=indices, updates=updates, shape=shape) + + model, inputs, outputs = build_model + input_values = [indices, updates, shape] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTensorScatterAdd(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, tensor_rank, indices_rank", + itertools.product( + compute_units, + backends, + # updates_rank = indices_rank - 1 + tensor_rank - indices_shape[-1] <= tensor_rank + indices_rank - 2 + # and Core ML only supports updates_rank < 6, + # so we constrain tensor_rank + indices_rank - 2 < 6 + [tensor_rank for tensor_rank in range(1, 5)], + [indices_rank for indices_rank in range(2, 4)] + ), + ) + def test(self, compute_unit, backend, tensor_rank, indices_rank): + # To avoid indexing out of bound: + # tensor size for each dimension >= MIN_TENSOR_SIZE + # index for each dimension < MIN_TENSOR_SIZE + MIN_TENSOR_SIZE = 3 + + tensor_shape = np.random.randint(low=MIN_TENSOR_SIZE, high=9, size=tensor_rank) + # indices shape constraint: 0 < indices_shape[-1] <= tensor_rank + indices_shape = np.random.randint(low=1, high=tensor_rank + 1, size=indices_rank) + + # updates rank and shape are infered from tensor and indices + # reference https://www.tensorflow.org/api_docs/python/tf/compat/v1/scatter_nd_add + updates_rank = indices_rank - 1 + tensor_rank - indices_shape[-1] + updates_shape = [] + for i in range(indices_rank - 1): + updates_shape.append(indices_shape[i]) + for i in range(indices_shape[-1], tensor_rank): + updates_shape.append(tensor_shape[i]) + updates_shape = np.array(updates_shape) + + @make_tf_graph([tensor_shape, list(indices_shape) + [tf.int32], updates_shape]) + def build_model(tensor, indices, updates): + return tf.tensor_scatter_nd_add(tensor, indices, updates) + + model, inputs, outputs = build_model + input_values = [ + random_gen(tensor_shape, rand_min=-1.0, rand_max=1.0), + random_gen(indices_shape, rand_min=0, rand_max=MIN_TENSOR_SIZE, dtype=np.int32), + random_gen(updates_shape, rand_min=-1.0, rand_max=1.0), + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceByIndex(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, masking_type", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 5)], + ["none", "positive_mask", "negative_mask"] + ), + ) + def test_slice_by_index_simple(self, compute_unit, backend, rank, masking_type): + input_shape = np.random.randint(low=2, high=4, size=rank) + begin_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + end_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + stride_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + if masking_type == "none": + begin_mask = [False] * rank + end_mask = [False] * rank + squeeze_mask = [False] * rank + else: + begin_mask = np.array( + [np.random.choice([True, False, False]) for i in range(rank)] + ).astype(bool) + end_mask = np.array( + [np.random.choice([True, False, False]) for i in range(rank)] + ).astype(bool) + squeeze_flag = True + # We do not squeeze 
to scalar in nn + while squeeze_flag: + squeeze_mask = np.array( + [np.random.choice([True, False]) for i in range(rank)] + ).astype(bool) + for i in range(rank): + if begin_mask[i] or end_mask[i]: + squeeze_mask[i] = False + for s in squeeze_mask: + if not s: + squeeze_flag = False + + for i in range(rank): + if begin_mask[i] or end_mask[i]: + stride = 0 + while stride == 0: + stride = np.random.randint(low=-input_shape[i], high=input_shape[i]) + stride_val[i] = stride + + if not end_mask[i]: + while True: + end = np.random.randint( + low=-input_shape[i], high=input_shape[i] + ) + normalized_end = input_shape[i] + end if end < 0 else end + if normalized_end == 0 and stride_val[i] > 0: + continue + elif normalized_end == input_shape[i] - 1 and stride_val[i] < 0: + continue + else: + end_val[i] = end + break + continue + if squeeze_mask[i]: + stride_val[i] = 1 + while True: + end = np.random.randint(low=-input_shape[i], high=input_shape[i]) + normalized_end = input_shape[i] + end if end < 0 else end + normalized_begin = ( + input_shape[i] + begin_val[i] if begin_val[i] < 0 else begin_val[i] + ) + if normalized_end == normalized_begin: + continue + if begin_mask[i] or end_mask[i] or squeeze_mask[i]: + stride = 1 + elif normalized_end < normalized_begin: + stride = -np.random.randint(low=1, high=input_shape[i]) + else: + stride = np.random.randint(low=1, high=input_shape[i]) + end_val[i] = end + stride_val[i] = stride + break + + def _mask_to_bit(mask): + ret = 0 + for x in mask[::-1]: + ret <<= 1 + if x: + ret += 1 + if ret > 0 and masking_type == "negative_mask": + ret = ret - 2**rank + return ret + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(end_val.shape) + [tf.int32], + ] + ) + def build_model(x, begin, end): + return tf.strided_slice( + x, + begin, + end, + stride_val, + begin_mask=_mask_to_bit(begin_mask), + end_mask=_mask_to_bit(end_mask), + shrink_axis_mask=_mask_to_bit(squeeze_mask), + ) + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32), + begin_val, + end_val, + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, testcase", + itertools.product( + compute_units, + backends, + # Change to slice representation for allowing iteration with a non-constant input + [ + ( + slice(1, 2), + slice(1, 2), + slice(1, 2), + ), # equivalent to [1:2, 1:2, 1:2] + (slice(-3, -2), slice(-4, -3), slice(-5, -4)), + (slice(0, -2), slice(0, -1), slice(-3, -2)), + (slice(-1, 0, -2), slice(-1, 1, -1), slice(-1, -3, -3)), + (slice(1, 2), slice(1, 3), slice(1, 4, 2)), + (slice(None, 2), slice(1, 3), slice(None, 4, 2)), + ( + slice(None), + slice(1, None), + slice(None, 4, 2), + ), # equivalent to [:,1:,:4:2] + (slice(1, None, 1), 1, slice(None, 3, 2)), + (slice(None), slice(None), slice(None)), + (slice(1, 2), slice(1, 2), 1), + (slice(1, 2), slice(None), slice(None)), + (slice(None), slice(None), slice(None)), + (slice(1, 2), slice(None), slice(1, 2)), + (slice(None), slice(None), 1), + (0, 0, slice(None)), + (slice(1, 2)), + (slice(1, 2), slice(1, 2)), + (1), + (slice(0, 3)), + (slice(None)), + (slice(None), slice(None), slice(None, None, -1)), + ], + ), + ) + def test_slice_by_index_from_scratch(self, compute_unit, backend, testcase): + input_shape = np.array([3, 4, 5]) + + 
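+        # Indexing a tf.Tensor with these Python slice/int testcases lowers to
+        # tf.strided_slice, so this covers begin/end/stride combinations as well
+        # as the begin_mask/end_mask/shrink_axis_mask attributes.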
@make_tf_graph([input_shape]) + def build_model(x): + return x[testcase] + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32) + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_and_slice", + itertools.product( + compute_units, + backends, + [ + [[3], (slice(1, 2))], + [[2,10], (slice(0, 2), slice(None, 8, 2))], + [[2,3,4,5], (slice(None), slice(1, None, 3), slice(None), slice(0, 5))], + [[2,3,4,5], (slice(0, None), slice(None), slice(2, None, 1), slice(None))], + ], + ), + ) + def test_slice_by_index_one_dimension(self, compute_unit, backend, shape_and_slice): + input_shape, testcase = shape_and_slice + + @make_tf_graph([input_shape]) + def build_model(x): + return x[testcase] + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32) + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_slice_by_index_smoke(self, compute_unit, backend): + input_shape = [1, 64, 2] + x_val = np.random.rand(*input_shape).astype(np.float32) + y_val = np.random.rand(*input_shape).astype(np.float32) + + @make_tf_graph([input_shape, input_shape]) + def build_model(x, y): + x_slice = x[:, :, 0] + y_slice = y[:, :, 0] + return (x_slice, y_slice) + + model, inputs, outputs = build_model + + input_values = [x_val, y_val] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.xfail(reason="ExpandDims exist mismatch", run=False) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_slice_by_index_with_new_axes(self, compute_unit, backend): + input_shape = [4, 5, 64] + val = np.random.rand(*input_shape).astype(np.float32) + num_cases = 8 + + @make_tf_graph([input_shape] * num_cases) + def build_model(*args): + a, b, c, d, e, f, g, h = args + slice_0 = a[:1, tf.newaxis, :3, :] + slice_1 = b[:, tf.newaxis] + slice_2 = c[..., tf.newaxis] + slice_3 = d[..., tf.newaxis, :, 10] + slice_4 = e[:, 2, tf.newaxis, ...] + slice_5 = f[2, ..., :, tf.newaxis] + slice_6 = g[tf.newaxis, ..., tf.newaxis] + slice_7 = h[tf.newaxis, 2, tf.newaxis, ...] 
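+            # Each tf.newaxis above sets a new_axis_mask bit on the underlying
+            # strided_slice, i.e. an inserted ExpandDims (the op the xfail reason
+            # above refers to).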
+ + return ( + slice_0, + slice_1, + slice_2, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + ) + + model, inputs, outputs = build_model + + input_values = [val] * num_cases + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceBySize(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, single_size, dynamic_size", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 5)], + [True, False], + [True, False], + ), + ) + def test_dynamic_slice_by_size( + self, compute_unit, backend, rank, single_size, dynamic_size + ): + # Test for when either begin or size are runtime determines + input_shape = np.random.randint(low=2, high=4, size=rank) + begin_val = np.array( + [np.random.randint(input_shape[i]) for i in range(rank)] + ).astype(np.int32) + size_val = np.array( + [np.random.randint(input_shape[i] - begin_val[i]) + 1 for i in range(rank)] + ) + if single_size: + for r in range(rank): + size_val_r = np.array( + [s if i == r else -1 for i, s in enumerate(size_val)] + ).astype(np.int32) + + @make_tf_graph([input_shape, list(begin_val.shape) + [tf.int32]]) + def build_model(x, begin): + return tf.slice(x, begin, size_val_r) + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(size_val_r.shape) + [tf.int32], + ] + ) + def build_model_dynamic_size(x, begin, size): + return tf.slice(x, begin, size) + + if dynamic_size: + model, inputs, outputs = build_model_dynamic_size + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + size_val_r, + ] + else: + model, inputs, outputs = build_model + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + else: + size_val = np.array( + [s if np.random.randint(2) == 0 else -1 for s in size_val] + ).astype(np.int32) + + @make_tf_graph([input_shape, list(begin_val.shape) + [tf.int32]]) + def build_model(x, begin): + return tf.slice(x, begin, size_val) + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(size_val.shape) + [tf.int32], + ] + ) + def build_model_dynamic_size(x, begin, size): + return tf.slice(x, begin, size) + + if dynamic_size: + model, inputs, outputs = build_model_dynamic_size + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + size_val, + ] + else: + model, inputs, outputs = build_model + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_size", + itertools.product( + compute_units, + backends, + [ + [[0, 1, 2], [1, 1, 1]], + [[0, 0, 0], [-1, -1, -1]], + [[0, 0, 1], [1, 2, -1]], + [[0, 1, 2], [-1, -1, -1]], + ] + ), + ) + def test_static_slice_by_size( + self, compute_unit, backend, begin_size + ): + # Test for when begin and size are both constant + input_shape = [1, 2, 3] + begin, size = begin_size + tf_input_shape = input_shape.copy() + + for i in range(3): + if np.random.randint(2) == 0: + tf_input_shape[i] = None + # We set the begin to 0 for the 
symbolic dimension, + # since the default input shape will be 1 in this case, + # we need to make sure that begin = 0 and size = 1 (unless size == -1) + begin[i] = 0 + if size[i] != -1: + size[i] = 1 + + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.slice(x, begin, size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=-2, rand_max=2)] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMatrixBandPart(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, lower_and_upper", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [(0, -1), (-1, 0), (0, 0)], + ), + ) + def test_matrix_band_part(self, compute_unit, backend, rank, lower_and_upper): + + lower, upper = lower_and_upper + shape = np.random.randint(low=3, high=4, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.raw_ops.MatrixBandPart(input=x, num_lower=lower, num_upper=upper) + + model, inputs, outputs = build_model + TensorFlowBaseTest.run_compare_tf( + model, + {inputs[0]: random_gen(shape, rand_min=-100, rand_max=100)}, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCumSum(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, reverse, exclusive", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + [True, False], + ), + ) + def test_cumsum(self, compute_unit, backend, rank, reverse, exclusive): + input_shape = np.random.randint(low=1, high=4, size=rank) + for axis in range(-1, rank, 3): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.cumsum(x, axis=axis, reverse=reverse, exclusive=exclusive) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=-10, rand_max=10)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf(model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend) + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestFakeQuant(TensorFlowBaseTest): + @pytest.mark.parametrize( + "num_bits, weight_boundaries, compute_unit, backend", + itertools.product( + [2, 8], # TensorFlow does not support 1-bit quantization + [(0, 10), (-0.01, 0.02), (-101, 100)], + compute_units, + backends, + ), + ) + def test_fake_quant_weight_quantization_with_conv(self, num_bits, weight_boundaries, compute_unit, backend): + if backend[0] == 'mlprogram': + pytest.skip("Not supported with ML Program backend") + + tf.reset_default_graph() + filter_width = 1 + filter_height = 1 + spatial_size = 2 + input_channels = 3 + output_channels = 1 + input_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, input_channels], name='input') + output_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, output_channels], name='output') + kernel_in = random_gen((filter_width, filter_height), weight_boundaries[0], weight_boundaries[1]) + init = tf.constant_initializer(kernel_in) + + def model(x): + with tf.compat.v1.variable_scope('quantized_model'): + x = tf.layers.conv2d(x, filters=3, kernel_size=1, strides=1, kernel_initializer=init) + return x + + with tf.compat.v1.variable_scope('quantize'): + output = model(x=input_tensor) + tf.contrib.quantize.experimental_create_training_graph(quant_delay=0, weight_bits=num_bits, + 
activation_bits=num_bits) + loss = tf.losses.mean_squared_error(labels=input_tensor, predictions=output) + saver = tf.train.Saver() + + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies(update_ops): + optimizer = tf.train.AdamOptimizer().minimize(loss) + + checkpoint_dir = tempfile.mkdtemp() + # Run training pass to retrieve the correct min and max in FakeQuant op (to avoid using default values) and + # save dummy checkpoint. + with tf.Session() as sess: + tf.global_variables_initializer().run() + for iter in range(1): + image = np.random.rand(spatial_size, spatial_size, input_channels).astype(np.float32) * 255 + label = np.random.rand(spatial_size, spatial_size, output_channels).astype(np.float32) * 255 + training_loss, _ = sess.run([loss, optimizer], feed_dict={input_tensor: image[None, ...], + output_tensor: label[None, ...]}) + + saver.save(sess=sess, save_path=os.path.join(checkpoint_dir, 'quantization')) + + with tf.Graph().as_default() as g: + input_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, input_channels], name='input') + with tf.variable_scope('quantize'): + output = model(x=input_tensor) + + # define eval graph, by quantizing the weights of the model with learned min/max values for each layer + tf.contrib.quantize.experimental_create_eval_graph(input_graph=g, weight_bits=num_bits, + activation_bits=num_bits) + tmpdir = tempfile.mkdtemp() + tf_graph_path = os.path.join(str(tmpdir), "tf_graph.pb") + tf_graph_path_quantized = os.path.join(str(tmpdir), "frozen_graph_quantized.pb") + with open(tf_graph_path, 'wb') as f: + f.write(g.as_graph_def().SerializeToString()) + freeze_g(input_graph=tf_graph_path, + input_saver="", + input_binary=True, + input_checkpoint=os.path.join(checkpoint_dir, 'quantization'), + output_node_names="quantize/quantized_model/conv2d/Conv2D", + restore_op_name="save/restore_all", + filename_tensor_name="save/Const:0", + output_graph=tf_graph_path_quantized, + clear_devices=True, + initializer_nodes="") + shutil.rmtree(checkpoint_dir) + + graph = load_tf_pb(tf_graph_path_quantized) + + tf.reset_default_graph() + graphdef = tf.GraphDef() + input_dict = {} + with open(tf_graph_path_quantized, "rb") as f: + graphdef.ParseFromString(f.read()) + shutil.rmtree(tmpdir) + + with tf.Graph().as_default(), tf.Session(config=None) as sess: + tf.graph_util.import_graph_def(graphdef, name='') + input_dict[sess.graph.get_tensor_by_name('input:0')] = (np.random.rand(1, spatial_size, spatial_size, + input_channels).astype(np.float32)) + outputs = [] + outputs.append(sess.graph.get_tensor_by_name('quantize/quantized_model/conv2d/Conv2D:0')) + tf_outs = sess.run(outputs, feed_dict=input_dict) + + TensorFlowBaseTest.run_compare_tf( + graph, + input_dict, + ["quantize/quantized_model/conv2d/Conv2D"], + compute_unit=compute_unit, + backend=backend, + tf_outputs=tf_outs, + rtol=0.005, + ) + + +class TestFill(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, value", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-19.0, 0.0, 37.0], + ), + ) + def test_fill(self, compute_unit, backend, rank, value): + def test_tf_static(): + shape = np.random.randint(low=1, high=3, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.add( + x, tf.fill(dims=np.array(shape, dtype=np.float32), value=value) + ) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*shape).astype(np.float32)] + input_dict = dict(zip(inputs, 
input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + def test_tf_dynamic(): + shape = np.random.randint(low=1, high=3, size=rank) + @make_tf_graph([(len(shape), tf.int32)]) + def build_model(x): + return tf.fill(dims=x, value=value) + + model, inputs, outputs = build_model + input_values = [np.array(shape, dtype=np.int32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + test_tf_static() + test_tf_dynamic() + + +class TestNonMaximumSuppression(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "num_boxes", + "max_boxes", + "iou_threshold", + "score_threshold", + "use_V5", + ] + ), + itertools.product( + compute_units, + backends, + [1, 5, 20, 1000], + [1, 8, 100], + [0.2, 0.8], + [float("-inf"), -200.0, 200.0], + [True, False], + ), + ) + def test_non_max_suppression( + self, + compute_unit, + backend, + num_boxes, + max_boxes, + iou_threshold, + score_threshold, + use_V5, + ): + if score_threshold > 100.0: + pytest.xfail( + "When score threshold is too high, TF will return empty result, while MIL " + "will still keep the highest score box." + ) + if num_boxes >= 1000 and backend == ("mlprogram", "fp16"): + pytest.xfail( + "rdar://103891349 ([TensorFlow] [PyTorch] NMS discrepancy in Fp16 when " + "number of boxes is large)" + ) + + boxes_val = random_gen(shape=(num_boxes, 4), rand_min=0, rand_max=32) + # When the input score is too close, the returned index order is not guaranteed. + # So instead of generating random scores by rand, use shuffle. + scores_val = np.arange(num_boxes).astype(np.float32) + np.random.shuffle(scores_val) + + @make_tf_graph([boxes_val.shape, scores_val.shape]) + def build_model(boxes, scores): + if use_V5: + ret = tf.raw_ops.NonMaxSuppressionV5( + boxes=boxes, + scores=scores, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + soft_nms_sigma=0., + ) + else: + ret = tf.image.non_max_suppression( + boxes=boxes, + scores=scores, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + return ret + + model, inputs, outputs = build_model + input_dict = dict(zip(inputs, [boxes_val, scores_val])) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestOneHot(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, dynamic", + itertools.product( + compute_units, + backends, + [ + (2, 0), + (2, -1), + (3, 3), + (3, 0), + (3, -2), + (4, -4), + (4, 1), + (4, -1), + (4, -2), + (4, 3), + ], + [True, False], + ), + ) + def test_one_hot(self, compute_unit, backend, rank_and_axis, dynamic): + rank, axis = rank_and_axis + depth, on_value, off_value = 30, 28.0, -4.0 + x_shape = np.random.randint(low=2, high=4, size=rank) + axis = (axis if axis >= -1 else axis + rank + 1) + + if not dynamic: + @make_tf_graph([list(x_shape)+[tf.int32]]) + def build_model(x): + return tf.one_hot(x, axis=axis, depth=depth, on_value=on_value, off_value=off_value) + + model, inputs, outputs = build_model + input_values = [np.random.randint(0, depth, size=x_shape).astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + + else: # Dynamic Case with depth being an input + @make_tf_graph([list(x_shape)+[tf.int32], [1, tf.int32]]) + def 
build_model(x, depth_input): + # tf.squeeze since CoreML input has to be rank 1~5. + return tf.one_hot(x, axis=axis, depth=tf.squeeze(depth_input), + on_value=on_value, off_value=off_value) + + model, inputs, outputs = build_model + input_values = [np.random.randint(0, depth, size=x_shape).astype(np.int32), + np.array([depth]).astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestSoftmaxCrossEntropyWithLogits(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, class_num", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_sparse_softmax_cross_entropy_with_logits(self, compute_unit, backend, class_num): + batch_size = 2 + feature_shape = [batch_size, class_num] + label_shape = [batch_size, tf.int32] + + @make_tf_graph([feature_shape, label_shape]) + def build_model(feat, label): + return tf.raw_ops.SparseSoftmaxCrossEntropyWithLogits(features=feat, labels=label)[0] + + model, inputs, outputs = build_model + features = random_gen(feature_shape, rand_min=0, rand_max=1) + labels = np.random.randint(low=0, high=class_num, size=(batch_size,), dtype=np.int32) + input_values = [features, labels] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, class_num", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_softmax_cross_entropy_with_logits(self, compute_unit, backend, class_num): + batch_size = 2 + feature_shape = [batch_size, class_num] + label_shape = [batch_size, class_num] + + @make_tf_graph([feature_shape, label_shape]) + def build_model(feat, label): + return tf.raw_ops.SoftmaxCrossEntropyWithLogits(features=feat, labels=label)[0] + + model, inputs, outputs = build_model + input_values = [ + random_gen(feature_shape, rand_min=0, rand_max=1), + random_gen(label_shape, rand_min=0, rand_max=1), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestIdentityN(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_identity_n(self, compute_unit, backend): + shape_1 = [1,] + shape_2 = [3, 4] + shape_3 = [5, 6, 7] + + @make_tf_graph([shape_1, shape_2, shape_3]) + def build_model(x, y ,z): + return tf.raw_ops.IdentityN(input=[x, y, z]) + + model, inputs, outputs = build_model + input_values = [ + random_gen(shape_1, rand_min=0, rand_max=1), + random_gen(shape_2, rand_min=0, rand_max=1), + random_gen(shape_3, rand_min=0, rand_max=1), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_identity_n_with_downstream_op(self, compute_unit, backend): + shape = [3, 4] + + @make_tf_graph([shape]) + def build_model(x): + x = tf.identity_n(input=[x, x]) + return tf.reduce_max(x, 1) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + 
model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPad(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode, dynamic, trial", + itertools.product( + compute_units, + backends, + [2, 3, 4], + ['constant', 'reflect'], + [True, False], + list(range(10)), + ), + ) + def test(self, compute_unit, backend, rank, mode, dynamic, trial): + input_shape = np.random.randint(low=2, high=10, size=rank) + min_input_dim_size = input_shape.min() + padding_val = np.random.randint(low=0, high=min_input_dim_size, size=(rank, 2), dtype=np.int32) + + # Only constant mode supports padding across all dimensions + # All other padding modes are only applied on two dimensions. + perm = list(range(rank)) + import random + random.shuffle(perm) + if mode != "constant": + padding_val[perm[:-2]] = 0 + tf_mode = mode.upper() + + if dynamic: + if mode != "constant": + return + padding_shape = padding_val.shape + @make_tf_graph([input_shape, list(padding_shape)+[tf.int32]]) + def build_model(x, paddings): + return tf.pad(x, paddings=paddings, mode=tf_mode) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000), padding_val] + input_dict = dict(zip(inputs, input_values)) + + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.pad(x, paddings=padding_val, mode=tf_mode) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPadV2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, constant_values, dynamic, trial", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + [0., 10, -1], + [True], + list(range(10)) + ), + ) + def test(self, compute_unit, backend, rank, constant_values, dynamic, trial): + input_shape = np.random.randint(low=2, high=10, size=rank) + paddings = np.random.randint(low=2, high=5, size=2*rank).astype(np.int32) + padding_val = paddings.reshape(-1,2) + if dynamic: + padding_shape = padding_val.shape + @make_tf_graph([input_shape, list(padding_shape)+[tf.int32]]) + def build_model(x, paddings): + return tf.raw_ops.PadV2(input=x, paddings=paddings, constant_values=constant_values) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000), padding_val] + input_dict = dict(zip(inputs, input_values)) + + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.PadV2(input=x, paddings=padding_val, constant_values=constant_values) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestRange(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params", + itertools.product( + compute_units, + backends, + [ + (-10.4, 23, 12.2), + (0, 10, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ], + ), + ) + def test_range(self, compute_unit, backend, params): + start, end, step = np.array(params).astype(np.float32) + + # CoreML requires rank-1~5 input. 
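+        # Core ML does not accept rank-0 (scalar) inputs, hence the shape-[1]
+        # placeholder plus tf.squeeze idiom used below. The three graphs that
+        # follow exercise tf.range with each of limit, delta, and start
+        # supplied as the dynamic input in turn, while the other two arguments
+        # stay compile-time constants.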
+ @make_tf_graph([[1, tf.float32]]) + def build_model(limit): + return tf.range(start=start, limit=tf.squeeze(limit), delta=step) + + model, inputs, outputs = build_model + input_values = [np.array([end])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # CoreML requires rank-1~5 input. + @make_tf_graph([[1, tf.float32]]) + def build_model(delta): + return tf.range(start=start, limit=end, delta=tf.squeeze(delta)) + + model, inputs, outputs = build_model + input_values = [np.array([step])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # CoreML requires rank-1~5 input. + @make_tf_graph([[1, tf.float32]]) + def build_model(begin): + return tf.range(start=tf.squeeze(begin), limit=end, delta=step) + + model, inputs, outputs = build_model + input_values = [np.array([start])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTile(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_reps", + itertools.product( + compute_units, + backends, + [ + (1, (2,)), + (2, (1, 2)), + (2, (2, 2)), + (3, (3, 2, 1)), + (3, (2, 1, 3)), + (3, (2, 1, 1)), + (4, (1, 3, 2, 1)), + (4, (2, 1, 1, 2)), + (5, (2, 1, 1, 3, 2)), + (5, (1, 1, 2, 3, 2)), + ], + ), + ) + def test_tile(self, compute_unit, backend, rank_and_reps): + rank, reps = rank_and_reps + x_shape = np.random.randint(low=2, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.tile(x, multiples=reps) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestDynamicTile(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, [1, 2, 3, 4, 5]), + ) + def test_tile(self, compute_unit, backend, rank): + x_shape = np.random.randint(low=2, high=4, size=rank) + reps_val = np.random.randint(low=1, high=3, size=rank).astype(np.int32) + + @make_tf_graph([x_shape, [*reps_val.shape, tf.int32]]) + def build_model(x, reps): + return tf.tile(input=x, multiples=reps) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape), reps_val] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestTopK(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, k, sort", + itertools.product( + compute_units, + backends, + [1, 3, 5], + [1, 3], + [True, False], + ), + ) + def test_top_k(self, compute_unit, backend, rank, k, sort): + if not sort and backend[0] == "neuralnetwork": + pytest.skip("iOS16 version topk needed for sort = False") + if not sort and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + # TensorFlow only supports last dimension (axis = -1). 
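+        # tf.math.top_k with sorted=False makes no ordering guarantee on the
+        # returned values/indices, so the graph canonicalizes both outputs with
+        # tf.sort before comparison, i.e.
+        #
+        #     values, indices = tf.math.top_k(x, k=k, sorted=False)
+        #     ref = (tf.sort(values), tf.sort(indices))
+        #
+        # which is exactly what build_model below does when sort is False.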
+        shape = np.random.randint(low=3, high=4, size=rank)
+
+        @make_tf_graph([shape])
+        def build_model(x):
+            ref = tf.math.top_k(x, k=k, sorted=sort)
+            if not sort:
+                ref = (tf.sort(ref[0]), tf.sort(ref[1]))
+            return ref
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(shape, rand_min=-100, rand_max=100)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+            minimum_deployment_target=ct.target.iOS16 if not sort else None,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, shape, k",
+        itertools.product(
+            compute_units,
+            backends,
+            [(1, 3), (1, 10), (3, 50)],
+            [1, 3, 20],
+        ),
+    )
+    def test_in_top_k(self, compute_unit, backend, shape, k):
+        # TensorFlow only supports last dimension (axis = -1).
+        batch_size, class_num = shape
+
+        @make_tf_graph([shape, (batch_size, tf.int32)])
+        def build_model(predictions, targets):
+            return tf.math.in_top_k(predictions=predictions, targets=targets, k=k)
+
+        model, inputs, outputs = build_model
+        pred_values = random_gen(shape, rand_min=-2, rand_max=2)
+        target_values = np.random.randint(class_num, size=batch_size).astype(np.int32)
+        input_values = [pred_values, target_values]
+
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+class TestConcat(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, op_version, rank, num_inputs",
+        itertools.product(
+            compute_units,
+            backends,
+            ['v1', 'v2'],
+            list(range(6)),
+            list(range(1, 4)),
+        ),
+    )
+    def test_concat(self, compute_unit, backend, op_version, rank, num_inputs):
+        import random
+
+        for axis in range(-rank, rank):
+            input_shape = np.random.randint(low=1, high=4, size=rank)
+            input_shapes = [input_shape.copy() for _ in range(num_inputs)]
+            concat_axis_value = np.random.randint(low=1, high=3, size=num_inputs)
+            for i, v in enumerate(concat_axis_value):
+                input_shapes[i][axis] = v
+
+            @make_tf_graph(input_shapes)
+            def build_model(*inputs):
+                # Add three additional tensors whose size along the concat axis
+                # is 0; empty tensors contribute nothing to the concatenation.
+                zero_shape = input_shape.copy()
+                zero_shape[axis] = 0
+                const = [tf.constant([], shape=zero_shape) for _ in range(3)]
+                values = inputs + tuple(const)
+                values = list(values)
+                random.shuffle(values)
+                values = tuple(values)
+                if op_version == 'v1':
+                    # The high-level TF functions now emit ConcatV2, so build
+                    # the v1 op directly through tf.raw_ops.
+                    res = tf.raw_ops.Concat(concat_dim=axis, values=values)
+                elif op_version == 'v2':
+                    res = tf.raw_ops.ConcatV2(values=values, axis=axis)
+                return res
+
+            model, inputs, outputs = build_model
+            input_values = [random_gen(shape) for shape in input_shapes]
+            input_dict = dict(zip(inputs, input_values))
+            TensorFlowBaseTest.run_compare_tf(
+                model,
+                input_dict,
+                outputs,
+                compute_unit=compute_unit,
+                backend=backend
+            )
+
+
+class TestSplit(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, dynamic",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 2, 3, 4],
+            [True, False]
+        ),
+    )
+    def test_split(self, compute_unit, backend, rank, dynamic):
+        if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY and dynamic:
+            pytest.xfail("rdar://97398133 (TestSplit::test_split is failing on mlprogram + GPU + dynamic combination)")
+        if _macos_version() < (13, 0) and (dynamic or (backend[0] == "mlprogram" and compute_unit !=
ct.ComputeUnit.CPU_ONLY)): + pytest.skip("Issue fixed in iOS16/macOS13") + + input_shape1 = np.random.randint(low=1, high=3, size=rank) + for axis in range(-rank, rank, 2): + for split_num in range(2, input_shape1[axis] + 1, 2): + if input_shape1[axis] % split_num != 0: + continue + tf_input_shape = list(input_shape1) + if dynamic: + axis1 = np.random.randint(low=0, high=rank) + tf_input_shape[axis1] = None + + @make_tf_graph([tf_input_shape]) + def build_model(x): + res = tf.split(x, split_num, axis=axis) + # Comment: If tf.split output is returned, there's no + # get_tuple nodes. Some graph pass is needed. Example: + # + # x = tf.placeholder(tf.float32, shape=input_shape1) + # res = tf.split(x, 3, axis=0) + # + # res are ['split:0', 'split:1', 'split'] + # + # but node.outputs == ['gto_1', 'gto_2', 'gto_3'] + import random + + random.shuffle(res) + return tuple(res) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, sizes", + itertools.product( + compute_units, + backends, + [[1, 1, 2], [0, 2, 2], [1, 0, 3], [2, 0, 1, 1, 0]] + ), + ) + def test_split_with_sizes(self, compute_unit, backend, sizes): + input_shape = (4, 2) + + @make_tf_graph([input_shape]) + def build_model(x): + res = tf.split(x, sizes, axis=0) + # split sizes can contain 0s, and we skip those in outputs + return tuple([res[i] for i in range(len(sizes)) if sizes[i] != 0]) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_splitv(self, compute_unit, backend): + input_shape = [3, 2, 1] + + @make_tf_graph([input_shape]) + def build_model(x): + res = tf.split(x, [1, 2], axis=0) + return res[0], res[1] + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestStack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_stack(self, compute_unit, backend): + input_shape1 = [3, 1, 1] + input_shape2 = [3, 1, 1] + + @make_tf_graph([input_shape1, input_shape2]) + def build_model(x, y): + return [tf.stack((x, y), axis=0), tf.stack((y, x), axis=-1)] + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape1), random_gen(input_shape2)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestUnstack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[3, 1], [4, 3]] + ), + ) + def test_unstack(self, compute_unit, backend, shape): + @make_tf_graph([shape]) + def build_model(x): + return tf.unstack(x, axis=1) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + 
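+        # run_compare_tf converts the graph to the parametrized backend and
+        # checks the Core ML prediction against the TF session output for the
+        # same input feed.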
TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[3, 1], [4, 3]] + ), + ) + def test_unstack_and_stack(self, compute_unit, backend, shape): + @make_tf_graph([shape]) + def build_model(x): + x = tf.unstack(x, axis=1) + return tf.stack(x) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_inputs", + itertools.product(compute_units, backends, list(range(5)), list(range(1, 5))), + ) + def test_pack(self, compute_unit, backend, rank, num_inputs): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + shape = np.random.randint(low=1, high=4, size=rank) + input_shapes = [shape[:] for _ in range(num_inputs)] + + @make_tf_graph(input_shapes) + def build_model(*inputs): + return tf.raw_ops.Pack(values=inputs, axis=0) + + model, inputs, outputs = build_model + input_values = [ + random_gen(shape, rand_min=-1, rand_max=1) for shape in input_shapes + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestArgSort(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, direction", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-1, 0], + ["ascending", "descending"], + ), + ) + def test_argsort(self, compute_unit, backend, rank, axis, direction): + shape = np.random.randint(low=1, high=4, size=rank) + dtype = np.float32 + tf_dtype = tf.float32 + + @make_tf_graph([list(shape) + [tf_dtype]]) + def build_model(x): + return tf.argsort(x, axis=axis, direction=direction.upper()) + + model, inputs, outputs = build_model + input_values = np.arange(np.prod(shape)) + np.random.shuffle(input_values) + input_values = [np.reshape(input_values, shape).astype(dtype)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestDepthToSpace(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size", + itertools.product( + compute_units, + backends, + [(1, 1, 1, 16), (1, 1, 1, 32), (1, 3, 3, 16)], + [2, 4], + ), + ) + def test_depth_to_space(self, compute_unit, backend, input_shape, block_size): + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.depth_to_space(x, block_size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestExpandDims(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for rank in range(1, 5) + for axis in range(-rank - 1, rank + 1) + ], + ), + ) + def test_expand_dims(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = np.random.randint(low=2, high=4, size=rank) 
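+        # tf.expand_dims inserts a length-1 axis; a negative axis counts from
+        # the back of the *output* rank, e.g.
+        #
+        #     tf.expand_dims(tf.zeros((2, 3)), axis=0).shape   # (1, 2, 3)
+        #     tf.expand_dims(tf.zeros((2, 3)), axis=-1).shape  # (2, 3, 1)
+        #
+        # hence the parametrization over axis in [-rank - 1, rank] above.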
+ + @make_tf_graph([input_shape]) + def build_model(x): + return tf.expand_dims(x, axis=axis) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestReshape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_flatten(self, compute_unit, backend): + shapes = [[2, 2], [3, 2, 1, 2], [2, 1, 4, 3]] + + for input_shape in shapes: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.keras.backend.flatten(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [ + ([10, 10], [5, 20]), + ([3, 4, 5, 6], [4, 5, 3, 6]), + ([4, 4, 5, 6], [2, 2, -1]), + ], + ), + ) + def test_reshape_static(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape[0]]) + def build_model(x): + return tf.reshape(x, shape=input_shape[1]) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape[0]).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [ + ([10, 10], [5, 20]), + ([3, 4, 5, 6], [4, 5, 3, 6]), + ([4, 4, 5, 6], [2, 2, -1]), + ([2, 3, 5, 3], [2, -1]), + ], + ), + ) + def test_reshape_dynamic(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape[0], (len(input_shape[1]), tf.int32)]) + def build_model(x, y): + return tf.reshape(x, shape=y) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*input_shape[0]).astype(np.float32), + np.array(input_shape[1], dtype=np.int32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[1], [1, 1], [1, 1, -1], []], + ), + ) + def test_reshape_scalar(self, compute_unit, backend, shape): + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = () + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.Reshape(tensor=x, shape=shape) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestShape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + ), + ) + def test_shape(self, compute_unit, backend, rank): + shape = np.random.randint(low=3, high=4, size=rank) + shape_holder = [None] * rank + + @make_tf_graph([shape_holder]) + def build_model(x): + return tf.shape(x) + + 
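+        # Every dim of the placeholder is None above, so the converter cannot
+        # fold tf.shape into a constant; the shape op has to survive as a
+        # runtime query (e.g. a (None, None) input fed a (3, 3) array must
+        # yield [3, 3] at prediction time).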
model, inputs, outputs = build_model + + input_values = [random_gen(shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestMatrixDiag(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, length, dynamic", + itertools.product( + compute_units, + backends, + [length for length in range(1, 5)], + [True, False] + ), + ) + def test(self, compute_unit, backend, length, dynamic): + + if dynamic: + input_shape = np.random.randint(low=1, high=4, size=length) + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + size = np.array([a,b]).astype(np.int32) + reshape_shape = [2] + + @make_tf_graph([input_shape, reshape_shape+[tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, reshape) + x = tf.reshape(x, [-1]) + return tf.raw_ops.MatrixDiag(diagonal=x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), size] + else: + input_shape = [length] + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.MatrixDiag(diagonal=x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1)] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestReverse(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [ + (1, (-1,)), + (2, (0,)), + (2, (-1, 0)), + (3, (1, -3)), + (3, (-2,)), + (3, (0, 1, 2)), + (4, (-2, -1, 0)), + (4, (-1, -2)), + (4, []), + (5, (-3, -1, 3)), + (5, (0, -1, 1, -2)), + ], + ), + ) + def test_reverse(self, compute_unit, backend, rank_and_axes): + rank, axes = rank_and_axes + shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.reverse(x, axis=axes) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReverseSequence(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)] + ), + ) + def test_reverse_sequence(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + seq_axis = np.random.randint(low=1, high=rank) + batch_axis = np.random.randint(low=0, high=seq_axis) + lengths = np.random.randint(low=0, high=shape[seq_axis], size=shape[batch_axis]) + + @make_tf_graph([shape]) + def build_model(x): + return tf.reverse_sequence( + x, seq_lengths=lengths, seq_axis=seq_axis, batch_axis=batch_axis + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToDepth(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size", + itertools.product( + compute_units, + backends, + [(1, 6, 6, 1), (1, 12, 12, 1), (1, 6, 6, 3)], + [2, 3], + ), + ) + def test_space_to_depth(self, compute_unit, backend, input_shape, block_size): + 
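+        # space_to_depth moves each block_size x block_size spatial tile into
+        # the channel dim: (N, H, W, C) -> (N, H / b, W / b, C * b * b); e.g.
+        # the (1, 6, 6, 1) case with block_size=2 yields (1, 3, 3, 4).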
@make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.space_to_depth(x, block_size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSqueeze(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [ + (2, (1,)), + (2, (0,)), + (3, (1,)), + (3, (0, -1)), + (3, []), + (4, (-1, 2, 1)), + (4, (0, 1)), + (5, (3, 1, 2)), + (5, (-1,)), + ], + ), + ) + def test_squeeze(self, compute_unit, backend, rank_and_axes): + rank, axes = rank_and_axes + x_shape = np.random.randint(low=2, high=4, size=rank) + for axis in axes: + x_shape[axis] = 1 + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.squeeze(x, axis=axes) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*x_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTranspose(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_perm", + itertools.product( + compute_units, + backends, + [ + (1, (0,)), + (2, (1, 0)), + (2, (0, 1)), + (3, (0, 2, 1)), + (3, (2, 1, 0)), + (3, (2, 0, 1)), + (4, (0, 3, 2, 1)), + (4, (3, 0, 1, 2)), + (5, (2, 3, 1, 0, 4)), + (5, (3, 1, 0, 4, 2)), + ], + ), + ) + def test_transpose_1(self, compute_unit, backend, rank_and_perm): + + rank, perm = rank_and_perm + x_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 2, 3, 4], + ), + ) + def test_transpose_2(self, compute_unit, backend, rank): + + input_shape = np.random.randint(low=1, high=4, size=rank) + perm = np.random.permutation(rank) + + def static_perm(): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def dynamic_perm(): + @make_tf_graph([input_shape, list(perm.shape) + [tf.int32]]) + def build_model(x, tf_perm): + return tf.transpose(x, perm=tf_perm) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape), perm.astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + static_perm() + # Note that TF supports dynamic perm in tf.transpose. 
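+        # The Core ML transpose, however, needs perm to be known at conversion
+        # time, so dynamic_perm is expected to fail conversion with the
+        # ValueError matched below instead of producing a model.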
+ with pytest.raises(ValueError, match=r".*must be const at compile time.*"): + dynamic_perm() + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_perm", + itertools.product( + compute_units, + backends, + [ + (2, (0, 1)), + (3, (0, 2, 1)), + ], + ), + ) + def test_transpose_after_another_op(self, compute_unit, backend, rank_and_perm): + + rank, perm = rank_and_perm + x_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + # Test transpose operations after another operation that may return symbolic value + # in value_inference implementation (e.g. concat) - see issue #1556 + x = tf.concat([x, x], axis=-1) + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_redundant_transpose(self, compute_unit, backend, rank): + import random + random.seed(10) + input_shape = np.random.randint(low=1, high=4, size=rank) + num_layers = 30 + perms = [] + for _ in range(num_layers): + perm = list(range(rank)) + random.shuffle(perm) + perms.append(perm) + + @make_tf_graph([input_shape]) + def build_model(x): + net = x + for perm in perms: + net = tf.transpose(net, perm=perm) + return net + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToBatchND(TensorFlowBaseTest): + # No direct mil smoke test since it's a TF op which is a composite of several ops. 
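+    # SpaceToBatchND first pads the spatial dims, then interleaves them into
+    # the batch dim: an (N, H, W, C) input with block_shape (bH, bW) and total
+    # paddings (pH, pW) maps to (N * bH * bW, (H + pH) / bH, (W + pW) / bW, C);
+    # e.g. (1, 4, 4, 1) with block (2, 2) and zero padding -> (4, 2, 2, 1).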
+ @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_shape, paddings", + itertools.product( + compute_units, + backends, + [(1, 4, 4, 1), (1, 4, 4, 3), (2, 4, 6, 1)], + [[2, 2]], + [[[0, 0], [0, 0]], [[1, 1], [0, 2]], [[4, 2], [4, 2]]], + ), + ) + def test_smoke(self, compute_unit, backend, input_shape, block_shape, paddings): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_block_paddings, dynamic", + itertools.product( + compute_units, + backends, + [ + [(1, 4, 6, 2, 2), [2, 3], [[2, 0], [3, 6]]], + [(2, 4, 6, 1), [1, 2], [[2, 1], [3, 3]]], + [(2, 4, 6, 1, 2), [2, 1], [[0, 0],[0, 0]]], + [(2, 4, 6, 1, 2), [2], [[0, 0]]], + ], + [True, False], + ), + ) + def test_smoke_new_op(self, compute_unit, backend, shape_block_paddings, dynamic): + input_shape, block_shape, paddings = shape_block_paddings + + # The neuralnetwork backend doesn't support these tests + if backend[0] == "neuralnetwork": + return + + tf_input_shape = input_shape if not dynamic else [None] * len(input_shape) + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_block_rank, dynamic", + itertools.product( + compute_units, + backends, + [(3, 1), (3, 2), (4, 1)], + [True, False], + ), + ) + def test_programmatic( + self, compute_unit, backend, input_block_rank, dynamic + ): + + input_rank, block_rank = input_block_rank + + # generate data + input_shape = np.random.randint(low=1, high=4, size=input_rank) + block_shape = np.random.randint(low=1, high=3, size=block_rank) + + if backend[0] == "neuralnetwork": + if block_rank == 2 and block_shape[0] != block_shape[1]: + pytest.skip("neuralnetwork backend doesn't support unequal block shape.") + if block_shape[0] == 1: + pytest.skip("neuralnetwork backend doesn't support unity block shape.") + + paddings = [] + for i in range(block_rank): + while True: + temp = np.random.randint(low=0, high=10, size=2) + if (np.sum(temp) + input_shape[i + 1]) % block_shape[i] == 0: + paddings.append(temp) + break + paddings = np.array(paddings) + + if not dynamic: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + else: + + @make_tf_graph([[None] * input_rank]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestBatchToSpaceND(TensorFlowBaseTest): + # No direct mil smoke test since it's a TF op which is a composite of several ops. 
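+    # BatchToSpaceND is the inverse: it folds the batch dim back into the
+    # spatial dims and then crops, mapping (N * bH * bW, H, W, C) with block
+    # (bH, bW) and total crops (cH, cW) to (N, H * bH - cH, W * bW - cW, C);
+    # e.g. (4, 4, 4, 1) with block (2, 2) and zero crops -> (1, 8, 8, 1).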
+ @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size, crops", + itertools.product( + compute_units, + backends, + [(4, 4, 4, 1), (4, 4, 4, 3), (4, 4, 6, 1)], + [[2, 2]], + [[[0, 0], [0, 0]], [[1, 1], [0, 2]], [[4, 2], [4, 2]]], + ), + ) + def test_smoke(self, compute_unit, backend, input_shape, block_size, crops): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_size, crops=crops + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_block_rank, dynamic", + itertools.product( + compute_units, + backends, + [(3, 1), (3, 2), (4, 1)], + [True, False] + ), + ) + def test_programmatic( + self, compute_unit, backend, input_block_rank, dynamic): + + input_rank, block_rank = input_block_rank + + # generate data + input_shape = np.random.randint(low=1, high=4, size=input_rank) + block_shape = np.random.randint(low=1, high=3, size=block_rank) + + if backend[0] == "neuralnetwork": + if block_rank == 2 and block_shape[0] != block_shape[1]: + pytest.skip("neuralnetwork backend doesn't support unequal block shape.") + if block_shape[0] == 1: + pytest.skip("neuralnetwork backend doesn't support unity block shape.") + + input_shape[0] = input_shape[0] * np.prod(block_shape) + crops = [] + for i in range(block_rank): + while True: + temp = np.random.randint(low=0, high=4, size=2) + if np.sum(temp) < input_shape[i + 1] * block_shape[i]: + crops.append(temp) + break + crops = np.array(crops) + + if not dynamic: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_shape, crops=crops + ) + + else: + + @make_tf_graph([[None] * input_rank]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_shape, crops=crops + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + # Before rdar://93071454 (batch_to_space is error out in espresso for dynamic inputs cormel model) is fixed, + # we need to specify the default shape for the dynamic model by setting inputs_for_conversion + if dynamic: + shape = tuple([RangeDim(default=dim) for dim in input_shape]) + inputs_for_conversion = [TensorType(shape=shape, dtype=np.float32)] + else: + inputs_for_conversion = None + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_block_crops, dynamic", + itertools.product( + compute_units, + backends, + [ + [(6, 4, 6, 2, 2), [2, 3], [[2, 0], [3, 6]]], + [(4, 4, 6, 1), [1, 2], [[2, 1], [3, 3]]], + [(4, 4, 6, 1, 2), [2, 1], [[0, 0],[0, 0]]], + [(4, 4, 6, 1, 2), [2], [[0, 0]]], + ], + [True, False], + ), + ) + def test_smoke_new_op(self, compute_unit, backend, shape_block_crops, dynamic): + input_shape, block_shape, crops = shape_block_crops + + # The neuralnetwork backend doesn't support these tests + if backend[0] == "neuralnetwork": + return + + tf_input_shape = input_shape if not dynamic else [None] * len(input_shape) + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, 
block_shape=block_shape, crops=crops + ) + + # Before rdar://93071454 (batch_to_space is error out in espresso for dynamic inputs cormel model) is fixed, + # we need to specify the default shape for the dynamic model by setting inputs_for_conversion + if dynamic: + shape = tuple([RangeDim(default=dim) for dim in input_shape]) + inputs_for_conversion = [TensorType(shape=shape, dtype=np.float32)] + else: + inputs_for_conversion = None + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + inputs_for_conversion=inputs_for_conversion, + backend=backend, + ) + +@pytest.mark.skipif(_HAS_TF_2, reason="Fix and re-enable this test: rdar://76293949 (TF2 unit test InvalidArgumentError)") +class TestTensorArray(TensorFlowBaseTest): + @staticmethod + def get_dynamic_elem_shape_model(): + elem_shape = (None, None) + @make_tf_graph([elem_shape]) + def build_model(x): + ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) + ta = ta.write(10, x) + ta = ta.write(9, x) + ta = ta.scatter([3], tf.expand_dims(x, 0)) + ta = ta.scatter([8], tf.expand_dims(x, 0)) + + return ta.stack() + return build_model + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_basic(self, compute_unit, backend): + # TF1: TensorArrayV3, TensorArrayWriteV3, TensorArrayScatterV3, + # TensorArraySizeV3, TensorArrayGatherV3 + # TF2: TensorListReserve, TensorListLength, TensorListSetItem, + # TensorListScatterIntoExistingList, TensorListStack, + # TensorListResize + + elem_shape = (3, 2) + + @make_tf_graph([elem_shape]) + def build_model(x): + ta = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True) + + ta = ta.write(2, x) + + # TensorArray has write-once semantics, and thus we write to a new + # index + # (https://www.tensorflow.org/api_docs/python/tf/TensorArray) + # writing to out of bound index + ta = ta.scatter([3], tf.expand_dims(x, 0)) + + # writing to in-bound index + ta = ta.scatter([0], tf.expand_dims(x, 0)) + + return ta.stack() + + model, inputs, outputs = build_model + input_values = [random_gen(elem_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_tf_dynamic_elem_shape(self, compute_unit, backend): + + # TF1: TensorArrayV3, TensorArrayWriteV3, TensorArrayScatterV3, + # TensorArraySizeV3, TensorArrayGatherV3 + # TF2: TensorListReserve, TensorListLength, TensorListSetItem, + # TensorListScatterIntoExistingList, TensorListStack, + # TensorListResize + model, inputs, outputs = TestTensorArray.get_dynamic_elem_shape_model() + input_values = [random_gen((2, 3))] + input_dict = dict(zip(inputs, input_values)) + _, mlmodel, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, outputs, + compute_unit=compute_unit, + backend=backend) + + # Once rdar://76293949 (TF2 unit test InvalidArgumentError) is fixed, the following milproto frontend tests should be removed + from coremltools.converters.mil.frontend.milproto.test_load import \ + roundtrip_and_compare_mlmodel + if backend[0] != "mlprogram": + pytest.skip("milproto front end only supported in mlprogram") + roundtrip_and_compare_mlmodel(mlmodel, {"Placeholder": 
input_values[0]}) + + @pytest.mark.skip( + reason="[NNv2 TensorArray scatter returns wrong result](rdar://63345281)" + ) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_while_loop(self, compute_unit, backend): + @make_tf_graph([(3, 2)]) + def build_model(x): + def body(i, num_iters, array, update): + return i + 1, num_iters, array.write(i, update), update + + def cond(i, num_iters, array, update): + return i < num_iters + + i = 0 + max_iters = 3 + ta = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True) + _, _, new_ta, _ = tf.while_loop(cond, body, [i, max_iters, ta, x]) + new_ta = new_ta.scatter([max_iters], tf.expand_dims(x, 0)) + + return new_ta.stack() + + model, inputs, outputs = build_model + input_values = [random_gen(shape=(3, 2))] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestBroadcastTo(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes, is_dynamic", + itertools.product( + compute_units, + backends, + [ + ((2,), (2,)), + ((1,), (10,)), + ((3,), (3, 3)), + ((1, 1), (1, 4)), + ((1, 1, 5), (3, 4, 4, 4, 5)), + ((3,), (1, 3, 2, 1, 3)), + ((3, 5), (2, 3, 5)), + ((1, 2), (2, 3, 1, 2)), + ((1, 3, 1, 4), (8, 3, 32, 4)), + ((2, 16), (3, 1, 4, 2, 16)), + ], + [False], + ), + ) + def test(self, compute_unit, backend, shapes, is_dynamic): + input_shape, output_shape = shapes + + if is_dynamic is False: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.broadcast_to(x, output_shape) + + else: # output / target shape is an input (placeholder) + + @make_tf_graph([input_shape, (len(output_shape), tf.int32)]) + def build_model(x, shape): + return tf.broadcast_to(x, shape) + + model, inputs, outputs = build_model + if is_dynamic is False: + input_values = [random_gen(input_shape)] + else: + input_values = [ + random_gen(input_shape), + np.array(output_shape, dtype=np.int32), + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestContribLSTMBlockCell(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch, return_hc_only, has_peephole, has_clip", + itertools.product( + compute_units, + backends, + [1, 2], + [True, False], + [True, False], + [True, False], + ), + ) + def test_tf_no_variable( + self, compute_unit, batch, backend, return_hc_only, has_peephole, has_clip + ): + """ + If return_hc_only == True, the op can be mapped to mb.lstm. + Otherwise it has to be expanded. 
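+
+        _lstm_block_cell returns the 7-tuple (i, cs, f, o, ci, co, h); mb.lstm
+        only models the hidden and cell state outputs, so the hc-only case
+        keeps res[1] (cs) and res[6] (h). Returning any of the other five
+        tensors forces the converter to expand the block instead of emitting a
+        single lstm op.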
+ """ + # _lstm_block_cell allows fine-grained control of W, peephole etc + from tensorflow.contrib.rnn.python.ops.lstm_ops import _lstm_block_cell + + input_dim, hidden_dim = 2, 3 + x_shape = (batch, input_dim) + init_h = np.random.rand(batch, hidden_dim).astype(np.float32) + init_c = np.random.rand(batch, hidden_dim).astype(np.float32) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) + res = _lstm_block_cell( + x, + tf.constant(init_c), + tf.constant(init_h), + w=tf.constant( + np.random.rand(input_dim + hidden_dim, 4 * hidden_dim).astype( + np.float32 + ) + ), + b=tf.constant(np.random.rand(4 * hidden_dim).astype(np.float32)), + use_peephole=has_peephole, + wci=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + wcf=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + wco=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + forget_bias=np.random.rand(), + cell_clip=np.random.rand() if has_clip else -1, + ) + if return_hc_only: + # All other outputs aren't supported by mb.lstm. + res = res[1], res[6] + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, batch", + itertools.product(compute_units, backends, [1, 2],), + ) + def test_tf_lstm_block_cell(self, compute_unit, backend, batch): + # tf.contrib.rnn.LSTMBlockCell runs a single step of an LSTM. It needs to be wrapped + # inside a for loop to handle inputs with sequence length more than 1. In that case, use + # tf.contrib.rnn.LSTMBlockFusedCell + input_dim, hidden_dim = 2, 3 + x_shape = (batch, input_dim) + init_h = np.random.rand(batch, hidden_dim).astype(np.float32) + init_c = np.random.rand(batch, hidden_dim).astype(np.float32) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) + rnn_cell = tf.contrib.rnn.LSTMBlockCell( + hidden_dim, use_peephole=True, forget_bias=np.random.rand() + ) + res = rnn_cell(x, (init_h, init_c)) + cs_new, h_new = res[1][0], res[1][1] + res = [h_new, cs_new] # shape of h_new, cs_new: (batch_dim, hidden_dim) + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, batch_size", + itertools.product(compute_units, backends, [1, 2],), + ) + def test_tf_lstm_block_fused_cell(self, compute_unit, backend, batch_size): + # tf.contrib.rnn.LSTMBlockFusedCell runs an LSTM over a sequence of inputs + input_dim, hidden_dim = 4, 3 + seq_length = 5 + init_h = np.zeros((batch_size, hidden_dim)).astype(np.float32) + init_c = np.zeros((batch_size, hidden_dim)).astype(np.float32) + x_shape = (seq_length, batch_size, input_dim) + with tf.Graph().as_default() as graph: + lstm_cell = tf.contrib.rnn.LSTMBlockFusedCell( + num_units=hidden_dim, + forget_bias=2.0, + cell_clip=None, + use_peephole=False, + ) + + x = tf.placeholder(tf.float32, shape=x_shape) + # shape of output: (seq_length, batch_size, hidden_dim) + # shape of output_state: Tuple of shape ((batch_size, hidden_dim), (batch_size, hidden_dim)) + output, output_state = lstm_cell( + inputs=x, + initial_state=(init_c, init_h), + ) + output = tf.nn.relu(output) + + res = TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + output, + compute_unit=compute_unit, 
+ backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + # check that the resulting program has the LSTM block as a fused op + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert len(mil_prog.find_ops(op_type="lstm")) == 1 + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,), + ) + def test_tf_multiple_lstm_block_fused_cell(self, compute_unit, backend): + ''' + Define a network with a stack of fused LSTM ops: + + %input (shape: (Seq, Batch, idim) == (5, 2, 4)) + %x1 = LSTM(h=10) (%input) # shape = (5, 2, 10) + %x2 = LSTM(h=20) (%x1) # shape = (5, 2, 20) + %x3 = slice()(%x2) # shape = (1, 2, 20), to get the final seq value + %x4 = reshape((1, -1)) (%x3) # shape = (1, 40) + %x5 = Dense(h=3)(%x4) # shape = (1, 3) + ''' + input_dim = 4 + seq_length = 5 + batch_size = 2 + x_shape = (seq_length, batch_size, input_dim) + + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) # shape = (5, 2, 4) + + lstm_cell_1 = tf.contrib.rnn.LSTMBlockFusedCell(num_units=10) + x1, _ = lstm_cell_1(x, dtype=tf.float32) # shape = (5, 2, 10) + lstm_cell_2 = tf.contrib.rnn.LSTMBlockFusedCell(num_units=20) + x2 , _ = lstm_cell_2(x1, dtype=tf.float32) # shape = (5, 2, 20) + x3 = tf.slice(x2, begin=[4, 0, 0], size=[1, 2, 20]) # shape = [1, 2, 20] + x4 = tf.reshape(x3, shape=(1, -1)) # shape = [1, 40] + x5 = tf.linalg.matmul(x4, tf.constant(np.arange(1, 40*3, dtype=np.float32), shape=[40, 3])) # shape: [1, 3] + + res = TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + x5, + compute_unit=compute_unit, + backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + # check that the resulting program has the LSTM block ops as fused ops + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert len(mil_prog.find_ops(op_type="lstm")) == 2 + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestVariable(TensorFlowBaseTest): + @pytest.mark.xfail(reason="Investigate get_global ", run=False) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_no_variable(self, compute_unit, backend): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1,], name="input") + y = tf.Variable([1.0], dtype=tf.float32, name="y") + + # We set our assign op + assign_op = tf.assign(y, y + 10) + + with tf.control_dependencies([assign_op]): + res = tf.multiply(x, y, name="output") + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(1).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestZerosLike(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + if dynamic: + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + reshape_vals = np.array([a, b], dtype=np.int32) + reshape_input_shape = np.array([2], dtype=np.int32) + + @make_tf_graph([input_shape, list(reshape_input_shape) + [tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, shape=reshape) + return 
tf.raw_ops.ZerosLike(x=x) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_vals] + else: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ZerosLike(x=x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIsFinite(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False] + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + def _generate_num_with_inf(input_shape): + res = random_gen(input_shape, rand_min=-1, rand_max=1) + random_map = np.random.choice([np.inf, -np.inf, 0], size=input_shape) + if len(input_shape) == 0: + return random_map.astype(np.float32) + res[np.where(random_map == np.inf)] = np.inf + res[np.where(random_map == -np.inf)] = -np.inf + return res.astype(np.float32) + + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = _generate_num_with_inf(input_shape) + if dynamic: + reshape_shape = [2, tf.int32] + + if len(input_shape) == 0: + reshape_value = np.array([1, 1], dtype=np.int32) + else: + reshape_value = np.array( + [input_shape[0], np.prod(input_shape[1:])], dtype=np.int32 + ) + + @make_tf_graph([input_shape, reshape_shape]) + def build_model(x, reshape): + x = tf.reshape(x, reshape) + x = tf.raw_ops.IsFinite(x=x) + return tf.raw_ops.Cast(x=x, DstT=tf.float32) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_value] + + else: + + @make_tf_graph([input_shape]) + def build_model(x): + x = tf.raw_ops.IsFinite(x=x) + return tf.raw_ops.Cast(x=x, DstT=tf.float32) + + model, inputs, outputs = build_model + input_values = [input_value] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + backend=backend, + compute_unit=compute_unit, + ) + +class TestLogSoftMax(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend', + itertools.product( + compute_units, + backends, + ), + ) + def test(self, compute_unit, backend): + input_shape = (5, 20) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log_softmax(x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + 'compute_unit, backend', + itertools.product( + compute_units, + backends, + ), + ) + def test_numerical_stability(self, compute_unit, backend): + input_shape = (4,) + input_value = np.array([10, 2, 10000, 4], dtype=np.float32) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log_softmax(x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestClipByValue(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend, rank, min_and_max', + itertools.product( + compute_units, + backends, 
+ [rank for rank in range(5)], + [(-1, 1), (-1, -1), (1, 2), (-3, -2)], + ), + ) + def test(self, compute_unit, backend, rank, min_and_max): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=2, high=4, size=rank) + min_val, max_val = min_and_max + input_value = random_gen(input_shape, rand_min=min_val-1, rand_max=max_val+1) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ClipByValue(t=x, clip_value_min=min_val, clip_value_max=max_val) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestSize(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend, rank, dynamic', + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + if dynamic: + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + reshape_vals = np.array([a,b], dtype=np.int32) + reshape_input_shape = np.array([2], dtype=np.int32) + + @make_tf_graph([input_shape, list(reshape_input_shape)+[tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, shape=reshape) + return tf.raw_ops.Size(input=x) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_vals] + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.Size(input=x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +class TestAudioSpectrogram(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params, magnitude_squared", + itertools.product( + compute_units, + backends, + [ + ((100, 2), 5, 10), + ((50, 1), 18, 2), + ((512, 1), 512, 320), + ], + [True, False], + ), + ) + def test_audio_spectrogram(self, compute_unit, backend, params, magnitude_squared): + input_shape = params[0] + window_size = params[1] + stride = params[2] + + @make_tf_graph([input_shape]) + def build_model(x): + y = tf.raw_ops.AudioSpectrogram(input=x, + window_size=window_size, + stride=stride, + magnitude_squared=magnitude_squared) + return y + + model, inputs, outputs = build_model + + input_values = [(2 * np.random.rand(*input_shape) - 1).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestMfcc(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params", + itertools.product( + compute_units, + backends, + [ + ((100, 2), 5, 10, 8000, (40, 4000), 20, 13), + ((50, 1), 18, 2, 4000, (20, 1500), 40, 26), + ((512, 1), 512, 320, 16000, (20, 8000), 40, 26), + ], + ), + ) + def test_mfcc(self, compute_unit, backend, params): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660411 (MFCC FP16 unit tests failing in TF1 converter with numerical errors)") + + input_shape = params[0] + window_size = params[1] + stride = params[2] + sample_rate = params[3] + 
lower_frequency_limit, upper_frequency_limit = params[4] + filterbank_channel_count = params[5] + dct_coefficient_count = params[6] + + @make_tf_graph([input_shape]) + def build_model(x): + y = tf.raw_ops.AudioSpectrogram(input=x, + window_size=window_size, + stride=stride, + magnitude_squared=True) + y_out = tf.raw_ops.Mfcc(spectrogram=y, + sample_rate=sample_rate, + upper_frequency_limit=upper_frequency_limit, + lower_frequency_limit=lower_frequency_limit, + filterbank_channel_count=filterbank_channel_count, + dct_coefficient_count=dct_coefficient_count) + return y_out + + model, inputs, outputs = build_model + + input_values = [(2 * np.random.rand(*input_shape) - 1).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestComplex(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + # Placeholder doesn't support rank-0 input, so we don't use empty shape here. + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_complex_basic(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + return tf.stack([tf.math.real(complex_data), tf.math.imag(complex_data)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestReal(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_real_real_input(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.real(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_real_complex_input(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.math.real(tf.complex(x, y)) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestImag(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_imag_real_input(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + return x + tf.math.imag(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + + 
TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_imag_complex_input(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.math.imag(tf.complex(x, y)) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestFft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_fft_basic(self, compute_unit, backend, input_shape): + # No need to test other parameter combinations because tf.signal.fft doesn't provide API to + # control more fine-grained params such as "n,dim,norm" in PyTorch. + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + fft_res = tf.signal.fft(complex_data) + return tf.stack([tf.math.real(fft_res), tf.math.imag(fft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_fft_directly_output_error(self, compute_unit, backend): + x_shape = [2, 3] + y_shape = [2, 3] + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + return tf.signal.fft(complex_data) + + model, inputs, outputs = build_model + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_fft_nested(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + fft_res1 = tf.signal.fft(complex_data) + fft_res2 = tf.signal.fft(fft_res1) + fft_res3 = tf.signal.fft(fft_res2) + return tf.stack([tf.math.real(fft_res3), tf.math.imag(fft_res3)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestRfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, fft_length, input_shape", + # 
TF requires fft_length be an int32 tensor of shape [1] instead of an integer. + itertools.product( + compute_units, backends, [None, [1], [3], [5]], [[1], [2, 3], [4, 1, 5]] + ), + ) + def test_rfft_basic(self, compute_unit, backend, fft_length, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + rfft_res = tf.signal.rfft(x, fft_length=fft_length) + return tf.stack([tf.math.real(rfft_res), tf.math.imag(rfft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*input_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestIfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_ifft_basic(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + ifft_res = tf.signal.ifft(complex_input) + return tf.stack([tf.math.real(ifft_res), tf.math.imag(ifft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestIrfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, fft_length, input_shape", + # TF requires fft_length be an int32 tensor of shape [1] instead of an integer. + itertools.product( + compute_units, backends, [None, [1], [3], [5]], [[6], [2, 3], [4, 1, 5]] + ), + ) + def test_irfft_basic(self, compute_unit, backend, fft_length, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + return tf.signal.irfft(complex_input, fft_length=fft_length) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[6], [2, 3], [4, 1, 5]]), + ) + def test_fft_length_specify_by_shape(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + return tf.signal.irfft(complex_input, fft_length=[complex_input.shape[-1]]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py new file mode 100644 index 00000000..286c2670 --- /dev/null +++ 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py @@ -0,0 +1,124 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pytest + +pytest.importorskip("tensorflow", minversion="1.15.0") +from tensorflow.core.framework import attr_value_pb2 as attr_value +from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape +from tensorflow.core.framework import types_pb2 as types + +import coremltools.converters.mil.frontend.tensorflow.parse as parse +from coremltools.converters.mil.mil import types as mil_types + + +class TestParse(unittest.TestCase): + def test_parse_list(self): + def compare(expected, lst, field_name): + attr = attr_value.AttrValue() + field = getattr(attr.list, field_name) + field.extend(lst) + + actual = parse.parse_attr(attr) + self.assertEqual(expected, actual) + + compare([1, 2, 3], [1, 2, 3], "i") + compare(["foo", "bar"], [b"foo", b"bar"], "s") + + def test_parse_scalar(self): + def compare(expected, val, field_name): + a = attr_value.AttrValue() + setattr(a, field_name, val) + actual = parse.parse_attr(a) + self.assertEqual(expected, actual) + + compare("a String", b"a String", "s") + compare(55, 55, "i") + compare(True, True, "b") + + attr = attr_value.AttrValue() + attr.f = 12.3 + self.assertAlmostEqual(12.3, parse.parse_attr(attr), places=2) + + @staticmethod + def _attr_with_shape(dims, unknown_rank=0): + attr = attr_value.AttrValue() + for (dim_size, dim_name) in dims: + tf_dim = tensor_shape.TensorShapeProto.Dim() + tf_dim.size = dim_size + tf_dim.name = dim_name + attr.shape.dim.append(tf_dim) + attr.shape.unknown_rank = unknown_rank + return attr + + def test_parse_shape(self): + def compare(expected, dims, unknown_rank=0): + attr = self._attr_with_shape(dims, unknown_rank) + actual = parse.parse_attr(attr) + self.assertEqual(expected, actual) + + compare(None, [], 5) + compare([100], [(100, "outer")]) + compare([1, 2, 3], [(1, "outer"), (2, "middle"), (3, "inner")]) + + def test_parse_tensor(self): + # Zero-rank tensor + attr = attr_value.AttrValue() + attr.tensor.version_number = 1 + attr.tensor.dtype = types.DataType.DT_INT32 + t = parse.parse_attr(attr) + self.assertTrue(isinstance(t, mil_types.int32)) + self.assertEqual(0, t.val) + + # Non-zero rank + attr = attr_value.AttrValue() + attr.tensor.version_number = 1 + attr.tensor.dtype = types.DataType.DT_INT32 + shaped_attr = self._attr_with_shape([(1, "outer"), (2, "middle"), (3, "inner")]) + attr.tensor.tensor_shape.dim.extend(shaped_attr.shape.dim) + attr.tensor.int_val.extend([55, 56, 57]) + + t = parse.parse_attr(attr) + self.assertEqual([55, 56, 57], t.val.tolist()) + self.assertEqual("tensor", mil_types.get_type_info(t).name) + + # Note that the result of t.get_primitive() is a function that returns a type + # rather than an instance of that type as it is when the tensor has rank zero. 
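+        # For example, t.get_primitive() here is the mil_types.int32 class itself,
+        # so it is called below to produce an instance before the isinstance check.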
+        self.assertTrue(isinstance(t.get_primitive()(), mil_types.int32))
+        self.assertEqual((1, 2, 3), t.get_shape())
+
+    def test_parse_type(self):
+        def compare(expected, tf_type):
+            attr = attr_value.AttrValue()
+            attr.type = tf_type
+            self.assertEqual(expected, parse.parse_attr(attr))
+
+        compare(None, types.DataType.DT_INVALID)
+        compare(mil_types.float, types.DataType.DT_FLOAT)
+        compare(mil_types.double, types.DataType.DT_DOUBLE)
+        compare(mil_types.int32, types.DataType.DT_INT32)
+        compare(mil_types.uint8, types.DataType.DT_UINT8)
+        compare(mil_types.int16, types.DataType.DT_INT16)
+        compare(mil_types.int8, types.DataType.DT_INT8)
+        compare(mil_types.str, types.DataType.DT_STRING)
+        compare(None, types.DataType.DT_COMPLEX64)
+        compare(mil_types.int32, types.DataType.DT_INT64)
+        compare(mil_types.bool, types.DataType.DT_BOOL)
+        compare(None, types.DataType.DT_QINT8)
+        compare(None, types.DataType.DT_QUINT8)
+        compare(None, types.DataType.DT_QINT32)
+        compare(None, types.DataType.DT_BFLOAT16)
+        compare(None, types.DataType.DT_QINT16)
+        compare(None, types.DataType.DT_QUINT16)
+        compare(mil_types.uint16, types.DataType.DT_UINT16)
+        compare(None, types.DataType.DT_COMPLEX128)
+        compare(mil_types.fp16, types.DataType.DT_HALF)
+        compare(None, types.DataType.DT_RESOURCE)
+        compare(None, types.DataType.DT_VARIANT)
+        compare(mil_types.uint32, types.DataType.DT_UINT32)
+        compare(mil_types.uint64, types.DataType.DT_UINT64)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py
new file mode 100644
index 00000000..d39bb861
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pytest + +pytest.importorskip("tensorflow", minversion="1.15.0") +from tensorflow.core.framework import node_def_pb2 as node_def +from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape +from tensorflow.core.framework import types_pb2 as types + +from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \ + ParsedTFNode + + +def _mock_tf_node(): + tfnode = node_def.NodeDef() + tfnode.name = "aNode" + tfnode.op = "PlaceholderWithDefault" + tfnode.input.extend(["anInput", "^aControlInput"]) + tfnode.attr["dtype"].type = types.DataType.DT_INT32 + dims = [(1, "outer"), (2, "middle"), (3, "inner")] + for (dim_size, dim_name) in dims: + tf_dim = tensor_shape.TensorShapeProto.Dim() + tf_dim.size = dim_size + tf_dim.name = dim_name + tfnode.attr["shape"].shape.dim.append(tf_dim) + return tfnode + + +class TestParsedTFNode(unittest.TestCase): + def test_init(self): + parsed_node = ParsedTFNode(_mock_tf_node()) + parsed_node.parse_from_attr() + self.assertEqual("aNode", parsed_node.name) + self.assertEqual("Placeholder", parsed_node.op) + self.assertEqual(["anInput"], parsed_node.inputs) + self.assertEqual(["aControlInput"], parsed_node.control_inputs) + + def test_copy(self): + parsed_node = ParsedTFNode(_mock_tf_node()) + parsed_node.parse_from_attr() + copy = parsed_node.copy() + self.assertTrue(isinstance(copy, type(parsed_node))) + props = [ + "name", + "op", + "datatype", + "value", + "inputs", + "control_inputs", + "outputs", + "control_outputs", + "attr", + "original_node", + ] + for prop in props: + self.assertEqual( + getattr(parsed_node, prop), + getattr(copy, prop), + "Mismatch in property {}".format(prop), + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py new file mode 100644 index 00000000..3c820eb2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py @@ -0,0 +1,766 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import tempfile + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, MSG_TF1_NOT_FOUND +from coremltools.converters.mil.testing_utils import ( + assert_cast_ops_count, assert_input_dtype, assert_ops_in_mil_program, + assert_output_dtype, assert_prog_input_type, assert_prog_output_type, + assert_spec_input_image_type, assert_spec_output_image_type, + get_op_types_in_program, verify_prediction) +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.test.api.test_api_examples import TestInputs as _TestInputs + +tf = pytest.importorskip("tensorflow") + +################################################################################# +# Note: all tests are also used as examples in https://coremltools.readme.io/docs +# as a reference. 
+# Whenever any of the following tests fails, we should update the API documentation
+#################################################################################
+
+
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.')
+class TestTensorFlow1ConverterExamples:
+    @staticmethod
+    def test_convert_from_frozen_graph(tmpdir):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            y = tf.nn.relu(x, name="output")
+
+        mlmodel = ct.convert(graph, compute_units=ct.ComputeUnit.CPU_ONLY)
+
+        test_input = np.random.rand(1, 2, 3) - 0.5
+        with tf.compat.v1.Session(graph=graph) as sess:
+            expected_val = sess.run(y, feed_dict={x: test_input})
+        results = mlmodel.predict({"input": test_input})
+        np.testing.assert_allclose(results["output"], expected_val)
+
+    @staticmethod
+    def test_convert_from_frozen_graph_file(tmpdir):
+        # create the model to convert
+
+        # write a toy frozen graph
+        # Note that we usually need to run freeze_graph() on a tf.Graph();
+        # we skip that here as this toy model does not contain any variables
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            y = tf.nn.relu(x, name="output")
+
+        save_path = str(tmpdir)
+        tf.io.write_graph(graph, save_path, "frozen_graph.pb", as_text=False)
+
+        # Create a test sample
+        # -0.5 to have some negative values
+        test_input = np.random.rand(1, 2, 3) - 0.5
+        with tf.compat.v1.Session(graph=graph) as sess:
+            expected_val = sess.run(y, feed_dict={x: test_input})
+
+        # The input `.pb` file is in the frozen graph format that is usually
+        # generated by TensorFlow's utility function `freeze_graph()`
+        pb_path = os.path.join(save_path, "frozen_graph.pb")
+
+        # 3 ways to specify inputs:
+        # (1) Fully specify inputs
+        mlmodel = ct.convert(
+            pb_path,
+            # We specify inputs with name matching the placeholder name.
+            inputs=[ct.TensorType(name="input", shape=(1, 2, 3))],
+            outputs=["output"],
+        )
+
+        # (2) Specify input TensorType without name (when there's only one
+        # input)
+        mlmodel = ct.convert(
+            pb_path,
+            # TensorType name is optional when there's only one input.
+            inputs=[ct.TensorType(shape=(1, 2, 3))],
+            outputs=["output"],
+        )
+
+        # (3) Do not specify inputs at all. `inputs` is optional for TF. When
+        # inputs is not specified, convert() infers inputs from Placeholder
+        # nodes.
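+        # Even when inputs are inferred, `outputs` is still passed here so the
+        # converter knows which node of the frozen graph to treat as the output.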
+ mlmodel = ct.convert(pb_path, outputs=["output"], compute_units=ct.ComputeUnit.CPU_ONLY) + + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["output"], expected_val) + mlmodel_path = os.path.join(save_path, "model.mlmodel") + # Save the converted model + mlmodel.save(mlmodel_path) + + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["output"], expected_val, atol=1e-3) + + @staticmethod + def test_convert_from_saved_model_dir(tmpdir): + # Sample input + test_input = np.random.rand(1, 3, 5) - 0.5 + + # create the model to convert + with tf.compat.v1.Session() as sess: + x = tf.placeholder(shape=(1, 3, 5), dtype=tf.float32) + y = tf.nn.relu(x) + + expected_val = sess.run(y, feed_dict={x: test_input}) + + # Save model as SavedModel + inputs = {"x": x} + outputs = {"y": y} + save_path = str(tmpdir) + tf.compat.v1.saved_model.simple_save(sess, save_path, inputs, outputs) + + # SavedModel directory generated by TensorFlow 1.x + # when converting from SavedModel dir, inputs / outputs are optional + mlmodel = ct.convert(save_path, compute_units=ct.ComputeUnit.CPU_ONLY) + + # Need input output names to call mlmodel + # x.name == 'Placeholder:0'. Strip out ':0' + input_name = x.name.split(":")[0] + results = mlmodel.predict({input_name: test_input}) + # y.name == 'Relu:0'. output_name == 'Relu' + output_name = y.name.split(":")[0] + np.testing.assert_allclose(results[output_name], expected_val) + + + @staticmethod + def test_freeze_and_convert_matmul_graph(): + # testing : https://coremltools.readme.io/docs/tensorflow-1#export-as-frozen-graph-and-convert + graph = tf.Graph() + with graph.as_default(): + x = tf.placeholder(tf.float32, shape=[None, 20], name="input") + W = tf.Variable(tf.truncated_normal([20, 10], stddev=0.1)) + b = tf.Variable(tf.ones([10])) + y = tf.matmul(x, W) + b + output_names = [y.op.name] + + from tensorflow.python.tools.freeze_graph import freeze_graph + + model_dir = tempfile.TemporaryDirectory() + graph_def_file = os.path.join(model_dir.name, "tf_graph.pb") + checkpoint_file = os.path.join(model_dir.name, "tf_model.ckpt") + frozen_graph_file = os.path.join(model_dir.name, "tf_frozen.pb") + + with tf.Session(graph=graph) as sess: + # initialize variables + sess.run(tf.global_variables_initializer()) + # save graph definition somewhere + tf.train.write_graph( + sess.graph, model_dir.name, graph_def_file, as_text=False + ) + # save the weights + saver = tf.train.Saver() + saver.save(sess, checkpoint_file) + + # take the graph definition and weights + # and freeze into a single .pb frozen graph file + freeze_graph(input_graph=graph_def_file, + input_saver="", + input_binary=True, + input_checkpoint=checkpoint_file, + output_node_names=",".join(output_names), + restore_op_name="save/restore_all", + filename_tensor_name="save/Const:0", + output_graph=frozen_graph_file, + clear_devices=True, + initializer_nodes="") + print("Tensorflow frozen graph saved at {}".format(frozen_graph_file)) + ct.convert(frozen_graph_file) + + @staticmethod + def test_convert_tf1_frozen_graph_to_milinternal(tmpdir): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input") + y = tf.nn.relu(x, name="output") + + model = ct.convert(graph, convert_to='milinternal') + assert isinstance(model, ct.converters.mil.Program) + + @staticmethod + def test_mil_op_names_consistency(tmpdir): + ''' + Test to make sure that when the same model is converted to MIL program, + in the same session, it 
gives the same program, with the same op names
+        '''
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 5, 5, 3), name="input")
+            conv = tf.nn.conv2d(
+                x,
+                filter=tf.constant(np.random.rand(1, 1, 3, 5), tf.float32),
+                padding="VALID",
+            )
+            y = tf.nn.relu(conv, name="output")
+
+        mil_prog1 = ct.convert(graph, convert_to='milinternal')
+        # convert the same model again
+        mil_prog2 = ct.convert(graph, convert_to='milinternal')
+
+        # compare op names of the two programs
+        np.testing.assert_array_equal(get_op_types_in_program(mil_prog1), get_op_types_in_program(mil_prog2))
+
+###############################################################################
+# Note: Stress tests for TF1 input / output types
+###############################################################################
+@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.')
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+class TestTf1Inputs(_TestInputs):
+    @staticmethod
+    def test_input_noname():
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            x1 = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input_1")
+            y = tf.nn.relu(x, name="output")
+            y1 = tf.nn.relu(x1, name="output_1")
+
+        with pytest.raises(ValueError) as e:
+            model = ct.convert(
+                graph,
+                inputs=[ct.TensorType(shape=(1, 2, 3))]
+            )
+        expected_error = "Multiple inputs are found in graph, but no input name was provided"
+        assert expected_error == str(e.value)
+
+    @staticmethod
+    def test_input_wrongname():
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            x1 = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input_1")
+            y = tf.nn.relu(x, name="output")
+            y1 = tf.nn.relu(x1, name="output_1")
+
+        with pytest.raises(ValueError) as e:
+            model = ct.convert(
+                graph,
+                inputs=[ct.TensorType(shape=(1, 2, 3), name="wrong_input")]
+            )
+        expected_error = "Input ({}) provided is not found in given tensorflow graph. 
Placeholders in graph are: {}".format("wrong_input", ["input", "input_1"]) + assert expected_error == str(e.value) + + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions") + def test_tf_predict_input(): + TestTf1Inputs._test_variant_input_type_prediction(tf.convert_to_tensor) + +@pytest.fixture +def int32_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.int32, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.int32), name="output") + return graph + +@pytest.fixture +def float32_input_model_add_op(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5.5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def float32_input_model_relu_ops(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + x1 = tf.nn.relu(x) + out = tf.nn.relu(x1, name="output") + return graph + +@pytest.fixture +def int64_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.int64, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.int64), name="output") + return graph + +@pytest.fixture +def float32_two_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input1") + y = tf.placeholder(tf.float32, shape=[10, 20], name="input2") + out = tf.add(x, y, name="output") + return graph + +@pytest.fixture +def float32_two_output_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + y = tf.nn.relu(x) + out2 = tf.nn.relu6(x, name="output2") + out1 = tf.nn.relu(y, name="output1") + return graph + +@pytest.fixture +def rank3_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 3], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_input_model_with_channel_first_output(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 3], name="input") + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return graph + +@pytest.fixture +def rank4_grayscale_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 1], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_grayscale_input_model_with_channel_first_output(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, 
shape=[1, 10, 20, 1], name="input") + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return graph + +@pytest.fixture +def linear_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + # this model will test the fuse_matmul_weight_bias pass + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 2], name="input") + y = tf.matmul(x, tf.constant([1, 2], shape=(2, 4), dtype=tf.float32)) + y = tf.add(y, tf.constant([1, 2, 3, 4], shape=(4,), dtype=tf.float32)) + out = tf.nn.relu(y) + return graph + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestInputOutputConversionAPI: + + def test_input_dtype_inferred(self, int32_input_model): + # test that the input dtype is picked up from TF correctly + mlmodel = ct.convert(int32_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + verify_prediction(mlmodel) + + def test_unsupported_input_dtype_in_tf_graph(self, int64_input_model): + # test that no error is raised when no dtype is provided by the user, + # and the TF graph's input dtype is not supported. + # In this case, it will be mapped to the closest supported dtype + mlmodel = ct.convert(int64_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + verify_prediction(mlmodel) + + def test_input_dtype_user_provided(self, int32_input_model): + # test that provided dtype in the api overrides the input dtype in the TF model + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_invalid_input_dtype(self, int32_input_model): + # error should be raised if a dtype is provided by the user that is not supported + with pytest.raises(TypeError, + match="is unsupported for inputs/outputs of the model" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.int16)], + minimum_deployment_target=ct.target.macOS12) + + with pytest.raises(TypeError, + match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12) + + def test_fp16_input_dtype(self, float32_input_model_add_op, float32_input_model_relu_ops, int32_input_model): + """ + Test that providing fp16 input dtype works with macOS13. + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + # Two consecutive relus are merged in the `merge_consecutive_relus` pass. 
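+        # Hence only a single `relu` op (plus the final cast back to the fp32
+        # output) is expected in the resulting program.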
+ assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops, + int32_input_model): + """ + Same test as test_fp16_input_dtype, but with Float32 precision + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + def test_two_input_model(self, float32_two_input_model): + # test forcing input type of "input1" to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test forcing both inputs to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.int32), + ct.TensorType(name="input2", dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="int32") + + # if names are not provided an error should be raised + with pytest.raises(ValueError): + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(dtype=np.int32), + ct.TensorType(dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + + # test forcing both inputs to be float16 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.float16), + ct.TensorType(name="input2", dtype=np.float16), + ], + minimum_deployment_target=ct.target.macOS13) + assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="fp32") + assert_cast_ops_count(mlmodel, expected_count=1) + verify_prediction(mlmodel) + + def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops): + # test output type + mlmodel = ct.convert(int32_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_ops_in_mil_program(mlmodel, 
expected_op_list=["add"]) + assert_output_dtype(mlmodel, expected_type_str="int32") + + # test that error is raised when an output of unknown name is provided + with pytest.raises(Exception): + # output name does not exist in the model + mlmodel = ct.convert(int32_input_model, + outputs=["z"], + minimum_deployment_target=ct.target.macOS12) + + # test that error is raised when two outputs are provided without names + with pytest.raises(ValueError, match=", does not have names"): + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32), ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + + # test that an error is raised when shape is provided for the output + with pytest.raises(ValueError): + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + + # test that the output dtype provided by the user is applied during conversion + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + + # test that output dtype of float16 is rejected when deployment target is low + with pytest.raises(TypeError, + match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13" + ): + ct.convert(float32_input_model_relu_ops, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that output type float16 is applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + + # test that input and output types float16 are applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["relu"]) + verify_prediction(mlmodel) + + def test_multi_output_model(self, float32_two_output_model): + # check that error is raised when only 1 output provided + with pytest.raises(ValueError, match="please provide names for each of the outputs"): + mlmodel = ct.convert(float32_two_output_model, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + + # check that error is raised when multiple outputs are provided without names + with pytest.raises(ValueError, match="please provide names for each of the outputs"): + mlmodel = ct.convert(float32_two_output_model, + outputs=[ct.TensorType(dtype=np.float16), ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + + # set 1 output to float16 and the other to float32 + output1_name = "Identity" if _HAS_TF_2 else "output1" + output2_name = "Identity_1" if _HAS_TF_2 else "output2" + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(dtype=np.float16)], + 
outputs=[ct.TensorType(name=output2_name, dtype=np.float16), + ct.TensorType(name=output1_name, dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=1) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name=output1_name, index=1) + assert_input_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + # in this case only the single output will be selected + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(dtype=np.float16)], + outputs=[ct.TensorType(name=output2_name, dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=0) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0) + assert_input_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_input(self, rank4_input_model, rank3_input_model): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS12, + ) + + def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model): + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + # invalid shape + with pytest.raises(ValueError): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that grayscale_16 raises error when used with neural network + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + 
inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_output(self, rank4_input_model, rank4_input_model_with_channel_first_output): + # check that an error is raised if the output shape is not of form (1, 3, H, W) + with pytest.raises(ValueError, match="Shape of the RGB/BGR image output,"): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + # check neural network conversion + mlmodel = ct.convert(rank4_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + verify_prediction(mlmodel) + + def test_grayscale_output(self, rank4_grayscale_input_model, rank4_grayscale_input_model_with_channel_first_output): + # check that an error is raised if the output shape is not of form (1, 1, H, W) + with pytest.raises(ValueError, match="Shape of the Grayscale image output,"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + + with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + 
inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_cast_ops_count(mlmodel, expected_count=0)
+        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        verify_prediction(mlmodel)
+
+        mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
+                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
+                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
+        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
+        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
+        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        verify_prediction(mlmodel)
+
+
+    def test_linear_model(self, linear_model):
+        # this tests the fuse_matmul_weight_bias pass when the inputs are of type float16
+        mlmodel = ct.convert(linear_model,
+                             inputs=[ct.TensorType(dtype=np.float16)],
+                             outputs=[ct.TensorType(dtype=np.float16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_input_dtype(mlmodel, expected_type_str="fp16")
+        assert_output_dtype(mlmodel, expected_type_str="fp16")
+        assert_ops_in_mil_program(mlmodel, ["linear", "relu"])
+        verify_prediction(mlmodel)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py
new file mode 100644
index 00000000..08dc9d9b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py
@@ -0,0 +1,373 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+import tempfile
+
+import numpy as np
+import pytest
+
+import coremltools.models.utils as coremltoolsutils
+from coremltools._deps import _HAS_TF_2
+from coremltools.converters.mil.testing_reqs import ct
+from coremltools.converters.mil.testing_utils import (compare_backend,
+                                                      ct_convert)
+
+tf = pytest.importorskip("tensorflow", minversion="1.15.0")
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.keras.saving import saving_utils as _saving_utils
+from tensorflow.python.tools.freeze_graph import freeze_graph as freeze_g
+
+
+def make_tf_graph(input_types):
+    """
+    Decorator to help construct a TensorFlow 1.x model.
+
+    Parameters
+    ----------
+    input_types: list of tuple or list of list
+        List of input types. E.g. [(3, 224, 224, tf.int32)] represents one input,
+        with shape (3, 224, 224) and expected data type tf.int32. The
+        dtype is optional; if it's missing, tf.float32 will be used.
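+
+        For example (an illustrative sketch; the op body is arbitrary)::
+
+            @make_tf_graph([(2, 3), (2, 3, tf.int32)])
+            def build_model(x, y):
+                return tf.add(x, tf.cast(y, tf.float32))
+
+            model, inputs, outputs = build_model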
+
+    Returns
+    -------
+    tf.Graph, list of str, list of str
+    """
+
+    def wrapper(ops):
+        with tf.Graph().as_default() as model:
+            inputs = []
+            for input_type in input_types:
+                input_type = tuple(input_type) if input_type is not None else None
+                if input_type is not None and len(input_type) > 0 and isinstance(input_type[-1], dtypes.DType):
+                    shape, dtype = input_type[:-1], input_type[-1]
+                else:
+                    shape, dtype = input_type, tf.float32
+                inputs.append(tf.placeholder(shape=shape, dtype=dtype))
+
+            outputs = ops(*inputs)
+        return model, inputs, outputs
+
+    return wrapper
+
+
+def get_tf_keras_io_names(model):
+    """
+    Utility function to get tf.keras input/output names from a tf.keras model.
+
+    Parameters
+    ----------
+    model: tf.keras.Model
+    """
+    input_names, output_names = [], []
+    try:
+        # The order of outputs in conc_func.structured_outputs is the same order
+        # that Keras predicts in, which can be different from model.outputs
+        input_signature = _saving_utils.model_input_signature(
+            model, keep_original_batch_size=True
+        )
+        fn = _saving_utils.trace_model_call(model, input_signature)
+        conc_func = fn.get_concrete_function()
+        for key in conc_func.structured_outputs:
+            output_names.append(conc_func.structured_outputs[key].name.split(":")[0])
+    except:
+        for o in model.outputs:
+            output_names.append(o.name.split(":")[0].split("/")[-1])
+    for name in model.input_names:
+        input_names.append(name.split(":")[0])
+    return input_names, output_names
+
+
+def get_tf_node_names(tf_nodes, mode="inputs"):
+    """
+    Inputs:
+        - tf_nodes: list[str]. Names of target placeholders or output variable.
+        - mode: str. When mode == 'inputs', strip the tensor suffix from the
+          input names, so for instance 'placeholder:0' becomes 'placeholder'.
+          When mode == 'outputs', we keep the original suffix number, so
+          'bn:0' will still be 'bn:0'.
+    Returns a list of names for the given TensorFlow nodes. A tensor name's
+    postfix is eliminated if there is no ambiguity; otherwise, the postfix
+    is kept.
+    """
+    if not isinstance(tf_nodes, list):
+        tf_nodes = [tf_nodes]
+    names = list()
+    for n in tf_nodes:
+        tensor_name = n if isinstance(n, str) else n.name
+        if mode == "outputs":
+            names.append(tensor_name)
+            continue
+        name = tensor_name.split(":")[0]
+        if name in names:
+            # keep postfix notation for multiple inputs/outputs
+            names[names.index(name)] = name + ":" + str(names.count(name) - 1)
+            names.append(tensor_name)
+        else:
+            names.append(name)
+    return names
+
+
+def tf_graph_to_mlmodel(
+    graph, feed_dict, output_nodes, frontend="tensorflow",
+    backend=("neuralnetwork", "fp32"), compute_unit=ct.ComputeUnit.CPU_ONLY,
+    inputs_for_conversion=None, minimum_deployment_target=None,
+):
+    """
+    Parameters
+    ----------
+    graph: tf.Graph
+        TensorFlow 1.x model in tf.Graph format.
+    feed_dict: dict of {tf.placeholder: np.array or python primitive}
+        Dict of placeholder and value pairs representing inputs.
+    output_nodes: tf.node or list[tf.node]
+        List of names representing outputs.
+    frontend: str
+        Frontend to convert from.
+    backend: str
+        Backend to convert to.
+    compute_unit: Enum[ct.ComputeUnit]
+        Compute unit for the Core ML model.
+    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
+        Defaults to None. It is passed as-is to the "inputs" argument of the converter.
+    minimum_deployment_target: coremltools.target enumeration
+        Sets the minimum_deployment_target argument of the coremltools.convert function.
+
+    Returns
+    -------
+    MLModel, input values, output names, output nodes
+    """
+    if isinstance(output_nodes, tuple):
+        output_nodes = list(output_nodes)
+    if not isinstance(output_nodes, list):
+        output_nodes = [output_nodes]
+
+    # Convert TF graph.
+    input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
+    output_names = get_tf_node_names(output_nodes, mode="outputs")
+    input_values = {name: val for name, val in zip(input_names, feed_dict.values())}
+
+    inputs = inputs_for_conversion if inputs_for_conversion is not None else None
+
+    mlmodel = ct_convert(
+        graph, inputs=inputs, outputs=output_names, source=frontend, convert_to=backend,
+        compute_units=compute_unit,
+        minimum_deployment_target=minimum_deployment_target,
+    )
+
+    return mlmodel, input_values, output_names, output_nodes
+
+
+def load_tf_pb(pb_file):
+    """
+    Loads a .pb file into a tf.Graph.
+    """
+    # We load the protobuf file from disk and parse it to retrieve the
+    # deserialized graph_def
+    with tf.io.gfile.GFile(pb_file, "rb") as f:
+        graph_def = tf.compat.v1.GraphDef()
+        graph_def.ParseFromString(f.read())
+
+    # Then, we import the graph_def into a new Graph and return it
+    with tf.Graph().as_default() as graph:
+        # The name var will prefix every op/node in the graph.
+        # Since we load everything in a new graph, this is not needed.
+        tf.import_graph_def(graph_def, name="")
+    return graph
+
+
+def run_compare_tf(
+    graph,
+    feed_dict,
+    output_nodes,
+    inputs_for_conversion=None,
+    compute_unit=ct.ComputeUnit.CPU_ONLY,
+    frontend_only=False,
+    frontend="tensorflow",
+    backend=("neuralnetwork", "fp32"),
+    atol=1e-04,
+    rtol=1e-05,
+    freeze_graph=False,
+    tf_outputs=None,
+    minimum_deployment_target=None,
+):
+    """
+    Utility function to convert and compare a given TensorFlow 1.x model.
+
+    Parameters
+    ----------
+    graph: tf.Graph
+        TensorFlow 1.x model in tf.Graph format.
+    feed_dict: dict of (tf.placeholder, np.array)
+        Dict of placeholder and value pairs representing inputs.
+    output_nodes: tf.node or list[tf.node]
+        List of names representing outputs.
+    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
+        Defaults to None. It is passed as-is to the "inputs" argument of the converter.
+    compute_unit: Enum[ct.ComputeUnit]
+        Compute unit for the Core ML model.
+    frontend_only: bool
+        If True, skip the prediction call and only validate the conversion.
+    frontend: str
+        Frontend to convert from.
+    backend: str
+        Backend to convert to.
+    atol: float
+        The absolute tolerance parameter.
+    rtol: float
+        The relative tolerance parameter.
+    freeze_graph: bool
+        If True, use the "tensorflow.python.tools.freeze_graph" function
+        to freeze the TF graph prior to conversion. This will ensure that
+        all the variables in the graph have been converted to constants.
+    tf_outputs: float or list[float]
+        If present, used as the TensorFlow predictions.
+    minimum_deployment_target: coremltools.target enumeration
+        Sets the minimum_deployment_target argument of the coremltools.convert function.
+
+    Returns
+    -------
+    Proto, mlmodel, input dictionary, prediction (if possible)
+    """
+    if not isinstance(output_nodes, (tuple, list)):
+        output_nodes = [output_nodes]
+
+    if freeze_graph:
+        with tempfile.TemporaryDirectory() as model_dir:
+            graph_def_file = os.path.join(model_dir, "tf_graph.pb")
+            checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
+            static_model_file = os.path.join(model_dir, "tf_static.pb")
+
+            with tf.Session(graph=graph) as sess:
+                sess.run(tf.global_variables_initializer())
+                if tf_outputs is None:
+                    tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
+                tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
+                saver = tf.train.Saver()
+                saver.save(sess, checkpoint_file)
+                output_node_names = get_tf_node_names(output_nodes, mode="outputs")
+                output_node_names = [name.split(":")[0] for name in output_node_names]
+                output_op_names = ",".join(output_node_names)
+            freeze_g(
+                input_graph=graph_def_file,
+                input_saver="",
+                input_binary=True,
+                input_checkpoint=checkpoint_file,
+                output_node_names=output_op_names,
+                restore_op_name="save/restore_all",
+                filename_tensor_name="save/Const:0",
+                output_graph=static_model_file,
+                clear_devices=True,
+                initializer_nodes="",
+            )
+            graph = load_tf_pb(static_model_file)
+
+    mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
+        graph, feed_dict, output_nodes, frontend, backend,
+        compute_unit=compute_unit,
+        inputs_for_conversion=inputs_for_conversion,
+        minimum_deployment_target=minimum_deployment_target
+    )
+
+    if frontend_only or coremltoolsutils._macos_version() < (10, 13) \
+       or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)):
+        return mlmodel._spec, mlmodel, input_key_values, None
+
+    if tf_outputs is None:
+        with tf.Session(graph=graph) as sess:
+            sess.run(tf.global_variables_initializer())
+            tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
+
+    expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}
+
+    for k, v in input_key_values.items():
+        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
+            input_key_values[k] = v.astype(float)  # Core ML only accepts floats
+
+    pred = None
+    if not coremltoolsutils._has_custom_layer(mlmodel._spec):
+        pred = compare_backend(
+            mlmodel,
+            input_key_values,
+            expected_outputs,
+            atol=atol,
+            rtol=rtol,
+            also_compare_shapes=True,
+            dtype=backend[1],
+        )
+    else:
+        print('Skipping model prediction as it has a custom nn layer!')
+    return mlmodel._spec, mlmodel, input_key_values, pred
+
+
+def layer_counts(spec, layer_type):
+    spec_type_map = {
+        "neuralNetworkClassifier": spec.neuralNetworkClassifier,
+        "neuralNetwork": spec.neuralNetwork,
+        "neuralNetworkRegressor": spec.neuralNetworkRegressor,
+    }
+    nn_spec = spec_type_map.get(spec.WhichOneof("Type"))
+    if nn_spec is None:
+        raise ValueError("MLModel must have a neural network")
+
+    n = 0
+    for layer in nn_spec.layers:
+        if layer.WhichOneof("layer") == layer_type:
+            n += 1
+    return n
+
+
+class TensorFlowBaseTest:
+    testclassname = ''
+    testmodelname = ''
+
+    @pytest.fixture(autouse=True)
+    def store_testname_with_args(self, request):
+        TensorFlowBaseTest.testclassname = type(self).__name__
+        TensorFlowBaseTest.testmodelname = request.node.name
+
+    @staticmethod
+    def run_compare_tf(graph, feed_dict, output_nodes,
+                       inputs_for_conversion=None,
+                       compute_unit=ct.ComputeUnit.CPU_ONLY,
+                       frontend_only=False, frontend="tensorflow",
+                       backend=("neuralnetwork", "fp32"), atol=1e-04, rtol=1e-05,
+                       freeze_graph=False, tf_outputs=None,
+
minimum_deployment_target=None): + + res = run_compare_tf(graph, + feed_dict, + output_nodes, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + frontend_only=frontend_only, + frontend=frontend, + backend=backend, atol=atol, + rtol=rtol, + freeze_graph=freeze_graph, + tf_outputs=tf_outputs, + minimum_deployment_target=minimum_deployment_target + ) + + alist = [] + if res is not None: + alist = list(res) + alist.append(TensorFlowBaseTest.testclassname) + alist.append(TensorFlowBaseTest.testmodelname) + + return tuple(alist) + + @staticmethod + def _op_count_in_mil_program(mlmodel, op_type): + prog = mlmodel._mil_program + return len(prog.find_ops(op_type=op_type)) + + +if _HAS_TF_2: + from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import ( + TensorFlow2BaseTest, make_tf2_graph) + from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + TensorFlowBaseTest + TensorFlowBaseTest.run_compare_tf = TensorFlow2BaseTest.run_compare_tf2 + make_tf_graph = make_tf2_graph + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py new file mode 100644 index 00000000..67dd1921 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from .cond_to_where import cond_to_where +from .constant_propagation import constant_propagation +# graph passes +from .delete_asserts import delete_asserts +from .delete_constant import delete_unnecessary_constant_nodes +# graphdef to tfssa +from .delete_disconnected_nodes import delete_disconnected_nodes +from .functionalize_loops import functionalize_loops +from .fuse_dilation_conv import fuse_dilation_conv +from .insert_get_tuple import insert_get_tuple +from .quantization_pass import quantization_pass +from .tensor_array_transform import tensor_array_resource_removal +from .variable_node_transform import remove_variable_nodes diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py new file mode 100644 index 00000000..678cd585 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py @@ -0,0 +1,130 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools._deps import _HAS_TF_2 + +from ..basic_graph_ops import delete_node, disconnect_edge +from .visitors import FindAllUpstreamTerminals + + +def compute_max_rank(graph): + # highly inefficient way to calculate the rank of every node + ret = {} + # begin at max rank + for v in graph.keys(): + if len(graph[v].inputs) == 0: + ret[v] = 0 + else: + ret[v] = len(graph) + + changes = True + while changes: + changes = False + for v in graph.keys(): + if len(graph[v].inputs) > 0: + rank = max(ret[i] for i in graph[v].inputs) + 1 + if ret[v] != rank: + changes = True + ret[v] = rank + return ret + + +class CondToWhere: + @staticmethod + def _search(g, node_name): + """ + Find the nearest Switch nodes upstream of node_name. + """ + node = g[node_name] + + switches = ( + FindAllUpstreamTerminals(lambda x: x.op == "Switch") + .visit(g, node.name) + .get_result() + ) + if len(switches) == 0: + switches = ( + FindAllUpstreamTerminals( + lambda x: x.op == "Switch" or x.attr.get("was_switch") is not None + ) + .visit(g, node.name) + .get_result() + ) + return switches + + @staticmethod + def _fix_found_cond(g, merge, switches): + """ + Convert a Merge's Switch nodes to Identity ops and the Merge to iff. + """ + if g[switches[0]].op == "Switch": + condition_input = g[switches[0]].inputs[1] + else: + condition_input = g[switches[0]].attr["was_switch"] + + # convert the merge to a select + # TensorFlow seems to ensure the condition that the first + # merge input is the True branch and the second merge input + # is the false branch. + + # we convert switches to identity, detaching to switch condition + for s in switches: + if g[s].op == "Switch": + g[s].op = "Identity" + g[s].attr["was_switch"] = g[s].inputs[1] + # detach input 1: the switch condition + if g[s].inputs[0] == g[s].inputs[1]: + g[s].inputs.pop() + g[g[s].inputs[0]].outputs.pop() + else: + disconnect_edge(g, g[s].inputs[1], s) + + # build the final select + g[merge].op = "iff" + if not _HAS_TF_2: + # swap true branch with false branch to get the right semantics for IFF + g[merge].inputs[0], g[merge].inputs[1] = ( + g[merge].inputs[1], + g[merge].inputs[0], + ) + + g[merge].inputs = [condition_input] + g[merge].inputs + g[condition_input].outputs.append(merge) + + def cond_to_where(self, graph): + stuff_done = False + g = graph + ranks = compute_max_rank(graph) + merges = [a for a in g if g[a].op == "Merge"] + merges = sorted(merges, key=lambda k: ranks[k]) + if len(merges) == 0: + return False + for m in merges: + logger.debug("Fixing cond at merge location: %s", m) + switches = self._search(g, m) + self._fix_found_cond(g, m, switches) + stuff_done = True + + # delete the extra switches that seem to just lead to identities + # which then lead nowhere but into control dependencies + extra_switches = [a for a in g if g[a].op == "Switch"] + for s in extra_switches: + if all( + [g[o].op == "Identity" and len(g[o].outputs) == 0 for o in g[s].outputs] + ): + nodes_to_delete = g[s].outputs + [s] + for d in nodes_to_delete: + delete_node(g, d) + stuff_done = True + return stuff_done + + +def cond_to_where(tfssa): + for k, v in tfssa.functions.items(): + while True: + stuff_done = CondToWhere().cond_to_where(v.graph) + if not stuff_done: + break diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py new file mode 100644 index 00000000..82f42d98 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py @@ -0,0 +1,163 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import gc +from distutils.version import StrictVersion as _StrictVersion + +import tensorflow as tf + +from coremltools import _logger as logger +from coremltools._deps import _get_version +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types.type_mapping import \ + numpy_val_to_builtin_val + +from ..basic_graph_ops import const_determined_nodes + + +def _get_const_nodes(fn): + from tensorflow.core.framework import graph_pb2, node_def_pb2 + + new_graph = graph_pb2.GraphDef() + constant_nodes = set() + constant_node_num_outputs = {} + generated_nodes = [k for k, v in fn.graph.items() if v.original_node is None] + const_nodes_in_this_graph = const_determined_nodes(fn.graph, set(generated_nodes)) + # we can only run TF on nodes with outputs since we must evaluate + # tensors and not ops + const_nodes_in_this_graph = [ + i for i in const_nodes_in_this_graph if fn.graph[i].op != "NoOp" + ] + constant_nodes = constant_nodes.union(set(const_nodes_in_this_graph)) + + # topological sort const nodes + topsort = [] + topsort_set = set() + while len(const_nodes_in_this_graph) > 0: + for n in const_nodes_in_this_graph: + input_names = fn.graph[n].inputs + if len(set(input_names).difference(topsort_set)) == 0: + topsort.append(n) + topsort_set.add(n) + + const_nodes_in_this_graph = set(const_nodes_in_this_graph).difference( + topsort_set + ) + + for node in topsort: + new_node = node_def_pb2.NodeDef() + new_node.CopyFrom(fn.graph[node].original_node) + if "_class" in new_node.attr: + del new_node.attr["_class"] + del new_node.input[:] + new_node.input.extend(fn.graph[node].inputs) + if "_output_shapes" in fn.graph[node].attr: + constant_node_num_outputs[node] = len(fn.graph[node].attr["_output_shapes"]) + else: + constant_node_num_outputs[node] = 1 + new_graph.node.extend([new_node]) + del new_node + gc.collect() + return new_graph, list(constant_nodes), constant_node_num_outputs + + +def _constant_propagation(fn, new_graph, constant_nodes, constant_node_num_outputs): + try: + if len(constant_nodes) > 0: + with tf.Graph().as_default() as graph: + tf.import_graph_def(new_graph, name="") + + # We're only making one call to `sess.run()` in order to compute constant values. + # In this context, the default optimization settings make everything dramatically + # slower and more memory-intensive. 
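+                # Note: the two branches below differ only in the config API
+                # available for the installed TF version; both create a
+                # Session with graph optimizations turned down (the newer API
+                # also disables Grappler's meta optimizer).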
+ if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + session_config = tf.ConfigProto() + session_config.graph_options.optimizer_options.opt_level = ( + tf.OptimizerOptions.L0 + ) + sess = tf.Session(graph=graph, config=session_config) + else: + session_config = tf.compat.v1.ConfigProto() + session_config.graph_options.optimizer_options.opt_level = ( + tf.compat.v1.OptimizerOptions.L0 + ) + session_config.graph_options.rewrite_options.disable_meta_optimizer = ( + True + ) + sess = tf.compat.v1.Session(graph=graph, config=session_config) + + query_list = list() + control_flow_ops = list() + for c in constant_nodes: + for j in range(constant_node_num_outputs[c]): + query = c + ":" + str(j) + lower_query = query.lower() + if "switch" in lower_query or "cond" in lower_query: + control_flow_ops.append(query) + else: + query_list.append(query) + result_list = sess.run(query_list) + result = { + query_list[i]: result_list[i] for i in range(len(query_list)) + } + # propagate switch one by one + for op in control_flow_ops: + try: + res = sess.run([op]) + result.update({op: res[0]}) + except: + logger.warning( + '[Constant Propagation] Skip "dead" tensor: {}'.format( + op + ) + ) + result.update({op: None}) + + sess.close() + + for k, v in fn.graph.items(): + if k in constant_node_num_outputs: + if constant_node_num_outputs[k] == 1: + result_entry = k + ":0" + try: + v.value, v.datatype = numpy_val_to_builtin_val( + result[result_entry] + ) + except: + logger.error(result_entry) + logger.error(result[result_entry]) + else: + values = [ + result[k + ":" + str(i)] + for i in range(constant_node_num_outputs[k]) + ] + try: + npval = [numpy_val_to_builtin_val(i) for i in values] + v.datatype = types.tuple(tuple([val[1] for val in npval])) + v.value = v.datatype() + for idx, val in enumerate(npval): + v.value.val[idx] = val[0] + except: + logger.error(values) + for k, v in fn.graph.items(): + if v.op == "get_tuple": + inp = fn.graph[v.inputs[0]] + idx = v.attr["index"] + if inp.value is not None: + v.value = inp.value.val[idx] + v.datatype = inp.datatype.T[idx] + + except Exception as e: + logger.exception("Constant Propagation pass failed: {}".format(e)) + + +def constant_propagation(tfssa): + # we are going to rely on the TensorFlow graph to perform constant + # propagation. For each graph, we construct a new graph comprising + # only a subset of nodes that are constant nodes. + + for f in tfssa.functions.values(): + const_nodes_info = _get_const_nodes(f) + _constant_propagation(f, *const_nodes_info) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py new file mode 100644 index 00000000..15fe20ec --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import sys +from coremltools import _logger as logger + +from ..basic_graph_ops import delete_node + + +sys.setrecursionlimit(5000) # increase recursion limit to support convert large models + + +def _all_assert_leaves(gdict, nodename, memo): + """ + Does the given node lead to only assertions? 
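+    Implemented as an iterative traversal with an explicit work stack and
+    memoization, so deep graphs do not overflow Python's recursion limit.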
+
+    Args:
+        gdict (dict): The node's graph.
+        nodename (str): The name of the node to test.
+        memo (dict): Storage for memoization.
+    """
+    work = [nodename]
+    while True:
+        assert len(work) <= len(gdict)  # If this fails, the algorithm is broken
+        node = gdict[work.pop()]
+
+        # Entries in memo have one of the following values for a given node:
+        #   None: the node is in the stack; this node is downstream.
+        #   True: the node is an assertion or leads only to assertions.
+        #   False: the node does not lead only to assertions.
+        if not isinstance(memo.get(node.name), bool):
+            memo[node.name] = None
+            outputs = node.outputs
+            if len(outputs) == 0:
+                # Leaf node: stack shrinks
+                memo[node.name] = node.op in ("Assert", "CheckNumerics")
+            else:
+                outputs_to_process = [n for n in outputs if n not in memo]
+                if len(outputs_to_process) == 0:
+                    # Non-leaf node with fully processed outputs: stack shrinks
+                    memo[node.name] = all(memo[n] for n in outputs)
+                else:
+                    # Non-leaf node with unprocessed outputs: stack grows
+                    work.append(node.name)
+                    work.extend(outputs_to_process)
+        if len(work) == 0:
+            return memo[node.name]
+
+
+def delete_asserts(tfssa):
+    """
+    Delete all nodes that lead only to assertions.
+    """
+    delete_count = 0
+    for f in tfssa.functions.values():
+        memo = {}
+        for n in f.graph:
+            _all_assert_leaves(f.graph, n, memo)
+        for m in memo:
+            if memo[m]:
+                delete_count += 1
+                delete_node(f.graph, m)
+    logger.debug("%d assert nodes deleted", delete_count)
+    return delete_count
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py
new file mode 100644
index 00000000..9cc7ffbd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger + +from ..basic_graph_ops import check_connections, delete_node, disconnect_edge + + +def convert_constant_nodes_to_const_ops(tfssa): + """ + Convert nodes with known constant value to Const nodes + """ + for fn_key in list(tfssa.functions.keys()): + f = tfssa.functions[fn_key] + for k in list(f.graph.keys()): + v = f.graph.get(k, None) + if v is None: + continue + if v.value is not None: + v.op = "Const" + # delete all upstream edges now that this is constant + inv = v.inputs[:] + for i in inv: + curnode = i + nextnode = v.name + disconnect_edge(f.graph, curnode, nextnode) + + # keep deleting upwards as long as it is a chain + while curnode is not None: + prevnode = None + if len(f.graph[curnode].outputs) == 0: + if len(f.graph[curnode].inputs) == 1: + prevnode = f.graph[curnode].inputs[0] + delete_node(f.graph, curnode) + curnode = prevnode + + +def delete_nodes_with_only_constant_descendents(tfssa): + # look for nodes whose value is known AND downstream values are known + # and delete them + delete_count = 0 + for fn_key in list(tfssa.functions.keys()): + f = tfssa.functions[fn_key] + keys = list(f.graph.keys()) + for k in keys: + if k not in f.graph: + continue + to_delete = (f.graph[k].value is not None) and (k not in f.outputs) + if to_delete: + # check the outputs + for o in f.graph[k].outputs: + if f.graph[o].value is None: + to_delete = False + else: + disconnect_edge(f.graph, k, o) + if to_delete: + delete_count += 1 + delete_node(f.graph, k) + # also delete all Const nodes with no descendants + keys = list(f.graph.keys()) + for k in keys: + if k not in f.graph: + continue + if ( + f.graph[k].op == "Const" + and len(f.graph[k].outputs) == 0 + and (k not in f.outputs) + ): + delete_count += 1 + delete_node(f.graph, k) + return delete_count + + +def delete_unnecessary_constant_nodes(tfssa): + delete_count = delete_nodes_with_only_constant_descendents(tfssa) + for f in list(tfssa.functions.values()): + check_connections(f.graph) + convert_constant_nodes_to_const_ops(tfssa) + logger.debug("%s nodes deleted", delete_count) + return delete_count diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py new file mode 100644 index 00000000..9a83956a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py @@ -0,0 +1,21 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def delete_disconnected_nodes(gd):
+    # delete all nodes with no inputs and outputs
+    empty_nodes = []
+    for k, v in gd.items():
+        if (
+            len(gd[k].inputs) == 0
+            and len(gd[k].outputs) == 0
+            and len(gd[k].control_inputs) == 0
+            and len(gd[k].control_outputs) == 0
+            and gd[k].op != "Placeholder"
+        ):
+            empty_nodes.append(k)
+
+    for k in empty_nodes:
+        del gd[k]
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py
new file mode 100644
index 00000000..f3fb006a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py
@@ -0,0 +1,469 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+
+from ..basic_graph_ops import (connect_dests, connect_edge, connect_sources,
+                               delete_node, disconnect_edge, replace_dest,
+                               replace_source)
+from ..parsed_tf_node import ParsedTFNode
+from ..tfssa import SSAFunction
+from .visitors import (FindAllReachableNodes, FindImmediateDownstreamNodes,
+                       FindImmediateUpstreamNodes, FindSubgraph)
+
+
+class FunctionalizeLoops:
+    """
+    Turns while loops in a TensorFlow dataflow graph into the functional form:
+    while(cond_function, body_function)
+
+    Usage:
+    Given a graph in tfssa (the NetworkEnsemble defined in network.py) form,
+    this will functionalize *ONE* loop in the function passed in:
+
+    f = FunctionalizeLoops()
+    ret = f.functionalize_loops(tfssa, "main")
+
+    If ret is True, one loop has been functionalized and the new functions
+    added to tfssa. If False, there is no loop to functionalize.
+
+    Generally, repeated calls are necessary to catch all loops; rather than
+    calling this directly, use the module-level functionalize_loops() helper,
+    which repeats until no loops remain.
+ """ + + def __init__(self): + self.exits = None + self.merges = None + self.enters = None + self.constant_enters = None + self.switches = None + self.subgraph = None + self.loopcond = None + self.is_constant = None + self.next_iterations = None + self.cond = None + self.body = None + + def _search(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + # we look for NextIteration nodes + assert node.op == "Enter" + + frame_name = node.attr["frame_name"] + logger.debug("Fixing frame name: %s", frame_name) + # find all the enter args + # this is basically the enter frame + # functionalize_control_flow.cc:FunctionalizeControlFlow (1160-1196) + self.enters = [ + k for k, v in g.items() if v.attr.get("frame_name", "") == frame_name + ] + self.is_constant = [ + bool(g[n].attr.get("is_constant", False)) for n in self.enters + ] + self.merges = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Merge") + .visit_many(g, self.enters) + .get_result() + ) + self.next_iterations = ( + FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration") + .visit_many(g, self.merges) + .get_result() + ) + self.switches = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Switch") + .visit_many(g, self.merges) + .get_result() + ) + self.exits = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Exit") + .visit_many(g, self.switches) + .get_result() + ) + self.loopcond = list( + set( + FindImmediateUpstreamNodes(lambda x: x.op == "LoopCond") + .visit_many(g, self.switches) + .get_result() + ) + ) + + self.subgraph = FindSubgraph(self.exits).visit_many(g, self.enters).get_result() + self.cond = FindSubgraph(self.switches).visit_many(g, self.merges).get_result() + self.body = ( + FindSubgraph([node.name] + self.exits) + .visit_many(g, self.switches) + .get_result() + ) + # drop merges and switches from cond and body + self.cond = [ + i for i in self.cond if i not in (self.merges + self.switches + self.enters) + ] + self.body = ( + [i for i in self.body if i not in ([node.name] + self.switches)] + + [node.name] + + self.switches + + self.merges + + self.enters + ) + + # ok. we can now rebuild. 
+ + def _fix_graph_invariants(self, g): + import copy + + check = lambda x: x is not None and len(x) > 0 + check(self.exits) + check(self.merges) + check(self.enters) + check(self.switches) + check(self.subgraph) + check(self.cond) + check(self.loopcond) + assert len(self.loopcond) == 1 + # maintain the invariant of a unique Enter node per argument + # functionalize_control_flow.cc:FunctionalizeLoop (295) + for i in copy.copy(self.enters): + node = g[i] + assert len(node.outputs) > 0 + assert len(node.inputs) == 1 + assert len(node.control_inputs) == 0 + assert len(node.control_outputs) == 0 + if len(node.outputs) == 1: + continue + node_output_copy = copy.copy(node.outputs) + for j in range(1, len(node_output_copy)): + # make a new enter node for each + new_enter_node = copy.deepcopy(node) + new_enter_node.inputs = [] + new_enter_node.outputs = [] + new_enter_node.name = node.name + "/trsplit%d" % (j) + g[new_enter_node.name] = new_enter_node + logger.debug("splitting %s", node.name) + # connect the new node + enter_output = node_output_copy[j] + disconnect_edge(g, node.name, enter_output) + connect_edge(g, new_enter_node.name, enter_output) + connect_sources(g, node.inputs, new_enter_node.name) + # insert into graph + self.enters.append(new_enter_node.name) + + def functionalize_loops(self, tfssa, function_to_functionalize): + g = tfssa.functions[function_to_functionalize].graph + loopni = [a for a in g if g[a].op == "Enter"] + if len(loopni) == 0: + return False + self._search(g, loopni[0]) + + self.constant_enters = [ + self.enters[i] for i in range(len(self.enters)) if self.is_constant[i] + ] + self.enters = [ + self.enters[i] for i in range(len(self.enters)) if not self.is_constant[i] + ] + self._fix_graph_invariants(g) + # for each enter node, find the corresponding downstream merge node + enter_corresponding_merge = [ + FindImmediateDownstreamNodes(lambda x: x.op == "Merge") + .visit(g, enter) + .get_result()[0] + for enter in self.enters + ] + merge_corresponding_ni = [ + FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration") + .visit(g, merge) + .get_result()[0] + for merge in enter_corresponding_merge + ] + switch_corresponding_merge = [] + for merge in enter_corresponding_merge: + switch_after_merge = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Switch") + .visit(g, merge) + .get_result() + ) + if len(switch_after_merge) > 0: + switch_corresponding_merge.append(switch_after_merge[0]) + else: + # There are some situations there is no switch not for a given + # merge. While odd... its ok. 
we construct one + # In this situation there is no Exit either, but it can be + # constructed later on + new_switch_node = ParsedTFNode() + new_switch_node.op = "Switch" + new_switch_node.name = tfssa._find_free_name("fake_switch_") + g[new_switch_node.name] = new_switch_node + connect_edge(g, merge, new_switch_node.name) + connect_edge(g, self.loopcond[0], new_switch_node.name) + switch_corresponding_merge.append(new_switch_node.name) + + exit_corresponding_switch = [] + for switch in switch_corresponding_merge: + res = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Exit") + .visit(g, switch) + .get_result() + ) + if len(res) > 0: + exit_corresponding_switch.append(res[0]) + else: + new_exit_node = ParsedTFNode() + new_exit_node.op = "Exit" + new_exit_node.name = tfssa._find_free_name("fake_exit_") + g[new_exit_node.name] = new_exit_node + connect_edge(g, switch, new_exit_node.name) + exit_corresponding_switch.append(new_exit_node.name) + + while_loop = ParsedTFNode() + while_loop.op = "while" + while_loop.name = tfssa._find_free_name("while_") + g[while_loop.name] = while_loop + + # Build the Loop Condition + + # replace all enters with a single make_tuple + # we replace merge with get_tuple and turn it into a function call + # terminated with LoopCond + make_inputs = ParsedTFNode() + make_inputs.op = "make_tuple" + make_inputs.name = tfssa._find_free_name("make_input_") + g[make_inputs.name] = make_inputs + for enter in self.enters: + replace_dest(g, g[enter].inputs[0], enter, make_inputs.name) + constant_base_index = len(make_inputs.inputs) + for enter in self.constant_enters: + replace_dest(g, g[enter].inputs[0], enter, make_inputs.name) + + connect_edge(g, make_inputs.name, while_loop.name) + connect_dests(g, while_loop.name, exit_corresponding_switch) + + # build the cond function + cond_body = ParsedTFNode() + cond_body.op = "function_entry" + cond_body.name = tfssa._find_free_name("cond_function_") + cond_body.inputs = [] + g[cond_body.name] = cond_body + for merge_idx in range(len(enter_corresponding_merge)): + merge = enter_corresponding_merge[merge_idx] + switch = switch_corresponding_merge[merge_idx] + enter_node = g[self.enters[merge_idx]] + merge_node = g[merge] + if switch is not None: + switch_node = g[switch] + else: + switch_node = None + merge_node.op = "get_tuple" + merge_node.attr = {"index": merge_idx} + # disconnect merge from switch + # disconnect loopcond from switch + disconnect_edge(g, enter_node.name, merge_node.name) + if switch_node is not None: + disconnect_edge(g, merge_node.name, switch_node.name) + disconnect_edge(g, self.loopcond[0], switch_node.name) + for i in merge_node.inputs[:]: + disconnect_edge(g, i, merge_node.name) + connect_edge(g, cond_body.name, merge_node.name) + # delete get_tuple if it does nothing + if len(merge_node.outputs) == 0: + delete_node(g, merge) + + g[self.loopcond[0]].op = "return" + + # build the body function + body = ParsedTFNode() + body.op = "function_entry" + body.name = tfssa._find_free_name("body_function_") + body.inputs = [] + g[body.name] = body + for switch_idx in range(len(switch_corresponding_merge)): + switch = switch_corresponding_merge[switch_idx] + exit = exit_corresponding_switch[switch_idx] + disconnect_edge(g, switch, exit) + + # replace switch with a get_tuple + switch_node = g[switch] + switch_node.op = "get_tuple" + switch_node.attr = {"index": switch_idx} + connect_edge(g, body.name, switch_node.name) + # delete get_tuple if it does nothing + if len(switch_node.outputs) == 0: + delete_node(g, 
switch) + + # replace all next_iteration with a single make_tuple + # we replace merge with get_tuple and turn it into a function call + # terminated with LoopCond + make_outputs = ParsedTFNode() + make_outputs.op = "make_tuple" + make_outputs.name = tfssa._find_free_name("make_output_") + g[make_outputs.name] = make_outputs + for ni in merge_corresponding_ni: + connect_edge(g, g[ni].inputs[0], make_outputs.name) + + # connect constant enters to come from function + # connect constant enters to exit + for idx, enter in enumerate(self.constant_enters): + for output in list(g[enter].outputs): + if output not in self.cond and output not in self.body: + cond_intersection = ( + FindSubgraph(self.cond).visit(g, output).get_result() + ) + body_intersection = ( + FindSubgraph(self.body).visit(g, output).get_result() + ) + if len(cond_intersection) > 0: + cond_intersection.append(output) + self.cond += cond_intersection + if len(body_intersection) > 0: + body_intersection.append(output) + self.body += body_intersection + get_tuple = ParsedTFNode() + get_tuple.op = "get_tuple" + get_tuple.name = tfssa._find_free_name("get_tuple_const_") + get_tuple.attr = {"index": idx + constant_base_index} + g[get_tuple.name] = get_tuple + + if output in self.cond: + connect_edge(g, cond_body.name, get_tuple.name) + elif output in self.body: + connect_edge(g, body.name, get_tuple.name) + replace_source(g, enter, output, get_tuple.name) + + # body must accept and return everything + get_tuple = ParsedTFNode() + get_tuple.op = "get_tuple" + get_tuple.name = tfssa._find_free_name("get_tuple_const_") + get_tuple.attr = {"index": idx + constant_base_index} + g[get_tuple.name] = get_tuple + connect_edge(g, body.name, get_tuple.name) + connect_edge(g, get_tuple.name, make_outputs.name) + + assert len(g[make_outputs.name].inputs) == len(g[make_inputs.name].inputs) + + output_return = ParsedTFNode() + output_return.op = "return" + output_return.name = tfssa._find_free_name("body_return_") + g[output_return.name] = output_return + connect_edge(g, make_outputs.name, output_return.name) + while_loop.attr["cond_function"] = cond_body.name + while_loop.attr["body_function"] = body.name + for i in self.enters: + delete_node(g, i) + for i in self.next_iterations: + delete_node(g, i) + for i in self.constant_enters: + delete_node(g, i) + + for i in range(len(exit_corresponding_switch)): + exit_node = exit_corresponding_switch[i] + g[exit_node].op = "get_tuple" + g[exit_node].attr = {"index": i} + cond_function = ( + FindSubgraph(self.loopcond[0]).visit(g, cond_body.name).get_result() + ) + cond_function = set(cond_function + [self.loopcond[0], cond_body.name]) + body_function = ( + FindSubgraph(output_return.name).visit(g, body.name).get_result() + ) + body_function = set(body_function + [body.name, output_return.name]) + + # trace input constants associated with the cond_graph + # and the body_graph. These constants can only have one consumer + # for now. Any more and we will either need to associate + # it as an argument, or split the constant. 
+ cond_constants = ( + FindImmediateUpstreamNodes(lambda x: x.op == "Const") + .visit_many(g, cond_function) + .get_result() + ) + body_constants = ( + FindImmediateUpstreamNodes(lambda x: x.op == "Const") + .visit_many(g, body_function) + .get_result() + ) + # for const_node in cond_constants + body_constants: + # assert(len(g[const_node].outputs) == 1) + + cond_function = cond_function.union(set(cond_constants)) + body_function = body_function.union(set(body_constants)) + + downstream_cond = ( + FindAllReachableNodes(lambda x: True) + .visit_many(g, cond_function) + .get_result() + ) + downstream_cond = set(downstream_cond) - cond_function + if len(downstream_cond) > 0: + logger.debug( + "Disconnecting unused variables in condition function %s", + downstream_cond, + ) + for i in downstream_cond: + delete_node(g, i) + + downstream_body = ( + FindAllReachableNodes(lambda x: True) + .visit_many(g, body_function) + .get_result() + ) + downstream_body = set(downstream_body) - body_function + if len(downstream_body) > 0: + logger.debug( + "Disconnecting unused variables in body function %s", downstream_body + ) + for i in downstream_body: + delete_node(g, i) + + cond_graph = {k: v for k, v in g.items() if k in cond_function} + body_graph = {k: v for k, v in g.items() if k in body_function} + g = { + k: v + for k, v in g.items() + if k not in cond_function and k not in body_function + } + # localize control dependencies + # In the main graph, reattach the control dependency to the while op + for k, v in g.items(): + for idx in range(len(v.control_inputs)): + if v.control_inputs[idx] not in g: + v.control_inputs[idx] = while_loop.name + while_loop.control_outputs.append(k) + for idx in range(len(v.control_outputs)): + if v.control_outputs[idx] not in g: + v.control_outputs[idx] = while_loop.name + while_loop.control_inputs.append(k) + + # in the cond and body graphs, drop non-local control dependencies + # entirely + for graph in [cond_graph, body_graph]: + for k, v in graph.items(): + for idx in range(len(v.control_inputs) - 1, -1, -1): + if v.control_inputs[idx] not in graph: + v.control_inputs.pop(idx) + + for idx in range(len(v.control_outputs) - 1, -1, -1): + if v.control_outputs[idx] not in graph: + v.control_outputs.pop(idx) + tfssa.functions[function_to_functionalize] = SSAFunction(g) + tfssa.add_function(cond_body.name, SSAFunction(cond_graph)) + tfssa.add_function(body.name, SSAFunction(body_graph)) + return True + + +def functionalize_loops(tfssa): + """ + Functionalize all loops in an tfssa + """ + done = False + while not done: + done = True + for f in list(tfssa.functions.keys()): + functionalize = FunctionalizeLoops() + ret = functionalize.functionalize_loops(tfssa, f) + if ret: + done = False diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py new file mode 100644 index 00000000..ddf3b7af --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py @@ -0,0 +1,215 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from ..basic_graph_ops import delete_node, replace_source + + +def _try_same(input_h, input_w, W_h, W_w, dilation_factor, padding, crop): + base_paddings = [0] * 4 + + dilated_W_h = dilation_factor[0] * (W_h - 1) + 1 + dilated_W_w = dilation_factor[1] * (W_w - 1) + 1 + + base_paddings[0] = (dilated_W_h - 1) // 2 + base_paddings[1] = dilated_W_h - 1 - (dilated_W_h - 1) // 2 + base_paddings[2] = (dilated_W_w - 1) // 2 + base_paddings[3] = dilated_W_w - 1 - (dilated_W_w - 1) // 2 + + pad_start_h = base_paddings[0] + pad_start_w = base_paddings[2] + orig_pad_end_h = base_paddings[1] + orig_pad_end_w = base_paddings[3] + full_input_h = input_h + pad_start_h + orig_pad_end_h + full_input_w = input_w + pad_start_w + orig_pad_end_w + pad_end_extra_h = ( + dilation_factor[0] - full_input_h % dilation_factor[0] + ) % dilation_factor[0] + pad_end_extra_w = ( + dilation_factor[1] - full_input_w % dilation_factor[1] + ) % dilation_factor[1] + pad_end_h = orig_pad_end_h + pad_end_extra_h + pad_end_w = orig_pad_end_w + pad_end_extra_w + + return ( + padding[0] == pad_start_h + and padding[1] == pad_end_h + and padding[2] == pad_start_w + and padding[3] == pad_end_w + and crop[0] == 0 + and crop[1] == pad_end_extra_h + and crop[2] == 0 + and crop[3] == pad_end_extra_w + ) + + +def _pattern_match_and_rewrite(gddict, conv_op): + node = gddict[conv_op] + channel_first = node.attr["data_format"].startswith("NC") + + if len(node.inputs) == 0 or len(node.outputs) == 0: + return + + prev_node = gddict[node.inputs[0]] + next_node = gddict[node.outputs[0]] + + expand_node = None + squeeze_node = None + # Check for Conv1D cases + if prev_node.op == "ExpandDims": + # All Conv1D has ExpandDims and Squeeze as pairs. + if next_node.op != "Squeeze": + return + + expand_node = prev_node + squeeze_node = next_node + + if len(prev_node.inputs) == 0 or len(next_node.outputs) == 0: + return + prev_node = gddict[prev_node.inputs[0]] + next_node = gddict[next_node.outputs[0]] + + # Check if Conv1D/Conv2D is surrounded by SpaceToBatchND and BatchToSpaceND + if prev_node.op != "SpaceToBatchND" or next_node.op != "BatchToSpaceND": + return + else: + stb_node = prev_node + bts_node = next_node + + dilation_node = gddict[stb_node.inputs[1]] + if dilation_node.value is None: + return + dilation_factor = dilation_node.value.val + if gddict[bts_node.inputs[1]].value is None or np.any( + dilation_factor != gddict[bts_node.inputs[1]].value.val + ): + # If SpaceToBatchND and BatchToSpaceND doesn't match, we do not fuse. + return + + padding_node = gddict[stb_node.inputs[2]] + if padding_node.value is None: + return + padding_val = padding_node.value.val.flatten() + + crop_node = gddict[bts_node.inputs[2]] + if crop_node.value is None: + return + crop_val = crop_node.value.val.flatten() + + if expand_node: + dilation_factor = [1] + list(dilation_factor) + padding_val = [0, 0] + list(padding_val) + crop_val = [0, 0] + list(crop_val) + # Trying to inverse the logic of TF generating padding/cropping values for + # SpaceToBatchND and BatchToSpaceND with different padding values in Conv2D. 
+ # Logic extracted from TF's builder at: + # tensorflow/python/ops/nn_ops.py and tensorflow/python/ops/array_ops.py + is_same = False + if np.any(padding_val != 0): + input_shape = gddict[stb_node.inputs[0]].attr.get("_output_shapes", None) + if input_shape is None: + input_shape = gddict[stb_node.inputs[0]].attr.get("shape", None) + else: + input_shape = input_shape[0] + W_node = gddict[node.inputs[1]] + W_shape = None if W_node.op != "Const" else W_node.datatype.get_shape() + if input_shape is None or W_shape is None: + return + W_h, W_w = W_shape[0], W_shape[1] + HW = input_shape[2:] if channel_first else input_shape[1:-1] + if expand_node: + HW = [1] + list(HW) + is_same = _try_same( + HW[0], HW[1], W_h, W_w, dilation_factor, padding_val, crop_val + ) + + # Re-wiring the nodes to skip SpaceToBatchND. + # We change BatchToSpaceND to Identity since it might be a terminate op. + deleted_nodes = set() + if expand_node: + replace_source(gddict, stb_node, expand_node, stb_node.inputs[0]) + else: + replace_source(gddict, stb_node, node, stb_node.inputs[0]) + + bts_node.op = "Identity" + bts_node.attr = {} + + deleted_nodes.update(stb_node.inputs[1:]) + deleted_nodes.update([stb_node.name]) + deleted_nodes.update(bts_node.inputs[1:]) + + # Rewrite dilation attribute for (Depthwise)Conv2D + dilation_val = ( + [1, 1] + list(dilation_factor) + if node.attr["data_format"] == "NCHW" + else [1] + list(dilation_factor) + [1] + ) + node.attr["dilations"] = dilation_val + # Rewrite padding attribute for (Depthwise)Conv2D + # This is due to, TF always plug in VALID padding for Conv2D after + # SpaceToBatchND. If, the original Conv2D is SAME padding, TF would + # automatically insert padding, therefore, we set it as SAME over here. + if is_same: + node.attr["padding"] = "SAME" + + # Removing stale attributes for nodes. + if expand_node and "_output_shapes" in expand_node.attr: + del expand_node.attr["_output_shapes"] + if squeeze_node and "_output_shapes" in squeeze_node.attr: + del squeeze_node.attr["_output_shapes"] + if "_output_shapes" in node.attr: + del node.attr["_output_shapes"] + if expand_node and "shape" in expand_node.attr: + del expand_node.attr["shape"] + if squeeze_node and "shape" in squeeze_node.attr: + del squeeze_node.attr["shape"] + if "shape" in node.attr: + del node.attr["shape"] + + for d in deleted_nodes: + delete_node(gddict, d) + + +def _fuse_dilation_conv(gddict): + """ + A dilated convolution in older tensorflow versions might not be fused in the + Conv2D or DepthwiseConv2D op, but represented with the following format: + + SpaceToBatchND -> (Depthwise)Conv2D -> BatchToSpaceND + + We try to fuse it back into (Depthwise)Conv2D with the dilation parameter + set in attribute. + There are several patterns that exist in tensorflow for breaking up dilation + convolutions. We detect the following patterns: + + SpaceToBatchND -> ExpandDims -> Conv2D -> Squeeze -> BatchToSpaceND + + SpaceToBatchND -> Conv2D -> BatchToSpaceND + + The first case appears when Conv1D is used, TF expands/squeeze the inputs to + conform Conv2D pattern. + The second case is a basic Conv2D pattern. + + """ + for name in list(gddict.keys()): + if name not in gddict: + # Node might have been removed from graph during fusion. 
continue
+        node = gddict[name]
+        if node.op in {"Conv2D", "DepthwiseConv2dNative"}:
+            _pattern_match_and_rewrite(gddict, name)
+
+
+def fuse_dilation_conv(tfssa):
+    """
+    TensorFlow decomposes a depthwise convolution with dilation into:
+
+    SpaceToBatchND ---> Conv2D/DepthwiseConv2D ---> BatchToSpaceND
+
+    We identify such a pattern and use a single Conv2D/DepthwiseConv2D to
+    represent it.
+    """
+    for f in tfssa.functions.keys():
+        _fuse_dilation_conv(tfssa.functions[f].graph)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py
new file mode 100644
index 00000000..bca2a4b4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+
+from ..parsed_tf_node import ParsedTFNode
+
+
+def insert_get_tuple(gddict):
+    """
+    TensorFlow uses input "nodename:i" to denote "get tuple i" from "nodename".
+    Here we split it so that:
+
+    node1:i -> node2
+
+    gets transformed into
+
+    node1 -> get_tuple(i) --> node2
+
+    Takes a graph in "dict{str, ParsedTFNode}" form, and returns a new graph.
+
+    We do not do this for control flow nodes (Switch, Enter, Exit, Merge,
+    LoopCond, NextIteration). For these nodes, we just convert
+
+    node1:i -> node2
+
+    to
+
+    node1 -> node2
+    """
+    retdict = {}
+    get_tuple_op_var_index = 1
+
+    inserted_ops = {}
+
+    def make_op(input_node, index, new_node_name, gto_make_op_cache):
+        cache_key = (
+            input_node,
+            index,
+        )
+        if cache_key in gto_make_op_cache:
+            return gto_make_op_cache[cache_key]
+
+        inserted_op_name = new_node_name
+        inserted_op = ParsedTFNode()
+        inserted_op.name = inserted_op_name
+        inserted_op.op = "get_tuple"
+        inserted_op.inputs = [input_node]
+        inserted_op.attr["index"] = index
+        inserted_ops[inserted_op_name] = inserted_op
+        gto_make_op_cache[cache_key] = inserted_op
+        return inserted_op
+
+    exclusions = [
+        "Switch",
+        "Enter",
+        "Exit",
+        "Merge",
+        "LoopCond",
+        "NextIteration",
+        "TensorArrayV3",
+        "Const",
+    ]
+    inclusions = ["IdentityN", "Split", "SplitV", "LSTMBlockCell", "TopK", "TopKV2", "Unpack", "BlockLSTM", "BlockLSTMV2", "NonMaxSuppressionV5"]
+    gto_make_op_cache = {}
+    for name in list(gddict.keys()):
+        new_node = ParsedTFNode()
+        new_node = copy.deepcopy(gddict[name])
+        new_inputs = []
+        for idx in range(len(new_node.inputs)):
+            if ":" in new_node.inputs[idx]:
+                input_node, input_index = new_node.inputs[idx].split(":")
+            else:
+                input_node = new_node.inputs[idx]
+                input_index = 0
+
+            if (
+                "_output_shapes" in gddict[input_node].attr
+                and len(gddict[input_node].attr["_output_shapes"]) > 1
+                and gddict[input_node].op not in exclusions
+            ) or (gddict[input_node].op in inclusions):
+                get_tuple_node_name = "gto_%s" % (get_tuple_op_var_index)
+                new_inputs.append(
+                    make_op(
+                        input_node,
+                        int(input_index),
+                        get_tuple_node_name,
+                        gto_make_op_cache,
+                    ).name
+                )
+                get_tuple_op_var_index += 1
+            else:
+                new_inputs.append(new_node.inputs[idx])
+        new_node.inputs = new_inputs
+
+        retdict[name] = new_node
+
+    for k, v in inserted_ops.items():
+        retdict[k] = v
+
+    # Force fix up the remaining node names by dropping the :
+    #
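+    # (Any ":i" suffix that survives to this point refers to a node we chose
+    # not to wrap in a get_tuple, so only the base node name is kept.)
+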
for k, v in retdict.items(): + for idx in range(len(v.inputs)): + if ":" in v.inputs[idx]: + nodename, nodeindex = v.inputs[idx].split(":") + v.inputs[idx] = nodename + + return retdict diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py new file mode 100644 index 00000000..ca06494c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py @@ -0,0 +1,63 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..basic_graph_ops import delete_node + + +def delete_fakequant_node_and_repair_graph(g, node): + inputs = node.inputs + # Delete const inputs of the fakequant op + for i in inputs: + if g[i].op == 'Const': + delete_node(g, i) + else: + non_const_input = i + outputs = node.outputs + # Append FakeQuant Op's outputs to its input node's outputs + g[non_const_input].outputs = [i for i in g[non_const_input].outputs if i != node.name] + g[non_const_input].outputs.extend(outputs) + # Modify the FakeQuant op's outputs to set FakeQuant op's parent node as the new input. + for i in outputs: + for j in range(len(g[i].inputs)): + if g[i].inputs[j] == node.name: + g[i].inputs[j] = non_const_input + delete_node(g, node) + +def quantization_pass_impl(fn): + all_quantization_ops = [i for i in fn.graph.values() if "FakeQuant" in i.op] + for node in all_quantization_ops: + is_const_input = True + for input in node.inputs: + if fn.graph[input].op != 'Const': + is_const_input = False + if not is_const_input and ('weights_quant' not in input): + # If activation quantization - + # Delete the FakeQuant op and its const inputs, + # Append FakeQuant Op's outputs to its input node's outputs, + # Modify the FakeQuant op's outputs to reflect the 'new' input node. + delete_fakequant_node_and_repair_graph(fn.graph, node) + else: + # If weight quantization - + # Add attributes of the FakeQuant op to its output's attr dict + for output in node.outputs: + output_node = fn.graph[output] + output_node.attr['quantize'] = True + output_node.attr['num_bits'] = node.attr['num_bits'] + output_node.attr['narrow_range'] = node.attr['narrow_range'] + output_node.attr['quantize_min'] = fn.graph[node.inputs[1]].value.val + output_node.attr['quantize_max'] = fn.graph[node.inputs[2]].value.val + +def quantization_pass(tfssa): + """ + Delete activation quantization ops and repair TF graph: + If the FakeQuant op is not connected to constant inputs (which means that the op performs activation + quantization) then delete that FakeQuant op and repair the graph. + Edit weight quantization ops: + If the FakeQuant op is connected to constant inputs then add its attributes to its output op so that parameters + min, max, narrow_range, num_bits are available (in addition to weights) to downstream ops for denoting and + supporting weight quantization. 
+ """ + for v in tfssa.functions.values(): + quantization_pass_impl(v) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py new file mode 100644 index 00000000..27be50b6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py @@ -0,0 +1,78 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +# A TensorArray is essentially a runtime vector with +# +# - an optional requirement "infer_shape" (True by default) that all Tensors +# stored within the vector have the same size/shape (inferred by the +# first element stored into the tensor) +# - an optional "element_shape" which requires all elements to have this +# exact shape. +# - an optional "clear_after_read" (True by default) where read of an index +# is destructive. (It doesn't *really* destroy, but just enables a particular +# optimization where the tensor memory can be reused). +# - An optional "dynamic_size" (False by default) where the vector is resized +# automatically at runtime +# +# The way it works is rather odd. To enforce "control dependency" constraints, +# a single float (flow) variable is passed between operations that write/read +# the TensorArray. Additionally, a "Resource" variable is also passed along +# which contains the actual handle to the TensorArray. +# +# The TensorArray can therefore also be passed around as as argument to while +# loops. Thus unlike a global "Variable", this really is better thought of as +# an additional type, a list[tensor]. +# +# See: +# +# https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/ops/tensor_array_ops.py +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array.h +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array.cc +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/ops/data_flow_ops.cc +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array_ops.cc +# +# The way we transform it is to introduce a new type. list[tensor] +# The flow variable is the list[tensor] since that is consistently passed through +# every operation. +# The 'resource' edges then gets passed as void. +# +# We would like to delete the resource edges, but once too many graph passes are +# performed, this becomes very difficult (since tuple shapes have to be updated). +# The ideal is to perform the resource edge deletion *BEFORE* any additional +# graph transformations. +# The conversion of the flow variable to list[tensor] can be performed during +# type inference. +# +# +# After this op: +# All nodes which take a TensorArray resource input will have the resource input +# edge deleted. +# +# TensorArrayV3 op will only have 1 output, a flow variable. 
+
+
+def tensor_array_resource_removal(gd):
+    # this should be called *BEFORE* introduction of tuples,
+    # and before output edges are added (for simplicity)
+    for k, node in gd.items():
+        if node.op.startswith("TensorArray") and node.op != "TensorArrayV3":
+            # generally the resource edge is the first edge
+            # input is resource, indices, flow
+            # output is generally flow
+            node.inputs = node.inputs[1:]
+
+        # TensorArrayV3 node outputs resource and flow
+        # shift all flow reads from TensorArray to output 0 of TensorArray
+        for i in range(len(node.inputs)):
+            if ":" in node.inputs[i]:
+                input_node, input_index = node.inputs[i].split(":")
+                input_index = int(input_index)
+            else:
+                input_node = node.inputs[i]
+                input_index = 0
+            if gd[input_node].op == "TensorArrayV3":
+                if input_index == 1:
+                    node.inputs[i] = "%s" % input_node
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py
new file mode 100644
index 00000000..9c977a9e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ..basic_graph_ops import delete_node, disconnect_vertex_ins
+
+
+# Variable nodes are not horribly complicated.
+#
+# There are Variable nodes which don't really do much on their own.
+#
+# To initialize, there is an additional Assign op which is just dangling away
+# on one side and assigns from "Variable/initial_value".
+#
+#  [Variable] --> Assign <-- Const (VariableName/initial_value)
+#      |
+#      |   ... rest of graph ...
+#      v
+#  ... Assign <---- New Values
+#  ... etc
+#
+# Reads of the variable go through an Identity node with the name
+# VariableName/read, which has the attribute _class:loc:@VariableName.
+#
+# Writes to the variable go through Assign nodes, which take as input
+# one Variable and one value, and have the attribute _class:loc:@VariableName.
+# Assign also returns the new value of the variable.
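+#
+# E.g. (hypothetical names): a read looks like
+#   v/read = Identity(v)
+# and a write looks like
+#   assign = Assign(v, new_value)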
+# +# +# +# - We transform Variable to a function attribute +# - We transform Assign ops to just "set_global" with attribute variable:VariableName +# - We transform Read ops to just "get_global" with attribute variable:VariableName +def remove_variable_node_impl(fn, tfssa): + variables = [var for var in fn.graph.values() if var.op == "VariableV2"] + assigns = [assign for assign in fn.graph.values() if assign.op == "Assign"] + reads = [ + read + for read in fn.graph.values() + if read.op == "Identity" + and len(read.inputs) == 1 + and fn.graph[read.inputs[0]].op == "VariableV2" + ] + + # find the variable initial values + variable_values = {} + additional_nodes_to_delete = [] + for v in variables: + v.parse_from_attr() + variable_values[v.name] = v.datatype() + for node in fn.graph.values(): + if ( + node.op == "Assign" + and node.inputs[0] == v.name + and node.inputs[1] == v.name + "/initial_value" + ): + variable_values[v.name] = fn.graph[node.inputs[1]].value + additional_nodes_to_delete += [node.name, node.inputs[1]] + for r in reads: + r.op = "get_global" + r.attr["variable"] = r.inputs[0] + disconnect_vertex_ins(fn.graph, r.name) + + # transform writes to set_global + for r in assigns: + r.op = "set_global" + r.attr["variable"] = r.inputs[0] + + for var in variables: + delete_node(fn.graph, var.name) + + for node in additional_nodes_to_delete: + delete_node(fn.graph, node) + + for k, v in variable_values.items(): + tfssa.variables[k] = v + + +def remove_variable_nodes(tfssa): + """ + This should be performed after constant propagation pass. + """ + for v in tfssa.functions.values(): + remove_variable_node_impl(v, tfssa) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py new file mode 100644 index 00000000..516963b6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py @@ -0,0 +1,233 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..parsed_tf_node import ParsedTFNode + + +class FindAllDownstreamTerminals: + # Find all nodes matching a particular function + # which is downstream reachable from a set of nodes. 
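+    #
+    # Hypothetical usage sketch:
+    #   exits = FindAllDownstreamTerminals(lambda n: n.op == "Exit") \
+    #       .visit_many(g, entry_node_names).get_result()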
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+        self.memo = {}
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        if node.name in self.memo:
+            return self
+        self.memo[node.name] = 1
+
+        if self.fn(node):
+            self.result.append(node.name)
+            return self
+
+        for i in node.outputs:
+            self.visit(g, g[i])
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindAllReachableNodes:
+    # Find all nodes reachable from a set of nodes which satisfy a criterion
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+        self.memo = {}
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        if node.name in self.memo:
+            return self
+        self.memo[node.name] = 1
+
+        if self.fn(node):
+            self.result.append(node.name)
+
+        for i in node.outputs:
+            self.visit(g, g[i])
+
+        for i in node.inputs:
+            self.visit(g, g[i])
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindImmediateUpstreamNodes:
+    # Find all nodes matching a particular predicate which are immediately
+    # upstream of (i.e. direct inputs to) a set of nodes
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        for i in node.inputs:
+            if self.fn(g[i]):
+                self.result.append(i)
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindImmediateDownstreamNodes:
+    # Find all nodes matching a particular predicate which are immediately
+    # downstream of (i.e. direct consumers of) a set of nodes
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        for i in node.outputs:
+            if self.fn(g[i]):
+                self.result.append(i)
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        self.result = list(set(self.result))
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindAllUpstreamTerminals:
+    # Find the "upstream frontier" of nodes passing some predicate.
+    # In other words, perform a pre-order traversal of a node and its inputs, collecting all nodes
+    # passing a given predicate as we go along. Terminate the search along a given branch as soon
+    # as a node is collected.
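+    #
+    # Hypothetical usage sketch:
+    #   consts = FindAllUpstreamTerminals(lambda n: n.op == "Const") \
+    #       .visit_many(g, output_node_names).get_result()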
+ def __init__(self, fn, control_dependencies=False): + self.result = [] + self.fn = fn + self.control_dependencies = control_dependencies + self.memo = {} + + def visit(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + if node.name in self.memo: + return self + self.memo[node.name] = 1 + + if self.fn(node): + self.result.append(node.name) + return self + + for i in node.inputs: + self.visit(g, g[i]) + if self.control_dependencies: + for i in node.control_inputs: + self.visit(g, g[i]) + return self + + def visit_many(self, g, nodes): + for i in nodes: + self.visit(g, i) + self.result = list(set(self.result)) + return self + + def get_result(self): + return self.result + + +class FindSubgraph: + # Find all nodes between a set of sources and a set of terminals + # Sources are not returned, but reached terminals are returned + def __init__(self, terminal_nodes): + self.memo = {} + self.terminal = terminal_nodes + + def visit_impl(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + if node.name in self.terminal: + self.memo[node.name] = True + return True + + if node.name in self.memo: + return self.memo[node.name] + + # add self to memo first otherwise cycles will not terminate + self.memo[node.name] = None + reachable = None + all_unreachable = True + for i in node.outputs + node.control_outputs: + visit_result = self.visit_impl(g, g[i]) + if visit_result == True: # pylint: disable=singleton-comparison + reachable = True + if visit_result != False: # pylint: disable=singleton-comparison + all_unreachable = False + + if reachable: + self.memo[node.name] = reachable + elif all_unreachable: + self.memo[node.name] = False + else: + self.memo[node.name] = None + + return reachable + + def visit(self, g, node): + self.visit_impl(g, node) + while True: + if None in iter(self.memo.values()): + revisit = [k for k, v in self.memo.items() if v is None] + self.memo = {k: v for k, v in self.memo.items() if v is not None} + for n in revisit: + self.visit_impl(g, n) + else: + break + return self + + def visit_many(self, g, nodes): + for node in nodes: + self.visit_impl(g, node) + while True: + if None in iter(self.memo.values()): + revisit = [k for k, v in self.memo.items() if v is None] + self.memo = {k: v for k, v in self.memo.items() if v is not None} + for n in revisit: + self.visit_impl(g, n) + else: + break + return self + + def get_result(self): + return [k for k, v in self.memo.items() if v] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py new file mode 100644 index 00000000..9b5d48a7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py @@ -0,0 +1,47 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +_TF_OPS_REGISTRY = {} + + +def register_tf_op(_func=None, tf_alias=None, override=False): + """ + Registration routine for TensorFlow operators + _func: (TF conversion function) [Default=None] + TF conversion function to register + + tf_alias: (List of string) [Default=None] + All other TF operators that should also be mapped to + current conversion routine. + e.g. 
Sort aliased with SortV1, SortV2 + All provided alias operators must not be registered previously. + + override: (Boolean) [Default=False] + If True, overrides earlier registration i.e. specified + operator and alias will start pointing to current conversion + function. + Otherwise, duplicate registration will error out. + """ + + def func_wrapper(func): + f_name = func.__name__ + + if not override and f_name in _TF_OPS_REGISTRY: + raise ValueError("TF op {} already registered.".format(f_name)) + _TF_OPS_REGISTRY[f_name] = func + # If tf_alias is provided, then all the functions mentioned as aliased + # are mapped to current function + if tf_alias is not None: + for name in tf_alias: + if not override and name in _TF_OPS_REGISTRY: + msg = "TF op alias {} already registered." + raise ValueError(msg.format(name)) + _TF_OPS_REGISTRY[name] = func + return func + + if _func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(_func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py new file mode 100644 index 00000000..44abe88b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py @@ -0,0 +1,549 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types + +from .basic_graph_ops import check_connections, const_determined_nodes +from .dot_visitor import DotVisitor +from .naming_utils import escape_fn_name + + +class ParsedNode: + """ + Node class for the tfssa graph. + + name: The name of the node (str) + op: The operation represented by the node (str) + datatype: The type of the node. 
(type)
+    value: The value of the node if available
+    inputs: The list of nodes which are inputs to this node (list[str])
+    control_inputs: The list of nodes which have to be executed before this node (list[str])
+    attr: The attributes of the node
+    outputs: The list of nodes which consume the result of this node (list[str])
+    control_outputs: The list of nodes which have to be executed after this node (list[str])
+    """
+
+    __slots__ = [
+        "name",
+        "op",
+        "datatype",
+        "value",
+        "inputs",
+        "control_inputs",
+        "outputs",
+        "control_outputs",
+        "attr",
+    ]
+
+    def __init__(self):
+        self.name = None
+        self.op = None
+        self.datatype = None
+        self.value = None
+        self.inputs = []
+        self.outputs = []
+        self.control_inputs = []
+        self.control_outputs = []
+        self.attr = {}
+
+    def __copy__(self):
+        return self._copy_impl(ParsedNode())
+
+    def _copy_impl(self, dest):
+        dest.name = self.name
+        dest.op = self.op
+        dest.datatype = self.datatype
+        dest.value = copy.deepcopy(self.value)
+        dest.inputs = self.inputs[:]
+        dest.control_inputs = self.control_inputs[:]
+        dest.outputs = self.outputs[:]
+        dest.control_outputs = self.control_outputs[:]
+        dest.attr = {k: copy.deepcopy(v) for k, v in self.attr.items()}
+        return dest
+
+    def copy(self):
+        return self.__copy__()
+
+
+class SSAFunction:
+    __slots__ = ["graph", "inputs", "input_types", "outputs", "output_types", "ret"]
+
+    def __init__(self, gdict=None, inputs=None, outputs=None, ret=None):
+        if gdict is None:
+            gdict = {}
+        self.graph = gdict
+        self.inputs = [] if inputs is None else inputs
+        self.outputs = [] if outputs is None else outputs
+        self.input_types = []
+        self.output_types = []
+
+        # ret is a mapping from the output arg names from `signature` to the
+        # outputs from `node_def` that should be returned by the function.
+        # Only used in TF2 for getting indices when generating get_tuple ops
+        # for control flow ops, because the mapping between the sub-graph's
+        # outputs and the control flow node's outputs is defined in the `ret`
+        # dict. See usages in tf_graph_pass: rewrite_control_flow_functions
+        # for details.
+        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/function.proto
+        self.ret = [] if ret is None else ret
+
+        check_connections(gdict)
+
+        # Respect TF inputs/outputs if given; otherwise, infer them from the
+        # graph. In the current implementation TF1 always infers from the
+        # graph, while TF2 respects the inputs/outputs provided.
+ if len(self.inputs) == 0 or len(self.outputs) == 0: + self.find_inputs_and_outputs() + else: + self.inputs, self.outputs = inputs, outputs + self.filter_inputs_and_outputs() + + def find_inputs_and_outputs(self): + # solve for input and output vars + sorted_keys = sorted(self.graph.keys()) + + # we use function entry and exit points if available + # otherwise we find graph entry and exit points + enters = [ + n.name for n in self.graph.values() if ("entry" in n.op or "Entry" in n.op) + ] + exits = [n.name for n in self.graph.values() if n.op in ("Return", "return")] + if len(enters) > 0 or len(exits) > 0: + assert len(enters) > 0 + assert len(exits) > 0 + self.inputs = enters + self.input_types = [self.graph[v].datatype for v in self.inputs] + self.outputs = exits + self.output_types = [self.graph[v].datatype for v in self.outputs] + else: + for k in sorted_keys: + v = self.graph[k] + if len(v.inputs) == 0 and v.op not in ["Const", "get_global", "NoOp"]: + self.inputs.append(k) + self.input_types.append(v.datatype) + elif len(v.inputs) != 0 and v.op == "Placeholder": + assert len(v.inputs) == 1, "This is not a PlaceholderWithDefault!" + self.inputs.append(k) + self.input_types.append(v.datatype) + if ( + len(v.outputs) == 0 + and len(v.control_outputs) == 0 + and v.op != "set_global" + ): + self.outputs.append(k) + self.output_types.append(v.datatype) + + def filter_inputs_and_outputs(self): + """ + Eliminate invalid input/output nodes in the given list. Should only be + invoked if the self.inputs and self.outputs are both provided and we + want to respect those when adding SSAFunctions. Only needed for TF2 for + now because of the needs to parse multiple functions in graph. TF1 only + has one "main" function. + """ + filtered_inputs = [] + filtered_outputs = [] + for k in self.inputs: + if k not in self.graph.keys(): + continue + v = self.graph[k] + if len(v.inputs) == 0 and v.op not in {"Const", "get_global", "NoOp"}: + filtered_inputs.append(k) + self.input_types.append(v.datatype) + elif len(v.inputs) != 0 and v.op == "Placeholder": + assert len(v.inputs) == 1, "This is not a PlaceholderWithDefault!" 
+ filtered_inputs.append(k) + self.input_types.append(v.datatype) + for k in self.outputs: + if k not in self.graph.keys(): + continue + v = self.graph[k] + filtered_outputs.append(k) + self.output_types.append(v.datatype) + self.inputs, self.outputs = filtered_inputs, filtered_outputs + + def __copy__(self): + ret = SSAFunction() + ret.inputs = self.inputs[:] + ret.input_types = self.input_types[:] + ret.outputs = self.outputs[:] + ret.output_types = self.output_types[:] + ret.graph = {k: copy.deepcopy(v) for k, v in self.graph.items()} + + return ret + + def copy(self): + return self.__copy__() + + +class NetworkEnsemble: + __slots__ = ["functions", "variables", "global_resource"] + + def __init__(self, instance=None): + self.functions = {} + self.variables = {} + self.global_resource = {} + + if isinstance(instance, NetworkEnsemble): + self.functions = instance.functions + self.variables = instance.variables + self.global_resource = instance.global_resource + elif instance is not None: + raise ValueError( + "Instance type {} not compatible with NetworkEnsemble".format( + type(instance) + ) + ) + + def rename_function(self, src_func, tgt_func): + """ + Renames the function with function name (src_func) to (tgt_func) + """ + if src_func not in self.functions: + logger.warning("Couldn't find function name (%s).", src_func) + return + if tgt_func in self.functions: + logger.warning("(%s) already exists in some function name.", tgt_func) + return + + self.functions[tgt_func] = self.functions.pop(src_func) + logger.debug( + "Successfully changed function name from (%s) to (%s)", src_func, tgt_func + ) + + def rename_node(self, src_node, tgt_node): + """ + Rename the node with node name (src_node) to (tgt_node). + Note that the name (tgt_node) cannot appear in the whole network, + not only the function it lies in. + """ + in_ssa = False + success = None + for func, tfssa in self.functions.items(): + if src_node in tfssa.graph: + in_ssa = True + if tgt_node in tfssa.graph: + logger.warning( + "(%s) already exists in function (%s).", tgt_node, func + ) + break + success = func + tfssa.graph[tgt_node] = tfssa.graph.pop(src_node) + # Replace other nodes' output dependency + for inp in tfssa.graph[tgt_node].inputs: + for idx, out in enumerate(tfssa.graph[inp].outputs): + if out == src_node: + tfssa.graph[inp].outputs[idx] = tgt_node + break + # Replace other nodes' control output dependency + for c_inp in tfssa.graph[tgt_node].control_inputs: + for idx, c_out in enumerate(tfssa.graph[c_inp].control_outputs): + if c_out == src_node: + tfssa.graph[c_inp].control_outputs[idx] = tgt_node + break + # Replace other nodes' input dependency + for out in tfssa.graph[tgt_node].outputs: + for idx, inp in enumerate(tfssa.graph[out].inputs): + if inp == src_node: + tfssa.graph[out].inputs[idx] = tgt_node + break + # Replace other nodes' control input dependency + for c_out in tfssa.graph[tgt_node].control_outputs: + for idx, c_inp in enumerate(tfssa.graph[c_out].control_inputs): + if c_inp == src_node: + tfssa.graph[c_out].control_inputs[idx] = tgt_node + break + break + + if not in_ssa: + logger.warning("Couldn't find (%s) in any functions", src_node) + if success is not None: + logger.debug( + "Changed (%s) to (%s) in function (%s)", src_node, tgt_node, success + ) + + def extract_subgraph(self, outputs, target_inputs=None, name=""): + """Add a new SSAFunction to the current NetworkEnsemble to produce the given outputs. + + Args: + outputs: The outputs the new function must produce. 
+ target_inputs: + name: The name of the new function to create. If unspecified, a name will be generated + by joining output names. + Returns: + The name of the new function. + """ + if not isinstance(outputs, list): + raise TypeError("Expected a list of output names for subgraph extraction") + + if name == "": + outputs.sort() + name = escape_fn_name("_".join(outputs)) + + if target_inputs is None: + target_inputs = [] + + def DFS_inputs(graph, node, vis): + vis.add(node) + if node in target_inputs: + return [node] + if ( + len(graph[node].inputs) == 0 + and len(graph[node].control_inputs) == 0 + and graph[node].op != "Const" + ): + return [node] + inputs = [] + for i in graph[node].inputs + graph[node].control_inputs: + if i in vis: + continue + inputs += DFS_inputs(graph, i, vis) + return inputs + + def DFS_set_globals(graph, node, vis): + vis.add(node) + set_globals = [] + if graph[node].op == "set_global": + set_globals.append(node) + for i in graph[node].outputs + graph[node].control_outputs: + if i in vis: + continue + set_globals += DFS_set_globals(graph, i, vis) + return set_globals + + for k in list(self.functions.keys()): + v = self.functions[k] + extract = [] + for output in outputs: + if output in v.graph: + extract.append(output) + + if len(extract) == 0: + continue + incl_nodes = set() + gdict = copy.deepcopy(v.graph) + inputs = [] + set_globals = [] + for output in extract: + inputs += DFS_inputs(gdict, output, incl_nodes) + vis_nodes = set() + for inp in inputs: + set_globals += DFS_set_globals(gdict, inp, vis_nodes) + for node in set_globals: + inputs += DFS_inputs(gdict, node, incl_nodes) + + for new_k, new_v in v.graph.items(): + if new_k not in incl_nodes: + del gdict[new_k] + continue + if new_k in target_inputs: + gdict[new_k].op = "Placeholder" + gdict[new_k].inputs = [inp for inp in new_v.inputs if inp in incl_nodes] + gdict[new_k].outputs = [ + out for out in new_v.outputs if out in incl_nodes + ] + gdict[new_k].control_inputs = [ + inp for inp in new_v.control_inputs if inp in incl_nodes + ] + gdict[new_k].control_outputs = [ + out for out in new_v.control_outputs if out in incl_nodes + ] + + for output in extract: + old_name = "preIdentity_" + output + output_node = copy.deepcopy(gdict[output]) + output_node.op = "Identity" + output_node.inputs = [old_name] + output_node.control_inputs = [] + output_node.outputs = [] + output_node.control_outputs = [] + + for inp in gdict[output].inputs: + for idx, out in enumerate(gdict[inp].outputs): + if out == output: + gdict[inp].outputs[idx] = old_name + for inp in gdict[output].control_inputs: + for idx, out in enumerate(gdict[inp].control_outputs): + if out == output: + gdict[inp].control_outputs[idx] = old_name + for out in gdict[output].outputs: + for idx, inp in enumerate(gdict[out].inputs): + if inp == output: + gdict[out].inputs[idx] = old_name + for out in gdict[output].control_outputs: + for idx, inp in enumerate(gdict[out].control_inputs): + if inp == output: + gdict[out].control_inputs[idx] = old_name + gdict[output].outputs.append(output) + gdict[output].name = old_name + gdict[old_name] = gdict[output] + gdict[output] = output_node + + self.functions[name] = SSAFunction(gdict) + return name + + def delete_subgraph(self, name): + """ + Delete the SSAfunction with function_name. 
+ """ + if name not in self.functions: + logger.warning("(%s) not in NetworkEnsemble", name) + return + del self.functions[name] + + def __repr__(self): + return str(self) + + def __str__(self): + ret = "" + for func, v in self.functions.items(): + if func.startswith("body_function_") or func.startswith("f_body_function_"): + continue + elif func.startswith("cond_function_") or func.startswith( + "f_cond_function_" + ): + continue + + ret += "Input Function Name: %s\n" % (func) + ret += " Inputs:\n" + for inp in v.inputs: + ret += " %s\n" % (inp) + ret += " Outputs:\n" + for out in v.outputs: + if out.startswith("fake_exit_"): + continue + ret += " %s\n" % (out) + return ret + + def get_dot_string( + self, name_and_op_style=False, annotation=False, highlight_debug_nodes=None + ): + """ + Return the dot string that can be used to show the whole graph + with dot. By default, the graph contains op and type. If + name_and_op_style is set, the graph will contain the name of the node + and the op instead. + + * Input nodes : yellow + * constant nodes : azure + * output nodes : goldenrod2 + * nodes with variable shaped tensors : cyan + * node names or op types that user wants to highlight: green + + Parameters + ---------- + name_and_op_style: bool + If set, graph contains only the name and the op. + + annotation: bool + Examples + -------- + >>> import graphviz + >>> graphviz.Source(network.get_dot_string()).view() + + """ + if highlight_debug_nodes is None: + highlight_debug_nodes = [] + function_names = sorted(self.functions.keys()) + + dotstring = "digraph g {\n" + "\tcompound=true;\n" + # find all tensor nodes with unknown sizes + ctr = 0 + for k in function_names: + const_nodes = const_determined_nodes(self.functions[k].graph) + unknown_sized_tensor_ops = [] + for v, n in self.functions[k].graph.items(): + if n.datatype is None or ( + n.datatype is not None + and types.is_tensor(n.datatype) + and ( + len(n.datatype.get_shape()) == 0 or -1 in n.datatype.get_shape() + ) + ): + unknown_sized_tensor_ops.append(v) + if n.op in highlight_debug_nodes: + highlight_debug_nodes.append(v) + + v = self.functions[k] + vis = DotVisitor(annotation) + vis.highlight_nodes(v.inputs, "yellow").highlight_nodes( + const_nodes, "azure2" + ).highlight_nodes(v.outputs, "goldenrod2").highlight_nodes( + unknown_sized_tensor_ops, "cyan2" + ) + if len(highlight_debug_nodes) > 0: + vis.highlight_nodes(highlight_debug_nodes, "green") + if name_and_op_style: + vis.labeller(lambda n: n.name + " (" + n.op + ")") + + res = vis.visit_all(v.graph, nodename_prefix=str(ctr)).get_result( + "subgraph", "cluster_" + k.replace("/", "_") + ) + dotstring += "\n".join("\t" + r for r in res.split("\n")) + "\n" + ctr += 1 + dotstring += "}" + return dotstring + + def add_function_with_prefix(self, fprefix, tfssa): + assert isinstance(tfssa, SSAFunction) + s = 0 + while fprefix + str(s) in self.functions: + s += 1 + self.functions[fprefix + str(s)] = tfssa + + def add_function(self, f, tfssa): + self.functions[f] = tfssa + + def __copy__(self): + ret = self.__class__() + ret.functions = self.functions + ret.variables = self.variables + ret.global_resource = self.global_resource + return ret + + def __deepcopy__(self, memo): + ret = self.__class__() + ret.functions = {k: copy.copy(v) for k, v in self.functions.items()} + ret.variables = {k: copy.copy(v) for k, v in self.variables.items()} + ret.global_resource = {k: copy.copy(v) for k, v in self.global_resource.items()} + return ret + + def copy(self): + return self.__copy__() + + def 
_find_free_name(self, prefix):
+        idx = 0
+        while True:
+            name = prefix + str(idx)
+            found = False
+            for v in self.functions.values():
+                if name in v.graph:
+                    found = True
+                    break
+            if found:
+                idx += 1
+            else:
+                return name
+
+    def get_image_format(self):
+        """
+        Iterates over the graph and returns the input format (`NCHW` or `NHWC`)
+        if the input is of type Image, otherwise `None`.
+        """
+        for fn_key in list(self.functions.keys()):
+            graph = self.functions[fn_key].graph
+
+            for name in graph:
+                node = graph[name]
+                if (
+                    node.attr.get("data_format", None) == "NHWC"
+                    or node.attr.get("data_format") == "NHWC_format_inserted"
+                ):
+                    return "NHWC"
+                elif node.attr.get("data_format", None) == "NCHW":
+                    return "NCHW"
+        return None
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py
new file mode 100644
index 00000000..fc51ca1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ....._deps import _HAS_TF_2
+
+if _HAS_TF_2:
+    # importing these causes all its imports to be registered
+    from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import \
+        register_tf_op
+
+    from . import ops
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py
new file mode 100644
index 00000000..b4876c18
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import \
+    simple_topsort
+from coremltools.converters.mil.frontend.tensorflow.converter import \
+    TFConverter
+
+
+class TF2Converter(TFConverter):
+    def _get_stack(self, tfssa, root="main"):
+        """
+        Overwrite TFConverter._get_stack() as TF2 generates different sub-graphs.
+        """
+
+        # We're trying to get an order in which to loop through the graphs.
+        # This is NOT necessarily a DAG.
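+        # E.g. (hypothetical): if "main" contains a While op whose body/cond
+        # sub-graphs are "f_body"/"f_cond", we build
+        #     dep = {"main": [], "f_body": ["main"], "f_cond": ["main"]}
+        # and simple_topsort(dep) yields an order in which the root (which, as
+        # asserted below, has no dependencies) comes first.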
+        dep = {x: [] for x in tfssa.functions}
+        for fname in tfssa.functions:
+            for node in tfssa.functions[fname].graph.values():
+                func_x, func_y = None, None
+
+                if node.op in {"StatelessIf", "If"}:
+                    func_x = node.attr.get("then_branch")
+                    func_y = node.attr.get("else_branch")
+                elif node.op in {"StatelessWhile", "While"}:
+                    func_x = node.attr.get("body")
+                    func_y = node.attr.get("cond")
+
+                if func_x and fname not in dep[func_x]:
+                    dep[func_x].append(fname)
+                if func_y and fname not in dep[func_y]:
+                    dep[func_y].append(fname)
+
+        assert len(dep[root]) == 0
+        graph_stack = simple_topsort(dep)
+
+        return graph_stack
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py
new file mode 100644
index 00000000..e7f2504b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py
@@ -0,0 +1,346 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os.path as _os_path
+from distutils.version import StrictVersion as _StrictVersion
+
+import tensorflow as _tf
+from tensorflow.lite.python.util import \
+    get_grappler_config as _get_grappler_config
+from tensorflow.lite.python.util import \
+    run_graph_optimizations as _run_graph_optimizations
+from tensorflow.python.eager import context
+from tensorflow.python.framework import dtypes as _dtypes
+from tensorflow.python.framework.convert_to_constants import \
+    convert_variables_to_constants_v2 as _convert_variables_to_constants_v2
+from tensorflow.python.framework.function_def_to_graph import \
+    function_def_to_graph as _function_def_to_graph
+from tensorflow.python.keras.saving import saving_utils as _saving_utils
+from tqdm import tqdm as _tqdm
+
+from coremltools import _logger as logger
+from coremltools._deps import _get_version
+from coremltools.converters.mil.frontend.tensorflow2.tf_graph_pass import (
+    flatten_sub_graph_namespaces, rewrite_control_flow_functions)
+from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import \
+    fill_outputs
+from coremltools.converters.mil.frontend.tensorflow.load import TFLoader
+from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \
+    ParsedTFNode
+from coremltools.converters.mil.frontend.tensorflow.tf_graph_pass import (
+    constant_propagation, delete_disconnected_nodes,
+    delete_unnecessary_constant_nodes, fuse_dilation_conv, insert_get_tuple,
+    remove_variable_nodes, tensor_array_resource_removal)
+from coremltools.converters.mil.frontend.tensorflow.tfssa import (
+    NetworkEnsemble, SSAFunction)
+from coremltools.converters.mil.input_types import TensorType
+
+from .converter import TF2Converter
+
+
+class TF2Loader(TFLoader):
+    """
+    These are the steps the TF2Loader takes to load and convert a TF2 model:
+    1. Get the concrete functions from the Keras model (only 1 concrete function is supported now)
+    2. Get the TensorFlow graphdef from the concrete function by
+       (a) calling TensorFlow's convert_variables_to_constants_v2 API to freeze variables into constants
+       (b) running grappler optimizations on the graphdef ("constfold", "dependency", "debug_stripper")
+    3. Extract the sub-graph based on "outputs"
+    4. Construct the tfssa IR from the graphdef
+    5. Run tfssa graph passes
+    6. Convert the tfssa into a MIL program via TF2Converter
+    """
+    def __init__(self, model, debug=False, **kwargs):
+        """
+        TensorFlow 2.x model loader.
+
+        Parameters
+        ----------
+        model: Model created with TensorFlow 2.x
+            One of the following model formats:
+            - TensorFlow tf.keras.Model object or HDF5 (.h5 or .hdf5) file path
+            - TensorFlow SavedModel directory path
+            - TensorFlow list of concrete function(s)
+        debug: bool, optional. Defaults to False.
+            This flag should generally be False except for debugging purposes
+            for diagnosing conversion errors. Setting this flag to True will
+            cause graph pass errors to be ignored, forcefully returning a
+            NetworkEnsemble object.
+        kwargs: dict(str, Any), optional
+            Dictionary of additional arguments.
+        """
+        TFLoader.__init__(self, model, debug, **kwargs)
+
+        """
+        tf_ssa graph passes
+        Notes:
+        - "flatten_while_loop_namespaces" should come after "constant_propagation",
+          as it changes the node names that the constant propagation pass relies
+          on to perform session.run(); renamed nodes are not understandable to TF.
+        """
+        self.tfssa_passes = [
+            constant_propagation,
+            delete_unnecessary_constant_nodes,  # delete_unnecessary_constant_nodes must come right after constant_propagation
+            rewrite_control_flow_functions,
+            flatten_sub_graph_namespaces,
+            remove_variable_nodes,
+            fuse_dilation_conv,
+        ]
+
+    def _get_concrete_functions_and_graph_def(self):
+        msg = (
+            "Expected model format: [SavedModel | [concrete_function] | "
+            "tf.keras.Model | .h5 | GraphDef], got {}"
+        )
+        if (
+            isinstance(self.model, list)
+            or isinstance(self.model, _tf.keras.Model)
+            or isinstance(self.model, str)
+            or isinstance(self.model, _tf.compat.v1.GraphDef)
+        ):
+            cfs = []
+            if isinstance(self.model, list):
+                cfs = self.model
+            if isinstance(self.model, _tf.keras.Model):
+                cfs = self._concrete_fn_from_tf_keras_or_h5(self.model)
+            elif isinstance(self.model, _tf.compat.v1.GraphDef):
+                return None, self.model
+            elif isinstance(self.model, str):
+                if not _os_path.exists(self.model):
+                    raise ValueError(
+                        'Input model "{}" does not exist'.format(self.model)
+                    )
+                elif _os_path.isfile(self.model) \
+                        and (self.model.endswith(".h5") or self.model.endswith(".hdf5")):
+                    cfs = self._concrete_fn_from_tf_keras_or_h5(self.model)
+                elif _os_path.isdir(self.model):
+                    saved_model = _tf.saved_model.load(self.model)
+                    sv = saved_model.signatures.values()
+                    cfs = sv if isinstance(sv, list) else list(sv)
+                else:
+                    raise NotImplementedError(msg.format(self.model))
+        else:
+            raise NotImplementedError(msg.format(self.model))
+
+        graph_def = self._graph_def_from_concrete_fn(cfs)
+
+        return cfs, graph_def
+
+    def _graph_def_from_model(self, output_names=None):
+        """Overwrites TFLoader._graph_def_from_model()"""
+        cfs, graph_def = self._get_concrete_functions_and_graph_def()
+        if isinstance(self.model, _tf.keras.Model) and self.kwargs.get("outputs", None) is None:
+            # For a Keras model, check whether the outputs were provided by the user.
+ # If not, we make sure the coreml model outputs order is the same as + # the original keras model + cf = cfs[0] + output_names = [] + for key in cf.structured_outputs: + output_names.append(cf.structured_outputs[key].name.split(":")[0]) + self.kwargs["outputs"] = [TensorType(name=name) for name in output_names] + return self.extract_sub_graph(graph_def, output_names) + + def _tf_ssa_from_graph_def(self, fn_name="main"): + """Overwrites TFLoader._tf_ssa_from_graph_def()""" + with _tf.Graph().as_default() as tf_graph: + _tf.graph_util.import_graph_def(self._graph_def, name="") + + # sub-graphs' input shapes are required for extracting sub-graphs + sg_input_shapes = self._populate_sub_graph_input_shapes( + tf_graph, tf_graph._functions + ) + + # get graph_dict and sub-graphs' inputs / outputs + graph_dict, inputs, outputs, ret = self._dict_from_graph_def( + tf_graph, fn_name, sg_input_shapes + ) + + tf_ssa = NetworkEnsemble() + for name, graph in graph_dict.items(): + tensor_array_resource_removal(graph) + graph = insert_get_tuple(graph) + graph = fill_outputs(graph) + if name == "main": # skip for sub-graphs as input can be also output + delete_disconnected_nodes(graph) + tf_ssa.functions[name] = SSAFunction( + graph, inputs=inputs[name], outputs=outputs[name], ret=ret[name] + ) + + return tf_ssa + + def _run_tf_ssa_passes(self): + tf_passes = self.tfssa_passes + + if self.debug: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + try: + tf_pass(self._tf_ssa) + except Exception as e: + logger.exception('Exception in pass "{}": {}'.format(tf_pass, e)) + logger.info("Ignoring exception and continuing to next pass") + + else: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + tf_pass(self._tf_ssa) + + if self.debug: + import graphviz + + dot_string = self._tf_ssa.get_dot_string( + annotation=True, name_and_op_style=True, highlight_debug_nodes=[] + ) + graphviz.Source(dot_string).view( + filename="/tmp/ssa_after_tf_passes", cleanup=True + ) + + def _program_from_tf_ssa(self): + self._run_tf_ssa_passes() + converter = TF2Converter( + tfssa=self._tf_ssa, + inputs=self.kwargs["inputs"], + outputs=self.kwargs["outputs"], + opset_version=self.kwargs["specification_version"], + ) + return converter.convert() + + def _populate_sub_graph_input_shapes(self, graph, graph_fns): + """ + Populate function (sub-graph) input shapes from control flow op's inputs + Note that the functions (sub-graphs) are not nested but the control flow + ops are nested. The input shapes are used to extract sub-graphs from the + parent graph (as the input of function_def_to_graph). + + Parameter + --------- + graph: tf.Graph + TensorFlow graph. + graph_fns: list of graph functions. + List of TensorFlow graph functions. + + Returns + ------- + sg_input_shapes: dict(str: list) + Dictionary of function (sub-graph) name and input shape pairs. 
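+            E.g. (hypothetical): {"while_body_12": [TensorShape([2, 3]), ...]}.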
+ """ + sg_input_shapes = {} + sub_graphs = [] + for op in graph.get_operations(): + if op.type not in {"StatelessIf", "If", "StatelessWhile", "While"}: + continue + + sg1, sg2 = None, None + if op.type in {"StatelessIf", "If"}: + sg1 = op.get_attr("then_branch").name + sg2 = op.get_attr("else_branch").name + if op.type in {"StatelessWhile", "While"}: + sg1 = op.get_attr("cond").name + sg2 = op.get_attr("body").name + + # memorize input shapes for sub-graph conversions + op_input_shapes = [i.get_shape() for i in op.inputs] + sg_input_shapes.update({sg1: op_input_shapes, sg2: op_input_shapes}) + sub_graphs += [sg1, sg2] + + for name in sub_graphs: + sg = graph_fns.get(name) + fn_def = context.get_function_def(name) + op_input_shapes = sg_input_shapes[name] + op_input_shapes = op_input_shapes[-len(fn_def.signature.input_arg) :] + fn_graph = _function_def_to_graph(fn_def, input_shapes=op_input_shapes) + sg_input_shapes.update( + self._populate_sub_graph_input_shapes(fn_graph, graph_fns) + ) + + return sg_input_shapes + + @staticmethod + def _dict_from_graph_def(graph, fn_name="main", sg_input_shapes=None): + """ + Loads a tf.Graph and transform it into dictionary of ParsedTFNodes. + Potentially contains multiple functions, in such case, recursively + resolve functions (sub-graphs). + + Parameters + ---------- + graph: tf.Graph + TensorFlow graph. + fn_name: str, optional, defaults to 'main' + Function name of the graph. + sg_input_shapes: dict(str: list) + Dictionary of name and input shapes for functions / sub-graphs. + + Returns + ------- + dict(str: dict(str: ParsedTFNode)) + Dictionary of function name and dictionary of node name and + ParsedTFNode object. + """ + graph_dict = {fn_name: {}} + graph_inputs = {fn_name: []} + graph_outputs = {fn_name: []} + graph_ret = {fn_name: {}} + + for op in graph.get_operations(): + graph_dict[fn_name].update({op.name: ParsedTFNode(op.node_def)}) + + for name, sg in graph._functions.items(): + sg_def = context.get_function_def(name) + if name in sg_input_shapes: + input_shapes = sg_input_shapes[name] + input_shapes = input_shapes[-len(sg_def.signature.input_arg):] + fn_graph = _function_def_to_graph(sg_def, input_shapes=input_shapes) + + graph_dict.update( + TF2Loader._dict_from_graph_def(fn_graph, name, sg_input_shapes)[0] + ) + graph_inputs.update({name: [t.name.split(":")[0] for t in fn_graph.inputs]}) + graph_outputs.update( + {name: [t.name.split(":")[0] for t in fn_graph.outputs]} + ) + + # ret is a mapping from the output arg names from `signature` to the + # outputs from `node_def` that should be returned by the function. 
+ graph_ret.update({name: sg_def.ret}) + + return graph_dict, graph_inputs, graph_outputs, graph_ret + + @staticmethod + def _concrete_fn_from_tf_keras_or_h5(keras_model): + if not isinstance(keras_model, _tf.keras.Model): + keras_model = _tf.keras.models.load_model(keras_model) + input_signature = _saving_utils.model_input_signature( + keras_model, keep_original_batch_size=True + ) + fn = _saving_utils.trace_model_call(keras_model, input_signature) + return [fn.get_concrete_function()] + + def _graph_def_from_concrete_fn(self, cfs): + if len(cfs) != 1: + raise NotImplementedError("Only a single concrete function is supported.") + + if _get_version(_tf.__version__) >= _StrictVersion("2.2.0"): + frozen_fn = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False, aggressive_inlining=True) + else: + frozen_fn = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False) + graph_def = frozen_fn.graph.as_graph_def(add_shapes=True) + + # run a Grappler's constant folding pass. + fn_inputs = [t for t in frozen_fn.inputs if t.dtype != _dtypes.resource] + grappler_optimizers_list = self._get_grappler_optimizers_list() + graph_def = _run_graph_optimizations( + graph_def, + fn_inputs, + frozen_fn.outputs, + config=_get_grappler_config(grappler_optimizers_list), + graph=frozen_fn.graph, + ) + return graph_def + + def _get_grappler_optimizers_list(self): + return ["constfold", "dependency", "debug_stripper"] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py new file mode 100644 index 00000000..225a2b0b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py @@ -0,0 +1,235 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +# TF 2.x now imports and registers all TF 1.x op against the new registry +# (separated from TF 1.x registry). 
Overwriting might be needed in case the op
+# semantics differ between TF 1.x and TF 2.x.
+from coremltools.converters.mil.frontend.tensorflow.convert_utils import \
+    convert_graph
+from coremltools.converters.mil.frontend.tensorflow.ops import (
+    _transpose_NCDHW_to_NDHWC, _transpose_NCHW_to_NHWC,
+    _transpose_NDHWC_to_NCDHW, _transpose_NHWC_to_NCHW)
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import \
+    register_tf_op
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.types import builtin_to_string
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+
+@register_tf_op(override=True, tf_alias=["FusedBatchNorm"])
+def FusedBatchNormV3(context, node):
+
+    # helper function that adds the batch norm layer
+    def _add_batch_norm(x, mean, variance, scale, offset, epsilon, name):
+
+        if mean.shape[0] != 0 and variance.shape[0] != 0:
+            # In this case, we can use mb.batch_norm directly
+            x = mb.batch_norm(
+                x=x, mean=mean, variance=variance, gamma=scale, beta=offset, epsilon=epsilon, name=name
+            )
+        else:
+            # In this case, we need to manually compute the batch_norm
+            axes = [axis for axis in range(x.rank) if axis != 1]
+            mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
+            num = mb.sub(x=x, y=mean)
+            square = mb.mul(x=num, y=num)
+            variance = mb.reduce_mean(x=square, axes=axes, keep_dims=True)
+            variance_add_epsilon = mb.add(x=variance, y=epsilon)
+            sqrt = mb.sqrt(x=variance_add_epsilon)
+            x = mb.real_div(x=num, y=sqrt)
+
+            shape = [1] * x.rank
+            shape[1] = -1 if any_symbolic(scale.shape) else scale.shape[0]
+            scale_reshape = mb.reshape(x=scale, shape=shape)
+            offset_reshape = mb.reshape(x=offset, shape=shape)
+
+            x = mb.mul(x=x, y=scale_reshape)
+            x = mb.add(x=x, y=offset_reshape, name=name)
+
+        return x
+
+    # Get attributes
+    data_format = node.attr.get("data_format", "NHWC")
+    epsilon = node.attr.get("epsilon", None)
+
+    # Get inputs
+    x = context[node.inputs[0]]
+    scale = context[node.inputs[1]]
+    offset = context[node.inputs[2]]
+    mean = context[node.inputs[3]]
+    variance = context[node.inputs[4]]
+    input_dtype = x.dtype
+
+    batch_norm_name = node.name + "_nchw" if data_format == "NHWC" else node.name
+
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+    elif data_format == "NDHWC":
+        x = _transpose_NDHWC_to_NCDHW(x)
+
+    x = mb.cast(x=x, dtype=builtin_to_string(mean.dtype))
+
+    x = _add_batch_norm(x, mean, variance, scale, offset, epsilon, batch_norm_name)
+
+    if data_format == "NHWC":
+        x = _transpose_NCHW_to_NHWC(x, node.name + "_to_NHWC")
+    elif data_format == "NDHWC":
+        x = _transpose_NCDHW_to_NDHWC(x, node.name + "_to_NDHWC")
+
+    x = mb.cast(x=x, dtype=builtin_to_string(input_dtype), name=node.name)
+
+    # Inference-only batch norm does not have meaningful outputs for
+    # batch_mean, batch_variance etc.
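+    # (The computation above is the standard inference-time normalization,
+    #  y = gamma * (x - mean) / sqrt(variance + epsilon) + beta, applied
+    #  per channel.)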
+ context.add(node.name, x) + + +@register_tf_op(tf_alias=["If"], override=True) +def StatelessIf(context, node): + pred = context[node.inputs[0]][0] + then_graph = context.get_graph(node.attr.get("then_branch")) + else_graph = context.get_graph(node.attr.get("else_branch")) + + def then_fn(): + context.stack_func_inputs(context[node.inputs[0]]) + then_output_var = convert_graph(context, then_graph) + context.unstack_func_inputs() + return then_output_var + + def else_fn(): + context.stack_func_inputs(context[node.inputs[0]]) + else_output_var = convert_graph(context, else_graph) + context.unstack_func_inputs() + return else_output_var + + x = mb.cond(pred=pred, _true_fn=then_fn, _false_fn=else_fn, name=node.name) + + # wraps x as tuple for get_tuple that always follow the cond node. + x = (x,) if not isinstance(x, (tuple, list)) else x + + context.add(node.name, x) + + +@register_tf_op(tf_alias=["While"], override=True) +def StatelessWhile(context, node): + # inputs are loop_counter, max_iterations, [loop_vars] + loop_vars = context[node.inputs[0]][2:] + + cond_graph = context.get_graph(node.attr.get("cond")) + body_graph = context.get_graph(node.attr.get("body")) + + def cond(*loop_vars): + context.stack_func_inputs(loop_vars) + cond_output_vars = convert_graph(context, cond_graph) + context.unstack_func_inputs() + return cond_output_vars + + def body(*loop_vars): + context.stack_func_inputs(loop_vars) + body_output_vars = convert_graph(context, body_graph) + context.unstack_func_inputs() + return body_output_vars + + x = mb.while_loop(_cond=cond, _body=body, loop_vars=loop_vars, name=node.name) + + # wraps x as tuple for get_tuple that always follow the while node. + x = (x,) if not isinstance(x, (tuple, list)) else x + + context.add(node.name, x) + + +@register_tf_op +def TensorListFromTensor(context, node): + value = context[node.inputs[0]] + element_shape = context[node.inputs[1]] + element_dtype = node.attr.get("element_dtype") + dtype_str = builtin_to_string(element_dtype) + + length = mb.shape(x=value) + length = mb.slice_by_index(x=length, begin=[0], end=[1], squeeze_mask=[True]) + + if element_shape is not None and all(_np.atleast_1d(element_shape.val) != -1): + ls = mb.make_list(init_length=length, + elem_shape=tuple(element_shape.val.tolist()), dtype=dtype_str) + else: + ls = mb.tf_make_list(init_length=length, dtype=dtype_str) + + indices = mb.range_1d(end=length, start=0, step=1) + ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorListGather(context, node): + ls = context[node.inputs[0]] + indices = context[node.inputs[1]] + tensor = mb.list_gather(ls=ls, indices=indices, name=node.name) + context.add(node.name, tensor) + + +@register_tf_op +def TensorListGetItem(context, node): + ls = context[node.inputs[0]] + index = context[node.inputs[1]] + new_ls = mb.list_read(ls=ls, index=index, name=node.name) + context.add(node.name, new_ls) + + +@register_tf_op +def TensorListLength(context, node): + ls = context[node.inputs[0]] + length = mb.list_length(ls=ls, name=node.name) + context.add(node.name, length) + + +@register_tf_op +def TensorListReserve(context, node): + element_shape = context[node.inputs[0]] + num_elements = context[node.inputs[1]] + element_dtype = node.attr.get("element_dtype") + dtype = builtin_to_string(element_dtype) + + if element_shape is not None and all(_np.atleast_1d(element_shape.val) != -1): + ls = mb.make_list( + init_length=num_elements, + 
+            elem_shape=tuple(element_shape.val.tolist()),
+            dynamic_length=num_elements.val is None,
+            dtype=dtype,
+            name=node.name,
+        )
+    else:
+        ls = mb.tf_make_list(init_length=num_elements,
+                             dtype=dtype,
+                             dynamic_length=num_elements.val is None,
+                             name=node.name)
+    context.add(node.name, ls)
+
+
+@register_tf_op
+def TensorListScatterIntoExistingList(context, node):
+    ls = context[node.inputs[0]]
+    value = context[node.inputs[1]]
+    indices = context[node.inputs[2]]
+    ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name)
+    context.add(node.name, ls)
+
+
+@register_tf_op
+def TensorListSetItem(context, node):
+    ls = context[node.inputs[0]]
+    index = context[node.inputs[1]]
+    value = context[node.inputs[2]]
+    new_ls = mb.list_write(ls=ls, index=index, value=value, name=node.name)
+    context.add(node.name, new_ls)
+
+
+@register_tf_op
+def TensorListStack(context, node):
+    ls = context[node.inputs[0]]
+    length = mb.list_length(ls=ls)
+    indices = mb.range_1d(end=length, start=0, step=1)
+    x = mb.list_gather(ls=ls, indices=indices, name=node.name)
+    context.add(node.name, x)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py
new file mode 100644
index 00000000..91ca84e4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import remove_vacuous_cond
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py
new file mode 100644
index 00000000..65815792
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@block_context_manager
+def _remove_vacuous_cond_block(block):
+    num_changes = 0
+    for op in list(block.operations):
+        for b in op.blocks:
+            num_changes += _remove_vacuous_cond_block(b)
+
+        if op.op_type != "cond":
+            continue
+
+        then_ops = op.blocks[0].operations
+        else_ops = op.blocks[1].operations
+
+        if len(then_ops) > 1 or len(else_ops) > 1:
+            continue
+
+        # Pattern 1: a dynamic-length TensorList generates this pattern. See
+        # the conversion functions of the TensorList* ops for details. TF2's
+        # graph contains a tf.cond op with 2 sub-graphs; the condition is
+        # either a `less_equal` or a `greater_equal` op. One sub-graph
+        # contains only an identity op forwarding the original TensorList;
+        # the other contains a TensorListResize op that generates a new
+        # TensorList. But in the backend, list length is handled dynamically
+        # in list_write/scatter, and thus the entire tf.cond and its
+        # sub-graphs can be removed.
+        if len(then_ops) == 0 and len(else_ops) == 0:
+            if op.pred.op.op_type not in {"less_equal", "greater_equal"}:
+                continue
+
+            # the cond op must have a pred; inspect the ops producing its operands
+            pred_x = op.pred.op.x.op
+            pred_y = op.pred.op.y.op
+
+            if pred_x is None and pred_y is None:
+                continue
+
+            if op.pred.op.op_type == "less_equal":
+                if pred_x.op_type != "list_length":
+                    continue
+                new_var = pred_x.ls
+
+            else:  # op.pred.op.op_type == 'greater_equal'
+                if pred_y.op_type != "list_length":
+                    continue
+                new_var = pred_y.ls
+
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=new_var
+            )
+            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
+            num_changes += 1
+
+        # Pattern 2: both the then and else branches contain exactly 1 identity op
+        if len(then_ops) == 1 and len(else_ops) == 1:
+            if then_ops[0].op_type != "identity" or else_ops[0].op_type != "identity":
+                continue
+            if then_ops[0].x != else_ops[0].x:
+                continue
+
+            new_var = mb.identity(x=then_ops[0].x, before_op=op, name=op.name)
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=new_var
+            )
+            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
+            num_changes += 1
+
+    return num_changes
+
+
+@register_pass(namespace="tensorflow2")
+class remove_vacuous_cond(AbstractGraphPass):
+    """
+    Remove a cond op and its sub-graphs when both the then and else branches
+    produce an identity. One example use case is the TensorListReverse op: in
+    Core ML, we dynamically resize in write operations, and thus both branches
+    of the cond op will be a skip (identity) op.
+
+    Given:
+
+    main(%a: (1, bool),
+         %b: (2, 3, fp32)) {
+      block0() {
+        %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
+        %cond_0: (2, 3, fp32) = cond(pred=%squeeze_0, name="cond_0")
+          cond_0_true() {
+            %identity_0: (2, 3, fp32) = identity(x=%b, name="identity_0")
+          } -> (%identity_0)
+          cond_0_false() {
+            %identity_1: (2, 3, fp32) = identity(x=%b, name="identity_1")
+          } -> (%identity_1)
+      } -> (%cond_0)
+    }
+
+    Result:
+
+    main(%a: (1, bool),
+         %b: (2, 3, fp32)) {
+      block0() {
+        %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
+        %cond_0: (2, 3, fp32) = identity(x=%b, name="cond_0")
+      } -> (%cond_0)
+    }
+    """
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            num_changes = _remove_vacuous_cond_block(f)
+            msg = "remove_vacuous_cond: changed {} ops in function '{}'"
+            logger.info(msg.format(num_changes, f_name))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py
new file mode 100644
index 00000000..8b9ec829
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import (assert_model_is_valid, + assert_same_output_names) + +np.random.seed(1984) +validate_model = True + + +def test_remove_vacuous_cond(): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1,), dtype=types.bool), + mb.TensorSpec(shape=(2, 3)), + ] + ) + def prog(a, b): + def then_branch(): + return mb.identity(x=b) + + def else_branch(): + return mb.identity(x=b) + + pred = mb.squeeze(x=a) + return mb.cond(pred=pred, _true_fn=then_branch, _false_fn=else_branch) + + cond_op = prog.find_ops(op_type="cond", exactly_one=True)[0] + original_cond_op_name = cond_op.name + assert len(cond_op.blocks[0].operations) == 1 + assert len(cond_op.blocks[1].operations) == 1 + assert cond_op.blocks[0].operations[0].op_type == "identity" + assert cond_op.blocks[1].operations[0].op_type == "identity" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["tensorflow2::remove_vacuous_cond"](prog) + assert_same_output_names(prev_prog, prog) + + cond_op = prog.find_ops(op_type="cond") + assert len(cond_op) == 0 + identity_op = prog.find_ops(prefix=original_cond_op_name, exactly_one=True)[0] + assert identity_op.op_type == "identity" + + if validate_model: + assert_model_is_valid(prog, {"a": (1,), "b": (2, 3)}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py new file mode 100644 index 00000000..a004ed89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py @@ -0,0 +1,437 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import platform +import urllib +from io import BytesIO +from os import chdir, getcwd +from shutil import rmtree +from tempfile import mkdtemp + +import numpy as np +import pytest +import requests +from PIL import Image + +import coremltools as ct +from coremltools.converters.mil.mil import types + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers + + +@pytest.fixture +def int32_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.int32) + out = tf.add(x, tf.constant(5, dtype=tf.int32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_input_model_add_op(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5.5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_input_model_relu_ops(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + x1 = tf.keras.layers.ReLU()(x) + out = tf.keras.layers.ReLU(name="output")(x1) + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def int64_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.int64) + out = tf.add(x, tf.constant(5, dtype=tf.int64), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_two_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input1", dtype=tf.float32) + y = tf.keras.Input(batch_input_shape=(10, 20), name="input2", dtype=tf.float32) + out = tf.add(x, y, name="output") + return tf.keras.Model(inputs=[x, y], outputs=out) + +@pytest.fixture +def float32_two_output_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + y = tf.nn.relu(x) + out2 = tf.nn.relu6(x, name="output2") + out1 = tf.nn.relu(y, name="output1") + return tf.keras.Model(inputs=x, outputs=[out1, out2]) + +@pytest.fixture +def rank3_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 3), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_input_model_with_channel_first_output(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 3), name="input", dtype=tf.float32) + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_grayscale_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 1), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_grayscale_input_model_with_channel_first_output(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 1), name="input", dtype=tf.float32) + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return 
tf.keras.Model(inputs=x, outputs=out)
+
+@pytest.fixture
+def linear_model():
+    # this model will test the fuse_matmul_weight_bias pass
+    x = tf.keras.Input(batch_input_shape=(1, 10), name="input", dtype=tf.float32)
+    y = tf.keras.layers.Dense(4)(x)
+    y = tf.add(y, tf.constant([1, 2, 3, 4], shape=(4,), dtype=tf.float32))
+    out = tf.nn.relu(y)
+    return tf.keras.Model(inputs=x, outputs=out)
+
+
+
+#################################################################################
+# Note: all tests are also used as examples in https://coremltools.readme.io/docs
+# as a reference.
+# Whenever any of the following tests fails, we should update the API
+# documentation.
+#################################################################################
+
+class TestTensorFlow2ConverterExamples:
+    def setup_class(self):
+        self._cwd = getcwd()
+        self._temp_dir = mkdtemp()
+        # step into the temp directory as the working directory
+        # to make the user-facing examples cleaner
+        chdir(self._temp_dir)
+
+        # create toy models for conversion examples
+        # write a toy tf.keras HDF5 model
+        tf_keras_model = tf.keras.Sequential(
+            [
+                tf.keras.layers.Flatten(input_shape=(28, 28)),
+                tf.keras.layers.Dense(128, activation=tf.nn.relu),
+                tf.keras.layers.Dense(10, activation=tf.nn.softmax),
+            ]
+        )
+        tf_keras_model.save("./tf_keras_model.h5")
+
+        # write a toy SavedModel directory
+        tf_keras_model.save("./saved_model", save_format="tf")
+
+    def teardown_class(self):
+        chdir(self._cwd)
+        if os.path.exists(self._temp_dir):
+            rmtree(self._temp_dir)
+
+    @staticmethod
+    def test_convert_tf_keras_h5_file():
+        if platform.machine() == "arm64":
+            pytest.xfail("rdar://101162740 ([CI] [TF] The tf_keras_h5_file API testing is failing on M1 with new OS)")
+
+        for file_extension in ("h5", "hdf5"):
+            x = tf.keras.Input(shape=(32,), name="input")
+            y = tf.keras.layers.Dense(16, activation="softmax")(x)
+            keras_model = tf.keras.Model(x, y)
+            temp_dir = mkdtemp()
+            save_dir = str(temp_dir)
+            path = os.path.join(save_dir, "tf_keras_model."
+ file_extension) + keras_model.save(path) + mlmodel = ct.convert(path) + + test_input = np.random.rand(2, 32) + expected_val = keras_model(test_input) + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4) + + @staticmethod + def test_convert_tf_keras_model(): + x = tf.keras.Input(shape=(32,), name="input") + y = tf.keras.layers.Dense(16, activation="softmax")(x) + keras_model = tf.keras.Model(x, y) + + mlmodel = ct.convert(keras_model) + + test_input = np.random.rand(2, 32) + expected_val = keras_model(test_input) + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4) + + @staticmethod + @pytest.mark.parametrize( + "dtype", ['default', 'mil_type', 'np type']) + def test_convert_tf_keras_applications_model(dtype): + tf_keras_model = tf.keras.applications.MobileNet( + weights="imagenet", input_shape=(224, 224, 3) + ) + + # inputs / outputs are optional, we can get from tf.keras model + # this can be extremely helpful when we want to extract sub-graphs + input_name = tf_keras_model.inputs[0].name.split(":")[0] + + if dtype == 'default': + dtype = None + elif dtype == 'mil_type': + dtype = types.fp32 + else: + dtype = np.float32 + + mlmodel = ct.convert( + tf_keras_model, + inputs=[ct.TensorType(shape=(1, 224, 224, 3), dtype=dtype)], + ) + mlmodel.save("./mobilenet.mlmodel") + + @staticmethod + def test_convert_from_saved_model_dir(): + # SavedModel directory generated by TensorFlow 2.x + mlmodel = ct.convert("./saved_model") + mlmodel.save("./model.mlmodel") + + + @staticmethod + def test_keras_custom_layer_model(): + # testing : https://coremltools.readme.io/docs/tensorflow-2#conversion-from-user-defined-models + + class CustomDense(layers.Layer): + def __init__(self, units=32): + super(CustomDense, self).__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight( + shape=(input_shape[-1], self.units), + initializer="random_normal", + trainable=True, + ) + self.b = self.add_weight( + shape=(self.units,), initializer="random_normal", trainable=True + ) + + def call(self, inputs): + return tf.matmul(inputs, self.w) + self.b + + inputs = keras.Input((4,)) + outputs = CustomDense(10)(inputs) + model = keras.Model(inputs, outputs) + ct.convert(model) + + @staticmethod + def test_concrete_function_conversion(): + # testing : https://coremltools.readme.io/docs/tensorflow-2#conversion-from-user-defined-models + + @tf.function(input_signature=[tf.TensorSpec(shape=(6,), dtype=tf.float32)]) + def gelu_tanh_activation(x): + a = (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))) + y = 0.5 * (1.0 + tf.tanh(a)) + return x * y + + conc_func = gelu_tanh_activation.get_concrete_function() + mlmodel = ct.convert([conc_func]) + + @staticmethod + def test_convert_tf2_keras(): + x = tf.keras.Input(shape=(32,), name="input") + y = tf.keras.layers.Dense(16, activation="softmax")(x) + keras_model = tf.keras.Model(x, y) + model = ct.convert(keras_model, convert_to='milinternal') + assert isinstance(model, ct.converters.mil.Program) + + +class TestTF2FlexibleInput: + # Test examples in https://coremltools.readme.io/docs/flexible-inputs + @staticmethod + @pytest.mark.parametrize("use_symbol", [True, False]) + def test_tf2keras_shared_range_dim(use_symbol): + input_dim = 3 + # None denotes seq_len dimension + x1 = tf.keras.Input(shape=(None,input_dim), name="seq1") + x2 = tf.keras.Input(shape=(None,input_dim), name="seq2") + y = x1 + x2 + 
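# Note: predictions below are read from results["Identity"]; that is the
+        # output name the TF2 export assigns by wrapping Keras outputs in
+        # Identity ops.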
+        keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y])
+
+        # One RangeDim shared by two inputs
+        if use_symbol:
+            seq_len_dim = ct.RangeDim(symbol='seq_len')
+        else:
+            # symbol is optional
+            seq_len_dim = ct.RangeDim()
+        seq1_input = ct.TensorType(name="seq1", shape=(1, seq_len_dim, input_dim))
+        seq2_input = ct.TensorType(name="seq2", shape=(1, seq_len_dim, input_dim))
+        mlmodel = ct.convert(keras_model,
+                             inputs=[seq1_input, seq2_input])
+
+        batch = 1
+        seq_len = 5
+        test_input_x1 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
+        test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
+        expected_val = keras_model([test_input_x1, test_input_x2])
+        if ct.utils._is_macos():
+            results = mlmodel.predict({
+                "seq1": test_input_x1,
+                "seq2": test_input_x2})
+            np.testing.assert_allclose(results["Identity"], expected_val,
+                                       rtol=1e-2, atol=1e-2)
+
+
+    @staticmethod
+    def test_tf2keras_incorrect_range_dim():
+        input_dim = 3
+        # None denotes seq_len dimension
+        x1 = tf.keras.Input(shape=(None,input_dim), name="seq1")
+        y = x1 + 1
+        keras_model = tf.keras.Model(inputs=[x1], outputs=[y])
+
+        # Incorrectly using -1 instead of ct.RangeDim
+        with pytest.raises(ValueError,
+                           match=r"Can\'t convert to CoreML shaping"):
+            seq1_input = ct.TensorType(name="seq1", shape=(1, -1, input_dim))
+            mlmodel = ct.convert(keras_model, inputs=[seq1_input])
+
+    @staticmethod
+    @pytest.mark.parametrize("use_symbol", [True, False])
+    def test_tf2keras_outofbound_range_dim(use_symbol):
+        input_dim = 3
+        # None denotes seq_len dimension
+        x = tf.keras.Input(shape=(None,input_dim), name="seq")
+        y = x * 2
+        keras_model = tf.keras.Model(inputs=[x], outputs=[y])
+
+        if use_symbol:
+            seq_len_dim = ct.RangeDim(symbol='sequence_len', lower_bound=3,
+                                      upper_bound=5)
+        else:
+            seq_len_dim = ct.RangeDim(lower_bound=3, upper_bound=5)
+        seq_input = ct.TensorType(name="seq", shape=(1, seq_len_dim, input_dim))
+        mlmodel = ct.convert(keras_model, inputs=[seq_input])
+
+        # seq_len is within bounds
+        batch = 1
+        seq_len = 3
+        test_input_x = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
+        expected_val = keras_model([test_input_x])
+        if ct.utils._is_macos():
+            results = mlmodel.predict({"seq": test_input_x})
+            np.testing.assert_allclose(results["Identity"], expected_val,
+                                       rtol=1e-4, atol=1e-3)
+
+        # seq_len below/above lower_bound/upper_bound
+        with pytest.raises(RuntimeError,
+                           match=r"Size \(2\) of dimension \(1\) is not in allowed range \(3\.\.5\)"):
+            seq_len = 2
+            test_input_x = np.random.rand(batch, seq_len,
+                                          input_dim).astype(np.float32)
+            results = mlmodel.predict({"seq": test_input_x})
+
+        with pytest.raises(RuntimeError,
+                           match=r"Size \(6\) of dimension \(1\) is not in allowed range \(3\.\.5\)"):
+            seq_len = 6
+            test_input_x = np.random.rand(batch, seq_len,
+                                          input_dim).astype(np.float32)
+            results = mlmodel.predict({"seq": test_input_x})
+
+    @staticmethod
+    def test_tf2_image_enumerated_shapes():
+        keras_model = tf.keras.applications.MobileNetV2(
+            input_shape=(None, None, 3,),
+            classes=1000,
+            include_top=False,
+        )
+        input_shapes = ct.EnumeratedShapes(shapes=[(1, 192, 192, 3), (1, 224, 224, 3)])
+        image_input = ct.ImageType(shape=input_shapes,
+                                   bias=[-1,-1,-1], scale=1/127)
+        model = ct.convert(keras_model, inputs=[image_input])
+        assert model is not None
+        spec = model.get_spec()
+        assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2
+
+    @staticmethod
+    def test_tf2keras_enumerated_shapes():
+        input_shape = (28, 28, 3)
+        x = tf.keras.Input(shape=input_shape, name="input")
+        C_out = 2
+        kHkW = 3
+        y = tf.keras.layers.Conv2D(C_out, kHkW, activation='relu',
+                                   input_shape=input_shape)(x)
+        keras_model = tf.keras.Model(inputs=[x], outputs=[y])
+
+        # Two enumerated shapes for the single input
+        shapes = [(1, 28, 28, 3), (1, 56, 56, 3)]
+        enumerated_shapes = ct.EnumeratedShapes(shapes=shapes)
+        tensor_input = ct.TensorType(name="input", shape=enumerated_shapes)
+        mlmodel = ct.convert(keras_model, inputs=[tensor_input])
+
+        # Test the (1, 28, 28, 3) shape
+        test_input_x = np.random.rand(*shapes[0]).astype(np.float32)
+        expected_val = keras_model([test_input_x])
+        if ct.utils._is_macos():
+            results = mlmodel.predict({
+                "input": test_input_x})
+            # rdar://101303143 ([CI] test_tf2keras_enumerated_shapes is getting some stochastic numerical issues on intel machines)
+            # The tolerance is set a bit loose here. Need to investigate this issue
+            # if possible and lower the threshold.
+            np.testing.assert_allclose(results["Identity"],
+                                       expected_val, atol=1e-2, rtol=3)
+
+        # Test the (1, 56, 56, 3) shape (can't verify numerical parity with Keras,
+        # which doesn't support enumerated shapes)
+        test_input_x = np.random.rand(*shapes[1]).astype(np.float32)
+        results = mlmodel.predict({
+            "input": test_input_x})
+
+        # Test with a wrong shape
+        with pytest.raises(RuntimeError,
+                           match=r"MultiArray Shape \(1 x 29 x 29 x 3\) was not in enumerated set of allowed shapes"):
+            test_input_x = np.random.rand(1, 29, 29, 3).astype(np.float32)
+            results = mlmodel.predict({
+                "input": test_input_x})
+
+    @staticmethod
+    def test_tf2keras_optional_input():
+        input_dim = 3
+        # None denotes seq_len dimension
+        x1 = tf.keras.Input(shape=(None,input_dim), name="optional_input")
+        x2 = tf.keras.Input(shape=(None,input_dim), name="required_input")
+        y = x1 + x2
+        keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y])
+
+        seq_len_dim = ct.RangeDim()
+        default_value = np.ones((1, 2, input_dim)).astype(np.float32)
+        optional_input = ct.TensorType(
+            name="optional_input",
+            shape=(1, seq_len_dim, input_dim),
+            default_value=default_value,
+        )
+        required_input = ct.TensorType(
+            name="required_input",
+            shape=(1, seq_len_dim, input_dim),
+        )
+        mlmodel = ct.convert(keras_model,
+                             inputs=[optional_input, required_input])
+
+        batch = 1
+        seq_len = 2
+        test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
+        expected_val = keras_model([default_value, test_input_x2])
+        if ct.utils._is_macos():
+            results = mlmodel.predict({"required_input": test_input_x2})
+            np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-2)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py
new file mode 100644
index 00000000..7e05b4ca
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile + +import pytest + +import coremltools.converters as converter +from coremltools.converters.mil.frontend.tensorflow.test.test_load import \ + frontend +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + get_tf_keras_io_names +from coremltools.converters.mil.input_types import TensorType + + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + + +class TestTf2ModelFormats: + def setup(self): + self.saved_model_dir = tempfile.mkdtemp() + _, self.model_path_h5 = tempfile.mkstemp( + suffix=".h5", prefix=self.saved_model_dir + ) + + def teardown(self): + if os.path.exists(self.saved_model_dir): + shutil.rmtree(self.saved_model_dir) + + def test_keras_model(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + keras_model, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_keras_saved_model_file(self): + keras_model = tf.keras.Sequential( + [ + tf.keras.layers.Flatten(input_shape=(28, 28), batch_size=1), + tf.keras.layers.Dense(10, activation=tf.nn.relu), + ] + ) + keras_model.save(self.saved_model_dir, save_format="tf") + mlmodel = converter.convert( + self.saved_model_dir, outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_keras_h5_file(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + keras_model.save(self.model_path_h5, save_format="h5") + mlmodel = converter.convert( + self.model_path_h5, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_keras_hdf5_file(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + keras_model.save(self.model_path_h5, save_format="h5") + mlmodel = converter.convert( + self.model_path_h5, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_concrete_function_list_from_tf_low_level_api(self): + root = tf.train.Checkpoint() + root.v1 = tf.Variable(3.0) + root.v2 = tf.Variable(2.0) + root.f = tf.function(lambda x: root.v1 * root.v2 * x) + + input_data = tf.constant(1.0, shape=[1, 1]) + to_save = root.f.get_concrete_function(input_data) + tf.saved_model.save(root, self.saved_model_dir, to_save) + + tf_model = tf.saved_model.load(self.saved_model_dir) + concrete_func = tf_model.signatures[ + tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY + ] + mlmodel = converter.convert( + [concrete_func], outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_saved_model_list_from_tf_function(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + tf.saved_model.save(model, self.saved_model_dir) + mlmodel = converter.convert( + self.saved_model_dir, outputs=["Identity"], source=frontend + 
) + assert mlmodel is not None + + def test_concrete_function_list_from_tf_function(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + concrete_func = model.__call__.get_concrete_function() + mlmodel = converter.convert( + [concrete_func], outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_graphdef_from_tf_function(self): + class build_model(tf.Module): + def __init__(self): + self.dense = tf.keras.layers.Dense(256, activation="relu") + + input_signature = [ + tf.TensorSpec(name="input", shape=( + 128, 128), dtype=tf.float32), + ] + + @tf.function(input_signature=input_signature) + def call(self, x): + x = self.dense(x) + return x + + model = build_model() + + from tensorflow.python.framework.convert_to_constants import \ + convert_variables_to_constants_v2 + frozen_graph_func = convert_variables_to_constants_v2( + model.call.get_concrete_function()) + frozen_graph_def = frozen_graph_func.graph.as_graph_def() + + mlmodel = converter.convert(frozen_graph_def) + assert mlmodel is not None + + def test_model_metadata(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + keras_model, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + metadata_keys = mlmodel.get_spec().description.metadata.userDefined + assert "com.github.apple.coremltools.version" in metadata_keys + assert "com.github.apple.coremltools.source" in metadata_keys + assert "tensorflow==2." in metadata_keys["com.github.apple.coremltools.source"] + + def test_invalid_format_none(self): + with pytest.raises(NotImplementedError) as e: + converter.convert(None, source=frontend) + e.match(r"Expected model format: .* .h5") + + def test_invalid_format_invalid_extension(self): + _, invalid_filename = tempfile.mkstemp( + suffix=".invalid", prefix=self.saved_model_dir + ) + with pytest.raises(NotImplementedError) as e: + converter.convert(invalid_filename, source=frontend) + e.match(r"Expected model format: .* .h5") + + def test_invalid_format_multiple_concrete_functions(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + cf = model.__call__.get_concrete_function() + with pytest.raises(NotImplementedError) as e: + converter.convert([cf, cf, cf], source=frontend) + e.match(r"Only a single concrete function is supported") + + def test_invalid_converter_type(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + with pytest.raises(ValueError) as e: + converter.convert(keras_model, source="invalid") + + expected_msg = r'Unrecognized value of argument "source": .*' + e.match(expected_msg) + + with pytest.raises(NotImplementedError) as e: + converter.convert(keras_model, convert_to="invalid", source=frontend) + e.match(r"Backend converter .* not implemented") + + def test_invalid_format_non_exist(self): + non_exist_filename = self.model_path_h5.replace(".h5", "_non_exist.h5") + with pytest.raises(ValueError) as e: + converter.convert(non_exist_filename, source=frontend) + e.match(r"Input model .* does not exist") diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py new file mode 100644 index 00000000..5713ae60 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py @@ -0,0 +1,792 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + TensorFlowBaseTest +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import \ + TensorFlow2BaseTest +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import \ + make_tf2_graph as make_tf_graph +from coremltools.converters.mil.testing_utils import random_gen + +TensorFlowBaseTest.run_compare_tf = TensorFlow2BaseTest.run_compare_tf2 + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +class TestImageResample(TensorFlowBaseTest): + @pytest.mark.skip( + "TODO: rdar://100812753 ([TF] [Infra] TensorFlow Addons dylib issues in TF 2.10.0)" + ) + @pytest.mark.parametrize( + "compute_unit, backend, data_warp_shapes", + itertools.product( + compute_units, + backends, + [ + # Data shape format: (Batch, Hin, Win, C) + # Warp shape format: (Batch, Hout, Wout, 2) + [(1, 3, 3, 1), (1, 3, 3, 2)], # no size change + [(2, 5, 5, 3), (2, 3, 3, 2)], # down-sampling + [(3, 6, 6, 1), (3, 8, 8, 2)], # up-sampling + ], + ), + ) + def test_resample( + self, compute_unit, backend, data_warp_shapes, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + tfa = pytest.importorskip("tensorflow_addons") + + data_shape, warp_shape = data_warp_shapes + + @make_tf_graph([data_shape, warp_shape]) + def build_model(x, warp): + return tfa.image.resampler(data=x, warp=warp) + + model, inputs, outputs = build_model + # warp exceeding input sizes in order to test more padding modes + input_values = [ + random_gen(data_shape, -100, 100), + random_gen(warp_shape, -15, 15), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestImageTransform(TensorFlowBaseTest): + @pytest.mark.skip( + "TODO: rdar://73165549 (Add other mode in 'affine' to coremltools when backend is ready)" + ) + @pytest.mark.parametrize( + "compute_unit, backend, transforms, interpolation, shapes", + itertools.product( + [True], + backends, + [ + [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, -250, 0.0, 1.0, 0.0, 0.0, 0.0], + [1.25, -1.75, 25.0, -25.0, 1.5, -1.5, 0.0, 0.0], + ], + ["BILINEAR"], + [ + ((1, 2, 2, 1), None), + ((2, 2, 2, 1), (2, 3)), + ((3, 5, 5, 2), (4, 4)), + ((1, 3, 3, 2), (6, 6)), + ((3, 50, 50, 2), (20, 20)), + ], + ), + ) + def test(self, compute_unit, backend, transforms, interpolation, shapes): + x_shape, output_shape = shapes + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + tfa = pytest.importorskip("tensorflow_addons") + + @make_tf_graph([x_shape]) + def 
build_model(x): + return tfa.image.transform( + x, + transforms=transforms, + interpolation=interpolation, + output_shape=output_shape, + ) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, InputShape_OutputShape, op", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 15, 3), (2, 5, 15, 3)], + [(2, 4, 8, 5), (2, 2, 4, 5)], + [(2, 4, 8, 3), (2, 9, 13, 3)], + ], + ["V2", "V3"], + ), + ) + def test_affine_transform(self, compute_unit, backend, InputShape_OutputShape, op): + if backend[0] == "neuralnetwork": + pytest.skip("Affine op not available in the neuralnetwork backend") + + input_shape, output_shape = InputShape_OutputShape + batch_size = input_shape[0] + transforms = np.random.rand(batch_size, 8) - 0.05 + transforms[:, 6:8] = 0 + + @make_tf_graph([input_shape]) + def build_model(x): + if op == "V2": + return tf.raw_ops.ImageProjectiveTransformV2( + images=x, + transforms=transforms, + fill_mode="CONSTANT", + output_shape=(output_shape[0], output_shape[1]), + interpolation="BILINEAR", + ) + elif op == "V3": + return tf.raw_ops.ImageProjectiveTransformV3( + images=x, + transforms=transforms, + fill_mode="CONSTANT", + output_shape=(output_shape[0], output_shape[1]), + interpolation="BILINEAR", + fill_value=0.0, + ) + else: + raise ValueError("tensorflow op {} not supported".format(op)) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSiLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, tf_op", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + [ + tf.nn.swish, # TODO(yuduo): in TF 2.4.0+, it's renamed to tf.nn.silu, + tf.keras.activations.swish, + ], + ), + ) + def test(self, compute_unit, backend, rank, tf_op): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=4, size=rank)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf_op(x) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResizeNearestNeighbor(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, target_shape, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [(1, 10, 20, 1), (2, 5, 1, 3)], + [(25, 30), (2, 20)], + [False], + [True, False], + ), + ) + def test_raw_ops( + self, + compute_unit, + backend, + input_shape, + target_shape, + align_corners, + half_pixel_centers, + ): + if align_corners is True and half_pixel_centers is True: + return + + if backend[0] == "neuralnetwork": + # neural network backend does not support fractional scale factors for nearest neighbor upsample op + if target_shape[-1] % input_shape[-1] != 0: + return + if target_shape[-2] % input_shape[-2] != 0: + return + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY 
and not half_pixel_centers: + pytest.xfail("rdar://97399545 (TestResizeNearestNeighbor failing on mlprogram + GPU + half_pixel_centers=False)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ResizeNearestNeighbor( + images=x, + size=target_shape, + align_corners=align_corners, + half_pixel_centers=half_pixel_centers, + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size", + itertools.product(compute_units, backends, [(1, 1), (2, 3), (4, 1)]), + ) + def test_keras_layer(self, compute_unit, backend, size): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=4, size=4)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.keras.layers.UpSampling2D( + size=size, interpolation="nearest", + )(x) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size, method", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 3)], + [tf.image.ResizeMethod.NEAREST_NEIGHBOR], + ), + ) + def test_tf_image_resize(self, compute_unit, backend, size, method): + if backend[0] == "mlprogram" and size == (1, 1): + pytest.xfail("rdar://79699954 (Nearest neighbor resize numerical mismatch when output size is (1,1))") + + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=3, size=4)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.image.resize(x, size=size, method=method) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNormalizationTF2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, func, backend, epsilon", + itertools.product( + compute_units, + [tf.raw_ops.FusedBatchNorm, tf.raw_ops.FusedBatchNormV3], + backends, + [1e-1, 1e-10] + ), + ) + def test_fused_batch_norm(self, compute_unit, func, backend, epsilon): + input_shape = np.random.randint(low=1, high=4, size=4) + attr_shape = [list(input_shape)[-1]] + + m = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + v = random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0) + o = random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0) + s = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([input_shape]) + def build_model(x): + return func( + x=x, + scale=s, + offset=o, + mean=m, + variance=v, + epsilon=epsilon, + is_training=False, + )[0] + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestElementWiseBinaryTF2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + 
itertools.product(compute_units, backends, [rank for rank in range(1, 4)]),
+    )
+    def test_add_v2(self, compute_unit, backend, rank):
+        x_shape = list(np.random.randint(low=2, high=5, size=rank))
+        y_shape = x_shape[:]
+        for i in range(rank):
+            if np.random.randint(4) == 0:
+                y_shape[i] = 1
+        if np.random.randint(2) == 0:
+            y_shape = [1] + y_shape
+
+        @make_tf_graph([x_shape, y_shape])
+        def build_model(x, y):
+            return tf.raw_ops.AddV2(x=x, y=y)
+
+        model, inputs, outputs = build_model
+
+        input_values = [
+            np.random.randint(low=-1, high=1, size=x_shape).astype(np.float32),
+            np.random.randint(low=-1, high=1, size=y_shape).astype(np.float32),
+        ]
+
+        input_dict = dict(zip(inputs, input_values))
+
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+
+class TestControlFlowFromAutoGraph(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_if_unary_const(self, compute_unit, backend):
+        @make_tf_graph([(1,)])
+        def build_model(x):
+            if x > 0.5:
+                y = x - 0.5
+            else:
+                y = x + 0.5
+            return y
+
+        model, inputs, outputs = build_model
+        input_values = [np.array([0.7], dtype=np.float32)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_if_unary_double_if_positive_else_square(self, compute_unit, backend):
+        @make_tf_graph([(1,)])
+        def build_model(x):
+            if x >= 0:
+                out = x + x
+            else:
+                out = x * x
+            return out
+
+        model, inputs, outputs = build_model
+        input_values = [np.array([2], dtype=np.float32)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_if_binary_add_if_else_mul(self, compute_unit, backend):
+        @make_tf_graph([(1,), (1,)])
+        def build_model(x, y):
+            if x > y:
+                out = x + x
+            else:
+                out = x * x
+            return out
+
+        model, inputs, outputs = build_model
+        input_values = [
+            np.array([3], dtype=np.float32),
+            np.array([7], dtype=np.float32),
+        ]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_while_loop_square(self, compute_unit, backend):
+        @make_tf_graph([(1,)])
+        def build_model(x):
+            i = 0
+            while i < 10:
+                x *= 2
+                i += 1
+            return x
+
+        model, inputs, outputs = build_model
+        input_values = [np.array([2.0], dtype=np.float32)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_while_loop_power(self, compute_unit, backend):
+        @make_tf_graph([(1,)])
+        def build_model(x):
+            i = 0
+            while i < 3:
+                x *= x
+                i += 1
+            return x
+
+        model, inputs, outputs = build_model
+        input_values = [np.array([2.0], dtype=np.float32)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+
input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_nested_body(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + i, j = 0, 10 + while i < j: + while 2 * i < i + 2: + i += 1 + x -= 1 + i += 2 + x *= 2 + return x + + model, inputs, outputs = build_model + input_values = [np.array([9.0], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +@pytest.mark.xfail(reason="rdar://76293949 (TF2 unit test InvalidArgumentError)", run=False) +class TestTensorList(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (1, True, None), + (1, True, (1,)), + (2, False, (1,)) + ], + ), + ) + def test_write_read_and_stack(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.write(0, x) + ta = ta.write(1, y) + return ta.read(0), ta.read(1), ta.stack() + + model, inputs, outputs = build_model + input_values = [ + np.array([3.14], dtype=np.float32), + np.array([6.17], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (0, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_unstack_and_read(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.unstack(x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.array([[3.14], [6.17], [12.14]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (2, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_write_and_gather(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.write(0, x) + ta = ta.write(1, y) + return ta.gather(indices=[0, 1]) + + model, inputs, outputs = build_model + input_values = [ + np.array([3.14], dtype=np.float32), + np.array([6.17], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + 
compute_units, + backends, + [ + (2, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_scatter_and_read(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.scatter(indices=[0, 1, 2], value=x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.array([[3.14], [6.17], [12.14]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product(compute_units, backends, [(2, False, (None, 8))]), + ) + def test_partial_element_shape(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1, 8)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.scatter(indices=[0, 1, 2], value=x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.random.rand(3, 1, 8).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPartitionedCall(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_partitioned_call_optimized_to_add_op(self, compute_unit, backend): + """ + The PartitionedCall will be optimized to V2Add op in TF's internal optimization pass (see + `_run_inline_graph_optimization`), so this test passes even when we haven't implemented + the `PartitionedCall` op). + """ + x_shape = [2, 3] + y_shape = [2, 3] + + @tf.function + def simple_func(*args): + output = [args[0] + args[1]] + return output + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.raw_ops.PartitionedCall( + args=[x, y], + f=simple_func.get_concrete_function(tf.zeros(x_shape), tf.zeros(y_shape)), + Tout=[tf.float32] + ) + + model, inputs, outputs = build_model + + input_values = [ + np.zeros(x_shape).astype(np.float32), + np.zeros(y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py new file mode 100644 index 00000000..7dd167eb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py @@ -0,0 +1,1739 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform +import random +from distutils.version import StrictVersion as _StrictVersion + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _get_version +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.frontend._utils import is_symbolic_dim_in_prog +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import ( + TensorFlowBaseTest, +) +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import ( + TensorFlow2BaseTest, +) +from coremltools.converters.mil.testing_utils import get_op_types_in_program, random_gen +from coremltools.models.utils import _macos_version + +TensorFlowBaseTest.run_compare_tf_keras = TensorFlow2BaseTest.run_compare_tf_keras +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +import tensorflow as _tf # should be after pytest.importorskip checks +from tensorflow.keras import Input +from tensorflow.keras.layers import Conv2D, GlobalMaxPooling2D +from tensorflow.keras.models import Model + + +class TestActivation(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + tf.keras.layers.ELU, + tf.keras.layers.LeakyReLU, + tf.keras.layers.ReLU, + tf.keras.layers.PReLU, + tf.keras.layers.Softmax, + tf.keras.layers.ThresholdedReLU, + ], + ), + ) + def test_layer(self, compute_unit, backend, rank, op): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential([op(batch_input_shape=shape)]) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + tf.keras.activations.elu, + tf.keras.activations.exponential, + tf.keras.activations.hard_sigmoid, + tf.keras.activations.linear, + tf.keras.activations.relu, + tf.keras.activations.selu, + tf.keras.activations.sigmoid, + tf.keras.activations.softmax, + tf.keras.activations.softplus, + tf.keras.activations.softsign, + tf.keras.activations.tanh, + ], + ), + ) + def test_activation(self, compute_unit, backend, rank, op): + kwargs = ( + {"atol": 1e-3, "rtol": 1e-4} + if op == tf.keras.activations.exponential and compute_unit != ct.ComputeUnit.CPU_ONLY + else {} + ) + if op == tf.keras.activations.softmax and rank == 1: + return # skip apply softmax to a tensor that is 1D + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Activation(op, batch_input_shape=shape)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + **kwargs + ) + + @pytest.mark.parametrize("backend", backends) + def test_conv2d_prelu_fusion(self, backend): + x_shape = (1, 10, 10, 32) + x = tf.keras.Input(batch_input_shape=x_shape) # (B, H, W, C) + x1 = tf.keras.layers.Conv2D(16, kernel_size=1)(x) + x1 = tf.keras.layers.PReLU(alpha_initializer='glorot_uniform', shared_axes=[1, 2])(x1) + x1 = tf.keras.layers.Conv2D(16, kernel_size=1)(x1) + x1 = 
tf.keras.layers.PReLU(alpha_initializer='glorot_uniform', shared_axes=[1, 2])(x1) + keras_model = tf.keras.Model(inputs=x, outputs=x1) + + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen(x_shape, -1, 1)], + compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=backend, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + # assert that "prelu" ops are present in the mil program, + # which should be if "fuse_prelu" pass worked correctly + assert len(mil_prog.find_ops(op_type="prelu")) == 2 + assert "relu" not in get_op_types_in_program(mil_prog) + + +class TestBinary(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [ + tf.keras.layers.Add, + tf.keras.layers.Average, + tf.keras.layers.Subtract, + tf.keras.layers.Maximum, + tf.keras.layers.Minimum, + ], + ), + ) + def test(self, compute_unit, backend, rank, op): + shape = np.random.randint(low=1, high=4, size=rank) + input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + out = op()([input_x, input_y]) + model = tf.keras.Model(inputs=[input_x, input_y], outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10), random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, axes, normalize", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 3)], + [-1,], + [True, False], + ), + ) + def test_dot(self, compute_unit, rank, backend, axes, normalize): + shape = np.random.randint(low=2, high=4, size=rank) + input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + out = tf.keras.layers.Dot(axes=axes, normalize=normalize)([input_x, input_y]) + model = tf.keras.Model(inputs=[input_x, input_y], outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10), random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConcatenate(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis", + itertools.product( + compute_units, backends, [rank for rank in range(5, 6)], [-1, -2], + ), + ) + def test(self, compute_unit, backend, rank, axis): + shape = np.random.randint(low=2, high=4, size=rank) + inputs = [] + for _ in range(2): + inputs.append(tf.keras.layers.Input(batch_input_shape=tuple(shape))) + out = tf.keras.layers.Concatenate(axis=axis)(inputs) + model = tf.keras.Model(inputs=inputs, outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape), random_gen(shape)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConvolution(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + "groups", + ] + ), + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.Conv1D, + tf.keras.layers.Conv2D, + tf.keras.layers.Conv3D, + ], + ["same", "valid"], + ["channels_last"], + [ + (2, 4, 4, 2, 2, 2), + (3, 7, 5, 1, 3, 2) + ], + [ + (1, 1, 1), + (1, 2, 3), + (1, 3, 2) + ], + [ + (1, 1, 1), (2, 2, 2), + ], + [1, 3], + [1, 2], + ), + ) + def test_conv( + self, + compute_unit, + backend, + op, + padding, + data_format, + 
spatial_dim_and_ks, + strides, + dilations, + batch_size, + groups, + ): + if _get_version(_tf.__version__) < _StrictVersion("2.5.0") and groups != 1: + pytest.skip("TF supports groupwise convolution only for version > tf.2.5.0-rc3") + + if _get_version(_tf.__version__) > _StrictVersion("2.8.0") and groups != 1: + pytest.xfail("rdar://100814590 ([TF] [Infra] TF 2.10.0 Uses Unimplemented " + "PartitionedCall op for Groupwise Convolution)") + + if op == tf.keras.layers.Conv3D and groups != 1: + pytest.xfail("rdar://81629932 (Conv3d with group > 1 tests failing in TF2.0 converter)") + + for i, stride in enumerate(strides): + if stride > 1 and dilations[i] > 1: + pytest.skip("TF does not support strides > 1 in conjunction with dilation_rate > 1") + + for d in dilations: + if d > 1 and op == tf.keras.layers.Conv3D: + pytest.skip("Dilations with Conv3D not supported yet, since SpaceToBatchND is " + "only supported for ranks 3 or 4") + + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 4 + input_shape = None + kernel_size = None + if op == tf.keras.layers.Conv1D: + input_shape = (batch_size, s3, c_in) + kernel_size = k3 + strides = strides[2] + dilations = dilations[2] + elif op == tf.keras.layers.Conv2D: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + elif op == tf.keras.layers.Conv3D: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + groups=groups, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.LocallyConnected1D, + tf.keras.layers.LocallyConnected2D, + ], + ["same", "valid"], + ["channels_last"], + [ + (2, 4, 4, 2, 2, 2), + (3, 7, 5, 1, 3, 2) + ], + [ + (1, 1, 1), + (1, 2, 3), + (1, 3, 2) + ], + [ + (1, 1, 1), (2, 2, 2), + ], + [1, 3], + ), + ) + def test_conv_locally_connected( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + strides, + dilations, + batch_size, + ): + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 3 + input_shape = None + kernel_size = None + if op in {tf.keras.layers.Conv1D, tf.keras.layers.LocallyConnected1D}: + input_shape = (batch_size, s3, c_in) + kernel_size = k3 + strides = strides[2] + dilations = dilations[2] + elif op in {tf.keras.layers.Conv2D, tf.keras.layers.LocallyConnected2D}: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + elif op == tf.keras.layers.Conv3D: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + if op in { + tf.keras.layers.LocallyConnected1D, + tf.keras.layers.LocallyConnected2D, + }: + if padding != "valid": + return # tf.keras only supports "valid" + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + ) + ] + ) + 
else: + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [tf.keras.layers.DepthwiseConv2D], + ["same", "valid"], + ["channels_last"], + [(11, 12, 3, 2), (12, 11, 2, 3)], + [(1, 1), (2, 2)], + [(1, 1), (2, 2)], + [1, 3], + ), + ) + def test_depth_wise_conv( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + strides, + dilations, + batch_size, + ): + s1, s2, k1, k2 = spatial_dim_and_ks + c_in = 2 + + if len(strides) != np.sum(strides) and len(dilations) != np.sum(dilations): + # TF produces incorrect output for non-one strides + dilations + return + + input_shape = (batch_size, s1, s2, c_in) + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + kernel_size=(k1, k2), + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + ] + ), + itertools.product( + compute_units, + backends, + ["same", "valid"], + ), + ) + def test_conv2d_padding_dynamic_input( + self, + compute_unit, + backend, + padding, + ): + if backend[0] == "mlprogram" and _macos_version() < (13, 0): + pytest.skip("Error in declaring network.") + + # Test same padding + input_layer = Input(batch_size=1, shape=(None, None, 1)) + layer = Conv2D( + filters=16, + kernel_size=(3, 3), + padding=padding, + activation="relu" + )(input_layer) + output_layer = GlobalMaxPooling2D()(layer) + model = Model(inputs=[input_layer], outputs=[output_layer]) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen((1, 80, 40, 1), rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [tf.keras.layers.SeparableConv1D, tf.keras.layers.SeparableConv2D], + ["same", "valid"], + ["channels_last"], + [ + (14, 14, 2, 2), + (11, 9, 3, 2), + (12, 11, 2, 3) + ], + [ + (1, 1), (2, 2), (3, 3) + ], + [(1, 1)], + [1, 3], + ), + ) + def test_separable_conv( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + strides, + dilations, + batch_size, + ): + s1, s2, k1, k2 = spatial_dim_and_ks + c_in, c_out = 2, 3 + input_shape = None + kernel_size = None + if op == tf.keras.layers.SeparableConv1D: + input_shape = (batch_size, s2, c_in) + kernel_size = k2 + strides = strides[1] + dilations = dilations[1] + elif op == tf.keras.layers.SeparableConv2D: + input_shape = (batch_size, s1, s2, c_in) + kernel_size = (k1, k2) + + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + 
kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestConvTranspose(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "output_padding", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [tf.keras.layers.Conv2DTranspose, tf.keras.layers.Conv3DTranspose], + ["same", "valid"], + ["channels_last"], + [(7, 11, 12, 1, 2, 2), (9, 5, 7, 3, 3, 3)], + [(1, 1, 1)], + [(2, 2, 2), (2, 3, 3)], + [(1, 1, 1)], # Dilation > 1 not supported by TF + [1, 3], + ), + ) + def test_conv_transpose( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + output_padding, + strides, + dilations, + batch_size, + ): + if ( + platform.machine() == "arm64" + and backend == ("mlprogram", "fp16") + and op == tf.keras.layers.Conv3DTranspose + and padding == "valid" + and spatial_dim_and_ks == (7, 11, 12, 1, 2, 2) + and strides == (2, 3, 3) + and batch_size == 3 + ): + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing M1 native)") + + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 3 + input_shape = None + kernel_size = None + if op == tf.keras.layers.Conv2DTranspose: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + output_padding = (output_padding[1], output_padding[2]) + elif op == tf.keras.layers.Conv3DTranspose: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + output_padding=output_padding, + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestCropping(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, begin_end", + itertools.product( + compute_units, backends, [(0, 0), (1, 1), (1, 2), (2, 1), (2, 4), (3, 2)], + ), + ) + def test_cropping_1d(self, compute_unit, backend, begin_end): + shape = (1, 10, 3) + model = tf.keras.Sequential( + [tf.keras.layers.Cropping1D(batch_input_shape=shape, cropping=begin_end)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_end1, begin_end2", + itertools.product( + compute_units, + backends, + [(0, 0), (1, 1), (2, 1)], + [(0, 0), (1, 2), (4, 2)], + ), + ) + def test_cropping_2d(self, compute_unit, backend, begin_end1, begin_end2): + shape = (1, 10, 10, 3) + model = tf.keras.Sequential( + [ + tf.keras.layers.Cropping2D( + batch_input_shape=shape, cropping=(begin_end1, begin_end2) + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_end1, begin_end2, begin_end3", + 
itertools.product( + compute_units, + backends, + [(0, 0), (1, 2), (2, 1)], + [(1, 1), (1, 2), (4, 2)], + [(0, 0), (1, 1), (2, 4)], + ), + ) + def test_cropping_3d( + self, compute_unit, backend, begin_end1, begin_end2, begin_end3 + ): + shape = (1, 10, 10, 10, 3) + model = tf.keras.Sequential( + [ + tf.keras.layers.Cropping3D( + batch_input_shape=shape, + cropping=(begin_end1, begin_end2, begin_end3), + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestDense(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, units, activation, use_bias", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [2, 4, 8], + [tf.nn.relu, tf.nn.softmax, tf.nn.swish], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, units, activation, use_bias): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.Dense( + batch_input_shape=shape, + units=units, + activation=activation, + use_bias=use_bias, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEmbedding(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dims, batch_size, input_length", + itertools.product( + compute_units, + backends, + [(4, 1), (8, 3), (16, 5), (32, 7), (64, 9)], + [1, 3, 5], + [2, 4, 10], + ), + ) + def test(self, compute_unit, backend, dims, batch_size, input_length): + # input shape: 2D tensor (batch_size, input_length) + # output shape: 3D tensor (batch_size, input_length, output_dim) + shape = (batch_size, input_length) + model = tf.keras.Sequential( + [ + tf.keras.layers.Embedding( + batch_input_shape=shape, + input_dim=dims[0], + output_dim=dims[1], + input_length=input_length, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=0, rand_max=dims[0])], + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + ) + + +class TestFlatten(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, data_format", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + ["channels_last", "channels_first"], + ), + ) + def test(self, compute_unit, backend, rank, data_format): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Flatten(batch_input_shape=shape, data_format=data_format,)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLambda(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, function", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + lambda x: x + x, + lambda x: x * 3.14 - 1.0, + lambda x: np.sqrt(4) + x, + lambda x: tf.math.abs(x), + ], + ), + ) + def test_unary(self, compute_unit, backend, rank, function): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Lambda(batch_input_shape=shape, function=function,)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-5, rand_max=5)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestBatchNormalization(TensorFlowBaseTest): + 
@pytest.mark.parametrize( + "compute_unit, backend, rank, axis, momentum, epsilon, mixed_precision", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [0, -1], + [0.99, 0.85], + [1e-2, 1e-5], + [True, False], + ), + ) + def test_batch_normalization( + self, compute_unit, backend, rank, axis, momentum, epsilon, mixed_precision + ): + if backend[0] != "mlprogram" and mixed_precision: + pytest.skip("neuralnetwork backend doesn't support fp16 computation.") + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy('mixed_float16') + + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.BatchNormalization( + batch_input_shape=shape, + axis=axis, + momentum=momentum, + epsilon=epsilon, + ) + ] + ) + random_weights = np.random.rand(4, shape[axis]) + model.layers[0].set_weights(random_weights) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx()) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, momentum, epsilon, mixed_precision", + itertools.product( + compute_units, + backends, + [(4, 1), (4, -3)], + [0.99, 0.85], + [1e-2, 1e-5], + [True, False], + ), + ) + def test_fused_batch_norm_v3( + self, compute_unit, backend, rank_and_axis, momentum, epsilon, mixed_precision + ): + if backend[0] != "mlprogram" and mixed_precision: + pytest.skip("neuralnetwork backend doesn't support fp16 computation.") + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy('mixed_float16') + + rank, axis = rank_and_axis + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.BatchNormalization( + batch_input_shape=shape, + axis=axis, + momentum=momentum, + epsilon=epsilon, + ) + ] + ) + random_weights = np.random.rand(4, shape[axis]) + model.layers[0].set_weights(random_weights) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx()) + + +class TestInstanceNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, epsilon, center, scale", + itertools.product( + compute_units, + backends, + [rank for rank in range(4, 5)], + [-1], + [1e-3, 1e-5], + [True, False], + [True, False], + ), + ) + def test_instance_normalization( + self, compute_unit, backend, rank, axis, epsilon, center, scale + ): + tensorflow_addons = pytest.importorskip("tensorflow_addons") + from tensorflow_addons.layers import InstanceNormalization + + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + InstanceNormalization( + batch_input_shape=shape, + axis=axis, + epsilon=epsilon, + center=center, + scale=scale, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, epsilon, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 4)], + [-1,], + [1e-2, 1e-10], + [True, False], + ), + ) + def test_layer_normalization(self, 
compute_unit, backend, rank, axis, epsilon, dynamic): + shape = np.random.randint(low=2, high=4, size=rank) + keras_shape = shape.tolist() + + if dynamic: + keras_shape[0] = None + + model = tf.keras.Sequential( + [ + tf.keras.layers.LayerNormalization( + batch_input_shape=keras_shape, axis=axis, epsilon=epsilon, trainable=False + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-100, rand_max=100)], + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + "compute_unit, backend, rank, groups, axis, epsilon, center, scale", + itertools.product( + compute_units, + backends, + [rank for rank in range(4, 5)], + [1, 2, 3], + [-1], + [1e-3, 1e-5], + [True, False], + [True, False], + ), + ) + def test_group_normalization( + self, compute_unit, backend, rank, groups, axis, epsilon, center, scale + ): + tensorflow_addons = pytest.importorskip("tensorflow_addons") + from tensorflow_addons.layers import GroupNormalization + + shape = np.random.randint(low=2, high=4, size=rank) + shape[-1] = shape[-1] * groups # ensure the channel dimension is divisible by groups + model = tf.keras.Sequential( + [ + GroupNormalization( + batch_input_shape=shape, + groups=groups, + axis=axis, + epsilon=epsilon, + center=center, + scale=scale, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + ) + + +class TestPadding(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, data_format, padding, mixed_precision", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.ZeroPadding1D, + tf.keras.layers.ZeroPadding2D, + tf.keras.layers.ZeroPadding3D, + ], + ["channels_first", "channels_last"], + [(1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 3, 4), (2, 3, 5)], + [True, False], + ), + ) + def test(self, compute_unit, backend, op, data_format, padding, mixed_precision): + if backend[0] != "mlprogram" and mixed_precision: + pytest.skip("neuralnetwork backend doesn't support fp16 computation.") + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy("mixed_float16") + + shape = None + kwargs = {} + if op == tf.keras.layers.ZeroPadding1D: + padding = padding[-1] + shape = np.random.randint(low=2, high=4, size=3) + elif op == tf.keras.layers.ZeroPadding2D: + padding = padding[1:] + kwargs = {"data_format": data_format} + shape = np.random.randint(low=2, high=4, size=4) + elif op == tf.keras.layers.ZeroPadding3D: + kwargs = {"data_format": data_format} + shape = np.random.randint(low=2, high=4, size=5) + model = tf.keras.Sequential( + [op(batch_input_shape=shape, padding=padding, **kwargs)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx()) + + +class TestPermute(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_perm", + itertools.product( + compute_units, + backends, + [ + (rank, perm) + for rank in range(3, 6) + for perm in list(itertools.permutations(range(rank)[1:])) + ], + ), + ) + def test(self, compute_unit, backend, rank_and_perm): + rank, perm = rank_and_perm + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Permute(batch_input_shape=shape, dims=perm)] + ) + TensorFlowBaseTest.run_compare_tf_keras( +
model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGlobalPooling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, data_format", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.GlobalAveragePooling1D, + tf.keras.layers.GlobalAveragePooling2D, + tf.keras.layers.GlobalAveragePooling3D, + tf.keras.layers.GlobalMaxPool1D, + tf.keras.layers.GlobalMaxPool2D, + tf.keras.layers.GlobalMaxPool3D, + ], + ["channels_first", "channels_last"], + ), + ) + def test_global_pooling(self, compute_unit, backend, op, data_format): + shape = None + if op in { + tf.keras.layers.GlobalAveragePooling1D, + tf.keras.layers.GlobalMaxPool1D, + }: + shape = np.random.randint(low=2, high=4, size=3) + elif op in { + tf.keras.layers.GlobalAveragePooling2D, + tf.keras.layers.GlobalMaxPool2D, + }: + shape = np.random.randint(low=2, high=4, size=4) + elif op in { + tf.keras.layers.GlobalAveragePooling3D, + tf.keras.layers.GlobalMaxPool3D, + }: + shape = np.random.randint(low=2, high=4, size=5) + model = tf.keras.Sequential( + [op(batch_input_shape=shape, data_format=data_format)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPooling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, data_format, pool_size", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.AveragePooling1D, + tf.keras.layers.AveragePooling2D, + tf.keras.layers.AveragePooling3D, + tf.keras.layers.MaxPool1D, + tf.keras.layers.MaxPool2D, + tf.keras.layers.MaxPool3D, + ], + ["channels_first", "channels_last"], + [(2, 2, 1), (2, 3, 2), (1, 2, 3)], + ), + ) + def test_pooling(self, compute_unit, backend, op, data_format, pool_size): + shape = None + if op in {tf.keras.layers.AveragePooling1D, tf.keras.layers.MaxPool1D}: + shape = np.random.randint(low=3, high=9, size=3) + pool_size = pool_size[2] + elif op in {tf.keras.layers.AveragePooling2D, tf.keras.layers.MaxPool2D}: + if data_format == "channels_first": + return # AvgPoolingOp only supports NHWC on CPU + shape = np.random.randint(low=3, high=9, size=4) + pool_size = pool_size[1:] + elif op in {tf.keras.layers.AveragePooling3D, tf.keras.layers.MaxPool3D}: + shape = np.random.randint(low=3, high=9, size=5) + model = tf.keras.Sequential( + [op(batch_input_shape=shape, pool_size=pool_size, data_format=data_format)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRecurrent(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, units, activation, " + "recurrent_activation, use_bias, return_sequences", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 4)], + [1, 3], + [None, tf.nn.tanh], + [None, tf.nn.relu], + [True, False], + [True, False], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + rank, + units, + activation, + recurrent_activation, + use_bias, + return_sequences, + ): + shape = np.random.randint(low=1, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.LSTM( + batch_input_shape=shape, + units=units, + activation=activation, + recurrent_activation=recurrent_activation, + use_bias=use_bias, + return_sequences=return_sequences, + ), + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + 
[random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstmcell(self, compute_unit, backend): + shape = np.random.randint(low=1, high=4, size=3) + model = tf.keras.Sequential( + [ + tf.keras.layers.RNN( + batch_input_shape=shape, cell=tf.keras.layers.LSTMCell(units=3) + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_time_distributed_dense(self, compute_unit, backend): + shape = list(np.random.randint(low=1, high=4, size=3)) + k_in = tf.keras.layers.Input(batch_size=shape[0], shape=shape[1:]) + lstm = tf.keras.layers.LSTM(units=32, return_sequences=True)(k_in) + k_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1))(lstm) + model = tf.keras.Model(inputs=k_in, outputs=k_out) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_dynamic_batch(self, compute_unit, backend): + input_shape = (1, 1280) + inp = tf.keras.layers.Input(shape=input_shape) + out, hn, cn = tf.keras.layers.LSTM(512, + return_sequences=True, + return_state=True, + recurrent_activation='sigmoid')(inp) + model = tf.keras.models.Model(inputs=[inp], outputs=[out, hn, cn]) + batch_size = 2 + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen((batch_size, 1, 1280), -1, 1),], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_conversion_static_shapes(self, compute_unit, backend): + ''' + Test that intermediate tensor shapes are populated correctly by the converter. + That is, there are no symbolic dimensions in the shapes, when conversion is + performed with a fixed input shape, irrespective of the shape used in the source model definition. 
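+ For example, converting with the fixed shape (1, 32, 10) must yield no + symbolic dimensions even when the Keras model was defined with shape + (None, None, 10), while converting with a ct.RangeDim shape is expected + to yield them.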
+ ''' + def _get_keras_simple_lstm_model(input_shape): + input = tf.keras.Input(batch_input_shape=input_shape) + output = tf.keras.layers.LSTM(5)(input) + keras_model = tf.keras.Model(inputs=input, outputs=output) + return keras_model + + def _test_for_symbolic_shapes(keras_input_shape, input_shape_for_conversion, are_symbols_expected): + keras_model = _get_keras_simple_lstm_model(keras_input_shape) + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen((1, 32, 10), -1, 1)], + inputs_for_conversion=[ct.TensorType(shape=input_shape_for_conversion)], + compute_unit=compute_unit, + backend=backend, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert is_symbolic_dim_in_prog(mil_prog) == are_symbols_expected + + _test_for_symbolic_shapes(keras_input_shape=(1, 32, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, 32, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, None, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, 32, 10), + input_shape_for_conversion=(ct.RangeDim(1, 10), 32, 10), + are_symbols_expected=True) + + if backend[0] != "mlprogram": + # FIX ME: model load fails if backend is "mlprogram". rdar://84862138 + _test_for_symbolic_shapes(keras_input_shape=(None, None, 10), + input_shape_for_conversion=(ct.RangeDim(1, 10), ct.RangeDim(16, 64), 10), + are_symbols_expected=True) + + @pytest.mark.parametrize( + "compute_unit, tf_raw_lstm_op, is_flexible_input, batch_size, backend", + itertools.product( + compute_units, + [ + tf.raw_ops.BlockLSTMV2, + tf.raw_ops.BlockLSTM, + ], + [False, True], + [1, 2], + backends, + ), + ) + def test_lstm_block_fused_op( + self, compute_unit, tf_raw_lstm_op, is_flexible_input, batch_size, backend + ): + """ + Define a model with custom LSTM ops that uses tf.raw_ops.BlockLSTM / tf.raw_ops.BlockLSTMV2 + and verify that it converts to a fused lstm op. 
+ + %x (shape: (Seq, Batch, idim) == (seq_len, batch, 4)) + %x1 = LSTM(h=10) (%x) # shape = (seq_len, batch, 10) + %x2 = LSTM(h=20) (%x1) # shape = (seq_len, batch, 20) + %x3 = slice()(%x2) # shape = (1, batch, 20), to get the final seq value + %x4 = reshape((1, -1)) (%x3) # shape = (1, batch * 20) + %x5 = Dense(h=3)(%x4) # shape = (1, 3) + """ + + class CustomLSTM(tf.keras.layers.Layer): + def __init__(self, num_units, max_seq_length, batch_size): + super(CustomLSTM, self).__init__() + self.hidden_dim = num_units + self.seq_length = max_seq_length + self.batch_size = batch_size + + def build(self, input_shape): + input_dim = input_shape[-1] + self.w = self.add_weight( + shape=(input_dim + self.hidden_dim, 4 * self.hidden_dim), + initializer="random_normal", + trainable=True, + ) + self.b = self.add_weight(shape=(4 * self.hidden_dim,), initializer="random_normal", trainable=True) + self.init_h = tf.constant(np.zeros((self.batch_size, self.hidden_dim)).astype(np.float32)) + self.init_c = tf.constant(np.zeros((self.batch_size, self.hidden_dim)).astype(np.float32)) + + def call(self, inputs): + _, output_state, _, _, _, _, output = tf_raw_lstm_op( + seq_len_max=self.seq_length, + x=inputs, + cs_prev=self.init_c, + h_prev=self.init_h, + w=self.w, + wci=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + wcf=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + wco=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + b=self.b, + ) + return output, output_state + + input_dim = 4 + seq_length = 5 + x_shape = (seq_length, batch_size, input_dim) + hidden_dim_1 = 10 + hidden_dim_2 = 20 + + x = tf.keras.Input(batch_input_shape=x_shape) # (seq_len, batch, 4) + x1, output_states_1 = CustomLSTM(num_units=hidden_dim_1, max_seq_length=seq_length, batch_size=batch_size)(x) # (seq_len, batch, 10), (seq_len, batch, 10) + x2, output_states_2 = CustomLSTM(num_units=hidden_dim_2, max_seq_length=seq_length, batch_size=batch_size)(x1) # (seq_len, batch, 20), (seq_len, batch, 20) + x3 = tf.slice(x2, begin=[4, 0, 0], size=[1, batch_size, 20]) # (1, batch, 20) + x4 = tf.reshape(x3, shape=(1, -1)) # (1, batch * 20) + x5 = tf.keras.layers.Dense(3)(x4) # (1, 3) + + # Test that we can fuse the lstm op if we have an output that only extracts information from the last cell state + x6 = tf.keras.layers.ReLU()(output_states_1[4, :, :]) + x7 = output_states_2[4:5, :, :] + x8 = output_states_1[-1, :, :] + x9 = tf.keras.layers.ReLU()(output_states_2[-1:, :, :]) + outputs = [x5, x8, x9] if is_flexible_input else [x5, x6, x7, x8, x9] + + keras_model = tf.keras.Model(inputs=x, outputs=outputs) + + inputs = None + if is_flexible_input: + inputs = [ + ct.TensorType( + shape=(ct.RangeDim(seq_length, 20), batch_size, input_dim) + ) + ] + + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen(x_shape, -1, 1)], + compute_unit=compute_unit, + backend=backend, + inputs_for_conversion=inputs, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + # assert that "lstm" ops are present in the mil program + assert len(mil_prog.find_ops(op_type="lstm")) == 2 + + +class TestRepeatVector(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, n", + itertools.product( + compute_units, + backends, + [2, 3, 5, 7], + ), + ) + def test(self, compute_unit, backend, n): + # input shape: 2D tensor (batch size, features) + # output shape: 3D tensor (batch size, n, features) + shape = np.random.randint(low=1, high=4, size=2) + model =
tf.keras.Sequential( + [tf.keras.layers.RepeatVector(batch_input_shape=shape, n=n)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReshape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, infer_shape", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, infer_shape): + shape = np.random.randint(low=2, high=4, size=rank) + # target shape does not include the batch dimension + target_shape = random.sample(list(shape[1:]), len(shape[1:])) + if len(target_shape) > 0 and infer_shape: + target_shape[-1] = -1 + model = tf.keras.Sequential( + [ + tf.keras.layers.Reshape( + batch_input_shape=shape, target_shape=target_shape + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSkips(TensorFlowBaseTest): + # ops in this class should be ignored / pass-through during conversion + + @pytest.mark.parametrize( + "compute_unit, backend, skip_op", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.Dropout, + tf.keras.layers.AlphaDropout, + tf.keras.layers.GaussianDropout, + tf.keras.layers.SpatialDropout1D, + tf.keras.layers.SpatialDropout2D, + tf.keras.layers.SpatialDropout3D, + ], + ), + ) + def test_skip_dropout(self, compute_unit, backend, skip_op): + shape = np.random.randint(low=1, high=4, size=5) + if skip_op == tf.keras.layers.SpatialDropout1D: + shape = shape[:3] + elif skip_op == tf.keras.layers.SpatialDropout2D: + shape = shape[:4] + model = tf.keras.Sequential([skip_op(batch_input_shape=shape, rate=0.5)]) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_skip_noise(self, compute_unit, backend): + shape = np.random.randint(low=1, high=4, size=5) + model = tf.keras.Sequential( + [ + # GaussianNoise should do nothing in inference mode + tf.keras.layers.GaussianNoise(batch_input_shape=shape, stddev=0.5) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, l1, l2", + itertools.product( + compute_units, + backends, + [rank for rank in range(5, 6)], + [0.0, 0.5, 1.0], + [0.0, 0.5, 1.0], + ), + ) + def test_skip_regularization(self, compute_unit, backend, rank, l1, l2): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.ActivityRegularization( + batch_input_shape=shape, l1=l1, l2=l2 + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestUpSampling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, upsample_factor, data_format, interpolation, dynamic", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.UpSampling1D, + tf.keras.layers.UpSampling2D, + tf.keras.layers.UpSampling3D, + ], + [(2, 2, 1), (4, 3, 2), (1, 2, 3)], + ["channels_first", "channels_last"], + ["nearest", "bilinear"], + 
[True, False], + ), + ) + def test( + self, compute_unit, backend, op, upsample_factor, data_format, interpolation, dynamic + ): + kwargs = {} + shape = None + keras_shape = None + + if op == tf.keras.layers.UpSampling1D: + shape = np.random.randint(low=2, high=4, size=3) + keras_shape = np.copy(shape).tolist() + if dynamic: + keras_shape[1] = None + upsample_factor = upsample_factor[2] + elif op == tf.keras.layers.UpSampling2D: + kwargs = {"data_format": data_format, "interpolation": interpolation} + shape = np.random.randint(low=2, high=4, size=4) + keras_shape = np.copy(shape).tolist() + if dynamic: + keras_shape[1] = keras_shape[2] = None + upsample_factor = (upsample_factor[1], upsample_factor[2]) + elif op == tf.keras.layers.UpSampling3D: + kwargs = {"data_format": data_format} + shape = np.random.randint(low=2, high=4, size=5) + keras_shape = np.copy(shape).tolist() + # UpSampling3D with a dynamic input shape is not supported, since 6D tensors would be produced in that case + if dynamic: + return + + model = tf.keras.Sequential( + [op(batch_input_shape=keras_shape, size=upsample_factor, **kwargs)] + ) + spec = TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + )[0] + # also check that the scaling factors are integers + if backend[0] == 'neuralnetwork': + for layer in spec.neuralNetwork.layers: + if layer.WhichOneof('layer') == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + +class TestGelu(TensorFlowBaseTest): + @pytest.mark.skipif( + _get_version(_tf.__version__) < _StrictVersion("2.4.0"), + reason="Gelu is a new layer for tf 2.4.0 and above." + ) + @pytest.mark.parametrize( + "compute_unit, backend, rank, approximate", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + ), + ) + def test( + self, compute_unit, backend, rank, approximate + ): + shape = np.random.randint(low=2, high=4, size=rank) + input = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + out = tf.keras.activations.gelu(input, approximate=approximate) + model = tf.keras.Model(inputs=[input], outputs=out) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py new file mode 100644 index 00000000..b80e5df1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py @@ -0,0 +1,290 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +import numpy as np +import pytest + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") +from tensorflow.python.framework import dtypes + +import coremltools as ct +import coremltools.models.utils as coremltoolsutils +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import ( + TensorFlowBaseTest, get_tf_node_names) +from coremltools.converters.mil.input_types import RangeDim, TensorType +from coremltools.converters.mil.testing_utils import (compare_backend, + ct_convert) +from coremltools.models.utils import _macos_version + + +def make_tf2_graph(input_types): + """ + Decorator to help construct a TensorFlow 2.x model. + + Parameters + ---------- + input_types: list of tuple or list of list + List of input types. E.g. [(3, 224, 224, tf.int32)] represents one + input with shape (3, 224, 224) and expected data type tf.int32. The + dtype is optional; if it is missing, tf.float32 is used. + + Returns + ------- + list of ConcreteFunction, list of str, list of str + """ + + def wrapper(ops): + input_signature = [] + for input_type in input_types: + if input_type is not None and len(input_type) > 0 and isinstance(input_type[-1], dtypes.DType): + shape, dtype = input_type[:-1], input_type[-1] + else: + shape, dtype = input_type, tf.float32 + input_signature.append(tf.TensorSpec(shape=shape, dtype=dtype)) + + @tf.function(input_signature=input_signature) + def tf2_model(*args): + return ops(*args) + + concrete_func = tf2_model.get_concrete_function() + inputs = get_tf_node_names( + [t.name for t in concrete_func.inputs if t.dtype != dtypes.resource], + mode="input", + ) + outputs = get_tf_node_names( + [t.name for t in concrete_func.outputs], mode="output" + ) + return [concrete_func], inputs, outputs + + return wrapper + + +def run_compare_tf2( + model, + input_dict, + output_names, + inputs_for_conversion=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + frontend="tensorflow", + backend=("neuralnetwork", "fp32"), + debug=False, + atol=1e-04, + rtol=1e-05, + minimum_deployment_target=None, +): + """ + Parameters + ---------- + model: list of tf.ConcreteFunction + List of TensorFlow 2.x concrete functions. + input_dict: dict of (str, np.array) + Dict of name and value pairs representing inputs. + output_names: list of str + List of output node names. + inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects + Defaults to None. It is passed as is to the "inputs" argument of the converter. + compute_unit: Enum[ct.ComputeUnit] + Compute unit for the Core ML model. + frontend_only: bool + If True, skip the prediction call and only validate the conversion. + frontend: str + Frontend to convert from. + backend: tuple of str + Backend and precision to convert to, e.g. ("neuralnetwork", "fp32"). + debug: bool + If True, print verbose information and plot intermediate graphs. + atol: float + The absolute tolerance parameter. + rtol: float + The relative tolerance parameter.
+ minimum_deployment_target: coremltools.target enumeration + The minimum deployment target (spec version) for the mlmodel. + """ + inputs = [] + if inputs_for_conversion is None: + cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource] + for t in cf_inputs: + name = get_tf_node_names(t.name)[0] + shape = [RangeDim() if s is None or s == -1 else s \ + for s in list(t.get_shape())] + inputs.append(TensorType(name=name, shape=shape, + dtype=t.dtype.as_numpy_dtype)) + else: + inputs = inputs_for_conversion + + outputs = [] + for t in output_names: + name = get_tf_node_names(t)[0] + outputs.append(name) + + # get TensorFlow 2.x output as reference and run comparison + tf_input_values = [tf.constant(t) for t in input_dict.values()] + tf_outputs = model[0](*tf_input_values) + if isinstance(tf_outputs, (tuple, list)): + ref = [t.numpy() for t in tf_outputs] + else: + ref = [tf_outputs.numpy()] + expected_outputs = {n: v for n, v in zip(outputs, ref)} + + mlmodel = ct_convert( + model, + source=frontend, + inputs=inputs, + outputs=outputs, + convert_to=backend, + debug=debug, + compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + for k, v in input_dict.items(): + if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer): + input_dict[k] = v.astype(float) # Core ML only accepts floats + + if frontend_only or _macos_version() < (10, 13) \ + or (mlmodel.is_package and _macos_version() < (12, 0)): + return mlmodel._spec, mlmodel, input_dict, None + + pred = None + if not coremltoolsutils._has_custom_layer(mlmodel._spec): + pred = compare_backend( + mlmodel, + input_dict, + expected_outputs, + atol=atol, + rtol=rtol, + also_compare_shapes=True, + dtype=backend[1], + ) + else: + print('Skipping model prediction as it has a custom nn layer!') + return mlmodel._spec, mlmodel, input_dict, pred + + +def run_compare_tf_keras( + model, + input_values, + inputs_for_conversion=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + frontend="tensorflow", + backend=("neuralnetwork", "fp32"), + atol=1e-04, + rtol=1e-05, +): + """ + Parameters + ---------- + model: tf.keras model + A TensorFlow 2.x tf.keras model to convert and compare. + input_values: list of np.array + List of input values in the same order as the input signature. + inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects + Defaults to None. It is passed as is to the "inputs" argument of the converter. + compute_unit: Enum[ct.ComputeUnit] + Compute unit for the Core ML model. + frontend_only: bool + If True, skip the prediction call and only validate the conversion. + frontend: str + Frontend to convert from. + backend: tuple of str + Backend and precision to convert to, e.g. ("neuralnetwork", "fp32"). + atol: float + The absolute tolerance parameter. + rtol: float + The relative tolerance parameter.
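+ + Examples + -------- + A minimal usage sketch (illustrative only; assumes a trivial single-input + model and the default backend): + + model = tf.keras.Sequential( + [tf.keras.layers.Dense(4, batch_input_shape=(1, 8))] + ) + run_compare_tf_keras( + model, + [np.random.rand(1, 8).astype(np.float32)], + backend=("neuralnetwork", "fp32"), + )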
+ """ + mlmodel = ct_convert(model, inputs=inputs_for_conversion, source=frontend, convert_to=backend, + compute_units=compute_unit) + + # assumes conversion preserve the i/o names + proto = mlmodel._spec + inputs = [i.name.split(":")[0].strip() for i in model.inputs] + outputs = [str(o.name) for o in proto.description.output] + + # get tf.keras model output as reference and run comparison + keras_outputs = model(input_values) + if not isinstance(keras_outputs, list): + keras_outputs = [keras_outputs] + ref = [output.numpy() for output in keras_outputs] + expected_outputs = {n: v for n, v in zip(outputs, ref)} + input_key_values = {n: v for n, v in zip(inputs, input_values)} + + if frontend_only or _macos_version() < (10, 13) \ + or (mlmodel.is_package and _macos_version() < (12, 0)): + return proto, mlmodel, input_key_values, None + + pred = None + if not coremltoolsutils._has_custom_layer(proto): + pred = compare_backend( + mlmodel, + input_key_values, + expected_outputs, + atol=atol, + rtol=rtol, + also_compare_shapes=True, + dtype=backend[1] + ) + else: + print('Skipping model prediction as it has a custom nn layer!') + return proto, mlmodel, input_key_values, pred + + +class TensorFlow2BaseTest(TensorFlowBaseTest): + + @staticmethod + def run_compare_tf2(model, + input_dict, + output_names, + inputs_for_conversion=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + frontend="tensorflow", + backend=("neuralnetwork", "fp32"), + debug=False, + atol=1e-04, + rtol=1e-05, + minimum_deployment_target=None,): + res = run_compare_tf2(model, + input_dict, + output_names, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + frontend_only=frontend_only, + frontend=frontend, + backend=backend, + debug=debug, + atol=atol, + rtol=rtol, + minimum_deployment_target=minimum_deployment_target,) + alist = list(res) + alist.append(TensorFlow2BaseTest.testclassname) + alist.append(TensorFlow2BaseTest.testmodelname) + return tuple(alist) + + @staticmethod + def run_compare_tf_keras( + model, + input_values, + inputs_for_conversion=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + frontend="tensorflow", + backend=("neuralnetwork", "fp32"), + atol=1e-04, + rtol=1e-05 + ): + res = run_compare_tf_keras(model, input_values, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + frontend_only=frontend_only, + frontend=frontend, + backend=backend, atol=atol, rtol=rtol) + alist = list(res) + alist.append(TensorFlow2BaseTest.testclassname) + alist.append(TensorFlow2BaseTest.testmodelname) + return tuple(alist) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py new file mode 100644 index 00000000..5f18ff00 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .rewrite_control_flow_functions import (flatten_sub_graph_namespaces, + rewrite_control_flow_functions) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py new file mode 100644 index 00000000..68468055 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py @@ -0,0 +1,561 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools import _logger as logger +from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import ( + connect_edge, connect_edge_at_index, delete_node, disconnect_edge, + replace_dest, replace_node) +from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \ + ParsedTFNode + + +def _rename_node_in_fn(node, new_name, fn): + """ + Rename a node and all its connections. + + Parameters + ---------- + node: ParsedTFNode + Node to rename. + new_name: str + New name of the node. + fn: SSAFunction + Function that contains graph to operate on. + """ + old_name = node.name + node.name = new_name + for i in node.inputs: + idx = fn.graph[i].outputs.index(old_name) + fn.graph[i].outputs[idx] = new_name + if old_name in fn.graph[i].control_outputs: + idx = fn.graph[i].control_outputs.index(old_name) + fn.graph[i].control_outputs[idx] = new_name + + for o in node.outputs: + idx = fn.graph[o].inputs.index(old_name) + fn.graph[o].inputs[idx] = new_name + if old_name in fn.graph[o].control_inputs: + idx = fn.graph[o].control_inputs.index(old_name) + fn.graph[o].control_inputs[idx] = new_name + + for i in node.control_inputs: + if old_name in fn.graph[i].control_outputs: + idx = fn.graph[i].control_outputs.index(old_name) + fn.graph[i].control_outputs[idx] = new_name + + for o in node.control_outputs: + if old_name in fn.graph[o].control_inputs: + idx = fn.graph[o].control_inputs.index(old_name) + fn.graph[o].control_inputs[idx] = new_name + + fn.graph[new_name] = fn.graph.pop(old_name) + + +def _flatten_sub_graph_namespaces(tf_ssa, fn_name): + """ + A pass to flatten namespaces for sub-graphs of the control flow while_loop + op. For example, a while_loop op has two sub-graphs, "cond" and "body"; + all the nodes in those sub-graphs will be prefixed with the sub-graph's + name. This pass is required for converting control flow v2 ops (enabled by + default in TensorFlow 2.0+) as the original sub-graphs will contain + duplicated names. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn_name: str + Name of the function / sub-graph to operate on.
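+ + Examples + -------- + Illustrative renaming (names assumed for the example): a node named "add" + inside the sub-graph "while_body_0" of a while op named "model/while" is + renamed to "model/while/while_body_0/add".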
+ """ + count = 0 + fn = tf_ssa.functions.get(fn_name) + for name, node in fn.graph.copy().items(): + if node.op not in {"StatelessWhile", "While", "StatelessIf", "If"}: + continue + + if node.op in {"StatelessWhile", "While"}: + sub_fn_names = [node.attr.get("cond"), node.attr.get("body")] + else: + sub_fn_names = [node.attr.get("then_branch"), node.attr.get("else_branch")] + + for sf_name in sub_fn_names: + sf = tf_ssa.functions.get(sf_name) + prefix = "{}/{}".format(node.name, sf_name) + + for old_name, n in sf.graph.copy().items(): + _rename_node_in_fn(n, "{}/{}".format(prefix, old_name), sf) + count += 1 + + ios = set(sf.inputs + sf.outputs) + io_name_mappings = {n: "{}/{}".format(prefix, n) for n in ios} + sf.inputs = [io_name_mappings[n] for n in sf.inputs] + sf.outputs = [io_name_mappings[n] for n in sf.outputs] + _flatten_sub_graph_namespaces(tf_ssa, sf_name) + + msg = "flatten_sub_graph_namespaces: {} nodes renamed in '{}'" + logger.info(msg.format(count, sf_name)) + + +def _insert_op(fn, op, name, attr=None): + """ + Create a node with given attributes, then insert to the target graph in + given function. + + Parameters + ---------- + fn: SSAFunction + Function that contains graph to operate on. + op: str + Type of the operation for the new node. + name: str + Name of the new node. + attr: dict or None (optional) + Attributes of the new node. + + Returns + ------- + node: ParsedTFNode + New node object. + """ + node = ParsedTFNode() + node.op = op + node.name = name + if attr is not None: + node.attr = attr + fn.graph[node.name] = node + return node + + +def _insert_function_entry(fn): + return _insert_op(fn=fn, op="function_entry", name="entry") + + +def _insert_return(fn): + return _insert_op(fn=fn, op="return", name="return") + + +def _insert_make_tuple(fn, name=None): + name = "make_tuple" if name is None else name + return _insert_op(fn=fn, op="make_tuple", name=name) + + +def _insert_get_tuple(fn, name, idx): + return _insert_op(fn=fn, op="get_tuple", name=name, attr={"index": idx}) + + +def _rewrite_cond_functions(tf_ssa, fn): + r""" + Rewrite tf.cond's sub-graphs with get_tuple, make_tuple, function_entry and + return ops. This rewrite is required in order to convert functional form + control flow v2 nodes 'StatelessIf' and 'If'. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
+ + Examples + -------- + + Input: + + Before pass "main" graph: + + [const/greater/y] ---------\ + [placeholder/args_0] -> [greater] -> [if] -> [identity] + \------------------/ \--> [identity] + [placeholder/args_1] ----------------/ + + Before pass "then" graph: + + [const/sub/y] ---------------\ + [placeholder/sub_args_0] -> [sub] + [placeholder/sub_args_1] -> [identity] + + Before pass "else" graph: + + [const/add/y] ---------------\ + [placeholder/add_args_0] -> [add] + + [const/mul/y] ---------------\ + [placeholder/add_args_1] -> [mul] + + Output: + + After pass "main" graph: + + [const/greater/y] ---------\ + [placeholder/args_0] -> [greater] -> [make_tuple] -> [if] -> [get_tuple] -> [identity] + \---------------------/ \--> [get_tuple] -> [identity] + [placeholder/args_1] -------------------/ + + After pass "then" graph: + + [const/sub/y] ---------------\ + [entry] -> [get_tuple] -> [placeholder/sub_args_0] -> [sub] -> [make_tuple] -> [return] + -> [get_tuple] -> [placeholder/sub_args_1] -----------------/ + + After pass "else" graph: + + [const/add/y] ---------------\ + [entry] -> [get_tuple] -> [placeholder/add_args_0] -> [add] -> [make_tuple] -> [return] + -> [get_tuple] -> [placeholder/add_args_1] -> [mul] --------/ + [const/mul/y] ---------------/ + + """ + for cond_name, cond_node in fn.graph.copy().items(): + if cond_node.op not in {"StatelessIf", "If"}: + continue + + then_fn_name = cond_node.attr.get("then_branch") + else_fn_name = cond_node.attr.get("else_branch") + + msg = "Rewriting '{}' ({}) sub-graphs: then '{}', else '{}'" + logger.info( + msg.format(cond_node.name, cond_node.op, then_fn_name, else_fn_name) + ) + + then_fn = tf_ssa.functions.get(then_fn_name) + else_fn = tf_ssa.functions.get(else_fn_name) + + # insert function entry nodes + then_entry = _insert_function_entry(then_fn) + else_entry = _insert_function_entry(else_fn) + + # pack node inputs to a single tuple + cond_input = _insert_make_tuple(fn, "make_tuple/{}".format(cond_name)) + for ci in cond_node.inputs: + disconnect_edge(fn.graph, ci, cond_node.name) + connect_edge(fn.graph, ci, cond_input) + connect_edge(fn.graph, cond_input, cond_node.name) + + # unpack node outputs to multiple get_tuples + for i, co in enumerate(cond_node.outputs): + # utilize FunctionDef's ret to make sure function outputs and + # node outputs order matches when multiple outputs are there. + # Fallback to use original cond_node.outputs order if fails. 
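+ # e.g. (names assumed for illustration): if the consumer node reads + # "cond/if:1", the ":1" suffix selects the FunctionDef ret key ending in + # "identity_1", whose value names the branch output to look up.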
+ o_original = fn.graph[co].original_node + if o_original: + c_input = [n for n in o_original.input if str(n).startswith(cond_name)][ + 0 + ] + if ":" in c_input: + identity_postfix = "identity_{}".format(c_input.split(":")[-1]) + else: # access identity "0" + identity_postfix = "identity" + + identity_keys = [t for t in then_fn.ret.keys() if t.endswith(identity_postfix)] + if len(identity_keys) != 1: + raise NotImplementedError("Branch not found.") + + mapped_name = then_fn.ret[identity_keys[0]].split(":")[0] + + if mapped_name in then_fn.outputs: + idx = then_fn.outputs.index(mapped_name) + else: # in else_fn.outputs + idx = else_fn.outputs.index(mapped_name) + else: + idx = i + + cond_output = _insert_get_tuple( + fn, "get_tuple/{}/{}".format(idx, cond_name), idx + ) + edge_idx = fn.graph[co].inputs.index(cond_node.name) + replace_dest(fn.graph, cond_node, co, cond_output) + connect_edge_at_index(fn.graph, cond_output, co, edge_idx) + + # fetch inputs using get_tuple for then branch + for i, ti in enumerate(then_fn.inputs): + then_input = _insert_get_tuple( + then_fn, "get_tuple/{}/{}".format(i, ti), i + 1 + ) + connect_edge(then_fn.graph, then_entry, then_input) + replace_node(then_fn.graph, ti, then_input) + delete_node(then_fn.graph, ti) + + # fetch inputs using get_tuple for else branch + for i, ei in enumerate(else_fn.inputs): + else_input = _insert_get_tuple( + else_fn, "get_tuple/{}/{}".format(i, ei), i + 1 + ) + connect_edge(else_fn.graph, else_entry, else_input) + replace_node(else_fn.graph, ei, else_input) + delete_node(else_fn.graph, ei) + + # returns a tuple of value(s) as output for then branch + then_output = _insert_make_tuple(then_fn) + for to in then_fn.outputs: + if to not in then_fn.graph.keys(): + # from identity, map back to get_tuple node + to = "get_tuple/{}/{}".format(then_fn.inputs.index(to), to) + connect_edge(then_fn.graph, to, then_output.name) + + then_return = _insert_return(then_fn) + connect_edge(then_fn.graph, then_output.name, then_return.name) + + # returns a tuple of value(s) as output for else branch + else_output = _insert_make_tuple(else_fn) + for eo in else_fn.outputs: + if eo not in else_fn.graph.keys(): + # from identity, map back to get_tuple node + eo = "get_tuple/{}/{}".format(else_fn.inputs.index(eo), eo) + connect_edge(else_fn.graph, eo, else_output.name) + + else_return = _insert_return(else_fn) + connect_edge(else_fn.graph, else_output.name, else_return.name) + + +def _eliminate_loop_cond_nodes(tf_ssa, fn): + """ + Eliminate loop condition nodes, such as loop_counters, max_iterations from + the cond sub-graph and body sub-graph of tf.while_loop. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
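+ + Notes + ----- + The first two inputs of the cond and body sub-graphs are assumed to be the + loop-counter and maximum-iterations tensors added by TensorFlow's control + flow v2 lowering; both are removed, and in the body graph any nodes + reachable only from them are removed as well.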
+ + Examples + -------- + + Input: + + Before pass "main" graph: + + [while/maximum_iterations] -----\ + [while/loop_counter] -------> [while] --> [identity] + [placeholder/args_0] ----------/ + + Before pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + [while_maximum_iterations], [while_loop_counter] (not connected) + + Before pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + + [const/add/y] ------------\ + [while_loop_counter] --> [add] + + [while_maximum_iterations] (not connected) + + Output: + + After pass "main" graph: + + [placeholder/args_0] --> [while] --> [identity] + + After pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + After pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + """ + for name, node in fn.graph.copy().items(): + if node.op not in {"StatelessWhile", "While"}: + continue + + cond_fn = tf_ssa.functions.get(node.attr.get("cond")) + body_fn = tf_ssa.functions.get(node.attr.get("body")) + + cond_lc_nodes = {cond_fn.inputs.pop(0), cond_fn.inputs.pop(0)} + logger.info("Removing {} from cond graph".format(cond_lc_nodes)) + for n in cond_lc_nodes: + delete_node(cond_fn.graph, n) + + body_lc_nodes = {body_fn.inputs.pop(0), body_fn.inputs.pop(0)} + q = list(body_lc_nodes) + + # delete entire sub-fn + while len(q) > 0: + n = body_fn.graph[q.pop(0)] + for o in n.outputs: + if o not in body_lc_nodes: + q.append(o) + body_lc_nodes.add(o) + for i in body_fn.graph[o].inputs: + if i not in body_lc_nodes: + q.append(i) + body_lc_nodes.add(i) + + # remove if in outputs + for n in body_lc_nodes: + if n in body_fn.outputs: + msg = "Removing '{}' ({}) from body fn outputs" + logger.info(msg.format(n, body_fn.graph[n].op)) + body_fn.outputs.remove(n) + + logger.info("Removing {} from body graph".format(body_lc_nodes)) + for n in body_lc_nodes: + delete_node(body_fn.graph, n) + + +def _rewrite_while_loop_functions(tf_ssa, fn): + """ + Rewrite tf.while_loop's sub-graphs with get_tuple, make_tuple, + function_entry and return ops. This rewrite is required in order to convert + functional form control flow v2 nodes 'StatelessWhile' and 'While'. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
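+
+    Conceptually, the rewrite gives every sub-graph a single-tuple calling
+    convention (illustrative pseudo-code, not real op syntax):
+
+        t = make_tuple(x0, x1, ...)     # parent packs the loop vars
+        outs = while(t)                 # cond/body consume and produce tuples
+        y_i = get_tuple(outs, index=i)  # parent unpacks the results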
+ + Example + ------- + + Input: + + Before pass "main" graph: + + [placeholder/args_0] --> [while] --> [identity] + + Before pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + Before pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + + Output: + + After pass "main" graph: + + [placeholder/args_0] --> [make_tuple] --> [while] --> [get_tuple] --> [identity] + + After pass "cond" graph: + + [const/mean] ------\ + [entry] -> [get_tuple] -> [placeholder] -> [mean] -> [greater] -> [make_tuple] -> [return] + [const/greater/y] ------------/ + + After pass "body" graph: + + [const/sub/y] ----\ + [entry] -> [get_tuple] -> [placeholder] -> [sub] -> [make_tuple] -> [return] + """ + for while_name, while_node in fn.graph.copy().items(): + if while_node.op not in {"StatelessWhile", "While"}: + continue + + cond_fn_name = while_node.attr.get("cond") + body_fn_name = while_node.attr.get("body") + + msg = "Rewriting '{}' ({}) sub-graphs: cond '{}', body '{}'" + logger.info( + msg.format(while_node.name, while_node.op, cond_fn_name, body_fn_name) + ) + + cond_fn = tf_ssa.functions.get(cond_fn_name) + body_fn = tf_ssa.functions.get(body_fn_name) + + # insert function entry nodes + cond_entry = _insert_function_entry(cond_fn) + body_entry = _insert_function_entry(body_fn) + + # pack node inputs to a single tuple + while_input_tuple = _insert_make_tuple(fn, "make_tuple/{}".format(while_name)) + for wi in while_node.inputs: + disconnect_edge(fn.graph, wi, while_node.name) + connect_edge(fn.graph, wi, while_input_tuple) + connect_edge(fn.graph, while_input_tuple, while_node.name) + + # unpack node outputs to multiple get_tuples + for i, wo in enumerate(while_node.outputs): + # utilize FunctionDef's ret to make sure function outputs and + # node outputs order matches when multiple outputs are there. 
+            o_original = fn.graph[wo].original_node
+            while_input = [
+                n for n in o_original.input if str(n).startswith(while_name)
+            ][0]
+            while_index = while_input.split(":")[-1]
+            # `while_index` is a string, so comparing it to the int 0 would
+            # always be True; mirror the cond rewrite above instead.
+            if ":" in while_input:
+                identity_postfix = "identity_{}".format(while_index)
+            else:  # access identity "0"
+                identity_postfix = "identity"
+
+            identity_keys = [t for t in body_fn.ret.keys() if t.endswith(identity_postfix)]
+            if len(identity_keys) != 1:
+                raise NotImplementedError("Branch not found.")
+
+            mapped_name = body_fn.ret[identity_keys[0]].split(":")[0]
+            idx = body_fn.outputs.index(mapped_name)
+
+            loop_output = _insert_get_tuple(
+                fn, "get_tuple/{}/{}".format(idx, while_input), idx
+            )
+
+            edge_idx = fn.graph[wo].inputs.index(while_node.name)
+            replace_dest(fn.graph, while_node, wo, loop_output)
+            connect_edge_at_index(fn.graph, loop_output, wo, edge_idx)
+
+        # fetch inputs using get_tuple for cond fn
+        for i, ci in enumerate(cond_fn.inputs):
+            cond_input = _insert_get_tuple(cond_fn, "get_tuple/{}/{}".format(i, ci), i)
+            connect_edge(cond_fn.graph, cond_entry, cond_input)
+            replace_node(cond_fn.graph, ci, cond_input)
+            delete_node(cond_fn.graph, ci)
+
+        # fetch inputs using get_tuple for body fn
+        for i, bi in enumerate(body_fn.inputs):
+            new_name = "get_tuple/{}/{}".format(i, bi)
+
+            if bi in body_fn.outputs:  # input is also an output
+                body_fn.outputs[body_fn.outputs.index(bi)] = new_name
+
+            body_input = _insert_get_tuple(body_fn, new_name, i)
+
+            connect_edge(body_fn.graph, body_entry, body_input)
+            replace_node(body_fn.graph, bi, body_input)
+            delete_node(body_fn.graph, bi)
+
+        # returns a tuple of value(s) as output for cond fn
+        cond_output = _insert_make_tuple(cond_fn)
+        for co in cond_fn.outputs:
+            connect_edge(cond_fn.graph, co, cond_output.name)
+
+        cond_return = _insert_return(cond_fn)
+        connect_edge(cond_fn.graph, cond_output.name, cond_return.name)
+
+        # returns a tuple of value(s) as output for body branch
+        body_output = _insert_make_tuple(body_fn)
+
+        for bo in body_fn.outputs:
+            connect_edge(body_fn.graph, bo, body_output.name)
+
+        body_return = _insert_return(body_fn)
+        connect_edge(body_fn.graph, body_output.name, body_return.name)
+
+
+def rewrite_control_flow_functions(tf_ssa):
+    for fn_name, fn in tf_ssa.functions.items():
+        _rewrite_cond_functions(tf_ssa, fn)
+    for fn_name, fn in tf_ssa.functions.items():
+        _eliminate_loop_cond_nodes(tf_ssa, fn)
+        _rewrite_while_loop_functions(tf_ssa, fn)
+
+
+def flatten_sub_graph_namespaces(tf_ssa):
+    _flatten_sub_graph_namespaces(tf_ssa, fn_name="main")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py
new file mode 100644
index 00000000..521d2e46
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools._deps import _HAS_TORCH + +register_torch_op = None + +if _HAS_TORCH: + from .dialect_ops import (torch_tensor_assign, torch_upsample_bilinear, + torch_upsample_nearest_neighbor) + from .torch_op_registry import register_torch_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py new file mode 100644 index 00000000..a7cdff80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py @@ -0,0 +1,495 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from collections import OrderedDict + +import numpy as np +import torch as torch + +from coremltools import _logger as logger +from coremltools._deps import version_lt +from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target +from coremltools.converters.mil.input_types import ImageType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, Program, types + +from .._utils import get_output_names +from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode +from .ops import convert_nodes +from .torch_op_registry import _TORCH_OPS_REGISTRY +from .torchir_passes import ( + flatten_graph_input_values, + flatten_graph_output_values, + generate_tensor_assignment_ops, + remove_getattr_nodes, + transform_inplace_ops, +) + +torch_to_mil_types = { + torch.bool: types.bool, + torch.float16: types.fp16, + torch.float32: types.fp32, + torch.float64: types.fp32, + torch.int32: types.int32, + torch.int64: types.int32, +} + + +mil_to_torch_types = {v: k for k, v in torch_to_mil_types.items()} + + +class TranscriptionContext: + """ + Maintains a map from torch operations to their MIL values + while building the graph. Can be used to process subgraphs recursively + by pushing new context when stepping into a subgraph and popping that + context when stepping out. + """ + + def __init__(self, name=None): + self.name = name if name else "" + self._current_graph = [{}] + + def prepare_for_conversion(self, node: InternalTorchIRNode): + """ + Perform any preparation necessary before node-specific frontend conversion + is invoked. + """ + pass + + def add(self, ssa_var, torch_name=None): + """ + Arguments: + ssa_var: Variable to add to the graph being constructed. + torch_name: Optional unique string identifier of the operation. If + omitted, it will use @ssa_var.name. + """ + if torch_name is None: + torch_name = ssa_var.name + if torch_name in self._current_graph[-1]: + print("Torch var {} is added again.".format(torch_name)) + return + self._current_graph[-1][torch_name] = ssa_var + + def __getitem__(self, torch_name): + """ + Lookup a name in the context. Note that since nested blocks must be + able to access anything that was defined before them, we have to + search all contexts for a name, starting with the most local scope. 
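+
+        Illustrative behavior (a sketch; `ctx` and `var` are hypothetical):
+
+            ctx.add(var, torch_name="x")   # defined in the outer frame
+            ctx.push()
+            ctx["x"]                       # still resolves to var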
+ """ + for idx in reversed(range(len(self._current_graph))): + current_graph = self._current_graph[idx] + if torch_name in current_graph: + return self._current_graph[idx][torch_name] + raise ValueError( + "Torch var {} not found in context {}".format(torch_name, self.name) + ) + + def __contains__(self, torch_name): + """Returns whether or not the torch var exist in context.""" + return torch_name in self._current_graph[-1] + + def push(self, inputs=None): + """ + Add another frame to the context. Optionally provide a tuple of + (name list, Var list) to populate the new context frame. + """ + self._current_graph.append({}) + + if inputs is not None: + if len(inputs[0]) != len(inputs[1]): + raise ValueError("name list and Var list must be the same length") + for name, var in zip(inputs[0], inputs[1]): + self.add(var, torch_name=name) + + def pop(self): + """ + Remove and discard the top context frame. + """ + self._current_graph = self._current_graph[:-1] + + def __str__(self): + _str = "" + for current_graph in reversed(self._current_graph): + __str = "" + for k, v in current_graph.items(): + if hasattr(v, "shape_str"): + shape_str = v.shape_str() + elif hasattr(v, "sym_shape"): + shape_str = v.sym_shape() + else: + shape_str = "None" + __str += "%{} : {}\n".format(k, shape_str) + _str += __str + "\n" + return _str + + def __repr__(self): + return str(self) + + +class TorchConverter: + """ + Class that handles conversion of pytorch models represented in TorchScript + format to the MIL format. + + Models passed to the @TorchConverter go from: + TorchScript -> Expanded/Optimized Torch IR -> Internal Graph -> CoreML SSA + The internal graph representation was added to make testing easier. + """ + + def __init__( + self, + torchscript, + inputs, + outputs=None, + cut_at_symbols=None, + opset_version=None, + ): + """ + Arguments: + torchscript: torch.jit.ScriptModule object representing the model to convert. + inputs: Input values and optional names. See kwarg in load.py for full description. + outputs: List of outputs as ct.InputType. See kwarg in load.py for full description. + cut_at_symbols: A list of internal symbol name strings. Graph conversion will + terminate once these symbols have been generated. For debugging use + only. See kwarg in load.py. + opset_version: An int represents the Core ML opset version. + """ + assert isinstance(torchscript, torch.jit.ScriptModule) + + self.inputs = inputs + for idx, inp in enumerate(self.inputs): + if isinstance(inp, ImageType) and self.inputs[idx].channel_first is None: + self.inputs[idx].channel_first = True + + self.torchscript = torchscript + self.outputs = outputs + self.output_names = get_output_names(self.outputs) + self.opset_version = _target(opset_version) if opset_version is not None else None + self.context = TranscriptionContext() + raw_graph, params_dict = self._expand_and_optimize_ir(self.torchscript) + self.params_dict = params_dict + self.graph = InternalTorchIRGraph( + raw_graph, params_dict, self.inputs, cut_at_symbols + ) + + # TODO (rdar://106161395): Register Torch IR passes and unify them into the pass pipeline. 
+ # Apply Torch IR passes + passes = [ + transform_inplace_ops, + flatten_graph_input_values, + flatten_graph_output_values, + remove_getattr_nodes, + generate_tensor_assignment_ops, + ] + for p in passes: + p(self.graph) + + self.inputs = list(self.graph.inputs.values()) + self._prog = Program() + + @staticmethod + def _check_ops(graph): + """ + Returns the set of ops in @graph that are implemented, and the set + for which no conversion function is registered. @graph can be + either InternalTorchIRGraph or InternalTorchIRBlock. + """ + implemented_ops = set() + missing_ops = set() + for node in graph.nodes: + _add_op = _TORCH_OPS_REGISTRY.get(node.kind, None) + if _add_op is None: + missing_ops.add(node.kind) + else: + implemented_ops.add(node.kind) + for block in node.blocks: + _impl, _miss = TorchConverter._check_ops(block) + implemented_ops.update(_impl) + missing_ops.update(_miss) + return implemented_ops, missing_ops + + @staticmethod + def _create_placeholder(_input): + """ + Converts an InputType into a Placeholder. + + _input: TensorType + """ + shape = _input.shape.symbolic_shape + dtype = _input.dtype + return mb.placeholder(shape, dtype=dtype) + + def check_ops(self): + """ + Returns the set of ops in @self.graph that are implemented, and + the set for which no conversion function is registered. + """ + return TorchConverter._check_ops(self.graph) + + def convert_const(self): + for name, val in self.graph.params.items(): + if not isinstance(val, np.ndarray): + raise ValueError("unsupported class for {} in PyTorch graph: {}".format(name, type(val))) + if val.dtype == np.uint8: + val = val.astype(np.int32) + const = mb.const(val=val, name=name) + self.context.add(const) + + def convert(self): + logger.info("Converting graph.") + + # This will hold the converted model. + prog = self._prog + + # Construct placeholder for input to SSA function + # This is where input renaming occurs + ssa_func_inputs = OrderedDict() + for index, (name, spec) in enumerate(self.graph.inputs.items()): + placeholder = self._create_placeholder(spec) + # Set SSA function input name to user defined name if provided. + if spec.name is not None: + name = spec.name + self.inputs[index].name = name + ssa_func_inputs[name] = placeholder + prog.set_main_input_types(tuple(self.inputs)) + + # Initialize the SSA for conversion + with Function(ssa_func_inputs, opset_version=self.opset_version) as ssa_func: + + # Map internal @self.graph.inputs to user specified @ssa_func_inputs + # If @self.graph.inputs == @ssa_func_inputs this just adds the inputs + # to the context. + for internal_name, users_name in zip( + self.graph.inputs.keys(), ssa_func_inputs.keys() + ): + input_var = ssa_func.inputs[users_name] + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64): + # cast the input var to float32 + # We need to do this because the type inference is very buggy when started from + # float16/float64 typed inputs. Until that is fixed in the following radar + # we cast all inputs of type float16/float64 to float32 as the first step. + # These casts will later get removed, if compute_precision=Float16 is + # provided, which will cause the FP16ComputePrecision pass to run. 
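+                    # (Illustrative consequence: a model traced with float64
+                    # inputs is converted as if those inputs were fp32, just
+                    # as torch_to_mil_types above maps torch.float64 to
+                    # types.fp32.)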
+                    # TODO: remove this when this radar is fixed: rdar://93731970
+                    input_var = mb.cast(x=input_var, dtype="fp32")
+                self.context.add(input_var, torch_name=internal_name)
+
+            self.convert_const()
+
+            # Add the rest of the operations
+            convert_nodes(self.context, self.graph)
+
+            graph_outputs = [self.context[name] for name in self.graph.outputs]
+
+            # An output can be None when it's a None constant, which happens
+            # in Fairseq MT.
+            for g in graph_outputs:
+                if g is None:
+                    msg = "Dropping output {} which is None"
+                    logger.warning(msg.format(g))
+            graph_outputs = [g for g in graph_outputs if g is not None]
+
+            # Output renaming occurs
+            if self.outputs is not None:
+                if len(self.outputs) != len(graph_outputs):
+                    msg = "Number of outputs provided, {}, does not match the number of outputs detected in the model, {}."
+                    raise ValueError(msg.format(
+                        len(self.outputs),
+                        len(graph_outputs),
+                    ))
+            if self.output_names:
+                for index, var in enumerate(graph_outputs):
+                    if self.output_names[index] is not None:
+                        output_rename = self.output_names[index]
+                        var.name = output_rename
+
+            ssa_func.set_outputs(graph_outputs)
+            prog.add_function("main", ssa_func)
+            if self.outputs is not None:
+                prog.set_main_output_types(self.outputs)
+        return prog
+
+    @staticmethod
+    def _jit_pass_lower_graph(graph, torchscript):
+        """
+        This graph pass does a similar thing as torch._C._jit_pass_lower_graph does.
+        It does two things:
+        1. Renames getattr nodes which produce a torch tensor to match the keys in the torch model's state_dict.
+        2. Constructs the params_dict, with keys similar to the state_dict.
+
+        To be more specific, this graph pass traces down a series of GetAttr ops and renames the final node to match the torch model state_dict.
+        It also replaces the node inputs by the first created tensor node with the same name.
+
+        Example:
+            Input graph:
+            graph(%self.1 : __torch__.torch.nn.modules.Sequential, %input.1 : Tensor):
+            %2 : prim::GetAttr[name="linear"](%self.1)
+            %3 : prim::GetAttr[name="weight"](%2)
+            %4 : prim::GetAttr[name="bias"](%2)
+            %5 : prim::GetAttr[name="bias"](%2) # duplicated node
+            %6 : conv(%input.1, %3, %4)
+            %7 : add(%input.1, %5)
+            return (%6, %7)
+
+            Output graph:
+            graph(%self.1 : __torch__.torch.nn.modules.Sequential, %input.1 : Tensor):
+            %2 : prim::GetAttr[name="linear"](%self.1)
+            %linear.weight : prim::GetAttr[name="weight"](%2)
+            %linear.bias : prim::GetAttr[name="bias"](%2)
+            %5 : prim::GetAttr[name="bias"](%2) # duplicated node, it is not used now
+            %6 : conv(%input.1, %linear.weight, %linear.bias)
+            %7 : add(%input.1, %linear.bias) # the second input is replaced
+            return (%6, %7)
+
+        And a dictionary {"linear.weight": ..., "linear.bias": ...} is returned, to record the parameter values.
+        Note that those GetAttr nodes are still in the torch IR graph, but they will be removed in a later
+        graph pass on the coremltools torch internal graph.
+        """
+
+        """
+        Each getattr node corresponds to a torch object in the torch IR,
+        it could be either:
+        1. torch.nn.modules: submodule in a torch model. For instance, a linear layer in a MLP network.
+        2. torch.Tensor: torch model parameters. For instance, weight for a conv layer.
+        3. torch._C.ScriptObject: quantized torch model parameters.
+        For example, in the graph above, %2 is pointing to the __torch__.torch.nn.modules.Sequential.linear torch submodule.
+        node_to_module_map tracks these mappings.
+
+        node_to_prefix_map tracks the name for each module,
+        for example, %2 has the prefix name linear and %3 is linear.weight.
+        These names are also keys in the state_dict.
+        """
+        node_to_module_map = {}
+        node_to_prefix_map = {}
+        first_node_with_prefix = {}
+        replace_input = {}
+
+        base_module_node = list(graph.inputs())[0]
+        node_to_module_map[base_module_node] = torchscript
+        node_to_prefix_map[base_module_node] = ""
+
+        """
+        params_dict will be constructed in this graph pass. It contains all const tensors needed for the graph computation.
+        And the value is validated against the state_dict if the key is present in both dictionaries.
+        In some rare cases, state_dict lacks parameters / buffers, so we still need to go through the whole graph ourselves.
+        """
+        params_dict = {}
+        state_dict = torchscript.state_dict(keep_vars=True)
+
+        def _check_is_tensor(node, module):
+            if not isinstance(module, torch.Tensor):
+                return False
+            if str(node.output().type()) not in ("Tensor", "Optional[Tensor]"):
+                raise TypeError("Type \"{}\" not supported".format(node.output().type()))
+            return True
+
+        def _check_is_quantized_tensor(node, module):
+            if not isinstance(module, torch._C.ScriptObject):
+                return False
+            # We only support ScriptObjects that correspond to quantized packed params.
+            assert "PackedParams" in node.output().type().name()
+            return True
+
+        def _lower_graph_block(graph):
+            for node in list(graph.nodes()):
+
+                for block in node.blocks():
+                    _lower_graph_block(block)
+
+                for idx, _input in enumerate(list(node.inputs())):
+                    if _input in replace_input:
+                        node.replaceInput(idx, replace_input[_input])
+
+                kind = node.kind().split("::")[1].lower()
+                if kind != "getattr":
+                    continue
+
+                _input = node.input()
+                _output = node.output()
+                attr_name = getattr(node, node.kindOf("name"))("name")
+
+                module = getattr(node_to_module_map[_input], attr_name)
+                node_to_module_map[_output] = module
+
+                input_prefix = node_to_prefix_map[_input]
+                prefix = input_prefix + '.' + attr_name if input_prefix != "" else attr_name
+                node_to_prefix_map[_output] = prefix
+
+                is_tensor = _check_is_tensor(node, module)
+                is_quantized_tensor = _check_is_quantized_tensor(node, module)
+
+                if is_tensor or is_quantized_tensor:
+                    if is_tensor and prefix in state_dict:
+                        assert torch.equal(
+                            module, state_dict[prefix]
+                        ), "tensor value not consistent between torch ir and state_dict"
+                    if prefix in params_dict:
+                        assert torch.equal(module, params_dict[prefix])
+                        replace_input[_output] = first_node_with_prefix[prefix]
+                    else:
+                        params_dict[prefix] = module
+                        first_node_with_prefix[prefix] = _output
+                        _output.setDebugName(prefix)
+
+        _lower_graph_block(graph)
+
+        return graph, params_dict
+
+    @staticmethod
+    def _expand_and_optimize_ir(torchscript):
+        """
+        Given a torch.jit.ScriptModule, convert it to an optimized
+        torch._C.Graph and a dict of model parameter names to tensors.
+        """
+        graph = torchscript.forward.graph
+
+        # From PyTorch code: Inline function and method calls.
+        torch._C._jit_pass_inline(graph)
+        # From PyTorch code: This inlines the forked section in the fork()
+        # callsite and replaces uses of the result of wait() calls with the
+        # values produced from the (now-inlined) forked section.
+        torch._C._jit_pass_inline_fork_wait(graph)
+        # Starting from the return node, marks all nodes that feed into the
+        # output, as well as nodes with side effects. Any nodes not marked are
+        # eliminated.
+        torch._C._jit_pass_dce(graph)
+        # From PyTorch code: checks well-formedness and invariants of graph.
+        torch._C._jit_pass_lint(graph)
+        # Replaces a couple specific ops patterns (add, sub, mul, div, chunk).
+ if version_lt(torch, "1.6.0"): + torch._C._jit_pass_canonicalize_ops(graph) + torch._C._jit_pass_lint(graph) + + # From PyTorch code: This pass catches all of the small, easy to catch + # peephole optimizations you might be interested in doing. + # Eliminate no-op 'expand' nodes + # Simplify x.t().t() to x + # pass disabled for v1.6.0 and onwards, wrongly captures the shape of dummy inputs during tracing. + torch._C._jit_pass_peephole(graph, addmm_fusion_enabled=False) + else: + # v1.6.0 pass renamed + torch._C._jit_pass_canonicalize_graph_fuser_ops(graph) + torch._C._jit_pass_lint(graph) + + # From PyTorch docs: Renumber the graph so that all structurally + # equivalent graphs have same numbers. + graph = torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + if version_lt(torch, "1.6.0"): + # v1.6.0 JIT changes disallows pulling list values out of + # prim::Constant. We can only pull scalar values. constant + # propagation removes `listConstruct` and results in list values. + # We disallow constant prop pass to keep them as scalars, and rely + # on our own constant prop to interpret `listConstruct`. + torch._C._jit_pass_constant_propagation(graph) + # NOTE: Don't need another DCE, it's included in constant propagation. + torch._C._jit_pass_lint(graph) + + # Get the params_dict and rename the getattr nodes in the graph + graph, params_dict = TorchConverter._jit_pass_lower_graph(graph, torchscript) + + return graph, params_dict diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py new file mode 100644 index 00000000..101144c6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py @@ -0,0 +1,219 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, get_new_symbol, types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._utils import \ + solve_slice_by_index_shape +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import \ + is_compatible_symbolic_vector + +register_op = SSAOpRegistry.register_op + + +# This file contains the Torch dialect of SSA. Briefly, these ops are only +# understandable in the Torch frontend and not acceptable in the standard op set. +# No backend would support any of the op here. These ops exist to facilitate +# frontend SSA passes, but must be replaced with standard ops during SSA +# passes. + +# All torch op must start with 'torch_' prefix. + +# torch_upsample_nearest_neighbor is dealing with upsample layer which has flexible input shape, +# and recompute_scale_factor is set to True in the original torch layer. +@register_op(namespace="torch") +class torch_upsample_nearest_neighbor(Operation): + """ + Upsample the spatial dimensions (last two dimensions) of the input by + scale factors using nearest-neighbor interpolation. + It corresponds to `torch.nn.functional.interpolate` function with `mode=nearest`, + `recompute_scale_factor=True`, and input with flexible shape. 
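+    For example (illustrative), the standard PyTorch call that lowers to
+    this op when the input spatial size is flexible is:
+
+        torch.nn.functional.interpolate(
+            x, scale_factor=2.0, mode="nearest", recompute_scale_factor=True
+        )
+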
+    source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+
+    Parameters
+    ----------
+    x: tensor<[b, C, H1, W1], T> (Required)
+        * Must be rank ``4``.
+    output_height: i32
+        * Output height for the height dimension.
+    output_width: i32
+        * Output width for the width dimension.
+
+    Returns
+    -------
+    tensor<[b, C, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = output_height
+        * ``W2`` = output_width
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        output_height=TensorInputType(type_domain=types.int32),
+        output_width=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "torch_upsample_nearest_neighbor" op must have rank 4'
+            )
+        ret_shape = list(self.x.shape)
+        ret_shape[2] = get_new_symbol()
+        ret_shape[3] = get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+# torch_upsample_bilinear is dealing with upsample layer which has flexible input shape,
+# and recompute_scale_factor is set to True in the original torch layer.
+@register_op(namespace="torch")
+class torch_upsample_bilinear(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input by
+    scale factors using bilinear interpolation.
+    It corresponds to `torch.nn.functional.interpolate` function with `mode=bilinear`,
+    `recompute_scale_factor=True`, and input with flexible shape.
+    source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+
+    Parameters
+    ----------
+    x: tensor<[b, C, H1, W1], T> (Required)
+        * Must be rank ``4``.
+    output_height: i32
+        * Output height for the height dimension.
+    output_width: i32
+        * Output width for the width dimension.
+    align_corners: const (Optional)
+        * The `align_corners` parameter for the original torch op.
+
+    Returns
+    -------
+    tensor<[b, C, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = output_height
+        * ``W2`` = output_width
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        output_height=TensorInputType(type_domain=types.int32),
+        output_width=TensorInputType(type_domain=types.int32),
+        align_corners=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            align_corners=True,
+        )
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "torch_upsample_bilinear" op must have rank 4'
+            )
+        ret_shape = list(self.x.shape)
+        ret_shape[2] = get_new_symbol()
+        ret_shape[3] = get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+# torch_tensor_assign is dealing with the tensor assignment operation
+@register_op(namespace="torch")
+class torch_tensor_assign(Operation):
+    """
+    Method for tensor value assignment via indexing and slicing.
+    Suppose we have a tensor ``x``; this method achieves:
+    ``x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] = value``
+
+    Parameters
+    ----------
+    data: tensor<*?, T> (Required)
+        * Input tensor.
+    updates: tensor<\*K, T> (Required)
+        * Value tensor to be inserted.
+        * The shape of the updates tensor must match the slicing result of the input data.
+    begin: tensor<[rank(x)], i32> (Required)
+        * Starting index for the dimension of slicing.
+ end: tensor<[rank(x)], i32> (Required) + * Ending index for the dimension of slicing. + stride: tensor<[rank(x)], i32> (Optional) + * Default as all ``1``s. + * Stride for the dimension of slicing. + begin_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``begin_mask[i]==True``, neglect ``begin[i]``, and set ``begin[i]`` to ``0``. + end_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``end_mask[i]==True``, neglect ``end[i]``, and set ``end[i]`` to ``x.shape[i]``. + squeeze_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``squeeze_mask[i]==true``, neglect ``end[i]``, and do the pure index at ``begin[i]``. + + Returns + ------- + tensor<*?, T> + - Scalar or tensor. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + updates=TensorInputType(type_domain="T"), + begin=TensorInputType(type_domain=types.int32), + end=TensorInputType(type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + begin_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + end_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + squeeze_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + stride=None, + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + + def type_inference(self): + # Verify the updates and the data slicing have the same shape + begin = self.begin.val + end = self.end.val + data_rank = self.data.rank + stride = self.stride.val if self.stride is not None else [1] * data_rank + begin_mask = ( + self.begin_mask.val if self.begin_mask is not None else [False] * data_rank + ) + end_mask = self.end_mask.val if self.end_mask is not None else [False] * data_rank + squeeze_mask = ( + self.squeeze_mask.val if self.squeeze_mask is not None else [False] * data_rank + ) + data_shape = self.data.shape + expected_updates_shape = tuple(solve_slice_by_index_shape(data_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask)) + if not is_compatible_symbolic_vector(expected_updates_shape, self.updates.shape): + raise ValueError("The updates tensor should have shape {}. Got {}".format(expected_updates_shape, self.updates.shape)) + return self.data.sym_type diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py new file mode 100644 index 00000000..76633f87 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py @@ -0,0 +1,336 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import torch + +from collections import OrderedDict +from itertools import islice + +_DEFAULT_OP_NAMESPACES = set(["aten", "prim"]) + + +def _make_ssa_name(name): + """ + Converts a symbol name (string) into an SSA name, by prepending '%'. + Only used for pretty printing the graph. 
+ """ + if name is None: + return "None" + return "%" + name + + +def _ssa_name_list(names): + """ + Take a list of symbol names (strings) and return them as SSA names. Only + used for pretty printing the graph. + """ + return [_make_ssa_name(x) for x in names] + + +def _find_new_name(old_name, node_names): + """ + Disambiguate a node's name from a list of existing node names by adding + successively larger integers. + """ + count = 0 + new_name = old_name + "." + str(count) if count != 0 else old_name + while new_name in node_names: + count += 1 + new_name = old_name + "." + str(count) + return new_name + + +def _replace_in_list(ls, old_val, new_val): + """Helper function to replace a value in a list.""" + try: + idx = ls.index(old_val) + except ValueError: + pass + else: + ls[idx] = new_val + + +class InternalTorchIRBlock: + """ + coremltools internal representation of a torch IR block. + """ + + def __init__(self, raw_block=None, parent=None, nodes=None, inputs=None, outputs=None): + """" + Arguments: + raw_block: The torch._C.Block to convert, or None. + parent: The InternalTorchIRNode this block belongs to. + nodes: If @raw_block is None, the list of InternalTorchIRNodes in the block + inputs: If @raw_block is None, the list of input symbols. + outputs: If @raw_block is None, the list of output symbols. + """ + + self.nodes = [] + node_names = set() + self.inputs = [] + self.outputs = [] + self.parent = parent + + if raw_block: + # Add nodes + for raw_node in raw_block.nodes(): + new_node = InternalTorchIRNode(raw_node, parent=self) + if new_node.name == new_node.kind: + new_node.name = _find_new_name(new_node.name, node_names) + self.nodes.append(new_node) + node_names.add(new_node.name) + + # Add inputs + for inp in raw_block.inputs(): + self.inputs.append(inp.debugName()) + + # Add outputs + for outp in raw_block.outputs(): + self.outputs.append(outp.debugName()) + else: + self.nodes = nodes + self.inputs = inputs + self.outputs = outputs + + def __str__(self, indent=2): + indent_str = " " * indent + graph_str = "{}block({}):\n".format( + indent_str, ", ".join(_ssa_name_list(self.inputs)) + ) + graph_str += "{}\n".format(indent_str).join( + [x.__str__(indent=indent + 2) for x in self.nodes] + ) + graph_str += "\n{}return ({})".format( + indent_str, ", ".join(_ssa_name_list(self.outputs)) + ) + return graph_str + + def __repr__(self): + return str(self) + + def replace_name(self, old_name, new_name): + """Replaces all instances of @old_name with @new_name in @self.""" + + # Replace graph inputs/outputs + _replace_in_list(self.inputs, old_name, new_name) + _replace_in_list(self.outputs, old_name, new_name) + + for node in self.nodes: + node.replace_name(old_name, new_name) + + +class InternalTorchIRNode: + """ + coremltools internal representation of a torch IR node. + Can construct itself from a provided torchIR node or manually constructed with + args for testing. + + See InternalTorchIRGraph for the motivation behind this structure. + """ + + def __init__( + self, node=None, parent=None, attr=None, inputs=None, outputs=None, kind=None, blocks=None, + ): + """ + Arguments: + node: The torch._C.Node to convert, or None. + parent: The InternalTorchIRGraph/Block this node belongs to. + attr: If @node is not specified, the dict of named attributes. + inputs: If @node is not specified, the list of input symbols. + outputs: If @node is not specified, the list of output symbols. + kind: If @node is not specified, the kind (op) of the node. 
+            blocks: If @node is not specified, the list of InternalTorchIRBlock.
+        """
+
+        self.parent = parent
+        if node is not None:
+            self.inputs = [_input.debugName() for _input in node.inputs()]
+            self.outputs = [output.debugName() for output in node.outputs()]
+            namespace = node.kind().split("::")[0].lower()
+            if namespace in _DEFAULT_OP_NAMESPACES:
+                # We conventionally skip the aten/prim namespaces in our naming.
+                self.kind = node.kind().split("::")[-1].lower()
+            else:
+                self.kind = node.kind().lower()
+            self.blocks = [InternalTorchIRBlock(raw_block=b, parent=self) for b in node.blocks()]
+            self.attr = {
+                name: getattr(node, node.kindOf(name))(name)
+                for name in node.attributeNames()
+            }
+            if "value" not in self.attr:
+                self.attr["value"] = None
+            # If the output is boolean, explicitly cast it so type inference
+            # will work correctly.
+            if len(self.outputs) == 1 and next(node.outputs()).type().str() == "bool":
+                self.attr["value"] = bool(self.attr["value"])
+        else:
+            self.inputs = inputs
+            self.outputs = outputs
+            self.kind = kind
+            self.blocks = blocks if blocks is not None else []
+            self.attr = attr if attr is not None else {"value": None}
+        # On rare occasions, a node has no outputs. In that case, the node's
+        # name will be its kind. However, this no longer guarantees the node's
+        # name is unique. It will be up to the graph constructing the node to
+        # make sure names are unique.
+        self.name = self.outputs[0] if len(self.outputs) > 0 else self.kind
+
+    def __str__(self, indent=2):
+        node_str = " " * indent + "{} = {}".format(
+            ", ".join(_ssa_name_list(self.outputs)), self.kind
+        )
+        node_str += "[{}]".format(
+            ", ".join(
+                ["{}={}".format(n, v) for n, v in self.attr.items() if v is not None]
+            )
+        )
+        node_str += "({})".format(", ".join(_ssa_name_list(self.inputs)))
+        for b in self.blocks:
+            node_str += "\n" + b.__str__(indent=indent + 2)
+        return node_str
+
+    def __repr__(self):
+        return str(self)
+
+    def replace_name(self, old_name, new_name):
+        """Replaces all instances of @old_name with @new_name in @self."""
+
+        _replace_in_list(self.inputs, old_name, new_name)
+        _replace_in_list(self.outputs, old_name, new_name)
+
+        if self.name == old_name:
+            self.name = new_name
+        for block in self.blocks:
+            block.replace_name(old_name, new_name)
+
+
+class InternalTorchIRGraph:
+    """
+    CoreML internal representation of a torch IR graph. A torch._C.Graph
+    object is not an ideal structure to use in converting to CoreML. Conversion
+    to an InternalTorchIRGraph is inserted between the original graph and the
+    final CoreML model to address several issues:
+        1. A torch._C.graph is hard to work with. For example, its .inputs()
+           and .outputs() functions return iterators, so the only way to
+           determine the number of inputs/outputs is by counting to the end.
+           There are other examples of why the torch structure is hard to work
+           with, and this structure alleviates those issues.
+        2. torch._C.graph is an internal API and so we can't count on its
+           stability. By inserting a layer in between, we can handle any changes
+           to torch._C.graph here and isolate the ops code that processes the
+           graph.
+        3. torch._C.graph does not expose a Python constructor. This makes
+           it impossible to write unit tests that isolate specific ops since
+           they have to come from actually converting a PyTorch graph. With an
+           internal structure, we can directly build the test cases we need for
+           unit testing.
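+
+    For example (an illustrative sketch of point 3), a tiny graph can be
+    built directly for a unit test without any torch.jit machinery:
+
+        node = InternalTorchIRNode(kind="add", inputs=["x", "y"], outputs=["z"])
+        graph = InternalTorchIRGraph(nodes=[node], params={},
+                                     inputs=OrderedDict(), outputs=["z"])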
+ """ + + def __init__( + self, raw_graph=None, params_dict=None, input_values=None, cut_at_symbols=None, + nodes=None, params=None, inputs=None, outputs=None, + ): + """ + Arguments: + raw_graph: raw_graph: The torch._C.Graph to convert, or None. + params_dict: A dictionary mapping graph parameter names to tensors. + Must be given if @raw_graph is not None. + input_values: A list of inputs to the graph. Must be given is + @raw_graph if not None. + cut_at_symbols: The list of desired outputs from the graph. Symbols + must be present in the graph. For debugging use only. Can only + be given if @raw_graph is not None. + nodes: If @raw_graph is None, the list of InternalTorchIRNodes in + the graph. + params: If @raw_graph is None, the dict mapping parameter names to + their numpy value. + inputs: If @raw_graph is None, the OrderedDict mapping input names + to their example values. + outputs: list[str], If @raw_graph is None, the list of outputs from the graph. + """ + + self.nodes = [] + node_names = set() + self.params = {} + self.inputs = OrderedDict() + self.outputs = [] + + if raw_graph is not None: + # Add nodes + for raw_node in raw_graph.nodes(): + new_node = InternalTorchIRNode(raw_node, parent=self) + if new_node.name == new_node.kind: + new_node.name = _find_new_name(new_node.name, node_names) + self.nodes.append(new_node) + node_names.add(new_node.name) + + # Add params + for name, param in params_dict.items(): + if isinstance(param, torch.Tensor): + value = param.detach().cpu().numpy() + else: + value = param + self.params[name] = value + + # Add inputs + # The first element of the raw_graph.inputs() is the 'self' of the module, which is not used. + graph_inputs = list(raw_graph.inputs())[1:] + for index, _input in enumerate(islice(graph_inputs, len(input_values))): + name = _input.debugName() + value = input_values[index] + self.inputs[name] = value + + # Add outputs, cutting if @cut_at_symbols is set + output_names = cut_at_symbols + if output_names is None: + output_names = [x.debugName() for x in raw_graph.outputs()] + for output in output_names: + self.outputs.append(output) + else: + self.nodes = nodes + self.params = params + self.inputs = inputs + self.outputs = outputs + + def __str__(self): + graph_str = "graph(\n" + graph_str += self._format_inputs(self.inputs, unpack=True) + graph_str += self._format_inputs(self.params) + graph_str += "):\n" + graph_str += "\n".join([str(x) for x in self.nodes]) + "\n" + graph_str += "return ({})".format(", ".join(_ssa_name_list(self.outputs))) + return graph_str + + def _format_inputs(self, inputs, unpack=False): + def tensor_str(x): + try: + return "Tensor{}".format( + tuple(list(x.shape.shape if unpack else x.shape) + [str(x.dtype)]) + ) + except: + + return "Custom Params({})".format(type(x)) + + inp_str = "" + for k, v in inputs.items(): + if isinstance(v, (tuple, list)): + shape_str = "({})".format(", ".join([tensor_str(x) for x in v])) + else: + shape_str = tensor_str(v) + inp_str += " {} : {},\n".format(_make_ssa_name(k), shape_str) + return inp_str + + def __repr__(self): + return str(self) + + def replace_name(self, old_name, new_name): + """Replaces all instances of @old_name with @new_name in @self.""" + + # Replace graph inputs/outputs + _replace_in_list(self.inputs, old_name, new_name) + _replace_in_list(self.outputs, old_name, new_name) + + for node in self.nodes: + node.replace_name(old_name, new_name) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py new file mode 100644 index 00000000..c95f2777 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py @@ -0,0 +1,112 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os.path as _os_path + +import torch as _torch + +from coremltools import _logger as logger +from coremltools.converters.mil.input_types import InputType, TensorType + +from .converter import TorchConverter, torch_to_mil_types + + +def load(model_spec, inputs, specification_version, + debug=False, outputs=None, cut_at_symbols=None, + **kwargs): + """ + Convert PyTorch model to mil CoreML format. + + Parameters + ---------- + model_spec: String path to .pt file, or a TorchScript object representing + the model to convert. + inputs: Can be a singular element or list of elements of the following form + 1. Any subclass of InputType + 2. torch.Tensor (only shape and dtype will be used) + 3. list of (1. or 2.) + Inputs are parsed in the flattened order that the model accepts them. + If names are not specified: input keys for calling predict on the converted model + will be internal symbols of the input to the graph. + User can specify a subset of names. + debug: bool, optional. Defaults to False. + This flag should generally be False except for debugging purposes + for diagnosing conversion errors. Setting this flag to True will + print the list of supported and unsupported ops found in the model + if conversion fails due to an unsupported op. + outputs (optional): list[ct.InputType] or None + list of either ct.TensorTypes or ct.ImageTypes (both of which are child classes of InputType) + This is the value of the "outputs" argument, passed on by the user in "coremltools.convert" API. + cut_at_symbols (optional): List of internal symbol name strings. Graph conversion will + terminate once these symbols have been generated. For debugging use + only. + """ + torchscript = _torchscript_from_model(model_spec) + + if hasattr(torchscript, 'training') and torchscript.training: + logger.warning("Model is not in eval mode. " + "Consider calling '.eval()' on your model prior to conversion") + if type(torchscript) == _torch.jit._script.RecursiveScriptModule: + logger.warning("Support for converting Torch Script Models is experimental. 
" + "If possible you should use a traced model for conversion.") + + inputs = _convert_to_torch_inputtype(inputs) + converter = TorchConverter( + torchscript, + inputs, + outputs, + cut_at_symbols, + specification_version, + ) + return _perform_torch_convert(converter, debug) + + +def _torchscript_from_model(model_spec): + if isinstance(model_spec, str) and (model_spec.endswith(".pt") or model_spec.endswith(".pth")): + filename = _os_path.abspath(model_spec) + return _torch.jit.load(filename) + elif isinstance(model_spec, _torch.jit.ScriptModule): + return model_spec + else: + raise TypeError( + "@model must either be a PyTorch .pt or .pth file or a TorchScript object, received: {}".format( + type(model_spec) + ) + ) + +def _convert_to_torch_inputtype(inputs): + input_type = [] + for _input in inputs: + if isinstance(_input, (list, tuple)): + input_type.append(_convert_to_torch_inputtype(_input)) + elif isinstance(_input, InputType): + if _input.shape is None: + raise ValueError("'shape' must be provided in the 'inputs' argument for pytorch conversion") + input_type.append(_input) + elif isinstance(_input, _torch.Tensor): + input_type.append( + TensorType( + shape=_input.shape, dtype=torch_to_mil_types[_input.dtype] + ) + ) + else: + raise ValueError( + "Unknown type {} for conversion to InputType.".format(type(_input)) + ) + return input_type + +def _perform_torch_convert(converter, debug): + try: + prog = converter.convert() + except RuntimeError as e: + if debug and "convert function" in str(e): + implemented, missing = converter.check_ops() + print("the following model ops are IMPLEMENTED:") + print("\n".join([" " + str(x) for x in sorted(implemented)])) + print("the following model ops are MISSING:") + print("\n".join([" " + str(x) for x in sorted(missing)])) + raise e + + return prog diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py new file mode 100644 index 00000000..b882b234 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py @@ -0,0 +1,5734 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import builtins +import math as _math +import numbers +from collections.abc import Iterable +from typing import List, Optional + +import numpy as _np +import torch +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import ( + AvailableTarget as target, +) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Symbol, types +from coremltools.converters.mil.mil.block import ( + is_current_opset_version_compatible_with, +) +from coremltools.converters.mil.mil.ops.defs._utils import ( + MAX_SIZE_CONSTANT_FOLDING, promote_input_dtypes, + solve_slice_by_index_shape) +from coremltools.converters.mil.mil.types import is_bool, nptype_from_builtin +from coremltools.converters.mil.mil.types.symbolic import ( + any_symbolic, + is_symbolic, +) +from coremltools.converters.mil.mil.var import ListVar, Var + +from .._utils import value_at, build_einsum_mil +from .torch_op_registry import _TORCH_OPS_REGISTRY, register_torch_op + +# The pytorch args for many of the below ops were sourced from +# https://github.com/pytorch/pytorch/blob/d971007c291c0ead1003d12cd553d18ddb582207/torch/csrc/jit/mobile/register_mobile_ops.cpp#L216 + + +# Max int64 value. Used as a default value in many PyTorch functions. +PYTORCH_DEFAULT_VALUE = 2**63 - 1 + +VALUE_CLOSE_TO_INFINITY = 1e+38 + + +def _all_outputs_present(context, graph): + """ + Returns true if all the symbols in the graph's output list are + present in context. + """ + for outp in graph.outputs: + try: + context[outp] + except ValueError: + return False + return True + + +def convert_nodes(context, graph): + """ + Iterate over the nodes of a graph or block and convert to MIL. + + Arguments: + context: A TranscriptionContext object to pull node inputs and + assign node outputs. + graph: An InternalTorchIRGraph or InternalTorchIRBlock object. + """ + for node in _tqdm(graph.nodes, desc="Converting PyTorch Frontend ==> MIL Ops", unit=" ops"): + op_lookup = node.kind + if op_lookup.startswith("__") and op_lookup.endswith("__"): + # Some ops may have double underscore, such as `__and__`. + op_lookup = op_lookup[2:-2] + elif op_lookup.endswith("_"): + # This is an "in place" op. + # Look up the standard op instead by removing underscore. + op_lookup = op_lookup[:-1] + add_op = _TORCH_OPS_REGISTRY.get(op_lookup, None) + + logger.info("Converting op {} : {}".format(node.name, node.kind)) + if add_op is None: + raise RuntimeError( + "PyTorch convert function for op '{}' not implemented.".format(node.kind) + ) + + context.prepare_for_conversion(node) + add_op(context, node) + + # We've generated all the outputs the graph needs, terminate conversion. + if _all_outputs_present(context, graph): + break + + +def convert_block(context, block, inputs): + """Convert a block (sub-graph) to MIL. Conversion happens within a new + context frame. + + Arguments: + context: A TranscriptionContext object to pull node inputs and + assign node outputs. + block: An InternalTorchIRBlock object. + inputs: List of Vars from the outer context that map to the block's + expected inputs. The number of inputs provided must match the + number expected by the block. + """ + + assert len(block.inputs) == len(inputs) + + # Start a new context frame. 
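+    # (push()/pop() bracket the block so names defined inside it cannot
+    # leak into the enclosing frame; see TranscriptionContext.push/pop.)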
+ context.push((block.inputs, inputs)) + + # Add the block ops. + convert_nodes(context, block) + + # Collect the block outputs. + outputs = [context[outp] for outp in block.outputs] + + # Return to the previous context frame. + context.pop() + return outputs + + +# Some ops will receive a dtype input as an integer +# which maps to a torch dtype. The below mapping was found by +# converting test models with different dtypes passed to ones. +NUM_TO_TORCH_DTYPE = { + 0: torch.uint8, + 1: torch.int8, + 2: torch.int16, + 3: torch.int32, + 4: torch.int32, + 5: torch.float16, + 6: torch.float32, + 7: torch.float32, + 11: torch.bool, + 12: torch.qint8, + 13: torch.quint8, +} + +NUMPY_DTYPE_TO_TORCH_NUM = { + _np.uint8: 0, + _np.int8: 1, + _np.int16: 2, + _np.int32: 3, + _np.int64: 4, + _np.float16: 5, + _np.float32: 6, + _np.float64: 7, + bool: 11, +} + +NUM_TO_NUMPY_DTYPE = { + 0: _np.uint8, + 1: _np.int8, + 2: _np.int16, + 3: _np.int32, + 4: _np.int32, + 5: _np.float16, + 6: _np.float32, + 7: _np.float32, + 11: bool, +} + +NUM_TO_DTYPE_STRING = { + 3: "int32", + 4: "int32", + 5: "fp16", + 6: "fp32", + 7: "fp32", + 11: "bool", +} + +TYPE_TO_DTYPE_STRING = { + types.bool: "bool", + types.fp16: "fp16", + types.fp32: "fp32", + types.int32: "int32", +} + + +def _get_inputs(context, node, expected=None, min_expected=None) -> List[Var]: + """ + Look up a node's inputs in @context and return them as a list. If + @expected is not None, also verifies the number of inputs matches the + value of @expected. + """ + inputs = [context[name] for name in node.inputs] + if expected is not None: + expected = [expected] if not isinstance(expected, (list, tuple)) else expected + + if len(inputs) not in expected: + raise ValueError( + "node {} ({}) got {} input(s), expected {}".format( + node.name, node.kind, len(inputs), expected + ) + ) + if min_expected is not None: + if len(inputs) < min_expected: + raise ValueError( + "node {} ({}) got {} input(s), expected minimum {} inputs".format( + node.name, node.kind, len(inputs), min_expected + ) + ) + + return inputs + + +def _list_select(shape_var, index): + """ + Sometimes we need to select a specific item from a list. If that item + is known at compile time, extract it as a const. Otherwise, if it's + symbolic, use gather. + """ + if shape_var.can_be_folded_to_const(): + res = mb.const(val=shape_var.val[index]) + else: + res = mb.gather(x=shape_var, indices=index) + return res + + +def _construct_constant(val, name): + # Converter cannot handle torch tensors. + if isinstance(val, torch.Tensor): + val = val.cpu().numpy() + + # MIL casts ints to int32, which can't represent PyTorch's default value. + # So we instead represent it with None, and any ops that might get the + # value will check for None instead. + if isinstance(val, int) and val == PYTORCH_DEFAULT_VALUE: + val = None + + # Pytorch uses inf + if val is not None and isinstance(val, numbers.Number) and _np.isinf(val): + if val < 0: # neg inf + # most negative number in fp32 + val = -3.4e+38 + else: # positive inf + val = 3.4e+38 + if val is None: + return None + else: + return mb.const(val=val, name=name) + + +@register_torch_op +def affine_grid_generator(context, node): + # rdar://73165386 (Improve error handling of coremltools "affine" op PyTorch conversion.) 
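+    # Illustrative origin of this op pair (assuming the standard PyTorch idiom):
+    #     grid = torch.nn.functional.affine_grid(theta, size)
+    #     out = torch.nn.functional.grid_sample(x, grid)
+    # Core ML's `affine` op needs both pieces at once, so this function only
+    # stashes theta/size/align_corners as consts; the real op is emitted by
+    # `grid_sampler` below.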
+
+    affine_op_name = node.name
+    theta, size, align_corners = _get_inputs(context, node, expected=3)
+
+    # note: only add consts here as PyTorch uses affine_grid + grid_sampler together
+    is_theta_const = theta.val is not None
+    if is_theta_const:
+        context.add(mb.const(val=theta.val, name="{}_theta".format(affine_op_name)))
+    else:  # theta is dynamic input, keep track of its name
+        context.add(mb.const(val=theta.name, name="{}_theta".format(affine_op_name)))
+
+    context.add(mb.const(val=size.val, name="{}_size".format(affine_op_name)))
+    context.add(mb.const(val=align_corners.val, name="{}_align_corners".format(affine_op_name)))
+
+
+@register_torch_op
+def grid_sampler(context, node):
+    affine_op_name = node.inputs[1]
+    # https://github.com/pytorch/pytorch/blob/00d432a1ed179eff52a9d86a0630f623bf20a37a/aten/src/ATen/native/GridSampler.h#L10-L11
+    m_mode = {0: "bilinear", 1: "nearest"}
+    m_padding_mode = {0: "constant", 1: "border", 2: "reflection"}
+
+    # add `resample` if grid/coordinates is in input, otherwise,
+    # add `affine` to generate grid from `affine_grid_generator`.
+    if affine_op_name in context:  # add `resample` op
+        inputs = _get_inputs(context, node, expected=5)
+        sampling_mode = m_mode[inputs[2].val]
+        padding_mode = m_padding_mode[inputs[3].val]
+        align_corners = inputs[4].val
+
+        # When align_corners=False, padding_mode "reflection" corresponds to Core ML's "symmetric"
+        if padding_mode == "reflection" and align_corners is False:
+            padding_mode = "symmetric"
+
+        x = mb.resample(
+            x=inputs[0],
+            coordinates=inputs[1],
+            sampling_mode=sampling_mode,
+            padding_mode=padding_mode,
+            padding_value=0.0,
+            coordinates_mode="normalized_minus_one_to_one",
+            align_corners=align_corners,
+            name=node.name,
+        )
+        context.add(x)
+    else:  # add `affine` op instead
+        x = context[node.inputs[0]]
+        # inputs from `affine_grid_generator`
+        affine_theta = context["{}_theta".format(affine_op_name)]
+        affine_size = context["{}_size".format(affine_op_name)]
+        affine_align_corners = context["{}_align_corners".format(affine_op_name)]
+
+        # affine_theta.val is either name string (dynamic input) or np.ndarray (static values)
+        # see `affine_grid_generator` for details.
+        is_theta_const = not isinstance(affine_theta.val, str)
+        if is_theta_const:
+            transform_matrix = _np.reshape(affine_theta.val, (affine_theta.shape[0], 6))
+        else:  # theta is dynamic input, add `reshape` op to PyMIL
+            transform_matrix = mb.reshape(
+                x=context[affine_theta.val],
+                shape=(-1, 6),
+                name=node.name + "_theta_reshape",
+            )
+
+        # inputs from `grid_sampler`
+        sampling_mode = m_mode[context[node.inputs[2]].val]
+        padding_mode = m_padding_mode[context[node.inputs[3]].val]
+        align_corners = context[node.inputs[4]].val
+
+        if sampling_mode != "bilinear":
+            raise NotImplementedError("'sampling_mode' not supported.")
+
+        if padding_mode != "constant":
+            raise NotImplementedError("'padding_mode' not supported.")
+
+        if affine_align_corners.val != align_corners:
+            raise ValueError(
+                "Op 'affine_grid_generator' and 'grid_sampler' must agree on 'align_corners'."
+ ) + + x = mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=affine_size.val[2], + output_width=affine_size.val[3], + sampling_mode=sampling_mode, + padding_mode=padding_mode, + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=align_corners, + name=node.name, + ) + context.add(x) + + +@register_torch_op +def silu(context, node): + inputs = _get_inputs(context, node, expected=1) + x = mb.silu(x=inputs[0], name=node.name) + context.add(x) + + +@register_torch_op +def constant(context, node): + assert len(node.inputs) == 0 + assert len(node.outputs) == 1 + + name = node.name + val = node.attr["value"] + + const = _construct_constant(val, name) + context.add(const, torch_name=name) + + +@register_torch_op +def cosine_similarity(context, node): + inputs = _get_inputs(context, node, expected=4) + dim = inputs[-2].val + eps = inputs[-1].val + xy = mb.mul(x=inputs[0], y=inputs[1]) + sum_xy = mb.reduce_sum(x=xy, axes=[dim]) + + xx = mb.mul(x=inputs[0], y=inputs[0]) + sum_xx = mb.reduce_sum(x=xx, axes=[dim]) + yy = mb.mul(x=inputs[1], y=inputs[1]) + sum_yy = mb.reduce_sum(x=yy, axes=[dim]) + + mul_sum_xy = mb.mul(x=sum_xx, y=sum_yy) + div_12 = mb.maximum(x=mul_sum_xy, y=eps * eps) + div_sqrt = mb.sqrt(x=div_12) + + cs = mb.real_div(x=sum_xy, y=div_sqrt, name=node.name) + context.add(cs) + + +@register_torch_op +def selu(context, node): + ALPHA = 1.6732632423543772 + SCALE = 1.0507009873554805 + + x = _get_inputs(context, node, expected=1)[0] + x = mb.elu(x=x, alpha=ALPHA) + x = mb.mul(x=x, y=SCALE, name=node.name) + context.add(x) + + +@register_torch_op +def dot(context, node): + inputs = _get_inputs(context, node, expected=2) + xy = mb.mul(x=inputs[0], y=inputs[1]) + sum_xy = mb.reduce_sum(x=xy, axes=[0]) + context.add(sum_xy, node.name) + + +@register_torch_op +def mv(context, node): + inputs = _get_inputs(context, node, expected=2) + expand = mb.expand_dims(x=inputs[1], axes=[-1], name=node.name + "_expanded") + mv = mb.matmul(x=inputs[0], y=expand, name=node.name + "_mv") + res = mb.squeeze(x=mv, axes=[-1], name=node.name) + context.add(res) + + +@register_torch_op +def outer(context, node): + inputs = _get_inputs(context, node, expected=2) + x = mb.reshape(x=inputs[0], shape=[-1, 1]) + y = mb.reshape(x=inputs[1], shape=[1, -1]) + res = mb.matmul(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op +def cross(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + y = inputs[1] + dim = inputs[2] + + x1 = mb.gather(x=x, indices=[1, 2, 0], axis=dim, name="x1") + x2 = mb.gather(x=x, indices=[2, 0, 1], axis=dim, name="x2") + y1 = mb.gather(x=y, indices=[1, 2, 0], axis=dim, name="y1") + y2 = mb.gather(x=y, indices=[2, 0, 1], axis=dim, name="y2") + m1 = mb.mul(x=x1, y=y2) + m2 = mb.mul(x=x2, y=y1) + z = mb.sub(x=m1, y=m2, name=node.name) + context.add(z) + + +@register_torch_op +def frobenius_norm(context, node): + x, dim, keep_dims = _get_inputs(context, node, expected=3) + result = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name) + context.add(result) + + +@register_torch_op +def norm(context, node): + x, num, dim, keep_dims = _get_inputs(context, node, expected=4) + assert x is not None and keep_dims is not None and num is not None and dim is not None + temp = _vector_norm(x=x, order=num, dim=dim, keep_dims=keep_dims, name=node.name) + context.add(temp) + + +def _vector_norm(x, order, dim, keep_dims, name): + if order.val == 0: + # sum(x!=0) + x = mb.cast(x=x, dtype="fp32") + 
temp = mb.not_equal(x=x, y=0.)
+        temp = mb.cast(x=temp, dtype='int32')
+        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val > VALUE_CLOSE_TO_INFINITY:
+        # max(abs(x))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val < -VALUE_CLOSE_TO_INFINITY:
+        # min(abs(x))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    else:
+        # sum(abs(x)^{order})^{(1 / order)}
+        temp = mb.abs(x=x)
+        x, y = promote_input_dtypes([temp, order.val])
+        temp = mb.pow(x=x, y=y)
+        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims)
+        temp = mb.pow(x=temp, y=1.0 / order.val, name=name)
+    return temp
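+
+# Worked example for the generic branch above (values are illustrative, not
+# extra test coverage): for x = [3, -4] and order = 2,
+#     sum(abs(x)^2) = 9 + 16 = 25 and 25^(1/2) = 5,
+# matching torch.linalg.vector_norm(torch.tensor([3., -4.])) == 5.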
+
+
+@register_torch_op
+def _weight_norm(context, node):
+    v, g, dim = _get_inputs(context, node, expected=3)
+
+    # Determine axes for L2 norm
+    if dim.val == -1:
+        axes = None
+    else:
+        axes = list(range(v.rank))
+        dim = dim.val
+        if dim >= 0:
+            axes.remove(dim)
+        else:
+            axes.remove(v.rank + dim)
+
+    # Calculate L2 norm of v
+    temp = mb.pow(x=v, y=2.)
+    temp = mb.reduce_sum(x=temp, axes=axes, keep_dims=True)
+    norm = mb.pow(x=temp, y=1./2)
+
+    inverse_norm = mb.inverse(x=norm)
+    direction = mb.mul(x=v, y=inverse_norm)
+    result = mb.mul(x=g, y=direction, name=node.name)
+    context.add(result)
+
+
+def _matrix_norm(x, order, dim, keep_dims, name):
+    if order.val == 1:
+        # max(sum(abs(x), dim=0))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[0]], keep_dims=True)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val == -1:
+        # min(sum(abs(x), dim=0))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[0]], keep_dims=True)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val == "fro":
+        # sum(x**2)**(1/2)
+        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val > VALUE_CLOSE_TO_INFINITY:
+        # max(sum(abs(x), dim=1))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val < -VALUE_CLOSE_TO_INFINITY:
+        # min(sum(abs(x), dim=1))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    else:
+        raise RuntimeError("Matrix norm is not defined for the current inputs")
+    return temp
+
+
+@register_torch_op
+def linalg_vector_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None and order is not None
+    temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name)
+    context.add(temp)
+
+
+@register_torch_op
+def linalg_matrix_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None and order is not None and dim is not None
+    assert len(dim.val) == 2
+    temp = _matrix_norm(x=x, order=order, dim=dim.val, keep_dims=keep_dims, name=node.name)
+    context.add(temp)
+
+
+@register_torch_op
+def linalg_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None
+    if dim is None:
+        dim = _np.arange(x.rank)
+    else:
+        dim = dim.val
+    if order is None:
+        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name)
+    elif len(dim) == 2:
+        temp = _matrix_norm(
+            x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name
+        )
+    else:
+        temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name)
+    context.add(temp)
+
+
+@register_torch_op
+def hardswish(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+
+    w = mb.thresholded_relu(x=x, alpha=-3.0)
+    y = mb.sigmoid_hard(
+        x=w, alpha=1.0 / 6, beta=0.5
+    )  # y = min(max(alpha * x + beta, 0), 1)
+    result = mb.mul(x=w, y=y, name=node.name)
+
+    context.add(result)
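+
+# Sketch of the algebra behind the hardswish lowering above, assuming the MIL
+# definitions thresholded_relu(x, alpha) = x if x >= alpha else 0 and
+# sigmoid_hard(x, alpha, beta) = min(max(alpha * x + beta, 0), 1):
+#     x >= -3:  w = x and y = min(max(x / 6 + 0.5, 0), 1) = relu6(x + 3) / 6
+#     x <  -3:  w = 0, so w * y = 0
+# which reproduces torch hardswish(x) = x * relu6(x + 3) / 6.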
+
+
+@register_torch_op
+def reshape_as(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    ref = inputs[1]
+    shape = mb.shape(x=ref)
+    result = mb.reshape(x=x, shape=shape, name=node.name)
+    context.add(result)
+
+
+def _array_construct(context, node, array_type):
+    assert len(node.outputs) == 1
+    inputs = _get_inputs(context, node)
+    scalar_inputs = [
+        inp
+        for inp in inputs
+        if isinstance(inp, Var) and inp.can_be_folded_to_const() and len(inp.shape) == 0
+    ]
+
+    if len(scalar_inputs) == len(inputs):
+        # All the list items are compile-time scalar constants, so let's create
+        # a new const that concatenates them.
+        val = array_type([inp.val for inp in inputs])
+        const = mb.const(val=val, name=node.name)
+        context.add(const)
+    else:
+        # If at least one input to the construct op is non-const, collect
+        # the inputs and add them directly to the context. Ops that use this
+        # node's output will take the list directly as input.
+        context.add(array_type(inputs), node.name)
+
+
+@register_torch_op
+def tupleconstruct(context, node):
+    _array_construct(context, node, array_type=tuple)
+
+
+@register_torch_op
+def listconstruct(context, node):
+    _array_construct(context, node, array_type=list)
+
+
+@register_torch_op
+def eq(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    y = inputs[1]
+    if is_bool(x.dtype):
+        x = mb.cast(x=x, dtype='int32')
+    if is_bool(y.dtype):
+        y = mb.cast(x=y, dtype='int32')
+    x, y = promote_input_dtypes([x, y])
+    equal_to = mb.equal(x=x, y=y, name=node.name)
+    context.add(equal_to)
+
+
+@register_torch_op
+def ne(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    y = inputs[1]
+    if is_bool(x.dtype):
+        x = mb.cast(x=x, dtype='int32')
+    if is_bool(y.dtype):
+        y = mb.cast(x=y, dtype='int32')
+    x, y = promote_input_dtypes([x, y])
+    equal_to = mb.not_equal(x=x, y=y, name=node.name)
+    context.add(equal_to)
+
+
+@register_torch_op
+def le(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = promote_input_dtypes(inputs)
+    less_equal = mb.less_equal(x=x, y=y, name=node.name)
+    context.add(less_equal)
+
+
+@register_torch_op
+def lt(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = promote_input_dtypes(inputs)
+    less = mb.less(x=x, y=y, name=node.name)
+    context.add(less)
+
+
+@register_torch_op
+def ge(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = promote_input_dtypes(inputs)
+    greater_equal = mb.greater_equal(x=x, y=y, name=node.name)
+    context.add(greater_equal)
+
+
+@register_torch_op
+def gt(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = promote_input_dtypes(inputs[:2])
+    greater = mb.greater(x=x, y=y, name=node.name)
+    context.add(greater)
+
+
+@register_torch_op(torch_alias=["t"])
+def transpose(context, node):
+    assert len(node.outputs) == 1
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+
+    if len(node.inputs) == 1:
+        # PyTorch has several transpose ops that can be emitted. This one is only
+        # emitted when .t() is called on a tensor, which means it can only be
+        # called on a matrix.
+        if len(x.shape) > 2:
+            raise ValueError("transpose without dims for rank > 2 is unsupported")
+        res = mb.transpose(x=x, perm=[1, 0], name=node.name)
+    else:
+        assert len(inputs) == 3
+        ax0 = inputs[1].val
+        ax1 = inputs[2].val
+
+        perm = list(range(len(x.shape)))
+        perm[ax0] = ax1
+        perm[ax1] = ax0
+
+        res = mb.transpose(x=x, perm=perm, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def permute(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    perm = mb.transpose(x=inputs[0], perm=inputs[1], name=node.name)
+    context.add(perm)
+
+
+@register_torch_op
+def frac(context, node):
+    # Frac(x) = x - floor(abs(x)) * sign(x)
+
+    x = _get_inputs(context, node, expected=1)[0]
+    floor_abs = mb.floor(x=mb.abs(x=x))
+    sign_abs_floor = mb.mul(x=floor_abs, y=mb.sign(x=x))
+    res = mb.sub(x=x, y=sign_abs_floor)
+    context.add(res, torch_name=node.name)
+
+
+@register_torch_op
+def pixel_shuffle(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    perm = mb.pixel_shuffle(x=inputs[0], upscale_factor=inputs[1], name=node.name)
+    context.add(perm)
+
+
+@register_torch_op
+def pixel_unshuffle(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    downscale_factor = _np.uint32(inputs[1].val)
+    perm = mb.pixel_unshuffle(x=inputs[0], downscale_factor=downscale_factor, name=node.name)
+    context.add(perm)
+
+
+@register_torch_op(torch_alias=["bmm"])
+def matmul(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    if inputs[1].val is not None and \
+            len(inputs[1].shape) == 2 and len(inputs[0].shape) <= 3:
+        res = mb.linear(x=inputs[0], weight=_np.transpose(inputs[1].val), name=node.name)
+    else:
+        res = mb.matmul(x=inputs[0], y=inputs[1], name=node.name)
+    context.add(res)
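+
+# The constant-2-D-weight fast path above uses the identity x @ W == linear(x, W.T),
+# since mb.linear computes x @ weight.T + bias; e.g. x of shape (B, K) times W of
+# shape (K, N) becomes a linear layer whose weight W.T has shape (N, K).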
+
+
+@register_torch_op
+def add(context, node):
+    add_inputs = _get_inputs(context, node)
+    assert len(node.outputs) == 1
+
+    # TODO (sberardi): 3rd param to aten::add is a scale factor, need to handle that.
+    # out = input + alpha x other
+    # rdar://60175736
+    if len(add_inputs) > 2 and add_inputs[2].val != 1:
+        raise ValueError("ADD does not support scale factor param")
+    x, y = promote_input_dtypes(add_inputs[:2])
+    add_node = mb.add(x=x, y=y, name=node.name)
+    context.add(add_node)
+
+
+@register_torch_op
+def cumsum(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    x = inputs[0]
+    if is_bool(x.dtype):
+        x = mb.cast(x=x, dtype='int32')
+    res = mb.cumsum(x=x, axis=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def addmm(context, node):
+    # addmm(Tensor input, Tensor mat1, Tensor mat2, Scalar beta=1, Scalar alpha=1)
+    # output = beta * input + alpha * mat1 * mat2
+
+    assert len(node.outputs) == 1
+    inputs = _get_inputs(context, node, expected=5)
+    bias = inputs[0]
+    mat1 = inputs[1]
+    mat2 = inputs[2]
+    beta = inputs[3]
+    alpha = inputs[4]
+
+    if beta.val != 1.0:
+        # Apply scaling factor beta to the bias.
+        bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled")
+        context.add(bias)
+
+    if alpha.val != 1.0:
+        # Apply scaling factor alpha to the input.
+        mat1 = mb.mul(x=alpha, y=mat1, name=mat1.name + "_scaled")
+        context.add(mat1)
+
+    # MIL linear will transpose mat2, but addmm expects that mat1 and mat2
+    # can multiply as is. So we add a transpose.
+    mat2 = mb.transpose(x=mat2, perm=[1, 0], name=mat2.name + "_transposed")
+    context.add(mat2)
+
+    addmm_node = mb.linear(x=mat1, weight=mat2, bias=bias, name=node.name)
+    context.add(addmm_node)
+
+
+@register_torch_op
+def linear(context, node):
+    inputs = _get_inputs(context, node, expected=[2, 3])
+    x = inputs[0]
+    W = inputs[1]
+    bias = inputs[2] if len(node.inputs) == 3 else None
+    res = mb.linear(x=x, weight=W, bias=bias, name=node.name)
+    context.add(res)
+
+
+@register_torch_op(torch_alias=["conv2d"])
+def _convolution(context, node):
+    inputs = _get_inputs(context, node)
+
+    x = inputs[0]
+    # PyTorch and MIL have the same weight layout
+    # Conv: [Cout, Cin, *D]
+    # ConvTranspose: [Cin, Cout, *D]
+    weight = inputs[1]
+    bias = inputs[2]
+    strides = inputs[3]
+
+    x, weight = promote_input_dtypes([x, weight])
+
+    # Expand padding. Torch accepts either an int (for all dimensions) or an n-tuple of ints (one per dimension), but
+    # we require a (2 * n)-tuple, where n is the number of spatial dimensions, start and end for each spatial dimension
+    pad = inputs[4].val
+
+    if len(weight.shape) in (3, 4):
+        # 1D and 2D: Need to explicitly state L-R, T-B pad
+        pad = _np.repeat(pad, 2)
+    elif len(weight.shape) == 5:
+        # 3D: Need to explicitly state F-Bk, L-R, T-B pad
+        if type(pad) == int:
+            pad = _np.repeat(pad, 6)
+        elif len(pad) == 3:
+            pad = _np.repeat(pad, 2)
+    else:
+        raise ValueError(
+            "Invalid weight dimension. Must be 3, 4, or 5 for 1D, 2D, or 3D convolution, respectively."
+        )
+
+    dilations = inputs[5]
+    out_pad = None
+    if len(inputs) >= 12:
+        transposed = inputs[6].val
+        out_pad = inputs[7].val
+        group = inputs[8]
+    elif len(inputs) == 7:
+        transposed = False
+        group = inputs[6]
+    else:
+        raise ValueError(
+            "unexpected number of inputs for node {} ({}): {}".format(
+                node.name, node.kind, len(inputs)
+            )
+        )
+
+    kwargs = {
+        "x": x,
+        "weight": weight,
+        "strides": strides,
+        "pad_type": "custom",
+        "pad": pad,
+        "dilations": dilations,
+        "groups": group,
+        "name": node.name,
+    }
+    # Bias is optional in PyTorch's convolution.
+    if bias is not None:
+        kwargs["bias"] = bias
+
+    if transposed is True:
+        # Transposed convolution
+        # Handle output_padding using pre-pad or post-crop
+        pre_pad = [0] * len(pad)
+        post_crop = [0] * len(pad)
+
+        if out_pad is not None and any(out_pad):
+            output_padding = [0] * len(pad)
+            # output padding adds additional padding on one of the sides of a dimension,
+            # i.e. bottom from top-bottom,
+            #      right from left-right,
+            #      back from front-back.
+            # The Core ML padding structure is similar: [top, bottom, left, right].
+            # Map output_padding into that layout to simplify further processing!
+            #
+            # For ConvTranspose2d: [bottom, right] -> [0, b, 0, r]
+            output_padding = [
+                0 if i % 2 == 0 else out_pad[i // 2] for i in range(len(pad))
+            ]
+            if sum(pad) == 0 and any(output_padding):
+                raise ValueError(
+                    "ConvTranspose configuration of padding=0 and output_padding > 0 not supported!"
+                )
+            post_crop = pad.copy()
+            pad *= 0
+            for i in range(0, len(pad)):
+                if post_crop[i] >= output_padding[i]:
+                    post_crop[i] -= output_padding[i]
+                else:
+                    pre_pad[i] = output_padding[i] - post_crop[i]
+            kwargs["pad"] = pre_pad
+            if any(pre_pad):
+                # Constant pad requires pad to be of length 2*input_rank
+                pre_pad = [0] * 2 * (len(x.shape) - 2) + pre_pad
+                x = mb.pad(x=x, pad=pre_pad)
+                kwargs["x"] = x
+            if any(post_crop):
+                del kwargs["name"]
+
+        conv = mb.conv_transpose(**kwargs)
+        if any(post_crop):
+            # TODO: rdar://65575826 (PyTorch converter: output_padding mapping to slice
+            # instead of crop layer for 1 and 3D ConvTranspose)
+            if len(post_crop) == 2 and conv.rank == 3:
+                # Number of elements to crop from right = post_crop[-1].
+                # Since slicing supports negative indexing, end_id = -1 * post_crop[-1]
+                conv = mb.slice_by_index(
+                    x=conv,
+                    begin=[0, 0, post_crop[0]],
+                    end=[0, 0, -1 * post_crop[-1]],
+                    begin_mask=[True, True, False],
+                    end_mask=[True, True, False],
+                    name=node.name,
+                )
+            elif len(post_crop) == 4 and conv.rank == 4:
+                conv = mb.crop(
+                    x=conv,
+                    crop_height=post_crop[:2],
+                    crop_width=post_crop[2:4],
+                    name=node.name,
+                )
+            else:
+                raise ValueError(
+                    "output_padding is supported only for ConvTranspose1D or ConvTranspose2D!"
+                )
+    else:
+        # Normal convolution
+        conv = mb.conv(**kwargs)
+    context.add(conv)
+
+
+# Convolution with "same" or "valid" padding
+@register_torch_op
+def _convolution_mode(context, node):
+    inputs = _get_inputs(context, node, expected=7)
+    mode = inputs[4].val
+
+    context.add(
+        mb.conv(
+            x=inputs[0],
+            weight=inputs[1],
+            bias=inputs[2],
+            strides=inputs[3],
+            pad_type=mode,
+            dilations=inputs[5],
+            groups=inputs[6],
+            name=node.name,
+        )
+    )
+
+
+@register_torch_op
+def softmax(context, node):
+    inputs = _get_inputs(context, node)
+
+    x = inputs[0]
+    axis = inputs[1]
+    res = mb.softmax(x=x, axis=axis, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def flatten(context, node):
+    inputs = _get_inputs(context, node)
+
+    x = inputs[0]
+    dims = list(x.shape)
+    start_val = inputs[1].val
+    end_val = inputs[2].val
+
+    start = len(dims) + start_val if start_val < 0 else start_val
+    end = len(dims) + end_val if end_val < 0 else end_val
+
+    if start > len(dims) or end > len(dims) or start < 0 or end < 0:
+        raise ValueError(
+            "Invalid start and end. (start, end) == ({}, {})".format(start_val, end_val)
+        )
+    if start > end:
+        raise ValueError(
+            "Start must be before end. (start, end) == ({}, {})".format(start_val, end_val)
+        )
+    x_shape = mb.shape(x=x)
+
+    shape1 = mb.slice_by_index(x=x_shape, begin=[0], end=[start])
+    shape2 = mb.slice_by_index(x=x_shape, begin=[end + 1], end=[len(dims)])
+
+    flatten_dim = -1
+    if not any_symbolic(x.shape):
+        flatten_dim = 1
+        for dim in dims[start: end + 1]:
+            flatten_dim *= dim
+
+    shape = mb.concat(values=(shape1, [flatten_dim], shape2), axis=0)
+    shape = mb.cast(x=shape, dtype="int32")
+    reshape = mb.reshape(x=x, shape=shape, name=node.name)
+    context.add(reshape)
+
+
+@register_torch_op
+def _reshape_from_tensor(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+
+    reshape = mb.reshape(x=inputs[0], shape=inputs[1], name=node.name)
+    context.add(reshape)
+
+
+@register_torch_op
+def softsign(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+
+    res = mb.softsign(x=inputs[0], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def relu(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+
+    res = mb.relu(x=inputs[0], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def prelu(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    alpha = inputs[1]
+    # The MIL prelu op requires the input to have rank at least 3,
+    # i.e. [batch, channel, spatial_dims*].
+    if x.rank >= 2:
+        alpha = alpha.val
+        alpha = _np.ones((x.shape[1],)) * alpha
+
+    if x.rank <= 2:
+        axes = [1, 2] if x.rank == 1 else [2]
+        x = mb.expand_dims(x=x, axes=axes)
+        x = mb.prelu(x=x, alpha=alpha)
+        res = mb.squeeze(x=x, axes=axes, name=node.name)
+    else:
+        res = mb.prelu(x=x, alpha=alpha, name=node.name)
+
+    context.add(res)
+
+
+@register_torch_op
+def linspace(context, node):
+    inputs = _get_inputs(context, node, min_expected=3)
+
+    start = inputs[0]
+    end = inputs[1]
+    nums = inputs[2]
+    start = mb.cast(x=start, dtype="fp32")
+    end = mb.cast(x=end, dtype="fp32")
+
+    if start.can_be_folded_to_const() and end.can_be_folded_to_const() and nums.can_be_folded_to_const():
+        start_val = start.val
+        end_val = end.val
+        nums_val = nums.val
+        if nums_val < MAX_SIZE_CONSTANT_FOLDING:
+            res = mb.const(val=_np.linspace(start_val, end_val, nums_val), name=node.name)
+            context.add(res)
+            return
+
+    if nums.val is None:
+        msg = "Dynamic steps input for torch.linspace is not supported. Please use torch.arange instead"
+        raise NotImplementedError(msg)
+    else:
+        if nums.val == 1:
+            res = mb.expand_dims(x=start, axes=[0], name=node.name)
+        else:
+            # step = (end - start) / (nums - 1)
+            x = mb.sub(x=end, y=start)
+            y = mb.sub(x=nums, y=1)
+            x = mb.cast(x=x, dtype="fp32")
+            y = mb.cast(x=y, dtype="fp32")
+            step = mb.real_div(x=x, y=y)
+
+            # Note that the range_1d op excludes the end point,
+            # so we have to add the end back to the resulting array.
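+            # Worked example (illustrative): torch.linspace(0, 10, 5) gives
+            #     step = (10 - 0) / (5 - 1) = 2.5
+            #     range_1d -> [0.0, 2.5, 5.0, 7.5]   (end point excluded)
+            #     concat with end -> [0.0, 2.5, 5.0, 7.5, 10.0]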
+            arange = mb.range_1d(end=end, start=start, step=step)
+            new_end = mb.expand_dims(x=end, axes=[0])
+            res = mb.concat(values=[arange, new_end], axis=0, name=node.name)
+        context.add(res)
+
+
+@register_torch_op
+def relu6(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+
+    res = mb.relu6(x=inputs[0], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def einsum(context, node):
+    a = context[node.inputs[1]][0]
+    b = context[node.inputs[1]][1]
+    equation = context[node.inputs[0]].val
+    x = build_einsum_mil(a, b, equation, node.name)
+    context.add(x)
+
+
+@register_torch_op
+def eye(context, node):
+    # TODO: rdar://104400568 ([PyTorch] Use MIL ops to construct the eye matrix in order to avoid directly folding the input into a const)
+    inputs = _get_inputs(context, node, expected=[5, 6])
+    if len(inputs) == 5:
+        eye = _np.eye(inputs[0].val)
+    if len(inputs) == 6:
+        eye = _np.eye(inputs[0].val, inputs[1].val)
+    eye = mb.const(val=eye, name=node.name)
+    context.add(eye)
+
+
+@register_torch_op
+def elu(context, node):
+    # The Torch-to-ATen port adds scale and input_scale, which are set to 1
+    inputs = _get_inputs(context, node, expected=4)
+
+    res = mb.elu(x=inputs[0], alpha=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def leaky_relu(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+
+    res = mb.leaky_relu(x=inputs[0], alpha=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def rrelu(context, node):
+    inputs = _get_inputs(context, node, expected=5)
+
+    # Alpha in evaluation mode is just the average between upper and lower.
+    lower_alpha = inputs[1]
+    upper_alpha = inputs[2]
+    alpha = (lower_alpha.val + upper_alpha.val) / 2
+
+    res = mb.leaky_relu(x=inputs[0], alpha=alpha, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def softplus(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    x = inputs[0]
+    beta_ = inputs[1].val
+    C = x.shape[1]
+    alpha_br = _np.repeat(1.0 / beta_, C).astype('float32')
+    beta_br = _np.repeat(beta_, C).astype('float32')
+
+    res = mb.softplus_parametric(x=x, alpha=alpha_br, beta=beta_br, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def mish(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+
+    softplus = mb.softplus(x=x)
+    tanh = mb.tanh(x=softplus)
+    res = mb.mul(x=x, y=tanh, name=node.name)
+    context.add(res)
+
+
+def _adjust_pad_for_ceil_mode(input_shape, kernel_size, stride_sizes, pad_sizes):
+    """
+    Given an input tensor and pooling parameters, add the extra input
+    padding needed to replicate ceil_mode.
+    MIL 3D pooling does not support ceil_mode natively, but we can
+    work around that by padding the input appropriately.
+
+    PyTorch output size formula for pooling:
+    (reference: https://github.com/pytorch/pytorch/blob/375c30a7177442fb9d6de7516a9ae4031ae324c4/aten/src/ATen/native/Pool.h#L28)
+
+    When ceil mode is True:
+        out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1
+        if (out_dim-1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):
+            out_dim = out_dim - 1
+    When ceil mode is False:
+        out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1
+
+    We follow the approach in
+    https://github.com/pytorch/pytorch/blob/edf751ca2fededecdd9366874c761431c0f61f01/aten/src/ATen/native/mkldnn/Pooling.cpp#L121
+    to calculate the padding: keep increasing the pad_r value until the output
+    size without ceil mode matches that of the ceil mode.
+    """
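+    # Worked example (illustrative): in_dim=5, kernel=2, stride=2, pad_l=pad_r=0.
+    # With ceil_mode=True the formula gives floor((5 - 2 + 1) / 2) + 1 = 3, while
+    # without it floor((5 - 2) / 2) + 1 = 2, so pad_r is bumped to 1 to make the
+    # ceil_mode=False output size match.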
+
+    def _calculate_pool_output_size(in_dim, kernel, stride, pad_l, pad_r, ceil_mode):
+        if ceil_mode:
+            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel + stride - 1) / stride) + 1
+            if (out_dim - 1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):
+                out_dim = out_dim - 1
+        else:
+            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel) / stride) + 1
+        return out_dim
+
+    new_pad = pad_sizes.copy()
+    for idx in range(len(input_shape)):
+        if is_symbolic(input_shape[idx]):
+            logger.warning(
+                "pooling padding adjusted to support ceil_mode=True for a symbolic dimension. "
+                "Output shape of the pool op may be wrong for certain input shapes."
+            )
+            new_pad[2 * idx + 1] += stride_sizes[idx] - 1
+        else:
+            out_dim_with_ceil_mode = _calculate_pool_output_size(
+                input_shape[idx],
+                kernel_size[idx],
+                stride_sizes[idx],
+                pad_sizes[2 * idx],
+                pad_sizes[2 * idx + 1],
+                True,
+            )
+            is_equal = False
+            while not is_equal:
+                out_dim_without_ceil_mode = _calculate_pool_output_size(
+                    input_shape[idx],
+                    kernel_size[idx],
+                    stride_sizes[idx],
+                    new_pad[2 * idx],
+                    new_pad[2 * idx + 1],
+                    False,
+                )
+                is_equal = True
+                if out_dim_without_ceil_mode < out_dim_with_ceil_mode:
+                    new_pad[2 * idx + 1] += 1
+                    is_equal = False
+
+    return new_pad
+
+
+def _max_pool(context, node, inputs):
+    x = inputs[0]
+    kernel_sizes = inputs[1]
+    strides = inputs[2]
+    if strides.op.op_type == "const" and (not list(strides.val)):
+        strides = mb.const(val=kernel_sizes.val, name=strides.name)
+
+    pad_type = "custom"
+    # Need to explicitly state L-R, T-B pad
+    pad = inputs[3]
+    pad = _np.repeat(pad.val, 2)
+    dilation = inputs[4].val
+    ceil_mode = inputs[5].val
+    if _np.any(dilation > 1):
+        # See: rdar://60633736 (Implement dilation for mil op max_pool)
+        raise ValueError("@max_pool does not support dilation > 1")
+    spatial_rank = len(pad) // 2
+    if spatial_rank > 2 and ceil_mode is True and list(strides.val) != [1] * len(strides.val):
+        # MIL does not support ceil_mode for 3D pool, so we need to adjust the
+        # padding values when ceil_mode is True. Note that ceil_mode only makes
+        # a difference when the strides are not all 1.
+        x_spatial_dimensions = x.shape[-spatial_rank:]
+        pad = _adjust_pad_for_ceil_mode(x_spatial_dimensions, kernel_sizes.val, strides.val, pad)
+
+    pool = mb.max_pool(
+        x=x,
+        kernel_sizes=kernel_sizes,
+        strides=strides,
+        pad_type=pad_type,
+        pad=pad,
+        name=node.name,
+        ceil_mode=ceil_mode if spatial_rank <= 2 else False,
+    )
+    context.add(pool)
+
+
+@register_torch_op
+def max_pool1d(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _max_pool(context, node, inputs)
+
+
+@register_torch_op
+def max_pool2d(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _max_pool(context, node,
inputs) + + +@register_torch_op +def max_pool3d(context, node): + inputs = _get_inputs(context, node, expected=6) + _max_pool(context, node, inputs) + + +@register_torch_op +def minimum(context, node): + inputs = _get_inputs(context, node, expected=2) + assert len(node.outputs) == 1 + x = context[node.inputs[0]] + y = context[node.inputs[1]] + out = mb.minimum(x=x, y=y, name=node.name) + context.add(out) + + +@register_torch_op +def clamp_min(context, node): + x = _get_inputs(context, node, expected=2) + x = mb.clip(x=x[0], alpha=x[1], beta=_np.inf, name=node.name) + context.add(x) + + +@register_torch_op +def maximum(context, node): + inputs = _get_inputs(context, node, expected=2) + assert len(node.outputs) == 1 + x = context[node.inputs[0]] + y = context[node.inputs[1]] + out = mb.maximum(x=x, y=y, name=node.name) + context.add(out) + + +@register_torch_op +def div(context, node): + inputs = _get_inputs(context, node, expected=[2, 3]) + + if len(inputs) > 2 and inputs[2] is not None: + rounding_mode = inputs[2].val + if rounding_mode == "floor": + # round towards negative infinity + # e.g.: + # values before floor: [2.6, -3.4, -3.6] + # values after floor: [2, -4, -4] + res = mb.floor_div(x=inputs[0], y=inputs[1], name=node.name) + elif rounding_mode == "trunc": + # round towards 0 + # e.g.: + # values before trunc: [2.6, -3.4, -3.6] + # values after trunc: [2, -3, -3] + x = mb.cast(x=inputs[0], dtype="fp32") + y = mb.cast(x=inputs[1], dtype="fp32") + z = mb.real_div(x=x, y=y) + s = mb.sign(x=z) + all_positive = mb.mul(x=z, y=s) + all_positive_floor = mb.floor(x=all_positive) + res = mb.mul(x=all_positive_floor, y=s, name=node.name) + else: + raise NotImplementedError( + 'rounding mode "{}" not supported in the "div" op'.format(rounding_mode) + ) + else: + x = mb.cast(x=inputs[0], dtype="fp32") + y = mb.cast(x=inputs[1], dtype="fp32") + res = mb.real_div(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["floordiv"]) +def floor_divide(context, node): + inputs = _get_inputs(context, node, expected=2) + inputs = promote_input_dtypes(inputs) + div_res = mb.floor_div(x=inputs[0], y=inputs[1]) + # Pytorch's floor_divide always returns fp32, even if the inputs are int + res = mb.cast(x=div_res, dtype='fp32', name=node.name) + context.add(res) + + +@register_torch_op +def true_divide(context, node): + inputs = _get_inputs(context, node, expected=2) + res = mb.real_div(x=inputs[0], y=inputs[1], name=node.name) + context.add(res) + + +@register_torch_op +def mul(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + res = mb.mul(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op +def pow(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + res = mb.pow(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["rsub"]) +def sub(context, node): + inputs = _get_inputs(context, node, expected=[2, 3]) + assert len(node.outputs) == 1 + + if node.kind == "rsub": + # rsub reverses the order of arguments + y = inputs[0] + x = inputs[1] + else: + x = inputs[0] + y = inputs[1] + + if len(inputs) > 2: + alpha = inputs[2].val + + # TODO (sberardi): 3rd param to aten::sub is a scale factor, need to handle that. 
+ # out=input-alpha x other + # rdar://60175736 + if alpha != 1: + raise ValueError("SUB does not support scale factor param") + + x, y = promote_input_dtypes([x, y]) + res = mb.sub(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op( + torch_alias=[ + "sum", + "logsumexp", + ]) +def mean(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + if types.is_bool(x.dtype): + # TODO: In the future when MIL op supports bool, we need to use curr_opset_version to decide + # if we want to cast or not. + x = mb.cast(x=x, dtype="fp32") + kwargs = {"x": x, "name": node.name} + + # @axes is optional, so omit if None. + axes = inputs[1] + if axes is not None: + # @axes needs to be a list, but if only one axis was specified in the + # model, it will be constructed as an int. Construct a new constant as a + # list. + if not isinstance(axes.val, _np.ndarray): + axes = mb.const(val=[axes.val], name=axes.name + "_list") + context.add(axes) + kwargs["axes"] = axes + + # @keep_dims is optional. + if len(inputs) >= 3: + keep_dims = inputs[2] + kwargs["keep_dims"] = keep_dims + + # Last input to mean is an optional output tensor. We always expect this to + # be None or absent. + assert len(inputs) <= 3 or inputs[3] is None + if node.kind == "sum": + res = mb.reduce_sum(**kwargs) + elif node.kind == "logsumexp": + res = mb.reduce_log_sum_exp(**kwargs) + else: + res = mb.reduce_mean(**kwargs) + context.add(res) + + +@register_torch_op +def squeeze(context, node): + inputs = _get_inputs(context, node) + if len(inputs) == 1: + res = mb.squeeze(x=inputs[0], name=node.name) + elif len(inputs) == 2: + squeeze_dim = inputs[1].val + res = mb.squeeze(x=inputs[0], axes=(squeeze_dim,), name=node.name) + context.add(res) + + +@register_torch_op +def unsqueeze(context, node): + inputs = _get_inputs(context, node, expected=2) + unsqueeze = mb.expand_dims(x=inputs[0], axes=[inputs[1].val], name=node.name) + context.add(unsqueeze) + + +@register_torch_op +def size(context, node): + inputs = _get_inputs(context, node, expected=[1, 2]) + x = inputs[0] + + # Get the shape of the tensor. + if types.is_complex(x.dtype): + size_node = mb.complex_shape(x=inputs[0], name=node.name + "_shape") + else: + size_node = mb.shape(x=inputs[0], name=node.name + "_shape") + + # Get the size of the tensor along the input dimension. + if len(node.inputs) == 2: + dim = inputs[1].val + size_node = _list_select(size_node, dim) + context.add(size_node, node.name) + + +@register_torch_op +def _shape_as_tensor(context, node): + inputs = _get_inputs(context, node, expected=1) + + # Get the shape of the tensor. 
+    shape_node = mb.shape(x=inputs[0], name=node.name)
+    context.add(shape_node, node.name)
+
+
+@register_torch_op(torch_alias=["reshape"])
+def view(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    shape = inputs[1]
+
+    if isinstance(shape, ListVar):
+        length = mb.list_length(ls=shape)
+        indices = mb.range_1d(start=0, end=length, step=1)
+        shape = mb.list_gather(ls=shape, indices=indices)
+
+    if (
+        isinstance(shape, list)
+        and all([isinstance(dim, Var) and len(dim.shape) == 0 for dim in shape])
+        and any([dim.val is None for dim in shape])
+    ):
+        shape = mb.concat(values=shape, axis=0)
+
+    shape = mb.cast(x=shape, dtype="int32")
+    view = mb.reshape(x=x, shape=shape, name=node.name)
+    context.add(view)
+
+
+@register_torch_op(torch_alias=['constant_pad_nd'])
+def pad(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+
+    pad = inputs[1]
+    if pad.val is not None:
+        pad = pad.val.reshape((-1, 2))[::-1].reshape(-1).tolist()
+        missing_dims = x.rank - (len(pad) // 2)
+        pad = [0, 0] * missing_dims + pad
+
+    if len(inputs) == 4:
+        mode = inputs[2].val
+        assert mode in ('constant', 'reflect', 'replicate')
+        val_index = 3
+    else:
+        mode = 'constant'
+        val_index = 2
+
+    scalar_val = inputs[val_index] if inputs[val_index] else 0.0
+    if inputs[val_index] and inputs[val_index].op.op_type == "const":
+        scalar_val = float(scalar_val.val)
+
+    res = mb.pad(x=x, pad=pad, mode=mode, constant_val=scalar_val, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def adaptive_avg_pool2d(context, node):
+    _adaptive_pool2d(context, node, mb.avg_pool, mb.reduce_mean)
+
+
+@register_torch_op
+def adaptive_max_pool2d(context, node):
+    _adaptive_pool2d(context, node, mb.max_pool, mb.reduce_max)
+
+
+def _adaptive_pool2d_non_fixed_kernel_size_and_stride(x, output_shape, name, reduce_op):
+    '''
+    If the input dimension is not evenly divisible by the output dimension, then
+    the stride and kernel size used by PyTorch are not fixed. This is true for
+    both the height and width dimensions.
+    '''
+
+    def get_kernel_indexes_1d(in_dimension, out_dimension):
+        results = []
+        for i in range(out_dimension):
+            start = _math.floor(i * in_dimension / out_dimension)
+            end = _math.ceil((i + 1) * in_dimension / out_dimension)
+            results.append((start, end))
+
+        return results
+
+    pool_results = []
+
+    for s2, e2 in get_kernel_indexes_1d(x.shape[2], output_shape[0]):
+        for s3, e3 in get_kernel_indexes_1d(x.shape[3], output_shape[1]):
+            cur_kernel = mb.slice_by_index(
+                x=x,
+                begin=[0, 0, s2, s3],
+                end=[x.shape[0], x.shape[1], e2, e3],
+            )
+            cur_result = reduce_op(
+                x=cur_kernel,
+                axes=[-2, -1],
+                keep_dims=True
+            )
+            pool_results.append(cur_result)
+
+    return mb.reshape(
+        x=mb.concat(values=pool_results, axis=-1),
+        shape=[x.shape[0], x.shape[1], output_shape[0], output_shape[1]],
+        name=name,
+    )
+
+
+def _adaptive_pool2d(context, node, pool_op, reduce_op):
+    # Get input tensor and output shape
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    output_shape = inputs[1].val
+    assert isinstance(output_shape, _np.ndarray) and len(output_shape) == 2
+    output_shape = tuple(output_shape)
+
+    if output_shape == (1, 1):
+        # Represent (1,1) output size with a global reduce op
+        result = reduce_op(x=x, axes=[-2, -1], keep_dims=True, name=node.name)
+    elif x.shape is None or any_symbolic(x.shape):
+        raise ValueError(
+            "Adaptive pooling is only supported when input tensor size is known or output size == (1,1). "
" + "Received: input size == {}, output size == {}".format( + x.shape_str(), output_shape, + ) + ) + elif x.shape[-2] % output_shape[-2] == 0 and x.shape[-1] % output_shape[-1] == 0: + # Stride and and kernel size is fixed + strides = [ind // outd for ind, outd in zip(x.shape[-2:], output_shape)] + kernel_sizes = [ + ind - s * (outd - 1) + for ind, outd, s in zip(x.shape[-2:], output_shape, strides) + ] + result = pool_op( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type="valid", + name=node.name, + ) + else: + result = _adaptive_pool2d_non_fixed_kernel_size_and_stride( + x, output_shape, node.name, reduce_op + ) + + context.add(result) + + +@register_torch_op +def batch_norm(context, node): + inputs = _get_inputs(context, node, expected=9) + # inputs skipped: + # float momentum (6) + # bool cudnn_enabled (8) + input_rank = inputs[0].rank + if input_rank < 2 or input_rank > 5: + raise ValueError( + "BatchNorm: Encountered invalid input rank during translation in torch frontend." + ) + + _input = inputs[0] + weight = inputs[1] + bias = inputs[2] + running_mean = inputs[3] + running_var = inputs[4] + training = inputs[5].val + eps = inputs[7] + + # If training = True, the mean and variance of the current batch of data are used to normalize the input data. + # If training = False, data statistics running_mean and running_var are used instead. + # Note that, even in the evaluation mode (after calling model.eval()), the training parameter can still be true + # and it just refers to a different computation as mentioned above. + + # helper functions for different type of batch norm + def _add_batch_norm_dynamic(): + x = _input + + if training or (running_mean is None) or (running_var is None): + axes = [axis for axis in range(x.rank) if axis != 1] + mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True) + num = mb.sub(x=x, y=mean) + square = mb.mul(x=num, y=num) + variance = mb.reduce_mean(x=square, axes=axes, keep_dims=True) + shape = mb.shape(x=variance) + else: + shape = [1] * x.rank + shape[1] = -1 if any_symbolic(running_mean.shape) else running_mean.shape[0] + mean = mb.reshape(x=running_mean, shape=shape) + num = mb.sub(x=x, y=mean) + variance = mb.reshape(x=running_var, shape=shape) + + variance_add_epsilon = mb.add(x=variance, y=eps) + sqrt = mb.sqrt(x=variance_add_epsilon) + + name = node.name if weight is None and bias is None else node.name + "_div" + x = mb.real_div(x=num, y=sqrt, name=name) + + if weight is not None: + weight_reshape = mb.reshape(x=weight, shape=shape) + name = node.name if bias is None else node.name + "_mul" + x = mb.mul(x=x, y=weight_reshape, name=name) + + if bias is not None: + bias_reshape = mb.reshape(x=bias, shape=shape) + x = mb.add(x=x, y=bias_reshape, name=node.name) + + context.add(x) + + def _add_batch_norm_1d(): + # first expand the 3d tensor to 4d, and call the standard mb.batch_norm + x = mb.expand_dims(x=_input, axes=[-1], name=node.name + "_rank2_expansion") + bn = mb.batch_norm( + x=x, + mean=running_mean, + variance=running_var, + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name + "_batch_norm_1d", + ) + bn = mb.squeeze(x=bn, name=node.name, axes=[-1]) + context.add(bn) + + def _add_batch_norm(): + bn = mb.batch_norm( + x=_input, + mean=running_mean, + variance=running_var, + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name, + ) + context.add(bn) + + is_batch_norm_1d_rank_2 = input_rank == 2 + + if training or running_mean.val is None or running_var.val is None or weight is None or bias is None: + 
+
+    # helper functions for the different types of batch norm
+    def _add_batch_norm_dynamic():
+        x = _input
+
+        if training or (running_mean is None) or (running_var is None):
+            axes = [axis for axis in range(x.rank) if axis != 1]
+            mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
+            num = mb.sub(x=x, y=mean)
+            square = mb.mul(x=num, y=num)
+            variance = mb.reduce_mean(x=square, axes=axes, keep_dims=True)
+            shape = mb.shape(x=variance)
+        else:
+            shape = [1] * x.rank
+            shape[1] = -1 if any_symbolic(running_mean.shape) else running_mean.shape[0]
+            mean = mb.reshape(x=running_mean, shape=shape)
+            num = mb.sub(x=x, y=mean)
+            variance = mb.reshape(x=running_var, shape=shape)
+
+        variance_add_epsilon = mb.add(x=variance, y=eps)
+        sqrt = mb.sqrt(x=variance_add_epsilon)
+
+        name = node.name if weight is None and bias is None else node.name + "_div"
+        x = mb.real_div(x=num, y=sqrt, name=name)
+
+        if weight is not None:
+            weight_reshape = mb.reshape(x=weight, shape=shape)
+            name = node.name if bias is None else node.name + "_mul"
+            x = mb.mul(x=x, y=weight_reshape, name=name)
+
+        if bias is not None:
+            bias_reshape = mb.reshape(x=bias, shape=shape)
+            x = mb.add(x=x, y=bias_reshape, name=node.name)
+
+        context.add(x)
+
+    def _add_batch_norm_1d():
+        # first expand the 3d tensor to 4d, and call the standard mb.batch_norm
+        x = mb.expand_dims(x=_input, axes=[-1], name=node.name + "_rank2_expansion")
+        bn = mb.batch_norm(
+            x=x,
+            mean=running_mean,
+            variance=running_var,
+            gamma=weight,
+            beta=bias,
+            epsilon=eps,
+            name=node.name + "_batch_norm_1d",
+        )
+        bn = mb.squeeze(x=bn, name=node.name, axes=[-1])
+        context.add(bn)
+
+    def _add_batch_norm():
+        bn = mb.batch_norm(
+            x=_input,
+            mean=running_mean,
+            variance=running_var,
+            gamma=weight,
+            beta=bias,
+            epsilon=eps,
+            name=node.name,
+        )
+        context.add(bn)
+
+    is_batch_norm_1d_rank_2 = input_rank == 2
+
+    if training or running_mean.val is None or running_var.val is None or weight is None or bias is None:
+        _add_batch_norm_dynamic()
+    elif is_batch_norm_1d_rank_2:
+        _add_batch_norm_1d()
+    else:
+        _add_batch_norm()
+
+
+@register_torch_op
+def instance_norm(context, node):
+    inputs = _get_inputs(context, node, expected=9)
+    x = inputs[0]
+    weight = inputs[1]
+    bias = inputs[2]
+    eps = inputs[7]
+    x = mb.instance_norm(
+        x=x,
+        gamma=weight,
+        beta=bias,
+        epsilon=eps,
+        name=node.name,
+    )
+    context.add(x)
+
+
+@register_torch_op
+def group_norm(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    x = inputs[0]
+    num_groups = inputs[1].val
+    weight = inputs[2]
+    bias = inputs[3]
+    eps = inputs[4]
+    n, c = x.shape[0], x.shape[1]  # at minimum (N, C) required
+    input_shape = [*x.shape]  # n, c, *
+    num_groups = builtins.min(num_groups, c)
+    new_shape = [n, num_groups, c // num_groups]
+    new_shape += [*x.shape[2:]]  # adds the remaining dims
+    num_extra_axes = len(x.shape[2:])
+    axes_ = [int(i) for i in range(2, 2 + num_extra_axes + 1)]
+    weight_shape, bias_shape = [1, c], [1, c]
+    weight_shape += [1 for _ in range(num_extra_axes)]
+    bias_shape += [1 for _ in range(num_extra_axes)]
+
+    x = mb.reshape(x=x, shape=new_shape)
+    mean = mb.reduce_mean(x=x, axes=axes_, keep_dims=True)
+    var = _std(x, axes_, True, False, eps.val)
+    x = mb.sub(x=x, y=mean)
+    x = mb.real_div(x=x, y=var)
+    x = mb.reshape(x=x, shape=input_shape)
+    if weight is not None:
+        weight = mb.reshape(x=weight, shape=weight_shape)
+        x = mb.mul(x=x, y=weight)
+    if bias is not None:
+        bias = mb.reshape(x=bias, shape=bias_shape)
+        x = mb.add(x=x, y=bias)
+    context.add(x, node.name)
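+
+# Shape walk-through for the group_norm lowering above (illustrative):
+# x of shape (N, C, H, W) with num_groups=G is reshaped to (N, G, C/G, H, W) and
+# normalized over axes [2, 3, 4] (everything inside one group), then reshaped
+# back to (N, C, H, W) and scaled/shifted by weight/bias broadcast as (1, C, 1, 1).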
+
+
+@register_torch_op
+def embedding(context, node):
+    inputs = _get_inputs(context, node)
+    _input = inputs[0]
+    indices = inputs[1]
+
+    padding_idx = -1
+    scale_grad_by_freq = False
+    sparse = False
+    if len(inputs) >= 3:
+        padding_idx = inputs[2].val
+    if len(inputs) >= 4:
+        scale_grad_by_freq = inputs[3].val
+    if len(inputs) >= 5:
+        sparse = inputs[4].val
+
+    if padding_idx != -1 or scale_grad_by_freq or sparse:
+        logger.warning(
+            "Core ML embedding (gather) layer does not support any "
+            "inputs besides the weights and indices. Those given "
+            "will be ignored."
+        )
+
+    indices = mb.cast(x=indices, dtype="int32")
+
+    # Changing the axis from 0 is not an option in torch, so we don't expose it
+    gather = mb.gather(x=_input, indices=indices, name=node.name)
+    context.add(gather)
+
+
+@register_torch_op
+def hardtanh(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    _input = inputs[0]
+    min_val = inputs[1].val
+    max_val = inputs[2].val
+
+    res = mb.clip(x=_input, alpha=min_val, beta=max_val, name=node.name)
+    context.add(res)
+
+
+@register_torch_op(torch_alias=['concat'])
+def cat(context, node):
+    inputs = _get_inputs(context, node)
+    axis = 0 if len(inputs) == 1 else inputs[1]
+    concat = mb.concat(
+        values=promote_input_dtypes(inputs[0]), axis=axis, name=node.name
+    )
+    context.add(concat)
+
+
+@register_torch_op
+def stack(context, node):
+    inputs = _get_inputs(context, node)
+
+    values = inputs[0]
+
+    if len(inputs) < 2:
+        axis = 0
+    else:
+        axis = inputs[1]
+
+    if len(values) == 1:
+        res = mb.expand_dims(x=values[0], axes=[axis.val], name=node.name)
+    else:
+        res = mb.stack(values=values, axis=axis, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def item(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+
+    if inputs[0].shape == ():
+        # MIL ops that reduce already output a scalar, so no need to do
+        # anything.
+        res = inputs[0]
+    elif _np.all([d == 1 for d in inputs[0].shape]):
+        # Item only makes sense when called on a length-1 tensor. We use
+        # reduce_max as a workaround for not having a way to extract a scalar
+        # from a symbolic tensor.
+        res = mb.reduce_max(x=inputs[0], name=node.name)
+    else:
+        raise ValueError("expected input to be a scalar or a length 1 tensor")
+    context.add(res, node.name)
+
+
+def _cast(context, node, dtype, dtype_name):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+    # Input must either be a scalar or a (1 x 1 x ... x 1) tensor
+    if not (len(x.shape) == 0 or _np.all([d == 1 for d in x.shape])):
+        raise ValueError("input to cast must be either a scalar or a length 1 tensor")
+
+    if x.can_be_folded_to_const():
+        # If x is a compile-time constant, directly cast it to @dtype if it's
+        # not one already.
+        if not isinstance(x.val, dtype):
+            res = mb.const(val=dtype(x.val), name=node.name)
+        else:
+            res = x
+    elif x.shape == (1,):
+        x = mb.squeeze(x=x, name=node.name + "_item")
+        res = mb.cast(x=x, dtype=dtype_name, name=node.name)
+    else:
+        if len(x.shape) > 0:
+            # TODO: There's no MIL op to extract a value from a symbolic tensor,
+            # so as a workaround we use reduce_max to convert it to a scalar.
+            x = mb.reduce_max(x=x, name=node.name + "_item")
+        res = mb.cast(x=x, dtype=dtype_name, name=node.name)
+    context.add(res, node.name)
+
+
+@register_torch_op(torch_alias=["bool"])
+def _bool(context, node):
+    _cast(context, node, bool, "bool")
+
+
+@register_torch_op(torch_alias=["int"])
+def _int(context, node):
+    _cast(context, node, int, "int32")
+
+
+@register_torch_op
+def layer_norm(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _input = inputs[0]
+    normalized_shape = inputs[1]
+    weight = inputs[2]
+    bias = inputs[3]
+    eps = inputs[4]
+    # cudnn_enable = inputs[5] unused
+
+    layer_norm = mb.layer_norm(
+        x=_input,
+        axes=list(range(-len(normalized_shape.val), 0)),
+        gamma=weight,
+        beta=bias,
+        epsilon=eps,
+        name=node.name,
+    )
+    context.add(layer_norm)
+
+
+@register_torch_op
+def numtotensor(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+    if x.shape != ():
+        raise ValueError(
+            "numtotensor expected scalar input, got tensor with shape {}".format(
+                x.shape
+            )
+        )
+
+    if x.can_be_folded_to_const():
+        res = mb.const(val=[x.val], name=node.name)
+        context.add(res)
+    else:
+        context.add(x, node.name)
+
+
+def _ifzo_to_ifoz(weights, name):
+    """
+    i, f, z, o -> i, f, o, z
+    where weights_split[0] == i, etc.
+    Used to transform LSTM weights from the PyTorch
+    layout to the Core ML format.
+    """
+    split_size = weights.shape[0] // 4
+    weights_split = mb.split(x=weights, split_sizes=_np.array([split_size] * 4), axis=0)
+    return mb.concat(
+        values=[weights_split[0], weights_split[1], weights_split[3], weights_split[2]],
+        axis=0,
+    )
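+
+# Worked example for _ifzo_to_ifoz (illustrative): for hidden size H, a stacked
+# tensor of shape (4*H, ...) holding the PyTorch gate blocks [i, f, g, o]
+# (g is called z here) is split into four (H, ...) chunks and re-concatenated
+# as [i, f, o, g], the gate order expected by mb.lstm.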
+
+
+def _pytorch_hidden_to_coreml_milops(x, name):
+    """
+    Used to transform LSTM state values (hn, cn)
+    from the PyTorch layout to the Core ML format.
+    """
+    split_size = x.shape[0] // 2
+    x_split = mb.split(x=x, split_sizes=_np.array([split_size] * 2), axis=0)
+    x_concat = mb.concat(
+        values=[x_split[0], x_split[1]],
+        axis=2,
+    )
+    # (4.) See docstring to @lstm
+    return mb.squeeze(x=x_concat, axes=_np.array([0]), name=name)
+
+
+def _add_gru_layer(_input, h0, wi, wh, bi, bh, h_list_name, h_name):
+    """
+    Add a single GRU layer.
+    Please note that the Core ML GRU has a different definition from Torch's,
+    so we cannot use mb.gru and instead need to implement it with a while loop.
+    To be more specific, Core ML computes
+
+        o_t = activation(W_{io} x_t + r_t * W_{ho} h_(t−1) + b_{o})
+
+    while torch computes
+
+        o_t = activation(W_{io} x_t + b_{io} + r_t * (W_{ho} h_(t−1) + b_{ho}))
+
+    Inputs:
+        _input : (seq_len, batch_size, input_dim)
+        h0 : (1, batch_size, hidden_dim)
+        wi : (3*hidden_dim, input_dim) for the first layer, else (3*hidden_dim, hidden_dim)
+        wh : (3*hidden_dim, hidden_dim)
+        bi : (3*hidden_dim)
+        bh : (3*hidden_dim)
+
+    Return:
+        h_list : the list that contains the hidden states for each time step,
+                 with shape (seq_len, batch_size, hidden_dim)
+        h : the last hidden state, with shape (1, batch_size, hidden_dim)
+    """
+
+    # split the weights and bias
+    w_ir, w_iz, w_in = _np.split(wi, 3)
+    w_hr, w_hz, w_hn = _np.split(wh, 3)
+    b_ir, b_iz, b_in = _np.split(bi, 3)
+    b_hr, b_hz, b_hn = _np.split(bh, 3)
+
+    # allocate hlist
+    # hlist : (seq_len, batch_size, hidden_dim)
+    x_shape = mb.shape(x=_input)
+    seq_len = mb.slice_by_index(x=x_shape, begin=[0], end=[1])
+    h_shape = mb.shape(x=h0)
+    h_shape = mb.slice_by_index(x=h_shape, begin=[1], end=[3])
+    h_list_shape = mb.concat(values=[seq_len, h_shape], axis=0)
+    h_list = mb.fill(shape=h_list_shape)
+
+    # concatenate h0 to h_list
+    # h_list: (seq_len + 1, batch_size, hidden_dim)
+    h_list = mb.concat(values=[h0, h_list], axis=0)
+
+    def cond(i, h_list):
+        return mb.less(x=i, y=seq_len)
+
+    def body(i, h_list):
+        # slice for the x and state for time step i
+        # the resulting shapes:
+        #   xt : (batch_size, input_dim)
+        #   h_prev : (batch_size, hidden_dim)
+        xt = mb.gather(x=_input, indices=i, axis=0)
+        h_prev = mb.gather(x=h_list, indices=i, axis=0)
+
+        xt = mb.squeeze(x=xt, axes=[0])
+        h_prev = mb.squeeze(x=h_prev, axes=[0])
+
+        # rt = sigmoid(wir * xt + whr * h_prev + bir + bhr)
+        # rt : (batch_size, hidden_dim)
+        rt_1 = mb.linear(x=xt, weight=w_ir, bias=b_ir)
+        rt_2 = mb.linear(x=h_prev, weight=w_hr, bias=b_hr)
+        rt = mb.add(x=rt_1, y=rt_2)
+        rt = mb.sigmoid(x=rt)
+
+        # zt = sigmoid(wiz * xt + whz * h_prev + biz + bhz)
+        # zt : (batch_size, hidden_dim)
+        zt_1 = mb.linear(x=xt, weight=w_iz, bias=b_iz)
+        zt_2 = mb.linear(x=h_prev, weight=w_hz, bias=b_hz)
+        zt = mb.add(x=zt_1, y=zt_2)
+        zt = mb.sigmoid(x=zt)
+
+        # nt = tanh(win * xt + bin + rt(whn * h_prev + bhn))
+        # nt : (batch_size, hidden_dim)
+        nt_1 = mb.linear(x=xt, weight=w_in, bias=b_in)
+        nt_2 = mb.linear(x=h_prev, weight=w_hn, bias=b_hn)
+        nt_2 = mb.mul(x=rt, y=nt_2)
+        nt = mb.add(x=nt_1, y=nt_2)
+        nt = mb.tanh(x=nt)
+
+        # h = (1 - zt) * nt + zt * h_prev
+        # h : (batch_size, hidden_dim)
+        h_1 = mb.sub(x=1., y=zt)
+        h_1 = mb.mul(x=h_1, y=nt)
+        h_2 = mb.mul(x=zt, y=h_prev)
+        h = mb.add(x=h_1, y=h_2)
+
+        # update counter
+        counter = mb.add(x=i, y=1)
+
+        # update h and h_list
+        h = mb.expand_dims(x=h, axes=[0])
+        h_list = mb.scatter(data=h_list, indices=counter, updates=h)
+
+        return (
+            counter,
+            h_list,
+        )
+
+    _, h_list = mb.while_loop(
+        _cond=cond, _body=body, loop_vars=([0], h_list),
+    )
+
+    # slice h0 out of h_list
+    h_list = mb.slice_by_index(
+        x=h_list,
+        begin=[1, 0, 0],
+        end=[0, 0, 0],
+        begin_mask=[False, True, True],
+        end_mask=[True, True, True],
+        name=h_list_name,
+    )
+
+    # get the last state of h_list
+    if seq_len.val is None or seq_len.val > 1:
+        h = mb.slice_by_index(
+            x=h_list,
+            begin=[-1, 0, 0],
+            end=[-2, 0, 0],
+            begin_mask=[False, True, True],
+            end_mask=[False, True, True],
+            stride=[-1, 1, 1],
+            name=h_name,
+        )
+    else:
+        h = h_list
+
+    return h_list, h
+
+
+@register_torch_op
+def gru(context, node):
+    inputs = _get_inputs(context, node, expected=9)
+
+    _input = inputs[0]
+    h0 = inputs[1]
+    weights_list = inputs[2]
+    has_bias = inputs[3].val
+    num_layers = inputs[4].val
+    dropout = inputs[5]
+    bidirectional = inputs[7].val
+    batch_first = inputs[8].val
+
+    # For each layer of GRU, the layout of the weights list is [Wi, Wh, bi, bh] with has_bias == True,
+    # and is [Wi, Wh] with has_bias == False.
+    # If bidirectional == True, the list is doubled up, corresponding to the forward and backward directions.
+    expected_num_weights = 2 * num_layers * (int(has_bias) + 1) * (int(bidirectional) + 1)
+    if len(weights_list) != expected_num_weights:
+        raise ValueError(
+            "Incorrect weights shape for gru layer: Expected: {}. Received {}".format(
+                expected_num_weights, len(weights_list)
+            )
+        )
+
+    # Transpose the input data to (seq_len, batch_size, input_dim) if batch_first == True
+    if batch_first:
+        _input = mb.transpose(x=_input, perm=[1, 0, 2])
+
+    # iterate through all the layers
+    x = _input
+    state_out_list = []
+
+    def _get_weights_and_bias(weights_list, index, num_layers, has_bias, bidirectional, mode):
+        num_weights_per_layer = len(weights_list) // num_layers
+        weights = weights_list[
+            num_weights_per_layer * index : num_weights_per_layer * (index + 1)
+        ]
+
+        if bidirectional:
+            weights_f, weights_r = (
+                weights[: num_weights_per_layer // 2],
+                weights[num_weights_per_layer // 2 :],
+            )
+            assert len(weights_f) == len(weights_r)
+        else:
+            weights_f, weights_r = weights, []
+
+        if mode == "forward":
+            weights = weights_f
+        elif mode == "reverse":
+            weights = weights_r
+
+        wi, wh = weights[0].val, weights[1].val
+
+        if has_bias:
+            bi, bh = weights[2].val, weights[3].val
+        else:
+            hidden_dim = wh.shape[1]
+            bi, bh = _np.zeros(3 * hidden_dim), _np.zeros(3 * hidden_dim)
+
+        return wi, wh, bi, bh
+
+    def _get_initial_state(h0, i, bidirectional, mode):
+        if mode == "forward":
+            return mb.slice_by_index(
+                x=h0,
+                begin=[(1 + int(bidirectional)) * i, 0, 0],
+                end=[(1 + int(bidirectional)) * i + 1, 0, 0],
+                begin_mask=[False, True, True],
+                end_mask=[False, True, True],
+            )
+        if mode == "reverse":
+            assert bidirectional
+            return mb.slice_by_index(
+                x=h0,
+                begin=[2 * i + 1, 0, 0],
+                end=[2 * (i + 1), 0, 0],
+                begin_mask=[False, True, True],
+                end_mask=[False, True, True],
+            )
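+    # h0 layout reminder (illustrative): h0 has shape (num_layers * num_directions, B, H),
+    # with rows ordered [layer0_fwd, layer0_bwd, layer1_fwd, layer1_bwd, ...] when
+    # bidirectional, so layer i reads row 2*i (forward) and row 2*i + 1 (reverse);
+    # the slices above pick out exactly those rows.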
+
+    seq_output_name = node.outputs[0]  # output sequence name
+    state_output_name = node.outputs[1]  # output state name
+
+    for i in range(num_layers):
+        # get layer names
+        x_name = seq_output_name + "_layer_" + str(i) if i < num_layers - 1 else seq_output_name
+        h_name = state_output_name + '_layer_' + str(i) if num_layers > 0 else state_output_name
+
+        if batch_first:
+            x_name += "_tmp"
+
+        if bidirectional:
+            x_f_name = x_name + '_forward'
+            h_f_name = h_name + '_forward'
+            x_r_name = x_name + '_backward'
+            h_r_name = h_name + '_backward'
+        else:
+            x_f_name = x_name
+            h_f_name = h_name
+
+        # forward direction
+        x_f = x
+        wi_f, wh_f, bi_f, bh_f = _get_weights_and_bias(
+            weights_list, i, num_layers, has_bias, bidirectional, "forward"
+        )
+        initial_h_f = _get_initial_state(h0, i, bidirectional, "forward")
+        x_f, h_f = _add_gru_layer(x_f, initial_h_f, wi_f, wh_f, bi_f, bh_f, x_f_name, h_f_name)
+
+        # reverse direction
+        if bidirectional:
+            x_r = mb.reverse(x=x, axes=[0])
+            wi_r, wh_r, bi_r, bh_r = _get_weights_and_bias(
+                weights_list, i, num_layers, has_bias, bidirectional, "reverse"
+            )
+            initial_h_r = _get_initial_state(h0, i, bidirectional, "reverse")
+            x_r, h_r = _add_gru_layer(
+                x_r,
+                initial_h_r,
+                wi_r,
+                wh_r,
+                bi_r,
+                bh_r,
+                x_r_name + "_reverse",
+                h_r_name,
+            )
+            x_r = mb.reverse(x=x_r, axes=[0], name=x_r_name)
+
+            # concatenate the outputs from the forward and reverse directions
+            x = mb.concat(values=[x_f, x_r], axis=2, name=x_name)
+            h = mb.concat(values=[h_f, h_r], axis=0, name=h_name)
+        else:
+            x = x_f
+            h = h_f
+
+        state_out_list.append(h)
+
+    # rnn output
+    if batch_first:
+        x = mb.transpose(x=x, perm=[1, 0, 2], name=seq_output_name)
+    context.add(x, seq_output_name)
+
+    # state output
+    if len(state_out_list) > 1:
+        h = mb.concat(values=state_out_list, axis=0, name=state_output_name)
+    context.add(h, state_output_name)
+
+
+def _add_simple_rnn(context, node, activation):
+    inputs = _get_inputs(context, node, expected=9)
+
+    '''
+    Batch size: B
+    Sequence length: S
+    Input dimension: C
+    Hidden dimension: H
+
+    (1) _input : (B, S, C) if batch_first == True, else (S, B, C)
+    (2) h0: (num_layers, B, H)
+    '''
+    _input = inputs[0]
+    h0 = inputs[1]
+    weights_list = inputs[2]
+    has_bias = inputs[3].val
+    num_layers = inputs[4].val
+    dropout = inputs[5]
+    bidirectional = inputs[7].val
+    batch_first = inputs[8].val
+
+    # We only support uni-directional simple RNN for now
+    if bidirectional:
+        raise NotImplementedError("Bidirectional simple RNN not supported.")
+
+    expected_num_weights = 2 * num_layers * (int(has_bias) + 1)
+    if len(weights_list) != expected_num_weights:
+        raise ValueError(
+            "Incorrect weights shape for rnn layer: Expected: {}. Received {}".format(
+                expected_num_weights, len(weights_list)
+            )
+        )
+
+    # Transpose the input data to (S, B, C) if batch_first == True
+    if batch_first:
+        _input = mb.transpose(x=_input, perm=[1, 0, 2])
+
+    state_out_list = []
+    out = _input
+
+    for i in range(num_layers):
+        if has_bias:
+            weight_ih = weights_list[4 * i]
+            weight_hh = weights_list[4 * i + 1]
+            bias = mb.add(x=weights_list[4 * i + 2], y=weights_list[4 * i + 3])
+        else:
+            weight_ih = weights_list[2 * i]
+            weight_hh = weights_list[2 * i + 1]
+            bias = None
+
+        # get the initial state
+        initial_h = mb.slice_by_index(
+            x=h0,
+            begin=[i, 0, 0],
+            end=[0, 0, 0],
+            stride=[1, 1, 1],
+            begin_mask=[False, True, True],
+            end_mask=[False, True, True],
+            squeeze_mask=[True, False, False],
+        )
+
+        # get the RNN output for each unit
+        out, state = mb.rnn(
+            x=out,
+            initial_h=initial_h,
+            weight_ih=weight_ih,
+            weight_hh=weight_hh,
+            bias=bias,
+            output_sequence=True,
+            activation=activation,
+        )
+
+        # append the state to a list, to be stacked later
+        state_out_list.append(state)
+
+    # rnn output
+    output_name = node.outputs[0]
+    if batch_first:
+        out = mb.transpose(x=out, perm=[1, 0, 2], name=output_name)
+    else:
+        out = mb.identity(x=out, name=output_name)
+    context.add(out, output_name)
+
+    # stack the states into a single tensor
+    state_output_name = node.outputs[1]
+    if num_layers == 1:
+        state = mb.expand_dims(x=state_out_list[0], axes=[0], name=state_output_name)
+    else:
+        state = mb.stack(values=state_out_list, axis=0, name=state_output_name)
+    context.add(state, state_output_name)
+
+
+@register_torch_op
+def rnn_tanh(context, node):
+    _add_simple_rnn(context, node, "tanh")
+
+
+@register_torch_op
+def rnn_relu(context, node):
+    _add_simple_rnn(context, node, "relu")
+
+
+def _add_mil_lstm(input, initial_h, initial_c, weights, has_bias, bidirectional, name):
+    """
+    Most of this code is to transform the tensors into
+    a shape acceptable by the Core ML implementation of LSTM.
+ + For weights and biases, per direction, pytorch uses two tensors: + (ii, if, ig, io) stacked on top of each other for each layer (tensor 1) + and (hi, hf, hg, ho) stacked on top of each other for each layer (tensor 2). + That is, (W_ii|W_if|W_ig|W_io), of shape (4*hidden_size, input_size) and + (W_hi|W_hf|W_hg|W_ho), of shape (4*hidden_size, hidden_size). + + + The Core ML LSTM op expects two tensors, weight and bias. So + the tensors for weight and bias are separated from pytorch's @weights list (1.). + For the bias tensor, the Core ML LSTM op expects the form ii, if, io, ig and hi, hf, ho, hg, + requiring the ifzo_to_ifoz function. The input and hidden biases are then added into one (2.). + Similar to the bias, the input and hidden weights require a different layout (3.). + + initial_h and initial_c are lists of "num_layers" tensors, each of shape [n_directions, B, H], + where n_directions = 1 or 2, + whereas the shapes of the initial states to MIL's LSTM, BiLSTM must be [B, H] and [B, 2*H] respectively. + This means we need to do the following transformations: + - if it's an LSTM (n_directions=1): + squeeze the first dimension of initial_h/initial_c, before feeding it to MIL's LSTM + - if it's a BiLSTM (n_directions=2): + - split the input, shape=(2, B, H), to get (1,B,H) and (1,B,H) + - concatenate to get (1,B,2*H) + - squeeze to get (B,2*H) + """ + + if bidirectional: + if has_bias: + # (1.) + biases = weights[2:4] + weights[6:8] + weights = weights[0:2] + weights[4:6] + + # (2.) + assert len(biases) == 4 + for index in range(len(biases)): + biases[index] = _ifzo_to_ifoz( + biases[index], + name="{}_lstm_bias_reshape_{}".format(name, index), + ) + f_b = mb.add(x=biases[0], y=biases[1], ) + r_b = mb.add(x=biases[2], y=biases[3], ) + + # (3.) + f_ih_w = _ifzo_to_ifoz( + weights[0], name=name + "_lstm_forward_ih_weights_ifoz_to_ifzo", + ) + f_hh_w = _ifzo_to_ifoz( + weights[1], name=name + "_lstm_forward_hh_weights_ifoz_to_ifzo", + ) + r_ih_w = _ifzo_to_ifoz( + weights[2], name=name + "_lstm_reverse_ih_weights_ifoz_to_ifzo", + ) + r_hh_w = _ifzo_to_ifoz( + weights[3], name=name + "_lstm_reverse_hh_weights_ifoz_to_ifzo", + ) + + h = _pytorch_hidden_to_coreml_milops(initial_h, name=name + "_lstm_h0_reshaped") + c = _pytorch_hidden_to_coreml_milops(initial_c, name=name + "_lstm_c0_reshaped") + return mb.lstm(x=input, + initial_h=h, + initial_c=c, + weight_ih=f_ih_w, + weight_hh=f_hh_w, + weight_ih_back=r_ih_w, + weight_hh_back=r_hh_w, + bias=(f_b if has_bias else None), + bias_back=(r_b if has_bias else None), + direction="bidirectional", + output_sequence=True, + name=name) + else: + if has_bias: + # (1.) + biases = weights[len(weights) // 2:] + weights = weights[: len(weights) // 2] + # (2.) + b = mb.add(x=biases[0], y=biases[1], ) + b = _ifzo_to_ifoz( + b, name=name + "_lstm_bias_transformed", + ) + # (3.)
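+ # _ifzo_to_ifoz reorders the gate axis from PyTorch's (i, f, g, o) layout + # to the (i, f, o, g) layout that the Core ML LSTM op expects.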
+ f_ih_w = _ifzo_to_ifoz( + weights[0], name=name + "_lstm_ih_weights_ifoz_to_ifzo", + ) + f_hh_w = _ifzo_to_ifoz( + weights[1], name=name + "_lstm_hh_weights_ifoz_to_ifzo", + ) + + h = mb.squeeze(x=initial_h, axes=_np.array([0]), name=name + "_lstm_h0_squeeze") + c = mb.squeeze(x=initial_c, axes=_np.array([0]), name=name + "_lstm_c0_squeeze") + + return mb.lstm(x=input, + initial_h=h, + initial_c=c, + weight_ih=f_ih_w, + weight_hh=f_hh_w, + bias=(b if has_bias else None), + direction="forward", + output_sequence=True, + name=name) + + +@register_torch_op +def lstm(context, node): + inputs = _get_inputs(context, node, expected=9) + + _input = inputs[0] + + # There are two cases here: + # (1) the input tensor is a PackedSequence object, + # in this case, the second input of the lstm layer is the batch_size (MIL Var). + # (2) the input tensor is a normal tensor, + # in this case, the second input is an array. + # As a result, we can use the second input to identify which case we are dealing with. + + has_batch_sizes = not isinstance(inputs[1], Iterable) + if has_batch_sizes: + batch_sizes = inputs[1] + h0, c0 = inputs[2] + weights_list = inputs[3] + has_bias = inputs[4].val + num_layers = inputs[5].val + dropout = inputs[6] + bidirectional = inputs[8].val + # the output of the _pack_padded_sequence is always in the layout of batch first + batch_first = True + else: + h0, c0 = inputs[1] + weights_list = inputs[2] + has_bias = inputs[3].val + num_layers = inputs[4].val + dropout = inputs[5] + bidirectional = inputs[7].val + batch_first = inputs[8].val + + ''' + Torch LSTM layer's input shapes: + + (1) first input + (Seq, B, C) : if batch_first = False + (B, Seq, C) : if batch_first = True + + (2) & (3) initialization states + (num_layers, B, H) : if bidirectional = False + (num_layers * 2, B, H) : if bidirectional = True + + + For the MIL LSTM layer, these are the input shapes: + + (1) first input: (Seq, B, C) + this means, if batch_first=True, we need to insert a transpose op first + + (2) & (3) initialization states + MIL's LSTM layer does not natively support the "num_layers" parameter. + So, when num_layers > 1, we add multiple MIL LSTM ops in a sequence. + Each of these LSTM ops will take in initialization states in the following shape: + (B, H) if bidirectional = False + (B, 2*H) if bidirectional = True + ''' + + if batch_first: + _input = mb.transpose(x=_input, perm=[1, 0, 2], name=_input.name + "_batch_first_transpose") + + expected_num_weights = 2 * num_layers * (int(bidirectional) + 1) * (int(has_bias) + 1) + if len(weights_list) != expected_num_weights: + raise ValueError( + "Incorrect weights shape for lstm layer: Expected: {}. 
Recieved {}".format( + expected_num_weights, len(weights_list) + ) + ) + + # shape of h0 and c0 are (num_layers * n_directions, B, H) + if num_layers == 1: + all_initial_h = [h0] # [(n_directions, B, H)] + all_initial_c = [c0] # [(n_directions, B, H)] + else: + all_initial_h = mb.split( + x=h0, num_splits=num_layers, axis=0 + ) # [(n_directions, B, H)] + all_initial_c = mb.split( + x=c0, num_splits=num_layers, axis=0 + ) # [(n_directions, B, H)] + + n_weights_per_layer = int(len(weights_list) / num_layers) + x = _input + h_out_list = [] + c_out_list = [] + for i in range(num_layers): + if i < num_layers - 1: + op_name = node.name + "_lstm_layer_{}".format(i) + else: + if batch_first: + op_name = node.name + "_batch_first" + else: + op_name = node.name + + lstm_out = _add_mil_lstm( + input=x, + initial_h=all_initial_h[i], + initial_c=all_initial_c[i], + weights=weights_list[ + i * n_weights_per_layer : (i + 1) * n_weights_per_layer + ], + has_bias=has_bias, + bidirectional=bidirectional, + name=op_name, + ) + # shape of lstm_out[0] == (S,B,H) if bidirectional = True else (S, B, 2*H) + x = lstm_out[0] + # shape of lstm_out[1] == (B,H) if bidirectional = False else (B, 2*H) + h_out_list.append(lstm_out[1]) + # shape of lstm_out[2] == (B,H) if bidirectional = False else (B, 2*H) + c_out_list.append(lstm_out[2]) + + ''' + For torch, these are the dimensions of the 3 output tensors: + (1) output[0] : + (Seq, B, H) if batch_first = False, bidirectional = False + (Seq, B, 2*H) if batch_first = False, bidirectional = True + (B, Seq, H) if batch_first = True, bidirectional = False + (B, Seq, 2*H) if batch_first = True, bidirectional = True + + (2) & (3) these are the state outputs: + (num_layers, B, H) if bidirectional = False + (num_layers * 2, B, H) if bidirectional = True + + MIL lstm layer's output shapes: + (1) output[0]: + (Seq, B, H) if bidirectional = False + (Seq, B, 2*H) if bidirectional = True + This means we need a transpose op if batch_first is True + + (2) & (3) shapes of the state outputs: + each MIL LSTM op will produce final state tensors with the following shape: + (B, H) if bidirectional = False + (B, 2*H) if bidirectional = True + + stack/expand the final state tensors to match the Torch output + ''' + for index, (name, output) in enumerate(zip(node.outputs, lstm_out)): + if index > 0: + # index > 0 ===> its one of the state outputs (h or c) + if bidirectional: + if num_layers == 1: + out1, out2 = mb.split( + x=output, num_splits=2, axis=1 + ) # each output of shape [B, H] after the split + final_out = mb.stack( + values=[out1, out2], axis=0, name=name + ) # [2, B, H] + context.add(final_out, name) + else: + out_state_tensors_list = ( + h_out_list if index == 1 else c_out_list + ) # each tensor in the list is of shape (B, 2*H) + list_of_tensors_to_stack = [] + for i in range(num_layers): + out1, out2 = mb.split( + x=out_state_tensors_list[i], num_splits=2, axis=1 + ) # each output of shape [B, H] after the split + out = mb.stack(values=[out1, out2], axis=0) # [2, B, H] + list_of_tensors_to_stack.append(out) + final_out = mb.concat( + values=list_of_tensors_to_stack, axis=0, name=name + ) # output of shape (num_layers * 2, B, H) + context.add(final_out, name) + else: + if num_layers == 1: + unsqueeze = mb.expand_dims(x=output, axes=[0], name=name) + context.add(unsqueeze, name) + else: + out = mb.stack( + values=h_out_list if index == 1 else c_out_list, + axis=0, + name=name, + ) + context.add(out, name) + else: + if batch_first: + output = mb.transpose(x=output, perm=[1, 0, 2], 
name=name) + context.add(output, name) + + +def _get_scales_from_output_size(output_size, input_shape): + scales = [] + if output_size is not None: + # output_size will be either + # (1) A list of Var, and each Var indicates the output size for that dimension + # (2) A single Var which indicates the whole output size + # (3) A numpy array + + if isinstance(output_size, list): + output_size = [x.val for x in output_size] + if isinstance(output_size, Var): + output_size = [x for x in output_size.val] + if isinstance(output_size, _np.ndarray): + output_size = output_size.tolist() + + # The output size is computed using the formula floor(scale * input_size) in Core ML (and PyTorch). + # Thus, when computing the scales from the output size, we add a small positive constant to the output size + # to make sure that the floor formula results in the correct output size and not 1 unit smaller. + # For instance, if output size = 5 and input size = 2, then scale will be 2.5, which can get + # represented as 2.49999 due to float precision issues, and this might result in an output size of 4 + # instead of 5, without the epsilon correction. + + if len(output_size) == 1: + # 1d upsampling + Hout = output_size[0] + Hin = input_shape[-1] + scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin + scales = scales_h + elif len(output_size) == 2: + # 2d upsampling + Hout, Wout = output_size[0], output_size[1] + Hin, Win = input_shape[-2], input_shape[-1] + scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin + scales_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win + scales = [scales_h, scales_w] + else: + msg = "Only 1d and 2d upsampling are supported." + raise NotImplementedError(msg) + + return scales + + +def _is_float_value(x, threshold=0.001): + return x - _math.floor(x) > threshold + + +@register_torch_op +def upsample_linear1d(context, node): + inputs = _get_inputs(context, node) + x = inputs[0] + output_size = inputs[1] + align_corners = bool(inputs[2].val) + scale = inputs[3] + + scale_factor = None + + if scale is not None and scale.val is not None and scale.shape == (1,): + # Get the scale factor from provided inputs + # This happens when recompute_scale_factor = False + scale_factor = scale.val[0] + + # Currently, we are not supporting recompute_scale_factor = False, align_corners = False with float output size + _, _, h = x.shape + if not is_symbolic(h): + # For the static input shape, we can compute the output size beforehand, and check if it is a float value + output_size = h * scale_factor + is_float = _is_float_value(output_size) + else: + # For the dynamic input shape, we check if the scale factor itself is float + is_float = _is_float_value(scale_factor) + + if is_float and not align_corners: + msg = ( + "recompute_scale_factor = False, align_corners = False with float output size is " + + "not supported for the upsample op {}".format(node.name) + ) + raise NotImplementedError(msg) + + elif isinstance(output_size, list): + # When the input shape is dynamic and recompute_scale_factor = True, + # we need to trace the graph to find the scale factor.
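+ # The 1d case is handled via the 2d op: expand the 3d input to 4d with a + # trailing width dimension of 1, upsample with an output width of 1, then squeeze it back.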
+ x = mb.expand_dims(x=x, axes=[3]) + x = mb.torch_upsample_bilinear( + x=x, + output_height=output_size[0], + output_width=1, + align_corners=align_corners, + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + return + + elif output_size.val is not None: + # Infer the scale factor from the provided output size + scale_factor = _get_scales_from_output_size(output_size, x.shape) + + # Expand the input to a 4d tensor, and use MIL's upsample_bilinear op + x = mb.expand_dims(x=x, axes=[3]) + x = mb.upsample_bilinear( + x=x, + scale_factor_height=scale_factor, + scale_factor_width=1., + align_corners=align_corners, + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + + +@register_torch_op +def upsample_bilinear2d(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + output_size = inputs[1] + align_corners = bool(inputs[2].val) + scale_factors = inputs[3] + + scales_h, scales_w = None, None + + if ( + scale_factors is not None + and scale_factors.val is not None + and scale_factors.rank == 1 + and scale_factors.shape[0] == 2 + ): + # get scale factors from provided inputs + # this happens when recompute_scale_factor = False + scale_factors = scale_factors.val + scales_h = scale_factors[0] + scales_w = scale_factors[1] + + # currently, we are not supporting recompute_scale_factor = False, align_corners = False with float output size + _, _, h, w = _input.shape + if not is_symbolic(h) and not is_symbolic(w): + # For the static input shape, we can compute the output size beforehand + output_h = h * scales_h + output_w = w * scales_w + is_h_float = _is_float_value(output_h) + is_w_float = _is_float_value(output_w) + + else: + # For the dynamic input shape, we check if the scale factor itself is float + is_h_float = _is_float_value(scales_h) + is_w_float = _is_float_value(scales_w) + + if (is_h_float or is_w_float) and not align_corners: + msg = ( + "recompute_scale_factor = False, align_corners = False with float output size is " + + "not supported for the upsample op {}".format(node.name) + ) + raise NotImplementedError(msg) + + elif ( + isinstance(output_size, list) + and output_size[0].val is None + and output_size[1].val is None + ): + # the input shape is dynamic and recompute_scale_factor = True + # need to trace the graph to find the scale factor + # we define a torch front end op mb.torch_upsample_bilinear to resolve the const scaling factor + torch_upsample_bilinear = mb.torch_upsample_bilinear( + x=_input, + output_height=output_size[0], + output_width=output_size[1], + align_corners=align_corners, + name=node.name, + ) + context.add(torch_upsample_bilinear) + return + else: + # infer scale factors from output sizes + # This happens when recompute_scale_factor = True or the output_size is specified + scales = _get_scales_from_output_size(output_size, _input.shape) + if scales: + scales_h, scales_w = scales + + if scales_h is None or scales_w is None: + if len(inputs) == 5: + # For torch==1.5.0, upsample_bilinear2d has 5 inputs. 
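+ # In that case, the last two inputs hold the height and width scales directly.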
+ scales_h = inputs[3] + scales_w = inputs[4] + else: + raise ValueError("Failed to infer scale factors from inputs.") + + upsample_bilinear = mb.upsample_bilinear( + x=_input, + scale_factor_height=scales_h, + scale_factor_width=scales_w, + align_corners=align_corners, + name=node.name, + ) + context.add(upsample_bilinear) + + +@register_torch_op +def upsample_nearest1d(context, node): + inputs = _get_inputs(context, node) + x = inputs[0] + output_size = inputs[1] + scale = inputs[2] + + scale_factor = None + + if scale is not None and scale.val is not None and scale.shape == (1,): + # Get the scale factor from provided inputs + # This happens when recompute_scale_factor = False + scale_factor = scale.val[0] + + elif isinstance(output_size, list): + # When the input shape is dynamic and recompute_scale_factor = True, + # we need to trace the graph to find the scale factor. + x = mb.expand_dims(x=x, axes=[3]) + x = mb.torch_upsample_nearest_neighbor( + x=x, + output_height=output_size[0], + output_width=1, + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + return + else: + # Infer scale factors from output sizes + scale_factor = _get_scales_from_output_size(output_size, x.shape) + + x = mb.expand_dims(x=x, axes=[3]) + x = mb.upsample_nearest_neighbor( + x=x, + scale_factor_height=scale_factor, + scale_factor_width=1., + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + + +@register_torch_op +def upsample_nearest2d(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + scales_h, scales_w = None, None + + output_size = inputs[1] + scale_factors = inputs[2] + + if ( + scale_factors is not None + and scale_factors.val is not None + and scale_factors.rank == 1 + and scale_factors.shape[0] == 2 + ): + # get scale factors from provided inputs + scale_factors = scale_factors.val + scales_h = scale_factors[0] + scales_w = scale_factors[1] + elif ( + isinstance(output_size, list) + and output_size[0].val is None + and output_size[1].val is None + ): + # the input shape is dynamic and recompute_scale_factor = True + # need to trace the graph to find the scale factor + # we define a torch front end op mb.torch_upsample_nearest_neighbor to resolve the const scaling factor + torch_upsample_nearest2d = mb.torch_upsample_nearest_neighbor( + x=_input, + output_height=output_size[0], + output_width=output_size[1], + name=node.name, + ) + context.add(torch_upsample_nearest2d) + return + else: + # infer scale factors from output sizes + scales = _get_scales_from_output_size(output_size, _input.shape) + if scales: + scales_h, scales_w = scales + + if scales_h is None or scales_w is None: + if len(inputs) == 5: + # For torch==1.5.0, upsample_nearest2d has 5 inputs. + scales_h = inputs[3] + scales_w = inputs[4] + else: + raise ValueError("Failed to infer scale factors from inputs.") + + upsample_nearest2d = mb.upsample_nearest_neighbor( + x=_input, + scale_factor_height=scales_h, + scale_factor_width=scales_w, + name=node.name, + ) + context.add(upsample_nearest2d) + + +@register_torch_op(torch_alias=["listunpack"]) +def tupleunpack(context, node): + inputs = _get_inputs(context, node, expected=1) + values = inputs[0] + + # Node input could have been turned into constant array in @tupleconstruct + if not isinstance(values, (tuple, list)): + if values.val is not None: + values = values.val + else: + # The `values` could be a single Var with symbolic val.
+ values = [values] + + if len(values) != len(node.outputs): + raise ValueError(f"unpack node expected {len(node.outputs)} outputs, got {len(values)}") + + # @value is either a numpy primitive or a Var object + for value, output in zip(values, node.outputs): + if not isinstance(value, Var): + value = _construct_constant(value, name=output) + assert isinstance(value, Var) + context.add(value, output) + + +@register_torch_op +def loop(context, node): + """ In TorchIR, a loop looks like: + %y_1, ..., %y_r = prim::Loop(%max_trip_count, %initial_condition, %x_1, ..., %x_r) + block0(%i, %a_1, ..., %a_r): + %b_1, ..., %b_m = some::node(%a_value_from_outer_block, %a_1) + %iter_condition = some::other_node(%a_2) + -> (%iter_condition, %b_1, ..., %b_r) + + This translates to pseudo code as: + y_1, ..., y_r = x_1, ..., x_r + condition = initial_condition + i = 0 + while condition and i < max_trip_count: + a_1, ..., a_r = y_1, ..., y_r + + ############################################################ + # Actual body of the loop + b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1) + iter_condition = some::node(a_2) + ############################################################ + + y_1, ..., y_r = b_1, ..., b_r + condition = iter_condition + i += 1 + + Which further translates to MIL while_loop as: + loop_vars = (0, initial_condition, x_1, ..., x_r) + _cond = { + return (loop_vars[1] and loop_vars[0] < max_trip_count) + } + _body = { + a_1, ..., a_r = loop_vars[2], ..., loop_vars[-1] + b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1) + iter_condition = some::node(a_2) + return (loop_vars[0] + 1, iter_condition, b_1, ..., b_r) + } + + For loops pass True for %initial_condition and %iter_condition + While loops set %max_trip_count to INT_MAX and %i is unused + """ + name = node.name + # inputs[0]: max iter count + # inputs[1]: initial condition + # inputs[2]: block input 0 + # ... + # inputs[N+2]: block input N + inputs = _get_inputs(context, node) + max_iter_count = inputs[0] + + # Magic default signals this is a while-only loop, so no iteration count + # is needed. + has_iter_count = max_iter_count is not None + + # Create an iteration count. This will only be used if this is a for loop. + iter_count = mb.const(val=0, name=node.name + "_iter") + # @loop_vars is tuple(iter_count, cond, inputs...) + loop_vars = tuple([iter_count] + inputs[1:]) + + def _loop_cond(*loop_vars): + cond = loop_vars[1] + + # Check the iteration count if we're keeping track. + if has_iter_count: + iter_count = loop_vars[0] + iter_cond = mb.less( + x=iter_count, y=max_iter_count, name=node.name + "_cond" + ) + return mb.logical_and(x=cond, y=iter_cond) + else: + return mb.identity(x=cond) + + def _shapes_are_equivalent(shape1, shape2): + """ Compares two sets of tensor shapes and returns True if they are + equivalent. That is, they are the same rank, and each dimension + is the same or symbolic. + """ + if len(shape1) != len(shape2): + return False + + # Each dimension must have the same integer length, or else be + # symbolic.
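+ # For example, (2, s0) and (2, s1) are equivalent, while (2, 3) and (2, 4) are not.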
+ all_equivalent = [ + s1 == s2 or (isinstance(s1, Symbol) and isinstance(s2, Symbol)) + for s1, s2 in zip(shape1, shape2) + ] + return all_equivalent + + def _loop_body(*loop_vars): + block = node.blocks[0] + iter_var = loop_vars[0] + inputs = (iter_var,) + loop_vars[2:] + res = convert_block(context, block, inputs) + + for input_var, output_var in zip(loop_vars[2:], res[1:]): + if not _shapes_are_equivalent(input_var.shape, output_var.shape): + logger.warning( + "detected change in shape of loop variable. this could lead to incorrect inference results!" + ) + logger.warning( + "{}:{} -> {}:{}".format( + input_var.name, + input_var.shape, + output_var.name, + output_var.shape, + ) + ) + + # Update the iteration count if we're keeping track. + if has_iter_count: + iter_var = mb.add(x=iter_var, y=1, name=iter_var.name + "_inc") + else: + iter_var = mb.identity(x=iter_var) + + # Must return tuple with same length and types as @loop_vars. + return tuple( + [ + iter_var, + ] + + res + ) + + loop = mb.while_loop( + _cond=_loop_cond, _body=_loop_body, loop_vars=loop_vars, name=name + ) + + # Make sure the loop returned the expected number of outputs. Note that the + # first two loop outputs are the iteration count and condition. + assert len(loop) - 2 == len(node.outputs) + for output_name, output_var in zip(node.outputs, loop[2:]): + context.add(output_var, torch_name=output_name) + + +@register_torch_op(torch_alias=["if"]) +def _if(context, node): + """ In TorchIR, a conditional looks like: + %y_1, ..., %y_r = prim::If(%condition) + block0(): # TRUE BRANCH, never takes arguments, has to return r outputs + %t_1, ..., %t_k = some::node(%a_value_from_outer_block) + -> (%t_1, ..., %t_r) + block1(): # FALSE BRANCH, never takes arguments, has to return r outputs + %f_1, ..., %f_m = some::node(%a_value_from_outer_block) + -> (%f_1, ..., %f_r) + + This translates to pseudo code as: + if (condition): + t_1, ..., t_k = some::node(a_value_from_outer_block) + y_1, ..., y_r = t_1, ..., t_r + else: + f_1, ..., f_m = some::node(a_value_from_outer_block) + y_1, ..., y_r = f_1, ..., f_r + + Which further translates to MIL cond as: + _true = { + t_1, ..., t_k = some::node(a_value_from_outer_block) + return (t_1, ..., t_r) + } + _false = { + f_1, ..., f_m = some::node(a_value_from_outer_block) + return (f_1, ..., f_m) + } + """ + name = node.name + # inputs[0]: condition + inputs = _get_inputs(context, node, expected=1) + condition = inputs[0] + + assert len(node.blocks) == 2 + true_block = node.blocks[0] + false_block = node.blocks[1] + + def _true_path(): + res = convert_block(context, true_block, []) + return tuple(res) + + def _false_path(): + res = convert_block(context, false_block, []) + return tuple(res) + + cond = mb.cond( + pred=condition, _true_fn=_true_path, _false_fn=_false_path, name=name + ) + # If the condition only returns one item, wrap it in a tuple. + if not isinstance(cond, (tuple, list)): + cond = (cond,) + + # Make sure the condition returned the expected number of outputs. 
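+ # Both branch blocks have to return the same number of values, one per node output.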
+ assert len(cond) == len(node.outputs) + for output_name, output_var in zip(node.outputs, cond): + context.add(output_var, torch_name=output_name) + + +@register_torch_op +def select(context, node): + inputs = _get_inputs(context, node, expected=3) + _input = inputs[0] + dim = inputs[1].val + index = inputs[2].val + + assert dim.shape == () + assert index.shape == () + + # NOTE: + # Each index in @begin_array/@end_array corresponds to a dimension of @_input + # Each val of those arrays corresponds to the start/end index to slice in that dimension + rank = _input.rank + begin_array = [0] * rank + begin_array[dim] = index + end_array = [s if isinstance(s, int) else 0 for s in _input.shape] + end_mask = [True] * rank + squeeze_mask = [False] * rank + squeeze_mask[dim] = True + + if index != -1: + end_array[dim] = index + 1 + end_mask[dim] = False + + slice_by_index = mb.slice_by_index( + x=_input, + begin=begin_array, + end=end_array, + end_mask=end_mask, + squeeze_mask=squeeze_mask, + name=node.name, + ) + context.add(slice_by_index) + + +@register_torch_op +def type_as(context, node): + inputs = _get_inputs(context, node, expected=2) + + if inputs[0].dtype == inputs[1].dtype: + x = mb.identity(x=inputs[0], name=node.name) + else: + x = inputs[0] + if inputs[1].dtype not in TYPE_TO_DTYPE_STRING: + raise NotImplementedError( + "Tensor type {} cast is not supported.".format(inputs[1].dtype) + ) + x = mb.cast(x=x, dtype=TYPE_TO_DTYPE_STRING[inputs[1].dtype], name=node.name) + + context.add(x) + + +@register_torch_op +def nonzero(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + nonzero = mb.non_zero(x=x, name=node.name) + context.add(nonzero) + + +def _get_slice_params(context, data, inputs): + rank = data.rank + begin = [0] * rank + end = [0] * rank + stride = [1] * rank + begin_mask = [False] * rank + end_mask = [False] * rank + squeeze_mask = [False] * rank + + num_of_slice_set = len(inputs) // 3 + + for i in range(num_of_slice_set): + if inputs[3 * i + 1] is None: + # This is pure index select + idx = context[inputs[3 * i]].val + begin[i] = idx + squeeze_mask[i] = True + else: + # This is a slice + begin_var = context[inputs[3 * i]] + end_var = context[inputs[3 * i + 1]] + stride_var = context[inputs[3 * i + 2]] + + if begin_var is None: + begin_mask[i] = True + else: + begin[i] = begin_var + + if end_var is None: + end_mask[i] = True + else: + end[i] = end_var + + if stride_var is None: + stride[i] = 1 + else: + stride[i] = stride_var.val + + for i in range(num_of_slice_set, rank): + begin_mask[i] = True + end_mask[i] = True + + begin = mb.concat(values=begin, axis=0) + end = mb.concat(values=end, axis=0) + + return begin, end, stride, begin_mask, end_mask, squeeze_mask + + +@register_torch_op +def _internal_op_tensor_inplace_copy(context, node): + data = context[node.inputs[0]] + updates = context[node.inputs[1]] + begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params( + context, data, node.inputs[2:] + ) + + data, updates = promote_input_dtypes([data, updates]) + updated_x = mb.torch_tensor_assign( + data=data, + updates=updates, + begin=begin, + end=end, + stride=stride, + begin_mask=begin_mask, + end_mask=end_mask, + squeeze_mask=squeeze_mask, + name=node.name, + ) + context.add(updated_x) + + +@register_torch_op +def _internal_op_tensor_inplace_fill(context, node): + data = context[node.inputs[0]] + fill_scalar = context[node.inputs[1]] + + begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params( + context, 
data, node.inputs[2:] + ) + if begin.val is None or end.val is None: + raise ValueError("_internal_op_tensor_inplace_fill does not support dynamic index") + + fill_shape = solve_slice_by_index_shape( + data.shape, begin.val, end.val, stride, begin_mask, end_mask, squeeze_mask + ) + update_values = _np.full(fill_shape, fill_scalar.val) + + data, update_values = promote_input_dtypes([data, update_values]) + updated_x = mb.torch_tensor_assign( + data=data, + updates=update_values, + begin=begin, + end=end, + stride=stride, + begin_mask=begin_mask, + end_mask=end_mask, + squeeze_mask=squeeze_mask, + name=node.name, + ) + context.add(updated_x) + + +@register_torch_op +def index_put(context, node): + inputs = _get_inputs(context, node, expected=4) + x = inputs[0] + indices = inputs[1] + values = inputs[2] + accumulate = inputs[3].val + rank = x.rank + mode = "add" if accumulate else "update" + + indices_type = indices[0].sym_type.get_primitive() + + if types.is_bool(indices_type): + assert len(indices) == 1, "Unsupported index_put_ usage." + indices = indices[0] + assert indices.shape == x.shape, "indices shape must equal the input shape for the index_put operation." + indices = mb.cast(x=indices, dtype="int32") + indices = mb.non_zero(x=indices) + + if types.is_int(indices_type): + if len(indices) > 1: + indices = mb.stack(values=indices, axis=rank - 1) + else: + indices = mb.expand_dims(x=indices[0], axes=[-1]) + + if len(values.shape) == 0: + values = mb.expand_dims(x=values, axes=[0]) + + if values.rank == 1 and values.shape[0] == 1: + reps = value_at(mb.shape(x=indices), 0) + reps = mb.expand_dims(x=reps, axes=[0]) + values = mb.tile(x=values, reps=reps) + + result = mb.scatter_nd(data=x, indices=indices, updates=values, mode=mode, name=node.name) + context.add(result) + + +@register_torch_op +def index(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + indices = inputs[1] + rank = x.rank + + """ + Case 1: A single boolean index selection + Ex: + a = torch.rand(2, 3, 4) + b = torch.rand(3, 4) + index = b > 0.1 + c = a[:, b] + + For this case, the only non-None tensor is of dtype bool + A True value indicates that the corresponding element should be selected among the masked axes + The output c is a tensor with shape (2, N), where N is the number of elements of b satisfying condition > 0.1 + """ + boolean_indices_axis = [] + for i, index in enumerate(indices): + if index is not None and types.is_bool(index.dtype): + boolean_indices_axis.append(i) + if len(boolean_indices_axis) == 1: + # get the True element indices + axis = boolean_indices_axis[0] + axes = list(range(axis, axis + index.rank)) + index = indices[axis] + index = mb.non_zero(x=index) + + # transpose the masked axes to the beginning + perm = axes + [i for i in range(rank) if i not in axes] + x = mb.transpose(x=x, perm=perm) + x = mb.gather_nd(x=x, indices=index) + + # transpose the tensor back + perm_back = list(range(1, x.rank)) + perm_back.insert(axis, 0) + res = mb.transpose(x=x, perm=perm_back, name=node.name) + context.add(res) + return + + """ + Case 2: Pure index selection + Ex # 1 [Single dimension selection]: + a = torch.rand(1,2,3,4) + index = torch.tensor([0, 1]) + b = a[:,:,:,index] + + In this case, indices is a list [None, None, None, [0, 1]]. The None element means the corresponding + dimension is masked. + + b has shape (1,2,3,2).
+ + Ex # 2 [Multiple disconnected dimensions selection]: + a = torch.rand(1,2,3,4) + index = torch.tensor([0, 1]) + b = a[:,index,:,index] + + In this case, indices is a list [None, [0,1], None, [0,1]] + + b has shape (2,1,3), + where b[0,:,:] = a[:,0,:,0] and b[1,:,:] = a[:,1,:,1] + + Ex # 3 [Multiple connected dimensions selection]: + a = torch.rand(1,2,3,4) + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0, 1]) + b = a[:,index_1,index_2,:] + + indices is a list [None, [0, 1], [0, 1], None] + + b has shape (1,2,4), + where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,1,:] + + Ex # 4 [Selection with boolean masks]: + a = torch.rand(4,5) + index_1 = [True, True, False, False] + index_2 = [False, True, True, False, False] + b = a[index_1, index_2] + + indices is a list [[True, True, False, False], [False, True, True, False, False]] + + In this case, index_1 and index_2 are interpreted as masks, i.e. the indices of their True values, + index_1 -> [0, 1] + index_2 -> [1, 2] + + b has shape (2,), + where b[0] = a[0, 1] and b[1] = a[1, 2] + + Ex # 5 [Broadcast selection]: + a = torch.rand(1,2,3,4) + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0]) + b = a[:,index_1,index_2,:] + + indices is a list [None, [0, 1], [0], None] + + In this case, index_2 is going to be broadcasted to [0, 0] + + b has shape (1,2,4), + where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,0,:] + + """ + + # get the index axes + indices = indices + [None] * (x.rank - len(indices)) + indices_axes = [] + valid_indices = [] + for i, index in enumerate(indices): + if index is not None: + indices_axes.append(i) + valid_indices.append(index) + + # If all elements in indices are None, simply return the original tensor. + if len(indices_axes) == 0: + x = mb.identity(x=x, name=node.name) + context.add(x) + return + + # convert all indices to int type + for i, indice in enumerate(valid_indices): + if indice is not None and types.is_bool(indice.dtype): + indice = mb.non_zero(x=indice) + indice = mb.squeeze(x=indice, axes=[1]) + valid_indices[i] = indice + + # For the single index axis case, we can use mb.gather directly + if len(indices_axes) == 1: + axis = indices_axes[0] + x = mb.gather(x=x, indices=valid_indices[0], axis=axis, name=node.name) + context.add(x) + return + + # For the multiple index axes case, we delegate the broadcast to numpy if there is no dynamic shape.
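+ # e.g. indices of shapes (2,) and (1,) are first broadcast to a common shape of (2,), + # as in Ex # 5 above, before being stacked into a single index tensor.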
+ if all(not any_symbolic(idx.shape) for idx in valid_indices): + broadcasted_shape = _np.broadcast_shapes(*[idx.shape for idx in valid_indices]) + for i, index in enumerate(valid_indices): + if (index.shape != broadcasted_shape) and index.val is not None: + new_val = _np.broadcast_to(index.val, broadcasted_shape) + valid_indices[i] = mb.const( + val=new_val, name=index.name + "_broadcasted" + ) + valid_indices = [mb.cast(x=index, dtype="int32") for index in valid_indices] + + # First stack the indices together + indices_rank = valid_indices[0].rank + indices = mb.stack(values=valid_indices, axis=indices_rank) + + # transpose the input tensor to bring the indexed axes to the front + is_connected = True + for i in range(1, len(indices_axes)): + if indices_axes[i] != indices_axes[i - 1] + 1: + is_connected = False + break + + name = node.name + "_transpose" if is_connected else node.name + perm = indices_axes + [axis for axis in range(x.rank) if axis not in indices_axes] + x = mb.transpose(x=x, perm=perm) + x = mb.gather_nd(x=x, indices=indices, name=name) + + # if the index axes are connected, we need to transpose the result back + if is_connected: + new_dimensions = list(range(indices_axes[0], indices_axes[0] + indices_rank)) + new_perm = new_dimensions + [ + axis + for axis in range(rank + indices_rank - len(indices_axes)) + if axis not in new_dimensions + ] + perm_back = [new_perm.index(axis) for axis in range(len(new_perm))] + x = mb.transpose(x=x, perm=perm_back, name=node.name) + context.add(x) + + +@register_torch_op +def ones(context, node): + inputs = _get_inputs(context, node, expected=[5, 6]) + size = inputs[0] + # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused + # layout = inputs[2] unused + # device = inputs[3] unused + # requires_grad = inputs[4] unused + # out = inputs[5] unused + if isinstance(size, list): + size = mb.concat(values=size, axis=0) + fill = mb.fill(shape=size, value=1.0, name=node.name) + context.add(fill) + + +@register_torch_op +def ones_like(context, node): + inputs = _get_inputs(context, node, expected=6) + x = inputs[0] + if is_current_opset_version_compatible_with(target.iOS16): + fill = mb.fill_like(ref_tensor=x, value=1.0, name=node.name) + else: + size = mb.shape(x=x) + # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused + # layout = inputs[2] unused + # device = inputs[3] unused + # requires_grad = inputs[4] unused + # out = inputs[5] unused + fill = mb.fill(shape=size, value=1.0, name=node.name) + context.add(fill) + + +def _make_fill_op(size, val, name): + assert val is not None + if isinstance(size, list): + size = mb.concat(values=size, axis=0) + fill = mb.fill(shape=size, value=val, name=name) + return fill + + +@register_torch_op +def full(context, node): + inputs = _get_inputs(context, node) + size = inputs[0] + val = inputs[1].val + result = _make_fill_op(size, val, node.name) + context.add(result) + + +@register_torch_op +def full_like(context, node): + inputs = _get_inputs(context, node, expected=7) + x = inputs[0] + val = inputs[1].val + if is_current_opset_version_compatible_with(target.iOS16): + result = mb.fill_like(ref_tensor=x, value=val, name=node.name) + else: + size = mb.shape(x=inputs[0]) + result = _make_fill_op(size, val, node.name) + context.add(result) + + +@register_torch_op +def new_full(context, node): + # The difference between "new_full" and "full" is that "new_full" is called on + # an existing tensor: tensor.new_full(size, fill_value), while "full" is called + # from the torch API: torch.full(size, fill_value).
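+ # e.g. x.new_full((2, 3), 7.0) matches torch.full((2, 3), 7.0, dtype=x.dtype).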
# But they are basically doing the same thing. + inputs = _get_inputs(context, node) + size = inputs[1] + val = inputs[2].val + result = _make_fill_op(size, val, node.name) + context.add(result) + +@register_torch_op +def randint(context, node): + inputs = _get_inputs(context, node, expected=8) + low = mb.cast(x=inputs[0], dtype="fp32") + high = mb.cast(x=inputs[1], dtype="fp32") + shape = inputs[2] + rand_uniform = mb.random_uniform(shape=shape, low=low, high=high) + rand_int = mb.cast(x=rand_uniform, dtype="int32", name=node.name) + context.add(rand_int) + + +@register_torch_op +def bitwise_not(context, node): + inputs = _get_inputs(context, node) + x = inputs[0] + dtype = x.dtype + if types.is_int(dtype): + x = mb.add(x=x, y=1) + x = mb.mul(x=x, y=-1, name=node.name) + elif types.is_bool(dtype): + x = mb.logical_not(x=x, name=node.name) + else: + raise ValueError("Unsupported type {} for the 'bitwise_not' op".format(dtype)) + context.add(x) + + +@register_torch_op(torch_alias=["and"]) +def bitwise_and(context, node): + inputs = _get_inputs(context, node) + + input_dtypes = [i.dtype for i in inputs] + if all(types.is_bool(input_dtype) for input_dtype in input_dtypes): + logical_and(context, node) + else: + raise NotImplementedError( + f"The `bitwise_and` op only supports boolean inputs, but got {input_dtypes}." + ) + + +def _avg_pool(context, node, inputs): + x = inputs[0] + kernel_sizes = inputs[1] + strides = inputs[2] + if strides.op.op_type == "const" and (not list(strides.val)): + strides = mb.const(val=kernel_sizes.val, name=strides.name) + pad_type = "custom" + # Need to explicitly state L-R, T-B pad + pad = inputs[3] + pad = _np.repeat(pad.val, 2) + ceil_mode = inputs[4].val + include_pad = inputs[5].val + + spatial_rank = len(pad) // 2 + if spatial_rank > 2 and ceil_mode is True and list(strides.val) != [1] * len(strides.val): + # Since MIL does not support ceil_mode for 3D pooling, we need to adjust the + # padding values if ceil_mode is True. Note that ceil_mode only makes a + # difference when the strides are not all 1. + x_spatial_dimensions = x.shape[-spatial_rank:] + new_pad = _adjust_pad_for_ceil_mode( + x_spatial_dimensions, kernel_sizes.val, strides.val, pad + ) + if _np.sum(_np.abs(new_pad - pad)) > 1e-3: + if include_pad: + raise ValueError('pool3D with ceil mode=True and include_pad=True not supported') + pad = new_pad + + pool = mb.avg_pool( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type=pad_type, + pad=pad, + name=node.name, + exclude_padding_from_average=not include_pad, + ceil_mode=ceil_mode if spatial_rank <= 2 else False, + ) + context.add(pool) + + +@register_torch_op +def avg_pool1d(context, node): + inputs = _get_inputs(context, node, expected=6) + _avg_pool(context, node, inputs) + + +@register_torch_op +def avg_pool2d(context, node): + inputs = _get_inputs(context, node, expected=7) + divisor_override = inputs[6] + if divisor_override is not None: + raise ValueError("divisor_override is not supported for avg_pool2d") + _avg_pool(context, node, inputs) + + +@register_torch_op +def avg_pool3d(context, node): + inputs = _get_inputs(context, node, expected=7) + divisor_override = inputs[6] + if divisor_override is not None: + raise ValueError("divisor_override is not supported for avg_pool3d") + _avg_pool(context, node, inputs) + + +@register_torch_op +def log_softmax(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + axis = inputs[1] + out = inputs[2] # Ignored.
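+ # log_softmax(x) is computed as log(softmax(x)) below.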
+ assert out is None + res = mb.softmax(x=x, axis=axis, name=node.name + "_softmax") + res = mb.log(x=res, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["nll_loss_nd"]) +def nll_loss(context, node): + inputs = _get_inputs(context, node, expected=5) + + x = inputs[0] + target = inputs[1] + weight = inputs[2] + reduction = inputs[3] + ignore_index = inputs[4] + + # mapping for reduction + reduction_mapping = {0: "none", 1: "mean", 2: "sum"} + reduction = reduction_mapping[reduction.val] + + # compute the loss + batch_size = x.shape[0] + + # only the default weight (None) and the default ignore_index (-100) are supported + if weight is not None: + raise NotImplementedError("Only unity weight is supported for NLLLoss.") + if ignore_index.val != -100: + raise NotImplementedError("ignore index not supported for NLLLoss.") + + x = mb.cast(x=x, dtype="fp32") + x = mb.mul(x=x, y=-1.) + range_indices = mb.range_1d(end=batch_size, start=0, step=1) + total_indices = mb.stack(values=[range_indices, target], axis=1) + loss = mb.gather_nd(x=x, indices=total_indices) + + # reduction type + if reduction == "none": + out = mb.identity(x=loss, name=node.name) + elif reduction == "sum": + out = mb.reduce_sum(x=loss, axes=[0], keep_dims=False, name=node.name) + elif reduction == "mean": + out = mb.real_div(x=loss, y=_np.float32(batch_size)) + out = mb.reduce_sum(x=out, axes=[0], keep_dims=False, name=node.name) + else: + raise NotImplementedError("Unsupported reduction type for NLLLoss.") + + context.add(out) + + +@register_torch_op +def sigmoid(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.sigmoid(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op +def hardsigmoid(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.sigmoid_hard(x=inputs[0], alpha=1.0 / 6, beta=0.5, name=node.name) + context.add(res) + + +@register_torch_op +def gelu(context, node): + inputs = _get_inputs(context, node) + assert len(inputs) in (1, 2) + if len(inputs) == 2: + approximate = inputs[1].val + assert approximate == 'none' + res = mb.gelu(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["slice"]) +def _slice(context, node): + inputs = _get_inputs(context, node, expected=5) + x = inputs[0] + dim = inputs[1].val + + if inputs[2] and inputs[2].val is not None: + start = inputs[2].val + elif isinstance(inputs[2], Var): + start = inputs[2] + else: + start = 0 + + if inputs[3] and inputs[3].val is not None: + end = inputs[3].val + elif isinstance(inputs[3], Var): + end = inputs[3] + else: + end = None + + step = inputs[4].val + + if start == 0 and end is None and step == 1: + # Handling x[:], just pass through the tensor.
+ context.add(x, node.name) + return + + begin_array = [0] * len(x.shape) + begin_array[dim] = start + end_array = [s if isinstance(s, int) else 0 for s in x.shape] + end_mask = [True] * len(x.shape) + if end is not None: + end_array[dim] = end + end_mask[dim] = False + + if isinstance(start, Var): + begin_array = mb.concat(values=begin_array, axis=0) + + if isinstance(end, Var): + end_array = mb.concat(values=end_array, axis=0) + + kwargs = { + "x": x, + "begin": begin_array, + "end": end_array, + "end_mask": end_mask, + "name": node.name, + } + + if step != 1: + stride_array = _np.array([1] * len(x.shape)) + stride_array[dim] = step + kwargs["stride"] = stride_array + + res = mb.slice_by_index(**kwargs) + context.add(res) + + +@register_torch_op(torch_alias=["split_with_sizes"]) +def split(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + split_sizes = inputs[1] + dim = inputs[2].val + + if not isinstance(split_sizes.val, _np.ndarray): + shape = mb.shape(x=x) + dim_size = _list_select(shape, dim) + # MIL split op needs the size of each split to be given explicitly. + num_whole_splits = mb.floor_div(x=dim_size, y=split_sizes) + remainder = mb.mod(x=dim_size, y=split_sizes) + + # MIL doesn't have a way of turning a scalar into a tensor (list write + # only supports tensors). As a workaround, we create a constant [1] + # tensor and multiply it by the scalar value, thus creating a tensor + # with the scalar value in it. + tmp = mb.const(val=[1]) + whole_sizes = mb.mul(x=tmp, y=split_sizes) + reps = mb.mul(x=tmp, y=num_whole_splits) + whole_sizes = mb.tile(x=whole_sizes, reps=reps) + if remainder.val == 0: + split_sizes = whole_sizes + else: + partial_size = mb.mul(x=tmp, y=remainder) + split_sizes = mb.concat(values=[whole_sizes, partial_size], axis=0) + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + context.add(res, torch_name=node.name) + + +@register_torch_op +def unbind(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + dim = inputs[1].val + split_sizes = [1] * x.shape[dim] + if len(split_sizes) == 1: + res = [mb.squeeze(x=x, axes=[dim])] + else: + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + res = [mb.squeeze(x=x, axes=[dim]) for x in res] + context.add(res, torch_name=node.name) + + +@register_torch_op +def to(context, node): + inputs = _get_inputs(context, node) + + # There are a lot of variants of `to` op. + # - When len(inputs) is 7 or 8, we only care about the first two params (input and dtype). + # - When len(inputs) == 6, the parameter is (input, _, dtype, non_blocking, copy, memory_format) + # - When len(inputs) == 5, the parameter is (input, dtype, non_blocking, copy, memory_format) + # - When len(inputs) == 4, the parameter is (input, dtype, non_blocking, copy) + # - When len(inputs) == 3, the parameter is (input, non_blocking, copy) + # We only use `input` and `dtype`, and `non_blocking` and `copy` are unused. + _input = inputs[0] + target_dtype: Optional[Var] + inputs_len = len(inputs) + if inputs_len in (4, 5, 7, 8): + target_dtype = inputs[1] + elif inputs_len == 6: + target_dtype = inputs[2] + elif inputs_len == 3: + target_dtype = None + else: + raise ValueError( + "Received invalid arguments for PyTorch conversion of op {}".format(node) + ) + + if target_dtype is None: + # When target_dtype is None, it means the input's dtype is already the target dtype. 
+ context.add(_input, torch_name=node.name) + return + elif types.is_scalar(target_dtype.sym_type) and target_dtype.val is not None: + dtype = target_dtype.val + else: + # When the val of dtype is not available, bridge from the np dtype. + np_type = nptype_from_builtin(target_dtype.dtype) + dtype = NUMPY_DTYPE_TO_TORCH_NUM[np_type] + + torch_dtype = NUM_TO_TORCH_DTYPE[dtype] + if isinstance(_input, Var) and _input.can_be_folded_to_const(): + # numpy -> torch -> torch cast -> numpy + # This path is needed to use the mapping of passed in dtypes to torch dtypes. + casted_input = torch.tensor(_input.val).type(torch_dtype).cpu().numpy() + res = mb.const(val=casted_input, name=node.name) + else: + if dtype in NUM_TO_DTYPE_STRING: + res = mb.cast(x=_input, dtype=NUM_TO_DTYPE_STRING[dtype], name=node.name) + else: + # For a dtype that is not supported by mb.cast, we make a best-effort + # attempt to cast it to int or float based on the dtype. + np_dtype = NUM_TO_NUMPY_DTYPE[dtype] + if _np.issubdtype(np_dtype, _np.integer): + res = mb.cast(x=_input, dtype="int32", name=node.name) + elif _np.issubdtype(np_dtype, _np.floating): + res = mb.cast(x=_input, dtype="fp32", name=node.name) + else: + raise ValueError(f"Unsupported op {node} with target dtype {np_dtype}") + context.add(res) + + +@register_torch_op +def erf(context, node): + inputs = _get_inputs(context, node, expected=1) + _input = inputs[0] + erf = mb.erf(x=_input, name=node.name) + context.add(erf) + + +@register_torch_op(torch_alias=["scalarimplicit"]) +def implicittensortonum(context, node): + inputs = _get_inputs(context, node, expected=1) + _input = inputs[0] + + if _input.shape == (): # already a scalar + context.add(_input, node.name) + else: + assert _input.shape == (1,) + # shape: (1,) -> () + squeeze = mb.squeeze(x=_input, name=node.name) + context.add(squeeze) + + +@register_torch_op +def constantchunk(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + # ConstantChunk gets its parameters as attributes of the node.
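+ # e.g. total = 10, chunks = 3 gives size = ceil(10 / 3) = 4, so + # split_sizes = [4, 4] plus a remainder chunk of 2, i.e. [4, 4, 2].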
+ chunks = node.attr["chunks"] + dim = node.attr["dim"] + + total = x.shape[dim] + size = int(_math.ceil(float(total) / float(chunks))) + split_sizes = [size] * int(_math.floor(total / size)) + remainder = total - sum(split_sizes) + if remainder > 0: + split_sizes.append(remainder) + + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + for val, name in zip(res, node.outputs): + context.add(val, name) + + +def _broadcast(name, tensor, shape): + if len(shape) > tensor.rank: + new_dims = len(shape) - tensor.rank + tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims))) + + reps = [] + for ts, ds in zip(tensor.shape, shape): + if not is_symbolic(ts) and not is_symbolic(ds) and ds > 0 and ts == 1: + reps.append(ds) + else: + reps.append(1) + + res = mb.tile(x=tensor, reps=reps, name=name) + return res + + +@register_torch_op +def expand(context, node): + def _broadcast_dynamic(name, tensor, shape): + # Add any extra dimensions + if len(shape) > tensor.rank: + new_dims = len(shape) - tensor.rank + tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims))) + + tensor_shape = mb.shape(x=tensor) + shape = mb.concat(values=shape, axis=0) + reps = mb.real_div(x=shape, y=tensor_shape) + reps = mb.cast(x=reps, dtype="int32") + res = mb.tile(x=tensor, reps=reps, name=name) + return res + + + # PyTorch 1.6+ has 3 inputs while older versions have 2 + inputs = _get_inputs(context, node, expected=[2, 3]) + + x = inputs[0] + shape = inputs[1] + + if isinstance(shape, list): + res = _broadcast_dynamic(node.name, x, shape) + else: + res = _broadcast(node.name, x, shape.val) + context.add(res) + + +@register_torch_op +def expand_as(context, node): + # PyTorch 1.6+ has 3 inputs while older versions have 2 + inputs = _get_inputs(context, node, expected=[2, 3]) + x = inputs[0] + other = inputs[1] + + res = _broadcast(node.name, x, other.shape) + context.add(res) + + +@register_torch_op +def arange(context, node): + inputs = _get_inputs(context, node) + # dtype = inputs[-4] + # layout = inputs[-3] + # device = inputs[-2] + # pin_memory = inputs[-1] + if len(inputs) == 5: + # inputs are [end, dtype, layout, device, pin_memory] + start = 0 + end = inputs[0] + step = 1 + elif len(inputs) == 6: + # inputs are [start, end, dtype, layout, device, pin_memory] + start = inputs[0] + end = inputs[1] + step = 1 + elif len(inputs) == 7: + # inputs are [start, end, step, dtype, layout, device, pin_memory] + start = inputs[0] + end = inputs[1] + step = inputs[2] + else: + raise ValueError( + "arange must have exactly 5, 6, or 7 inputs, got {}".format(len(inputs)) + ) + # If start, end, and step don't have the same dtype, we cast them to fp32 + int_start = isinstance(start, int) or types.is_int(start.dtype) + int_end = isinstance(end, int) or types.is_int(end.dtype) + int_step = isinstance(step, int) or types.is_int(step.dtype) + + if int_start != int_end or int_start != int_step: + start = mb.cast(x=start, dtype="fp32") + end = mb.cast(x=end, dtype="fp32") + step = mb.cast(x=step, dtype="fp32") + res = mb.range_1d(start=start, end=end, step=step, name=node.name) + context.add(res) + + +@register_torch_op +def masked_fill(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + mask = inputs[1] + value = inputs[2] + # @mb.select does not properly broadcast scalar input, so as a workaround + # we create a full sized tensor. + + if types.is_int(value.dtype): + # @mb.fill cannot handle value with dtype integer + # so we cast the value.
+ value = mb.cast(x=value, dtype="fp32") + + if not types.is_bool(mask.dtype): + # cond must be bool type + mask = mb.cast(x=mask, dtype="bool") + + shape = mb.shape(x=x, name=node.name + "_shape") + value = mb.fill(shape=shape, value=value, name=node.name + "_value") + res = mb.select(cond=mask, a=value, b=x, name=node.name) + context.add(res) + + +@register_torch_op +def meshgrid(context, node): + """ + For N input tensors, a meshgrid is constructed by viewing each tensor as an N-dimensional tensor + with values in the dimension corresponding to its order in the args. (a.) + Then, it is expanded along dimensions corresponding to the dimensions of each + 1d tensor in the order that they were passed in. (b.) + + Each output tensor is put into a tuple that is returned. These tuples form + N N-dimensional grids, where the ith grid is defined as expanding the ith input over + dimensions defined by the other inputs. + """ + supported_indexing_modes = ("ij", "xy") + indexing = "ij" + inputs = _get_inputs(context, node, expected=[1, 2]) + + if len(inputs) == 2: + indexing = inputs[1].val + if indexing not in supported_indexing_modes: + raise ValueError("indexing mode {} not supported".format(indexing)) + + tensor_inputs = inputs[0] + assert isinstance(tensor_inputs, (list, tuple)) + if len(tensor_inputs) < 2: + raise ValueError("Requires >= 2 tensor inputs.") + + if any([len(tensor_var.shape) > 1 for tensor_var in tensor_inputs]): + raise ValueError("meshgrid received a non-1d tensor.") + + dim_tuple = tuple(tensor_var.shape[0] for tensor_var in tensor_inputs) + + grids = [] + size = len(tensor_inputs) + for i in range(size): + view_shape = [1] * size + view_shape[i] = -1 + view_shape = tuple(view_shape) + # (a.) in docstring + view = mb.reshape( + x=tensor_inputs[i], shape=view_shape, name=node.name + "_view_" + str(i) + ) + + # (b.) in docstring + reps = [ + ds if ds > 0 and ts == 1 else 1 for ts, ds in zip(view.shape, dim_tuple) + ] + res = mb.tile(x=view, reps=reps, name=node.name + "_expand_" + str(i)) + + # transpose the first two dimensions for "xy" indexing + if indexing == "xy": + perm = [1, 0] + list(range(2, size)) + res = mb.transpose(x=res, perm=perm, name=node.name + "_transpose_" + str(i)) + + grids.append(res) + + context.add(tuple(grids), node.name) + + +# Defines all the nodes that are noOps +@register_torch_op( + torch_alias=[ + "dropout", + "dropout_", + "feature_dropout", + "contiguous", + "device", + "detach", + "clone", + ] +) +def noop(context, node): + logger.info("Setting pytorch op: {} to no-op.".format(node)) + inputs = _get_inputs(context, node) + _input = inputs[0] + context.add(_input, torch_name=node.name) + + +@register_torch_op +def argmax(context, node): + inputs = _get_inputs(context, node) + x = inputs[0] + axis = inputs[1] + keep_dims = inputs[2] + if types.is_int(x.dtype) and x.dtype._width == 64: + # MIL reduce_argmax doesn't support int64.
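+ # Note that int64 values outside the int32 range would be truncated by this cast.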
+ x = mb.cast(x=x, dtype="int32") + res = mb.reduce_argmax(x=x, axis=axis, keep_dims=keep_dims, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["empty_like"]) +def zeros_like(context, node): + inputs = _get_inputs(context, node, expected=6) + x = inputs[0] + dtype = inputs[1].val + shape = mb.shape(x=x) + np_type = NUM_TO_NUMPY_DTYPE[dtype] + + if shape.can_be_folded_to_const(): + shape = shape.val + zeros = _np.zeros(shape).astype(np_type) + zeros_like = mb.const(val=zeros, name=node.name) + else: + value = np_type(0) + if is_current_opset_version_compatible_with(target.iOS16): + zeros_like = mb.fill_like(ref_tensor=x, value=value, name=node.name) + else: + zeros_like = mb.fill(shape=shape, value=value, name=node.name) + + context.add(zeros_like) + + +@register_torch_op(torch_alias=["empty"]) +def zeros(context, node): + inputs = _get_inputs(context, node) + size = inputs[0] + if inputs[1] is not None: + dtype = inputs[1].val + else: + dtype = torch.get_default_dtype() + assert dtype in (torch.float32, torch.float64) + dtype = 6 + + if isinstance(size, list) or not size.can_be_folded_to_const(): + # the size is dynamic or this zeros op cannot be folded into const. + size = mb.concat(values=size, axis=0) if isinstance(size, list) else size + np_type = NUM_TO_NUMPY_DTYPE[dtype] + zeros = mb.fill(shape=size, value=np_type(0), name=node.name) + else: + # the size is static and this zeros op can be folded into const. + size = size.val + # layout = inputs[2] unused + # device = inputs[3] unused + # pin_memory = inputs[4] unused + torch_dtype = NUM_TO_TORCH_DTYPE[dtype] + zeros_array = torch.zeros(tuple(size)).type(torch_dtype).numpy() + zeros = mb.const(val=zeros_array, name=node.name) + + context.add(zeros) + + +@register_torch_op(torch_alias=["new_empty"]) +def new_zeros(context, node): + inputs = _get_inputs(context, node) + shape = inputs[1] + if isinstance(shape, list): + # when the size is dynamic, it is a list of pymil scalar, + # we need to concat them first to get a shape. 
+ shape = mb.concat(values=shape, axis=0) + context.add(mb.fill(shape=shape, value=0., name=node.name)) + + +@register_torch_op +def dim(context, node): + inputs = _get_inputs(context, node) + shape = mb.shape(x=inputs[0]) + rank = mb.shape(x=shape) + context.add(value_at(rank, 0, node.name)) + + +@register_torch_op +def min(context, node): + inputs = _get_inputs(context, node, expected=[1, 2, 3]) + + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.min.html + if len(inputs) == 1: + value = mb.reduce_min(x=inputs[0], axes=None, name=node.name) + context.add(value) + elif len(inputs) == 2: + value = mb.minimum(x=inputs[0], y=inputs[1], name=node.name) + context.add(value) + elif len(inputs) == 3: + _input = inputs[0] + dim = inputs[1].val + keepdim = inputs[2].val + + values = mb.reduce_min(x=_input, axes=[dim], keep_dims=keepdim) + indices = mb.reduce_argmin(x=_input, axis=dim, keep_dims=keepdim) + assert len(node.outputs) == 2 + values_name = node.outputs[0] + indices_name = node.outputs[1] + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + + +@register_torch_op +def max(context, node): + inputs = _get_inputs(context, node, expected=[1, 2, 3]) + + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.max.html + if len(inputs) == 1: + value = mb.reduce_max(x=inputs[0], axes=None, name=node.name) + context.add(value) + elif len(inputs) == 2: + value = mb.maximum(x=inputs[0], y=inputs[1], name=node.name) + context.add(value) + elif len(inputs) == 3: + _input = inputs[0] + dim = inputs[1].val + keepdim = inputs[2].val + + values = mb.reduce_max(x=_input, axes=[dim], keep_dims=keepdim) + indices = mb.reduce_argmax(x=_input, axis=dim, keep_dims=keepdim) + assert len(node.outputs) == 2 + values_name = node.outputs[0] + indices_name = node.outputs[1] + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + +def _add_amax_amin(context, node, reduce_op): + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amax.html + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amin.html + assert len(node.outputs) == 1 + + all_inputs = _get_inputs(context, node, expected=[2, 3]) + _input = all_inputs[0] + dim = [all_inputs[1].val] if type(all_inputs[1].val) == int else [x for x in all_inputs[1].val] + keepdim = all_inputs[2] if len(all_inputs) == 3 else False + + context.add(reduce_op(x=_input, axes=dim, keep_dims=keepdim), torch_name=node.outputs[0]) + +@register_torch_op +def amax(context, node): + _add_amax_amin(context, node, mb.reduce_max) + +@register_torch_op +def amin(context, node): + _add_amax_amin(context, node, mb.reduce_min) + + +@register_torch_op +def argsort(context, node): + inputs = _get_inputs(context, node, expected=3) + ascending = mb.logical_not(x=inputs[2]) + argsort = mb.argsort(x=inputs[0], axis=inputs[1], ascending=ascending, name=node.name) + context.add(argsort) + + +@register_torch_op +def sort(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + axis = inputs[1].val + ascending = not inputs[2].val + indices_name = node.outputs[1] + values_name = node.outputs[0] + indices = mb.argsort(x=_input, axis=axis, ascending=ascending, name=indices_name) + values = mb.gather_along_axis(x=_input, indices=indices, axis=axis, name=values_name) + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + + +@register_torch_op +def append(context, node): + # 
Note: by applying torchir_passes.transform_inplace_ops the meaning of
+    # this op is changed from the original TorchIR. This op expects a python
+    # list or MIL List as its first input. If an MIL List, the second input
+    # must be a tensor of whatever shape the List expects. If not an MIL List,
+    # the second input can be anything. The result will be the second input
+    # joined to the first input, either by list_write if an MIL list, or
+    # append if a python list.
+    inputs = _get_inputs(context, node, expected=2)
+    ls = inputs[0]
+    value = inputs[1]
+
+    if isinstance(ls, list):
+        context.add(ls + [value], node.name)
+    elif isinstance(ls, ListVar):
+        index = mb.list_length(ls=ls, name=node.name + "_index")
+        res = mb.list_write(ls=ls, index=index, value=value, name=node.name)
+        context.add(res)
+    else:
+        raise ValueError(
+            "can only append to Python list or MIL ListVar, got {}.".format(
+                type(inputs[0])
+            )
+        )
+
+
+@register_torch_op
+def gather(context, node):
+    inputs = _get_inputs(context, node)
+    res = mb.gather_along_axis(x=inputs[0], indices=inputs[2], axis=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def index_select(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    indices = context[node.inputs[2]]
+    context.add(mb.gather(x=x, indices=indices, axis=axis, name=node.name))
+
+
+@register_torch_op(torch_alias=["abs"])
+def _abs(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.abs(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def repeat(context, node):
+    x = context[node.inputs[0]]
+    reps = context[node.inputs[1]]
+    if isinstance(reps, list):
+        reps = mb.concat(values=reps, axis=0)
+
+    if reps.shape[0] > len(x.shape):
+        x = mb.expand_dims(x=x, axes=list(range(reps.shape[0] - x.rank)))
+    context.add(mb.tile(x=x, reps=reps, name=node.name))
+
+
+@register_torch_op
+def acos(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.acos(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def acosh(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.acosh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def asin(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.asin(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def atan(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.atan(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def atan2(context, node):
+    """
+    atan2(Tensor y, Tensor x)
+    Element-wise arctangent of y / x with consideration of the quadrant.
+    Returns a new tensor with the signed angles in radians between vector (x, y) and vector (1, 0).
+
+    At a high level:
+    1. atan(y / x) to get the angle in [-pi / 2, pi / 2]
+    2. 
analyze quadrant to determine the angle in [-pi, pi] + + Reference PyTorch code https://gist.github.com/nikola-j/b5bb6b141b8d9920318677e1bba70466 + def my_atan2(y, x): + pi = torch.from_numpy(np.array([np.pi])).to(y.device, y.dtype) + ans = torch.atan(y / x) + ans += ((y > 0) & (x < 0)) * pi + ans -= ((y < 0) & (x < 0)) * pi + ans *= (1 - ((y > 0) & (x == 0)) * 1.0) + ans += ((y > 0) & (x == 0)) * (pi / 2) + ans *= (1 - ((y < 0) & (x == 0)) * 1.0) + ans += ((y < 0) & (x == 0)) * (-pi / 2) + return ans + """ + inputs = _get_inputs(context, node, expected=2) + y = inputs[0] + x = inputs[1] + if not types.is_float(y.dtype): + y = mb.cast(x=y, dtype="fp32") + if not types.is_float(x.dtype): + x = mb.cast(x=x, dtype="fp32") + + # basic logical expressions + y_less_0 = mb.less(x=y, y=0.0) + y_greater_0 = mb.greater(x=y, y=0.0) + x_less_0 = mb.less(x=x, y=0.0) + x_equal_0 = mb.equal(x=x, y=0.0) + + # combined logical expressions + ygreater0_and_xless0 = mb.logical_and(x=y_greater_0, y=x_less_0) + yless0_and_xless0 = mb.logical_and(x=y_less_0, y=x_less_0) + ygreater0_and_xequal0 = mb.logical_and(x=y_greater_0, y=x_equal_0) + yless0_and_xequal0 = mb.logical_and(x=y_less_0, y=x_equal_0) + + # bool -> fp32 for numeric operation + ygreater0_and_xless0_numeric = mb.cast(x=ygreater0_and_xless0, dtype="fp32") + yless0_and_xless0_numeric = mb.cast(x=yless0_and_xless0, dtype="fp32") + ygreater0_and_xequal0_numeric = mb.cast(x=ygreater0_and_xequal0, dtype="fp32") + yless0_and_xequal0_numeric = mb.cast(x=yless0_and_xequal0, dtype="fp32") + + # quadrant modification coefficients + coeff1 = mb.mul(x=ygreater0_and_xless0_numeric, y=_np.pi) + coeff2 = mb.mul(x=yless0_and_xless0_numeric, y=_np.pi) + coeff3 = mb.sub(x=1.0, y=ygreater0_and_xequal0_numeric) + coeff4 = mb.mul(x=ygreater0_and_xequal0_numeric, y=_np.pi / 2.0) + coeff5 = mb.sub(x=1.0, y=yless0_and_xequal0_numeric) + coeff6 = mb.mul(x=yless0_and_xequal0_numeric, y=-_np.pi / 2.0) + + # if -1e-8 < x < 1e-8, x += 2e-8 to avoid y / 0 + # this shift makes atan2(0, 0) = 0, which is consistent with PyTorch torch.atan2 + x0left = mb.greater(x=x, y=-1e-8) + x0right = mb.less(x=x, y=1e-8) + x0 = mb.logical_and(x=x0left, y=x0right) + x0numeric = mb.cast(x=x0, dtype="fp32") + safe_shift = mb.mul(x=x0numeric, y=2e-8) + x_safe = mb.add(x=x, y=safe_shift) + + # compute atan(y / x) + ydx = mb.real_div(x=y, y=x_safe) + atan2_1 = mb.atan(x=ydx) + + # analyze quadrant + atan2_2 = mb.add(x=atan2_1, y=coeff1) + atan2_3 = mb.sub(x=atan2_2, y=coeff2) + atan2_4 = mb.mul(x=atan2_3, y=coeff3) + atan2_5 = mb.add(x=atan2_4, y=coeff4) + atan2_6 = mb.mul(x=atan2_5, y=coeff5) + context.add(mb.add(x=atan2_6, y=coeff6, name=node.name)) + + +@register_torch_op +def atanh(context, node): + inputs = _get_inputs(context, node, expected=1) + context.add(mb.atanh(x=inputs[0], name=node.name)) + + +@register_torch_op +def ceil(context, node): + inputs = _get_inputs(context, node, expected=1) + context.add(mb.ceil(x=inputs[0], name=node.name)) + + +@register_torch_op +def clamp(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + min_val = inputs[1] if inputs[1] else _np.finfo(_np.float32).min + max_val = inputs[2] if inputs[2] else _np.finfo(_np.float32).max + + if isinstance(min_val, Var) and isinstance(max_val, Var) and min_val.val >= max_val.val: + # When min >= max, PyTorch sets all values to max. 
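+        # e.g. torch.clamp(torch.tensor([0.0, 5.0]), min=3.0, max=1.0) -> tensor([1., 1.])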
+        context.add(mb.fill(shape=mb.shape(x=x), value=max_val.val, name=node.name))
+        return
+
+    is_input_int = types.is_int(x.dtype)
+    if not types.is_float(x.dtype):
+        # The `mb.clip` op requires parameters from type domain ['fp16', 'fp32'].
+        x = mb.cast(x=x, dtype="fp32")
+    x, min_val, max_val = promote_input_dtypes([x, min_val, max_val])
+    if is_input_int:
+        clip_res = mb.clip(x=x, alpha=min_val, beta=max_val)
+        context.add(mb.cast(x=clip_res, dtype="int32", name=node.name))
+    else:
+        context.add(mb.clip(x=x, alpha=min_val, beta=max_val, name=node.name))
+
+
+@register_torch_op
+def triu(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    diagonal = inputs[1]
+    diagonal = 0 if diagonal is None else diagonal.val
+    if diagonal <= 0:
+        res = mb.band_part(x=x, lower=-diagonal, upper=-1, name=node.name)
+    else:
+        y = mb.band_part(x=x, lower=-1, upper=diagonal - 1)
+        res = mb.sub(x=x, y=y, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def tril(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    diagonal = inputs[1]
+    diagonal = 0 if diagonal is None else diagonal.val
+    if diagonal >= 0:
+        res = mb.band_part(x=x, lower=-1, upper=diagonal, name=node.name)
+    else:
+        y = mb.band_part(x=x, lower=-diagonal - 1, upper=-1)
+        res = mb.sub(x=x, y=y, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def cos(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.cos(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def cosh(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.cosh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def exp(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.exp(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def exp2(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.exp2(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def floor(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.floor(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def reciprocal(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.inverse(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def log(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.log(x=inputs[0], name=node.name))
+
+
+@register_torch_op(torch_alias=["round"])
+def _round(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.round(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def rsqrt(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.rsqrt(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sin(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.sin(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sinh(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.sinh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def asinh(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.asinh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sqrt(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.sqrt(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def square(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    # mb.square is not supported in some backends,
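+    # so it is lowered as an elementwise self-multiplication, which is
+    # equivalent for real-valued inputs.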
+    context.add(mb.mul(x=inputs[0], y=inputs[0], name=node.name))
+
+
+@register_torch_op
+def tan(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.tan(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def tanh(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.tanh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def threshold(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    x = inputs[0]
+    alpha = inputs[1]
+    threshold_val = inputs[2]
+
+    # Simple case (threshold_val == alpha)
+    if alpha.val == threshold_val.val:
+        threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name)
+        context.add(threshold_node)
+        return
+
+    # Complex case (threshold_val != alpha)
+    threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name + '_threshold')
+    context.add(threshold_node)
+
+    gt_node = mb.greater_equal(x=alpha, y=x, name=node.name + '_ge')
+    context.add(gt_node)
+    gt_node_32 = mb.cast(x=gt_node, dtype="fp32", name=node.name + '_ge32')
+
+    mul_node = mb.linear_activation(
+        x=gt_node_32,
+        alpha=float(threshold_val.val - alpha.val),
+        beta=0.,
+        name=node.name + '_mul'
+    )
+    context.add(mul_node)
+
+    final_node = mb.add(x=mul_node, y=threshold_node, name=node.name)
+    context.add(final_node)
+
+
+@register_torch_op
+def sign(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    context.add(mb.sign(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def is_floating_point(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    is_float = types.is_float(inputs[0].dtype)
+    context.add(mb.const(val=is_float, name=node.name))
+
+
+@register_torch_op
+def logical_and(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = inputs
+    x = mb.cast(x=x, dtype="bool")
+    y = mb.cast(x=y, dtype="bool")
+    context.add(mb.logical_and(x=x, y=y, name=node.name))
+
+@register_torch_op
+def logical_or(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = inputs
+    x = mb.cast(x=x, dtype="bool")
+    y = mb.cast(x=y, dtype="bool")
+    context.add(mb.logical_or(x=x, y=y, name=node.name))
+
+
+@register_torch_op
+def logical_xor(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x, y = inputs
+    x = mb.cast(x=x, dtype="bool")
+    y = mb.cast(x=y, dtype="bool")
+    context.add(mb.logical_xor(x=x, y=y, name=node.name))
+
+
+def _nonzero_as_tuple(context, node, x):
+    '''
+    Calculates the non-zero elements of x, then slices the result by each inner index.
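+    This mirrors torch.nonzero(as_tuple=True), which returns one 1-D index
+    tensor per input dimension.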
+    '''
+    non_zero = mb.non_zero(x=x)
+
+    result = []
+    for i in range(x.rank):
+        result.append(
+            mb.slice_by_index(
+                x=non_zero,
+                begin=[0, i],
+                end=[-1, -1],  # Ignored, but required
+                end_mask=[True, False],
+                squeeze_mask=[False, True]
+            )
+        )
+
+    context.add(result, node.name)
+
+
+@register_torch_op
+def where(context, node):
+    inputs = _get_inputs(context, node)
+
+    if len(inputs) == 1:
+        _nonzero_as_tuple(context, node, inputs[0])
+        return
+
+    assert len(inputs) == 3
+    cond = inputs[0]
+    if not types.is_bool(cond.dtype):
+        # cond must be bool type
+        cond = mb.cast(x=cond, dtype="bool")
+    if not any([any_symbolic(x.shape) for x in inputs[:3]]):
+        # broadcast all tensors to the same shape
+        broadcast_inputs = _broadcast_tensors([cond, inputs[1], inputs[2]])
+        result = mb.select(
+            cond=broadcast_inputs[0],
+            a=broadcast_inputs[1],
+            b=broadcast_inputs[2],
+            name=node.name,
+        )
+    else:
+        result = mb.select(cond=cond, a=inputs[1], b=inputs[2], name=node.name)
+    context.add(result)
+
+
+@register_torch_op
+def nonzero_numpy(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    _nonzero_as_tuple(context, node, inputs[0])
+
+
+@register_torch_op
+def neg(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x, y = promote_input_dtypes([inputs[0], -1])
+    context.add(mb.mul(x=x, y=y, name=node.name))
+
+@register_torch_op
+def topk(context, node):
+    def dynamic_topk(x, k, axis, ascending):
+        assert k.val is None, "Please use mb.topk directly if k is known at compile time"
+        indices = mb.argsort(x=x, axis=axis, ascending=ascending)
+        values = mb.gather_along_axis(x=x, indices=indices, axis=axis)
+
+        k_indices = mb.range_1d(end=k, start=0, step=1)
+        values = mb.gather(x=values, indices=k_indices, axis=axis)
+        indices = mb.gather(x=indices, indices=k_indices, axis=axis)
+
+        return values, indices
+
+    inputs = _get_inputs(context, node)
+    kwargs = {"name": node.name, "x": inputs[0], "k": inputs[1]}
+
+    if len(inputs) > 6:
+        raise Exception("Number of inputs to topk exceeds 6")
+    # optional: @axis
+    if len(inputs) > 2:
+        if inputs[2] is not None:
+            kwargs["axis"] = inputs[2].val
+
+    # optional: @ascending
+    if len(inputs) > 3:
+        largest = inputs[3].val
+        kwargs["ascending"] = not largest
+
+    # the last inputs to topk are optional - sorted and out.
+    sort = True
+    if len(inputs) > 4:
+        if inputs[4].val is False and not is_current_opset_version_compatible_with(target.iOS16):
+            raise Exception("For opset < iOS16, only sorted=True is supported for topk")
+        sort = inputs[4].val
+
+    if len(inputs) > 5:
+        if inputs[5] is not None:
+            raise Exception(
+                "Unsupported value for argument 'out' in topk. Supported values: None, but input "
+                "is {}".format(inputs[5].val)
+            )
+
+    if is_current_opset_version_compatible_with(target.iOS16):
+        kwargs["sort"] = sort
+
+    if kwargs["k"].val is None:
+        res = dynamic_topk(
+            x=kwargs["x"],
+            k=kwargs["k"],
+            axis=kwargs["axis"],
+            ascending=kwargs["ascending"]
+        )
+    else:
+        res = mb.topk(**kwargs)
+
+    values_name = node.outputs[0]
+    indices_name = node.outputs[1]
+    context.add(res[0], torch_name=values_name)
+    context.add(res[1], torch_name=indices_name)
+
+
+def _std(x, axes, keep_dim, unbiased, eps):
+    need_rescale = False
+    if unbiased:
+        # If "unbiased" is True,
+        # then we need to divide by "N-1" (instead of "N") to compute the mean of (x-E[x])^2
+        # for an unbiased estimate of the variance / standard deviation.
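+        # (This is Bessel's correction: the unbiased estimate rescales the
+        # biased, "N"-based one by sqrt(N / (N - 1)), which is what
+        # `rescale_factor` below computes.)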
+        # In the sequence of MIL ops added below, we first compute the mean using "N", and only
+        # if it's unbiased do we rescale the final result later.
+        # We ignore the "unbiased" flag if any of the dimensions involved in this operation are dynamic
+        # (we could still have handled that case by using "get_shape" etc. ops, but we don't do that here,
+        # trading numerical accuracy for performance)
+        if axes is None:
+            if not any_symbolic(x.shape) and _np.prod(x.shape) > 1:
+                N = _np.prod(x.shape)
+                need_rescale = True
+        else:
+            dims = []
+            # collect dimensions corresponding to "axes"
+            for axis in axes:
+                dims.append(x.shape[axis])
+            if all([not is_symbolic(s) for s in dims]):
+                N = _np.prod(dims)
+                if N > 1:
+                    need_rescale = True
+    if need_rescale:
+        rescale_factor = _np.sqrt(N / float(N - 1))
+
+    x_mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
+    x_demeaned = mb.sub(x=x, y=x_mean)
+    x_demeaned_square = mb.square(x=x_demeaned)
+    x_demeaned_square_mean = mb.reduce_mean(x=x_demeaned_square, axes=axes, keep_dims=keep_dim)
+    if eps > 0:
+        x_demeaned_square_mean = mb.add(x=x_demeaned_square_mean, y=eps)
+    if need_rescale:
+        y_before_scale = mb.sqrt(x=x_demeaned_square_mean)
+        y = mb.mul(x=y_before_scale, y=rescale_factor)
+    else:
+        y = mb.sqrt(x=x_demeaned_square_mean)
+    return y
+
+@register_torch_op
+def numel(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+    x = mb.shape(x=x)
+    x = mb.reduce_prod(x=x, axes=[0], name=node.name)
+    context.add(x)
+
+@register_torch_op
+def std(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    if not (len(inputs) == 2 or len(inputs) == 4):
+        raise ValueError("Number of inputs to the 'std' op must be 2 or 4")
+
+    keep_dim = False
+    axes = None
+    if len(inputs) == 2:
+        unbiased = inputs[1].val
+    if len(inputs) == 4:
+        axes = inputs[1].val
+        if isinstance(axes, int):
+            axes = [axes]
+        unbiased = inputs[2].val
+        keep_dim = inputs[3].val
+
+    y = _std(x, axes, keep_dim, unbiased, 0)
+    context.add(y, node.name)
+
+
+@register_torch_op
+def copy(context, node):
+    inputs = _get_inputs(context, node, expected=[2, 3])
+    context.add(mb.identity(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def dtype(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    dtype_str = inputs[0].dtype.__name__
+    context.add(mb.const(val=dtype_str, name=node.name))
+
+
+@register_torch_op
+def tensor(context, node):
+    def _make_tensor(list_of_tensor, name, rank):
+        if rank == 6:
+            raise NotImplementedError("Core ML only supports tensor rank <= 5.")
+        if not isinstance(list_of_tensor, list):
+            return list_of_tensor
+        values = [
+            _make_tensor(x, name + "_r_" + str(i), rank + 1)
+            for i, x in enumerate(list_of_tensor)
+        ]
+        if len(values) == 1:
+            return mb.expand_dims(x=values[0], axes=[0], name=name)
+        return mb.stack(values=values, axis=0, name=name)
+
+    inputs = _get_inputs(context, node, expected=4)
+
+    # Case 1: Using torch.tensor to create a const tensor
+    # For example:
+    #     torch.tensor([[[0, 0], [0, 10], [5, 10], [5, 0]]], dtype=torch.float32)
+    val = inputs[0]
+    if isinstance(val, list):
+        context.add(_make_tensor(val, node.name, 1))
+        return
+
+    if inputs[2] is None:
+        context.add(mb.identity(x=val, name=node.name))
+        return
+
+    # Case 2: Create a tensor filled with a single value
+    val = val.val  # element val to fill
+    msg_prefix = 'torch::tensor {} '.format(node.name)
+    if val is None:
+        raise ValueError(msg_prefix + 'val is None')
+    dtype_str = inputs[1].val
+    if dtype_str != "fp32":
+        raise NotImplementedError(
            msg_prefix + "Unsupported dtype: {}".format(dtype_str)
+        )
+    # inputs[3] is a bool (not sure what it is)
+    shape = mb.shape(x=inputs[2], name=node.name + "_shape")
+    context.add(mb.fill(shape=shape, value=val, name=node.name))
+
+
+"""
+Pack and unpack ops in pytorch.
+The typical pattern is as follows:
+
+>>> seq = torch.tensor([[1,2,0], [3,0,0], [4,5,6]])
+>>> lens = [2, 1, 3]
+>>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False)
+>>> packed
+PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]),
+               sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0]))
+>>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)
+>>> seq_unpacked
+tensor([[1, 2, 0],
+        [3, 0, 0],
+        [4, 5, 6]])
+>>> lens_unpacked
+tensor([2, 1, 3])
+
+Source: https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pad_packed_sequence.html
+"""
+
+
+@register_torch_op
+def _pack_padded_sequence(context, node):
+    # The implementation of this op is not efficient. Raise a warning.
+    logger.warning(
+        "Encountered a _pack_padded_sequence layer. Translating the pack/unpack ops "
+        "from pytorch is not efficient due to the current limitations of Core ML. Removing the "
+        "pack-unpack logic and using a fixed batch size model is recommended."
+    )
+
+    inputs = _get_inputs(context, node, expected=3)
+    tensor_name, batch_sizes_name = node.outputs
+    tensor_input = inputs[0]
+    batch_sizes = inputs[1]
+    batch_first = inputs[2].val
+
+    # by assuming that the output of this op is always fed into an lstm layer,
+    # we enforce the layout to be Batch * seq_length * Feature.
+    if not batch_first:
+        tensor_input = mb.transpose(x=tensor_input, perm=[1, 0, 2])
+    context.add(mb.identity(x=tensor_input, name=tensor_name))
+
+    # add the batch_sizes to the context, so that _pad_packed_sequence can
+    # find it later.
+    context.add(mb.identity(x=batch_sizes, name=batch_sizes_name))
+
+
+@register_torch_op
+def _pad_packed_sequence(context, node):
+    # The implementation of this op is not efficient. Raise a warning.
+    logger.warning(
+        "Encountered a _pad_packed_sequence layer. Translating the pack/unpack ops "
+        "from pytorch is not efficient due to the current limitations of Core ML. Removing the "
+        "pack-unpack logic and using a fixed batch size model is recommended."
+    )
+    inputs = _get_inputs(context, node)
+
+    # seq_lengths denotes the actual sequence length for each batch.
+    # pad denotes the padding value for those data which have shorter lengths.
+    input_tensor = inputs[0]
+    seq_lengths = inputs[1]
+    batch_first = inputs[2].val
+    pad = inputs[3].val
+
+    # we only support pack and unpack translation for static tensor shapes,
+    # i.e., all three dimensions are known at compile time.
+    if any([is_symbolic(x) for x in input_tensor.shape]):
+        raise NotImplementedError("Only static shape of PackedSequence object is supported.")
+
+    # the input always has batch first layout.
+    # padded_seq_len denotes the maximum sequence length across batches.
+    batch, padded_seq_len, input_dim = input_tensor.shape
+    assert seq_lengths.rank == 1
+    assert batch == seq_lengths.shape[0]
+
+    # we iterate through the batch, pad each data, and concatenate them into a single tensor in the end,
+    # which is the total_tensor here.
+    # Say the input_tensor has shape [batch, padded_seq_len, input_dim],
+    # and the seq_lengths = [len_1, len_2, len_3].
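+    # After unpacking, row i keeps its first len_i entries and the remaining
+    # padded_seq_len - len_i entries are filled with `pad`.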
+    # Note that in pytorch, the seq_lengths must be decreasing in order, len_1 >= len_2 >= len_3.
+    total_tensor = []
+
+    for i in range(batch):
+        # slice for each data
+        # x has shape [padded_seq_len, input_dim]
+        x = mb.slice_by_index(
+            x=input_tensor,
+            begin=[i, 0, 0],
+            end=[0, 0, 0],
+            stride=[1, 1, 1],
+            begin_mask=[False, True, True],
+            end_mask=[False, True, True],
+            squeeze_mask=[True, False, False],
+        )
+
+        # get the unpadded sequence,
+        # if the unpadded sequence has length seq_length,
+        # x would have shape [seq_length, input_dim].
+        # For example, the first data would result in a [len_1, input_dim] tensor.
+        seq_length = mb.cast(x=value_at(seq_lengths, i), dtype="int32")
+        concate_values = [seq_length, input_dim]
+        end_index = mb.concat(values=concate_values, axis=0)
+        x = mb.slice_by_index(
+            x=x,
+            begin=[0, 0],
+            end=end_index,
+            stride=[1, 1],
+            begin_mask=[True, True],
+            end_mask=[False, True],
+        )
+
+        # get the padding part of the data
+        # Note that we always add one dummy padding at the end with shape [padded_seq_len - seq_length + 1, input_dim].
+        # The reason is that for the case when seq_length = padded_seq_len,
+        # Core ML cannot handle the empty tensor.
+        pad_length = mb.sub(x=padded_seq_len + 1, y=seq_length)
+        concate_values = [pad_length, input_dim]
+        shape = mb.concat(values=concate_values, axis=0)
+        pad_values = mb.fill(shape=shape, value=pad)
+
+        # concatenate the unpadded sequence and the padding data
+        # the resulting tensor would have shape [padded_seq_len + 1, input_dim]
+        x, pad_values = promote_input_dtypes([x, pad_values])
+        concate_values = [x, pad_values]
+        add_values = mb.concat(values=concate_values, axis=0)
+
+        # trim the dummy padding tensor
+        # the output would have shape [padded_seq_len, input_dim]
+        x = mb.slice_by_index(
+            x=add_values,
+            begin=[0, 0],
+            end=[padded_seq_len, 0],
+            stride=[1, 1],
+            begin_mask=[True, True],
+            end_mask=[False, True],
+        )
+
+        # add it to the total tensor
+        total_tensor.append(x)
+
+    # transpose the tensor if batch_first = False
+    if not batch_first:
+        x = mb.stack(values=total_tensor, axis=0)
+        x = mb.transpose(x=x, perm=[1, 0, 2], name=node.name)
+    else:
+        x = mb.stack(values=total_tensor, axis=0, name=node.name)
+
+    context.add(x)
+
+
+@register_torch_op
+def log10(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    log_x = mb.log(x=x)
+    context.add(mb.mul(x=log_x, y=1 / _np.log(10.0)), node.name)
+
+
+@register_torch_op
+def log2(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    log_x = mb.log(x=x)
+    context.add(mb.mul(x=log_x, y=1 / _np.log(2.0)), node.name)
+
+
+@register_torch_op
+def flip(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = mb.reverse(x=inputs[0], axes=inputs[1], name=node.name)
+    context.add(x, node.name)
+
+
+@register_torch_op(torch_alias=["reflection_pad1d"])
+def reflection_pad2d(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    torch_pad = inputs[1].val
+    pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()
+    pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))
+    context.add(mb.pad(x=x, pad=pad, mode='reflect'), node.name)
+
+
+@register_torch_op(torch_alias=["replication_pad1d"])
+def replication_pad2d(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    torch_pad = inputs[1].val
+    pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()
+    pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))
+    context.add(mb.pad(x=x, pad=pad, mode='replicate'), node.name)
+
+
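+# A quick sanity check of the padding layout conversion above (illustrative
+# values, not from the original source): for a rank-4 input and torch padding
+# (left, right, top, bottom) = (1, 2, 3, 4),
+#     torch_pad.reshape((-1, 2))[::-1].ravel() -> [3, 4, 1, 2]
+#     _np.pad(pad_flipped, (4, 0))             -> [0, 0, 0, 0, 3, 4, 1, 2]
+# i.e. zero padding on the batch/channel dims followed by (before, after)
+# pairs per spatial dim, which is the layout mb.pad expects.
+
+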
+def _broadcast_tensors(tensors): + def _solve_broadcast_shape(shapes): + rank = _np.max([len(shape) for shape in shapes]) + shapes = [[1] * (rank - len(shape)) + shape for shape in shapes] + result_shape = [] + for i in range(rank): + dims = [shapes[j][i] for j in range(len(tensors))] + if any_symbolic(dims): + # rdar://85559497 (Handle dynamic shapes inputs broadcast for pytorch) + raise NotImplementedError( + "Only static shaped inputs are supported for torch.broadcast_tensors conversion." + ) + result_shape.append(_np.max(dims)) + return result_shape + + if len(tensors) == 1: + return tensors + + # solve the broadcast shape + input_shapes = [list(x.shape) for x in tensors] + broadcast_shape = _solve_broadcast_shape(input_shapes) + + # do the broadcasting + results = [] + for tensor in tensors: + name = tensor.name + "_after_broadcast" + results.append(_broadcast(name, tensor, broadcast_shape)) + return results + + +@register_torch_op +def broadcast_tensors(context, node): + inputs = _get_inputs(context, node) + context.add(_broadcast_tensors(inputs[0]), node.name) + + +def _scatter(context, inputs, mode, name): + data = inputs[0] + axis = inputs[1].val + indices = inputs[2] + updates = inputs[3] + if types.is_scalar(updates.sym_type): + updates = mb.fill(shape=indices.shape, value=updates.val, name=name) + result = mb.scatter_along_axis(data=data, indices=indices, updates=updates, + axis=axis, mode=mode, name=name) + context.add(result) + + +@register_torch_op +def scatter(context, node): + inputs = _get_inputs(context, node) + assert len(inputs) in (4, 5) + + # Determine reduce/mode parameter + if len(inputs) == 5: + mode = inputs[4].val + if mode == 'multiply': + mode = 'mul' + else: + assert mode == 'add' + else: + mode = 'update' + + _scatter(context, inputs, mode, node.name) + + +@register_torch_op +def scatter_add(context, node): + inputs = _get_inputs(context, node) + _scatter(context, inputs, 'add', node.name) + + +@register_torch_op +def baddbmm(context, node): + """ + baddbmm(Tensor input, Tensor batch1, Tensor batch2, Scalar beta=1, Scalar alpha=1) + output = beta * input + alpha * batch1 * batch2 + + Notice that batch1 and batch2 must be 3-D tensors each containing the same number of matrices. + If batch1 is a (b×n×m) tensor, batch2 is a (b×m×p) tensor, then input must be broadcastable with a (b×n×p) tensor + and out will be a (b×n×p) tensor. + """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=5) + bias, batch1, batch2, beta, alpha = inputs + + if beta.val != 1.0: + # Apply scaling factor beta to the bias. + bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled") + context.add(bias) + + if alpha.val != 1.0: + # Apply scaling factor alpha to the input. + batch1 = mb.mul(x=alpha, y=batch1, name=batch1.name + "_scaled") + context.add(batch1) + + bmm_node = mb.matmul(x=batch1, y=batch2, name=node.name + "_bmm") + context.add(bmm_node) + + baddbmm_node = mb.add(x=bias, y=bmm_node, name=node.name) + context.add(baddbmm_node) + + +@register_torch_op +def glu(context, node): + """ + glu(Tensor input, Scalar dim=-1) + Applies the gated linear unit function GLU(a,b)=a⊗σ(b) where a is the first half of the input matrices and b is the + second half. 
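+    For example, an input of shape (N, 2d) with dim=-1 produces an output of
+    shape (N, d).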
+ """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=2) + input, axis = inputs + + first_half, second_half = mb.split(x=input, num_splits=2, axis=axis.val, name=node.name + "_split") + context.add(first_half) + context.add(second_half) + + sigmoid_second_half = mb.sigmoid(x=second_half, name=second_half.name + "_sigmoid") + context.add(sigmoid_second_half) + + glu_node = mb.mul(x=first_half, y=sigmoid_second_half, name=node.name) + context.add(glu_node) + + +@register_torch_op +def hstack(context, node): + """ + hstack(List[Tensor] tensors, Optional[Tensor] out) + Stack tensors in sequence horizontally (column wise). This is equivalent to concatenation along the first axis for + 1-D tensors, and along the second axis for all other tensors. + """ + inputs = _get_inputs(context, node) + tensors = inputs[0] + input_shapes = [list(x.shape) for x in tensors] + # Concatenates along the first axis for 1-D tensors, and along the second axis for all other tensors. + axis = 0 if len(input_shapes[0]) == 1 else 1 + hstack_node = mb.concat(values=tensors, axis=axis, name=node.name) + context.add(hstack_node) + + +@register_torch_op +def remainder(context, node): + """ + remainder(Tensor dividend, Tensor divisor, Optional[Tensor] out) + Computes Python’s modulus operation entrywise. The result has the same sign as the divisor and its absolute value + is less than that of divisor. It may also be defined in terms of torch.div() as: + remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + """ + # Don't specify `expected` because the parameter `out` is optional. + inputs = _get_inputs(context, node) + dividend, divisor = promote_input_dtypes([inputs[0], inputs[1]]) + div_node = mb.floor_div(x=dividend, y=divisor, name=node.name + "_div") + context.add(div_node) + scaled_div = mb.mul(x=div_node, y=divisor, name=div_node.name + "_scaled") + context.add(scaled_div) + remainder_node = mb.sub(x=dividend, y=scaled_div, name=node.name) + context.add(remainder_node) + + +@register_torch_op +def hann_window(context, node): + inputs = _get_inputs(context, node, expected=[5, 6]) + if inputs[0].val is None: + raise NotImplementedError("variable 'window_length' not supported.") + + periodic = True + if len(inputs) == 6: + if inputs[1].val is None: + raise NotImplementedError("variable 'periodic' not supported.") + if not inputs[1].val: + periodic = False + + size = (inputs[0].val,) + if inputs[0].val <= 1: + one = mb.fill(shape=size, value=1.0, name=node.name) + context.add(one) + return + + ones = mb.fill(shape=size, value=1.0) + cum = mb.cumsum(x=ones, axis=0) + seq = mb.sub(x=cum, y=ones) + pi = mb.fill(shape=size, value=_math.pi) + window_length_float = mb.cast(x=inputs[0], dtype="fp32") + if not periodic: + window_length_float = mb.sub(x=window_length_float, y=ones) + denominator = mb.fill(shape=size, value=window_length_float) + numerator = mb.mul(x=seq, y=pi) + frac = mb.real_div(x=numerator, y=denominator) + sin = mb.sin(x=frac) + sin_sq = mb.mul(x=sin, y=sin, name=node.name) + context.add(sin_sq) + +@register_torch_op +def mse_loss(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + y = inputs[1] + reduction = inputs[2].val + + diff = mb.sub(x=x, y=y) + + if reduction == 0: + # reduction is "none" + res = mb.mul(x=diff, y=diff, name=node.name) + context.add(res) + return + + square = mb.mul(x=diff, y=diff) + if reduction == 1: + # reduction is "mean" + res = mb.reduce_mean(x=square, axes=None, name=node.name) + + elif reduction == 2: + # 
reduction is "sum"
+        res = mb.reduce_sum(x=square, axes=None, name=node.name)
+    else:
+        raise ValueError("Reduction is not supported")
+
+    context.add(res)
+
+@register_torch_op
+def trace(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+    dims = mb.shape(x=x)
+    dim0 = value_at(dims, 0)
+    dim1 = value_at(dims, 1)
+    min_dim = mb.minimum(x=dim0, y=dim1)
+    indices = mb.range_1d(end=min_dim, start=0, step=1)
+    indices = mb.stack(values=[indices, indices], axis=1)
+    diagonal = mb.gather_nd(x=x, indices=indices)
+    trace = mb.reduce_sum(x=diagonal, name=node.name)
+    context.add(trace)
+
+@register_torch_op
+def roll(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    x = inputs[0]
+    shift = inputs[1].val
+    dims = inputs[2].val
+    origin_shape = mb.shape(x=x)
+
+    need_flatten = len(dims) == 0
+
+    if need_flatten:
+        # The tensor is flattened before rolling
+        x = mb.reshape(x=x, shape=[-1])
+        dims = [0]
+
+    shape = mb.shape(x=x)
+
+    for s, i in zip(shift, dims):
+        dim = value_at(shape, i)
+        s = mb.mod(x=s, y=dim)
+        start_idx = mb.sub(x=dim, y=s)
+        indices0 = mb.range_1d(end=dim, start=start_idx, step=1)
+        indices1 = mb.range_1d(end=start_idx, start=0, step=1)
+        indices = mb.concat(values=[indices0, indices1], axis=0)
+        x = mb.gather(x=x, indices=indices, axis=i)
+
+    if need_flatten:
+        x = mb.reshape(x=x, shape=origin_shape)
+
+    context.add(x, node.name)
+
+
+@register_torch_op
+def im2col(context, node):
+    """
+    Extract sliding local blocks from a batched input tensor (rank=4).
+
+    torch.nn.functional.unfold aims to be the general version: im2col is the rank=4 case of unfold.
+    PyTorch currently only supports rank=4 input: torch.nn.functional.unfold redispatches to at::im2col,
+    which is why coremltools needs im2col to convert torch.nn.functional.unfold.
+
+    We currently only support rank=4 input (consistent with PyTorch) and dilation set to 1.
+    More flexible dilation support will be added in the future.
+
+    Reference: https://pytorch.org/docs/stable/generated/torch.nn.Unfold.html
+    """
+    inputs = _get_inputs(context, node, expected=5)
+    x = inputs[0]
+    kernel_size = inputs[1].val
+    dilation = inputs[2].val
+    padding = inputs[3].val
+    stride = inputs[4].val
+
+    if x.rank != 4:
+        raise ValueError("Only supports rank=4 input data for im2col (unfold).")
+    if not (dilation[0] == 1 and dilation[1] == 1):
+        raise ValueError("Only supports dilation=1 for im2col (unfold).")
+
+    # for simplicity, we explicitly pad; TODO: implicit padding would be more efficient
+    # torch.unfold padding has different semantics
+    # * for torch.unfold
+    #       x.shape[i + x.rank - padding.rank] = padding[i] + x.shape[i + x.rank - padding.rank] + padding[i]
+    #   taking x.rank = 4 and padding.rank = 2 as an example:
+    #       x.shape[0 + 4 - 2] = padding[0] + x.shape[0 + 4 - 2] + padding[0]
+    #       x.shape[1 + 4 - 2] = padding[1] + x.shape[1 + 4 - 2] + padding[1]
+    # * for mb.pad(x=x, pad=pad, mode="constant")
+    #       x.shape[i] = pad[2 * i] + x.shape[i] + pad[2 * i + 1]
+    # * for torch.nn.functional.pad
+    #       x.shape[-1] = padding[0] + x.shape[-1] + padding[1]
+    #       x.shape[-2] = padding[2] + x.shape[-2] + padding[3]
+    #       ...
+    #       x.shape[-i] = padding[2 * i - 2] + x.shape[-i] + padding[2 * i - 1]
+    # so we need to convert torch.unfold padding to mb.pad(mode="constant") pad
+    missing_dims = x.rank - len(padding)
+    pad = [0, 0] * missing_dims + _np.array(padding).repeat(2).tolist()
+    x = mb.pad(x=x, pad=pad, mode="constant")
+
+    N, C, H, W = x.shape
+
+    # Get total number of blocks. 
It follows the formula in the torch.nn.Unfold documentation.
+    spatial_size = (H, W)
+    block_count = 1
+    for i in range(2):
+        block_count *= (
+            _np.floor(
+                # the original formula is
+                #     (spatial_size[i] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]
+                # since we have explicitly padded, we no longer add 2 * padding[i] to spatial_size[i]
+                (spatial_size[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]
+            ).astype(_np.int32)
+            + 1
+        )
+
+    """
+    The implementation below assumes x to be contiguous
+    """
+
+    # Get batch block indices.
+    batch_idx = _np.arange(N)[:, None, None] * C * H * W
+
+    # Get starting block indices.
+    start_idx = _np.arange(kernel_size[0])[None, :, None] * W + _np.arange(
+        kernel_size[1]
+    )
+
+    # Generate depth indices.
+    channel_index = H * W * _np.arange(C)
+    start_idx = (channel_index[None, :, None] + _np.ravel(start_idx)).reshape(
+        (-1, kernel_size[0], kernel_size[1])
+    )
+
+    # Get offset indices across the height and width of the input array.
+    row_extent = H - kernel_size[0] + 1
+    col_extent = W - kernel_size[1] + 1
+    offset_idx = _np.arange(0, row_extent, stride[0])[None, :, None] * W + _np.arange(0, col_extent, stride[1])
+    indices = _np.ravel(start_idx)[:, None] + _np.ravel(offset_idx)
+
+    # Gather batches together.
+    indices = batch_idx + indices
+    x = mb.reshape(x=x, shape=[-1])
+    gathered_data = mb.gather_along_axis(x=x, indices=indices.reshape(-1), axis=0)
+    block_size = C * kernel_size[0] * kernel_size[1]
+    output = mb.reshape(
+        x=gathered_data, shape=(N, block_size, block_count), name=node.name
+    )
+
+    context.add(output)
+
+
+@register_torch_op
+def complex(context, node):
+    real_part, imag_part = _get_inputs(context, node, expected=2)
+    result = mb.complex(real_data=real_part, imag_data=imag_part)
+    context.add(result, node.name)
+
+
+@register_torch_op
+def real(context, node):
+    input_data = _get_inputs(context, node, expected=1)[0]
+    if types.is_complex(input_data.dtype):
+        real_part = mb.complex_real(data=input_data)
+        context.add(real_part, node.name)
+    else:
+        context.add(input_data, node.name)
+
+
+@register_torch_op
+def imag(context, node):
+    input_data = _get_inputs(context, node, expected=1)[0]
+    if not types.is_complex(input_data.dtype):
+        # Keep consistent with PyTorch.
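+        # (torch.imag likewise raises an error for non-complex dtypes.)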
+        raise ValueError("The `imag` op only supports complex input.")
+    real_part = mb.complex_imag(data=input_data)
+    context.add(real_part, node.name)
+
+
+@register_torch_op
+def fft_fft(context, node):
+    """Lowers torch.fft.fft by the dialect op `complex_fft` from complex_dialect_ops.py."""
+    input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+    fft_res = mb.complex_fft(data=input_data, n=n, dim=dim, norm=norm)
+    context.add(fft_res, node.name)
+
+
+@register_torch_op
+def fft_fftn(context, node):
+    """Lowers torch.fft.fftn by the dialect op `complex_fftn` from complex_dialect_ops.py."""
+    input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+    fft_res = mb.complex_fftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+    context.add(fft_res, node.name)
+
+
+@register_torch_op
+def fft_rfft(context, node):
+    """Lowers torch.fft.rfft by the dialect op `complex_rfft` from complex_dialect_ops.py."""
+    input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+    rfft_res = mb.complex_rfft(data=input_data, n=n, dim=dim, norm=norm)
+    context.add(rfft_res, node.name)
+
+
+@register_torch_op
+def fft_rfftn(context, node):
+    """Lowers torch.fft.rfftn by the dialect op `complex_rfftn` from complex_dialect_ops.py."""
+    input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+    rfft_res = mb.complex_rfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+    context.add(rfft_res, node.name)
+
+
+@register_torch_op
+def fft_ifft(context, node):
+    """Lowers torch.fft.ifft by the dialect op `complex_ifft` from complex_dialect_ops.py."""
+    input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+    ifft_res = mb.complex_ifft(data=input_data, n=n, dim=dim, norm=norm)
+    context.add(ifft_res, node.name)
+
+
+@register_torch_op
+def fft_ifftn(context, node):
+    """Lowers torch.fft.ifftn by the dialect op `complex_ifftn` from complex_dialect_ops.py."""
+    input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+    ifftn_res = mb.complex_ifftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+    context.add(ifftn_res, node.name)
+
+
+@register_torch_op
+def fft_irfft(context, node):
+    """Lowers torch.fft.irfft by the dialect op `complex_irfft` from complex_dialect_ops.py."""
+    input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+    irfft_res = mb.complex_irfft(data=input_data, n=n, dim=dim, norm=norm)
+    context.add(irfft_res, node.name)
+
+
+@register_torch_op
+def fft_irfftn(context, node):
+    """Lowers torch.fft.irfftn by the dialect op `complex_irfftn` from complex_dialect_ops.py."""
+    input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+    irfftn_res = mb.complex_irfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+    context.add(irfftn_res, node.name)
+
+
+@register_torch_op(torch_alias=["torchvision::nms"])
+def torchvision_nms(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    boxes, scores = promote_input_dtypes([inputs[0], inputs[1]])
+    iou_threshold = inputs[2].val
+    # Use float min to avoid boxes being pruned by scores in the MIL NMS op.
+    score_threshold = (
+        _np.finfo(_np.float16).min
+        if boxes.dtype._width == 16
+        else _np.finfo(_np.float32).min
+    )
+
+    box_num = boxes.shape[0]
+    if is_symbolic(box_num):
+        # When the number of boxes is unknown at compile time, use a large number to avoid valid
+        # boxes getting pruned. We don't use _np.iinfo(_np.int32).max here because it triggers a
+        # segmentation fault in the MIL NMS op.
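+        # (10000 is a heuristic upper bound rather than a documented limit;
+        # detections beyond it would be silently dropped.)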
+        box_num = 10000
+
+    # The boxes' coordinates from the PyTorch input are in (x1, y1, x2, y2) format with 0 <= x1 < x2 and
+    # 0 <= y1 < y2. However, the MIL NMS op expects CENTER_SIZE_WIDTH_FIRST format, which is
+    # (x, y, width, height) where (x, y) is the center coordinate.
+    x1, y1, x2, y2 = mb.split(x=boxes, num_splits=4, axis=-1)
+    # For numerical stability, use x1+(x2-x1)/2 instead of (x1+x2)/2 to calculate the center coordinate.
+    width = mb.sub(x=x2, y=x1)
+    height = mb.sub(x=y2, y=y1)
+    center_x = mb.add(x=x1, y=mb.real_div(x=width, y=2.0))
+    center_y = mb.add(x=y1, y=mb.real_div(x=height, y=2.0))
+    boxes = mb.concat(values=[center_x, center_y, width, height], axis=-1)
+
+    # Expand dims to construct the batch dim and score class dim expected by the MIL NMS op.
+    boxes = mb.expand_dims(x=boxes, axes=[0])
+    scores = mb.expand_dims(x=scores, axes=[0, -1])
+
+    _, _, indices, valid_outputs = mb.non_maximum_suppression(
+        boxes=boxes,
+        scores=scores,
+        max_boxes=box_num,
+        iou_threshold=iou_threshold,
+        score_threshold=score_threshold,
+    )
+
+    indices = mb.squeeze(x=indices, axes=[0])
+    valid_outputs = mb.squeeze(x=valid_outputs, axes=[0])
+    range = mb.range_1d(end=valid_outputs, start=0, step=1)
+    indices = mb.cast(x=indices, dtype="fp32")
+    valid_indices = mb.gather(x=indices, indices=range, axis=0)
+    valid_indices = mb.cast(x=valid_indices, dtype="int32", name=node.name)
+    context.add(valid_indices)
+
+
+@register_torch_op
+def tupleindex(context, node):
+    tuple_input, index_input = _get_inputs(context, node, expected=2)
+    context.add(tuple_input[index_input.val], node.name)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py
new file mode 100644
index 00000000..2dac14c2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import torch_tensor_assign_to_core, torch_upsample_to_core_upsample
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py
new file mode 100644
index 00000000..a24a31c3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="torch")
+class torch_tensor_assign_to_core(AbstractGraphPass):
+    """
+    Map the Torch dialect op `torch_tensor_assign` into the core opset.
+
+    Currently, we transform the torch_tensor_assign op using mb.scatter.
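+    Concretely, the rewrite below materializes the flat indices of the sliced
+    region (range_1d + reshape + slice_by_index) and then writes `updates`
+    back with mb.scatter(mode="update").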
+ """ + def apply(self, prog): + for f in prog.functions.values(): + _torch_tensor_assign_to_core_block(f) + +@block_context_manager +def _torch_tensor_assign_to_core_block(block): + for op in block.operations[:]: + for b in op.blocks: + _torch_tensor_assign_to_core_block(b) + + if op.op_type in ["torch_tensor_assign"]: + _transform_tensor_assign(op, block) + + +def _transform_tensor_assign(op, block): + shape = mb.shape(x=op.data, before_op=op) + dim_prod = mb.reduce_prod(x=shape, before_op=op) + ref_indices = mb.range_1d(end=dim_prod, start=0, step=1, before_op=op) + ref_indices = mb.reshape(x=ref_indices, shape=shape, before_op=op) + ref_sliced_indices = mb.slice_by_index( + x=ref_indices, + begin=op.begin, + end=op.end, + stride=op.stride, + begin_mask=op.begin_mask, + end_mask=op.end_mask, + squeeze_mask=op.squeeze_mask, + before_op=op, + ) + flatten_indices = mb.reshape(x=ref_sliced_indices, shape=[-1], before_op=op) + flatten_updates = mb.reshape(x=op.updates, shape=[-1], before_op=op) + flatten_data = mb.reshape(x=op.data, shape=[-1], before_op=op) + new_data = mb.scatter( + data=flatten_data, + indices=flatten_indices, + updates=flatten_updates, + mode="update", + before_op=op + ) + new_data = mb.reshape(x=new_data, shape=shape, before_op=op) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=new_data + ) + # Remove all the ops at once + block.remove_ops([op]) \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py new file mode 100644 index 00000000..d8864f80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py @@ -0,0 +1,135 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +target_ops = [ + "torch_upsample_nearest_neighbor", + "torch_upsample_bilinear", +] + + +@register_pass(namespace="torch") +class torch_upsample_to_core_upsample(AbstractGraphPass): + """ + Try to map Torch dialect ops + 1. `torch_upsample_nearest_neighbor` + 2. `torch_upsample_bilinear` + to `upsample_nearest_neighbor` or `upsample_bilinear` in the core op set if compatible. 
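+    The mapping succeeds only when constant scale factors can be traced back
+    through the dynamic output-size subgraph (see _try_get_upsample_factor below).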
+
+    Inputs:
+
+        prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _torch_upsample_to_core_upsample_block(f)

+@block_context_manager
+def _torch_upsample_to_core_upsample_block(block):
+    for op in block.operations[:]:
+        for b in op.blocks:
+            _torch_upsample_to_core_upsample_block(b)
+
+        if op.op_type in target_ops:
+            if _try_replace_with_core_upsample(op):
+                logger.info("Successfully mapped {} to core upsample".format(op.op_type))
+            else:
+                raise ValueError("Unable to map {} to core upsample".format(op.op_type))
+
+
+def _try_get_upsample_factor(output_size):
+    op = output_size
+    # If the output has a value, then the upsample op itself is derived from the upsample_1d op,
+    # so we can just return scale factor 1 for that case
+    if op.outputs[0].val is not None:
+        assert op.outputs[0].val == 1.
+        return 1.
+
+    # output_size = [
+    #     (torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float()))
+    #     for i in range(dim)
+    # ]
+    # source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+    # We validate that we can trace all the way back to the original scale_factor.
+    # The whole sequence is mul(input_size, scale_factor) -> cast(fp32) -> floor() -> cast(int32)
+
+    # 1. check if the output_size is of type 'cast' with dtype 'int32'
+    if op.op_type != "cast" or op.dtype.val != "int32":
+        return None
+
+    # 2. check if the op is of type 'floor'
+    op = op.x.op
+    if op.op_type != "floor":
+        return None
+
+    # 3. check if the op is of type 'cast' with dtype 'fp32'
+    op = op.x.op
+    if op.op_type != 'cast' or op.dtype.val != "fp32":
+        return None
+
+    # 4. check if the op is of type 'mul'
+    op = op.x.op
+    if op.op_type != 'mul':
+        return None
+
+    # we have successfully traced back the original scale factor
+    return np.float32(op.y.val)
+
+
+def _try_replace_with_core_upsample(op):
+    """
+    Inputs:
+
+        op (Operation): op.op_type must be either
+            1. `torch_upsample_nearest_neighbor`
+            2. `torch_upsample_bilinear`
+
+    Returns:
+
+        True if op can be represented by the mb.upsample_nearest_neighbor or mb.upsample_bilinear op in SSA.
+        False otherwise.
+    """
+    assert op.op_type in target_ops
+
+    # 2d upsampling
+    if op.op_type in ["torch_upsample_nearest_neighbor", "torch_upsample_bilinear"]:
+        scales_h = _try_get_upsample_factor(op.output_height.op)
+        scales_w = _try_get_upsample_factor(op.output_width.op)
+
+        if scales_h is None or scales_w is None:
+            return False
+
+        old_upsample = op.outputs[0]
+        block = op.enclosing_block
+
+        if op.op_type == "torch_upsample_nearest_neighbor":
+            new_upsample = mb.upsample_nearest_neighbor(
+                x=op.x,
+                scale_factor_height=scales_h,
+                scale_factor_width=scales_w,
+                name=op.name,
+                before_op=op,
+            )
+        elif op.op_type == "torch_upsample_bilinear":
+            new_upsample = mb.upsample_bilinear(
+                x=op.x,
+                scale_factor_height=scales_h,
+                scale_factor_width=scales_w,
+                align_corners=op.align_corners,
+                name=op.name,
+                before_op=op,
+            )
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=old_upsample, new_var=new_upsample)
+        block.remove_ops([op])
+
+    return True
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py
new file mode 100644
index 00000000..f52eb32f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+
+import pytest
+
+import coremltools as ct
+from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND
+
+# Import torch/torchvision only behind the _HAS_TORCH guard, so this module
+# can still be collected when torch is not installed.
+if _HAS_TORCH:
+    import torch
+    import torchvision
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
+class TestPyTorchConverter:
+    @staticmethod
+    def test_no_inputs():
+        model = torchvision.models.mobilenet_v2()
+        model.eval()
+
+        example_input = torch.rand(1, 3, 256, 256)
+
+        traced_model = torch.jit.trace(model, example_input)
+
+        with pytest.raises(ValueError) as e:
+            ct.convert(traced_model)
+        e.match(r'Expected argument for pytorch "inputs" not provided')
+
+
+    @staticmethod
+    def test_pth_extension(tmpdir):
+        # test for issue: https://github.com/apple/coremltools/issues/917
+        class TestModule(torch.nn.Module):
+            def __init__(self):
+                super(TestModule, self).__init__()
+                self.linear = torch.nn.Linear(10, 20)
+
+            def forward(self, x):
+                return self.linear(x)
+
+        model = TestModule()
+        model.eval()
+        example_input = torch.rand(1, 10)
+        traced_model = torch.jit.trace(model, example_input)
+        model_path = os.path.join(str(tmpdir), "torch_model.pth")
+        traced_model.save(model_path)
+
+        ct.convert(
+            model_path,
+            source='pytorch',
+            inputs=[
+                ct.TensorType(
+                    shape=example_input.shape,
+                )
+            ],
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py
new file mode 100644
index 00000000..d8e266c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import pytest
+import torch
+import torch.nn as nn
+
+from coremltools.converters.mil.frontend.torch.ops import _get_inputs
+from coremltools.converters.mil.frontend.torch.ops import \
+    cosine_similarity as cosine_similarity_main
+from coremltools.converters.mil.frontend.torch.torch_op_registry import \
+    _TORCH_OPS_REGISTRY as _TORCH_OPS_REG
+from coremltools.converters.mil.frontend.torch.torch_op_registry import \
+    register_torch_op
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+from .testing_utils import TorchBaseTest, convert_to_mlmodel
+
+# Custom layer imports
+
+
+# Record the converter-supplied cosine_similarity conversion function
+default_cosine_similarity = _TORCH_OPS_REG.get("cosine_similarity", None)
+
+
+@register_torch_op(override=True)
+def cosine_similarity(context, node):
+    cosine_similarity_main(context, node)
+
+
+# Record the custom cosine_similarity conversion function
+custom_cosine_similarity = _TORCH_OPS_REG["cosine_similarity"]
+
+
+def _set_torch_reg_op(op_type, op_func):
+    _TORCH_OPS_REG[op_type] = op_func
+
+
+class TestCompositeOp(TorchBaseTest):
+
+    @pytest.mark.parametrize("input_shape", [(100, 180), (56, 123)])
+    def test_composite_op(self, input_shape):
+        _set_torch_reg_op("cosine_similarity", custom_cosine_similarity)
+        model = nn.CosineSimilarity(dim=1, eps=1e-6)
+        self.run_compare_torch([input_shape, input_shape], model)
+        _set_torch_reg_op("cosine_similarity", default_cosine_similarity)
+
+
+class TestCustomOp:
+    # Define an SSA custom op for sparse matmul.
+    # This will map to `custom_op` in SSA with binding information
+    # that binds the input spec to the custom implementation.
+    @register_op(is_custom_op=True)
+    class custom_torch_sparse_matmul(Operation):
+        # Input spec for this op
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain="T"),
+            y=TensorInputType(type_domain="T"),
+            transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            x_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            y_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        )
+
+        type_domains = {
+            "T": (types.fp16, types.fp32),
+        }
+
+        def default_inputs(self):
+            return DefaultInputs(
+                transpose_x=False,
+                transpose_y=False,
+                x_is_sparse=False,
+                y_is_sparse=False,
+            )
+
+        # Binding information for the custom op: specifies the inputs and
+        # parameters required so the custom op can be synced with the Swift API
+        bindings = {
+            "class_name": "SparseMatMul",
+            "input_order": ["x", "y"],
+            "parameters": ["transpose_x", "transpose_y", "x_is_sparse", "y_is_sparse"],
+            "description": "Custom Sparse MatMul Layer",
+        }
+
+        def type_inference(self):
+            x_type = self.x.dtype
+            x_shape = self.x.shape
+            y_shape = self.y.shape
+            # For illustration purposes, assume the input shapes are valid.
+            # Ideally, the transpose_* and *_is_sparse parameters should be
+            # taken into account when computing the output shape.
+            return types.tensor(x_type, [x_shape[0], y_shape[1]])
+
+    @register_torch_op()
+    def _sparse_mm(context, node):
+        inputs = _get_inputs(context, node, expected=2)
+        x = mb.custom_torch_sparse_matmul(
+            x=inputs[0], y=inputs[1], x_is_sparse=True, y_is_sparse=True, name=node.name
+        )
+        context.add(x)
+
+    def test_custom_sparse_mm_op(self, input_shape=(4, 4)):
+        class TestLayer(nn.Module):
+            def __init__(self):
+                super(TestLayer, self).__init__()
+
+            def forward(self, x, y):
+                x = torch.sparse.mm(x, y)
+                return x
+
+        model = TestLayer()
+        input_data_x = torch.ones(input_shape)
+        input_data_y = torch.ones(input_shape)
+        input_data = [input_data_x, input_data_y]
+        model.eval()
+        torch_model = torch.jit.trace(model, (input_data_x, input_data_y))
+        mlmodel = convert_to_mlmodel(torch_model, input_data)
+
+        layers = mlmodel.get_spec().neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "SparseMatMul" == layers[-1].custom.className
+        ), "Custom layer class name mismatch"
+        assert (
+            not layers[-1].custom.parameters["transpose_x"].boolValue
+        ), "Incorrect parameter value for transpose_x"
+        assert (
+            not layers[-1].custom.parameters["transpose_y"].boolValue
+        ), "Incorrect parameter value for transpose_y"
+        assert (
+            layers[-1].custom.parameters["x_is_sparse"].boolValue
+        ), "Incorrect parameter value for x_is_sparse"
+        assert (
+            layers[-1].custom.parameters["y_is_sparse"].boolValue
+        ), "Incorrect parameter value for y_is_sparse"
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py
new file mode 100644
index 00000000..10a99a5a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import pytest
+
+import coremltools
+from coremltools._deps import (
+    _HAS_TORCH,
+    MSG_TORCH_NOT_FOUND,
+)
+
+if _HAS_TORCH:
+    import torch
+    from torch import nn
+    import torch.nn.functional as F
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
+class TestModelScripting:
+    @staticmethod
+    def test():
+        # Example code from https://coremltools.readme.io/docs/model-scripting
+
+        class _LoopBody(nn.Module):
+            def __init__(self, channels):
+                super(_LoopBody, self).__init__()
+                conv = nn.Conv2d(
+                    in_channels=channels,
+                    out_channels=channels,
+                    kernel_size=3,
+                    padding=1,
+                )
+                self.conv = conv
+
+            def forward(self, x):
+                x = self.conv(x)
+                x = F.relu(x)
+                return x
+
+        class ControlFlowNet(nn.Module):
+            def __init__(self, num_channels: int):
+                super(ControlFlowNet, self).__init__()
+                self.loop_body = _LoopBody(num_channels)
+
+            def forward(self, x):
+                avg = torch.mean(x)
+                if avg.item() < 0:
+                    loop_count = 2
+                else:
+                    loop_count = 1
+                for _ in range(loop_count):
+                    x = self.loop_body(x)
+                return x
+
+        model = ControlFlowNet(num_channels=3)
+        scripted_model = torch.jit.script(model)
+
+        mlmodel = coremltools.converters.convert(
+            scripted_model,
+            inputs=[coremltools.TensorType(shape=(1, 3, 64, 64))],
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py
new file mode 100644
index 00000000..d2c3fdb8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py
@@ -0,0 +1,1804 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+torch = pytest.importorskip("torch")
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, get_new_symbol, types
+from coremltools.converters.mil.testing_utils import random_gen
+
+from .. import ops
+from ..converter import TranscriptionContext
+from ..internal_graph import InternalTorchIRNode
+
+
+class TestTorchOps:
+    """Class containing tests for converting TorchIR -> CoreML ops.
+
+    These tests interface with only the InternalTorchIRGraph and do not
+    build a torch module, so they are much faster than the numerical tests.
+    However, for some ops it is necessary to use the torch module to verify
+    numerical output, so those are placed in the numerical tests.
+
+    NOTE: Confused where @context is coming from? It's from the pytest fixture defined below.
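+
+    Each test hand-builds one or more InternalTorchIRNode objects, invokes the
+    conversion function under test directly, and checks the SSA Var it produces.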
+ """ + + @pytest.fixture + def context(self): + return TranscriptionContext() + + @pytest.fixture + def set_random_seeds(self): + torch.manual_seed(1) + np.random.seed(1) + + @pytest.mark.parametrize("dtype", [torch.bool, torch.float, torch.int]) + def test_constant(self, context, dtype): + test_data = torch.ones(1, dtype=dtype) + node = InternalTorchIRNode( + attr={"value": test_data}, kind="constant", inputs=[], outputs=["1"] + ) + ssa = self._construct_test_graph(context, ops.constant, node, "1") + assert np.allclose(test_data, ssa.val) + assert test_data.shape == ssa.shape + + def test_constant_magic(self, context): + test_val = ops.PYTORCH_DEFAULT_VALUE + node = InternalTorchIRNode( + attr={"value": test_val}, kind="constant", inputs=[], outputs=["1"] + ) + ssa = self._construct_test_graph(context, ops.constant, node, "1") + # We expect the magic default to get converted to None + assert ssa is None + + @staticmethod + def _gen_constants(size, vals): + """Helper function. Generates a list of internal constant nodes. + + Arguments: + size: number of constants to generate + vals: Either a list of values for each constant or one value used for all constants.""" + is_list = isinstance(vals, list) + if is_list: + if len(vals) != size: + raise ValueError("len(@vals): {} != size: {}".format(len(vals), size)) + constants = [] + for index in range(size): + if is_list: + val = vals[index] + else: + val = vals + constants.append( + InternalTorchIRNode( + attr={"value": val}, + kind="constant", + inputs=[], + outputs=[str(index)], + ) + ) + input_list = [str(i) for i in range(size)] + output_name = str(len(input_list)) + return constants, input_list, output_name + + @staticmethod + def _construct_test_graph( + context, test_op, test_node, output_name=None, graph_inputs=None, constants=None + ): + """ Construct an Function for the given @graph_inputs, @constants, + and @test_node. Returns the output of the graph, which is the ssa + Var of the given @output_name. 
+ """ + if graph_inputs is None: + graph_inputs = {} + if constants is None: + constants = [] + + with Function(inputs=graph_inputs) as ssa_func: + for name in ssa_func.inputs.keys(): + context.add(ssa_func.inputs[name]) + for node in constants: + ops.constant(context, node) + test_op(context, test_node) + + ssa = None + if output_name: + ssa = context[output_name] + return ssa + + def _test_elementwise_binary( + self, context, op_name, op, test_input, num_constants, expected_result + ): + """Helper function, runs op on test input and compares against expected result""" + constants, input_list, output_name = self._gen_constants( + num_constants, test_input + ) + eb_node = InternalTorchIRNode( + kind=op_name, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, op, eb_node, output_name, constants=constants + ) + np.testing.assert_allclose(expected_result, ssa.val, atol=1e-6) + + def _test_cast(self, context, test_val, op_kind, op_func, python_type): + constants, input_list, output_name = self._gen_constants(1, [test_val]) + node = InternalTorchIRNode( + kind=op_kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, op_func, node, output_name, constants=constants + ) + assert ssa.val == python_type(test_val) + + def test_add(self, context): + test_input_1 = np.random.rand(2, 3) + test_input_2 = np.random.rand(2, 3) + scale_factor = 1 + self._test_elementwise_binary( + context, + "Add", + ops.add, + [test_input_1, test_input_2, scale_factor], + 3, + test_input_1 + test_input_2, + ) + + def test_add_no_scale_factor(self, context): + test_input_1 = np.random.rand(2, 3) + test_input_2 = np.random.rand(2, 3) + self._test_elementwise_binary( + context, + "Add", + ops.add, + [test_input_1, test_input_2], + 2, + test_input_1 + test_input_2, + ) + + @pytest.mark.parametrize( + "test_input_1, test_input_2", + [(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5), ], + ) + def test_sub(self, context, test_input_1, test_input_2): + scale_factor = 1 + self._test_elementwise_binary( + context, + "Sub", + ops.sub, + [test_input_1, test_input_2, scale_factor], + 3, + test_input_1 - test_input_2, + ) + + @pytest.mark.parametrize( + "test_input_1, test_input_2", + [(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5), ], + ) + def test_rsub(self, context, test_input_1, test_input_2): + scale_factor = 1 + self._test_elementwise_binary( + context, + "rsub", + ops.sub, + [test_input_1, test_input_2, scale_factor], + 3, + # Note the reversal of arg ordering relative to 'sub' + test_input_2 - test_input_1, + ) + + def test_mul(self, context): + test_input_1 = np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Mul", + ops.mul, + [test_input_1, test_input_2], + 2, + test_input_1 * test_input_2, + ) + + def test_div(self, context): + test_input_1 = np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Div", + ops.div, + [test_input_1, test_input_2], + 2, + np.divide(test_input_1, test_input_2), + ) + + def test_floor_divide(self, context): + test_input_1 = np.random.randint(low=1, high=100, size=(3, 2)) + test_input_2 = np.random.randint(low=1, high=100, size=(3, 2)) + self._test_elementwise_binary( + context, + "floor_divide", + ops.floor_divide, + [test_input_1, test_input_2], + 2, + np.floor_divide(test_input_1, test_input_2), + ) + + def test_pow(self, context): + test_input_1 = 
np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Pow", + ops.pow, + [test_input_1, test_input_2], + 2, + np.power(test_input_1, test_input_2), + ) + + def test_eq(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 == test_input_2).float() + + self._test_elementwise_binary( + context, "Eq", ops.eq, [test_input_1, test_input_2], 2, expected_output + ) + + def test_ne(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 != test_input_2).float() + + self._test_elementwise_binary( + context, "ne", ops.ne, [test_input_1, test_input_2], 2, expected_output + ) + + def test_le(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 <= test_input_2).float() + + self._test_elementwise_binary( + context, "Le", ops.le, [test_input_1, test_input_2], 2, expected_output + ) + + def test_lt(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 < test_input_2).float() + + self._test_elementwise_binary( + context, "Lt", ops.lt, [test_input_1, test_input_2], 2, expected_output + ) + + def test_ge(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 >= test_input_2).float() + + self._test_elementwise_binary( + context, "Ge", ops.ge, [test_input_1, test_input_2], 2, expected_output + ) + + def test_gt(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 > test_input_2).float() + + self._test_elementwise_binary( + context, "Gt", ops.gt, [test_input_1, test_input_2], 2, expected_output + ) + + @pytest.mark.parametrize( + "size, array_type", + itertools.product( + [1, 5, 7], + [ + ("ListConstruct", ops.listconstruct), + ("TupleConstruct", ops.tupleconstruct), + ], + ), + ) + def test_arrayconstruct_scalars(self, context, size, array_type): + constant_vals = list(range(size)) + array_kind = array_type[0] + array_op = array_type[1] + constants, input_list, output_name = self._gen_constants(size, constant_vals) + ac_node = InternalTorchIRNode( + kind=array_kind, inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, array_op, ac_node, output_name, constants=constants + ) + expected_val = np.arange(size) + np.testing.assert_equal(ssa.shape, (size,)) + np.testing.assert_array_equal(ssa.val, expected_val) + + @pytest.mark.parametrize( + "shape1, shape2, array_type", + itertools.product( + [(1, 2), (3, 4, 5), (2,)], + [(2, 1), (1, 4, 5), (3,)], + [ + ("ListConstruct", ops.listconstruct), + ("TupleConstruct", ops.tupleconstruct), + ], + ), + ) + def test_arrayconstruct_nonscalar(self, context, shape1, shape2, array_type): + tensor1 = torch.rand(shape1) + tensor2 = torch.rand(shape2) + array_kind = array_type[0] + array_op = array_type[1] + constants, input_list, output_name = self._gen_constants(2, [tensor1, tensor2]) 
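+        # Build the ListConstruct/TupleConstruct node whose inputs are the two
+        # tensor constants generated above.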
+ ac_node = InternalTorchIRNode( + kind=array_kind, inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, array_op, ac_node, output_name, constants=constants + ) + expected_val = (tensor1.numpy(), tensor2.numpy()) + np.testing.assert_equal(len(ssa), 2) + for x, y in zip(ssa, expected_val): + np.testing.assert_allclose(x.val, y) + + @pytest.mark.parametrize( + "input_shape, dim0, dim1", + [ + x + for x in itertools.product( + [(1, 2, 3), (1, 2, 3, 4), (1, 2, 3, 4, 5)], [0, 1, -1], [0, 2, -2], + ) + ] + + [((1, 2), None, None)], + ) + def test_transpose(self, context, input_shape, dim0, dim1): + test_input = torch.rand(input_shape) + + constant_list = [test_input] + if len(input_shape) > 2: + constant_list += [dim0, dim1] + kind = "transpose" + expected_result = torch.transpose(test_input, dim0, dim1) + else: + kind = "t" + expected_result = test_input.t() + + constants, input_list, output_name = self._gen_constants( + len(constant_list), constant_list + ) + transpose_node = InternalTorchIRNode( + kind=kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.transpose, transpose_node, output_name, constants=constants, + ) + np.testing.assert_array_equal(expected_result.shape, ssa.shape) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "dim1, dim2, dim3", itertools.product([1, 2, 5], [2, 5, 10], [1, 2, 5]), + ) + def test_matmul(self, context, dim1, dim2, dim3): + mat1 = torch.rand((dim1, dim2)) + mat2 = torch.rand((dim2, dim3)) + constant_vals = [ + mat1, + mat2, + ] + constants, input_list, output_name = self._gen_constants(2, constant_vals) + + matmul_node = InternalTorchIRNode( + kind="matmul", inputs=input_list, outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops.matmul, matmul_node, output_name, constants=constants + ) + expected_result = torch.matmul(mat1, mat2).detach().numpy() + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, axis, expected_shape", + [ + ((1, 2), None, (2,)), + ((1, 2), 0, (2,)), + ((1, 2, 1), None, (2,)), + ((1, 2, 1, 1), None, (2,)), + ((1, 2, 1, 1), 2, (1, 2, 1)), + ((1, 2, 1, 1, 1), None, (2,)), + ], + ) + def test_squeeze(self, context, input_shape, axis, expected_shape): + test_data = torch.rand(input_shape) + if axis is None: + constants, input_list, output_name = self._gen_constants(1, test_data) + else: + constants, input_list, output_name = self._gen_constants( + 2, [test_data, axis] + ) + squeeze_node = InternalTorchIRNode( + kind="Squeeze", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.squeeze, squeeze_node, output_name, constants=constants + ) + if axis is None: + expected_result = torch.squeeze(test_data) + else: + expected_result = torch.squeeze(test_data, axis) + assert np.allclose(expected_result, ssa.val) + assert expected_result.size() == torch.Size(expected_shape) + + @pytest.mark.parametrize( + "input_shape, axis, expected_shape", + [ + ((2,), 0, (1, 2)), + ((2,), 1, (2, 1)), + ((2,), -1, (2, 1)), + ((2, 3), 1, (2, 1, 3)), + ], + ) + def test_unsqueeze(self, context, input_shape, axis, expected_shape): + test_data = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants(2, [test_data, axis]) + unsqueeze_node = InternalTorchIRNode( + kind="Unsqueeze", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.unsqueeze, unsqueeze_node, 
output_name, constants=constants + ) + expected_result = torch.unsqueeze(test_data, axis) + assert np.allclose(expected_result, ssa.val) + assert expected_result.size() == torch.Size(expected_shape) + + @pytest.mark.parametrize( + "input_shape, start, end", + [ + ((2, 1, 1, 2), 1, 3), + ((2, 2, 1, 1), 1, -2), + ((1, 1, 1), 0, 2), + ((1, 2), 0, 1), + ((1, 2), 1, 1), + ((1, 1), 1, -1), + ((1,), 0, 0), + ], + ) + def test_flatten(self, context, input_shape, start, end): + test_data = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants( + 3, [test_data, start, end] + ) + flatten_node = InternalTorchIRNode( + kind="Flatten", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.flatten, flatten_node, output_name, constants=constants + ) + expected_result = torch.flatten(test_data, start, end) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "start, end", [(0, -5), (100, 2), (2, 100), (-3, -4),], + ) + def test_flatten_exception(self, context, start, end): + test_data = torch.rand(1, 1, 1, 1) + constants, input_list, output_name = self._gen_constants( + 3, [test_data, start, end] + ) + flatten_node = InternalTorchIRNode( + kind="Flatten", inputs=input_list, outputs=[output_name] + ) + with pytest.raises(ValueError): + self._construct_test_graph( + context, ops.flatten, flatten_node, output_name, constants=constants, + ) + + @pytest.mark.parametrize( + "input_shape", [(2, 3), (2, 3, 4), (2, 3, 4, 5), (2, 3, 4, 5, 6),], + ) + def test_permute(self, context, input_shape): + test_data = torch.rand(*input_shape) + permutation = list(range(len(input_shape))) + np.random.shuffle(permutation) + constants, input_list, output_name = self._gen_constants( + 2, [test_data, permutation] + ) + permute_node = InternalTorchIRNode( + kind="Permute", inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, ops.permute, permute_node, output_name, constants=constants + ) + expected_result = test_data.permute(*permutation) + assert expected_result.shape == ssa.shape + + @pytest.mark.parametrize( + "in_features, out_features, scaling", + itertools.product([10, 25, 100], [3, 6], [1.0, 0.5]), + ) + def test_addmm(self, context, in_features, out_features, scaling): + input_data = torch.rand((1, in_features)) + weight_data = torch.rand((in_features, out_features)) + bias_data = torch.rand((out_features)) + constant_vals = [ + scaling, + input_data, + weight_data, + bias_data, + ] + constants, _, output_name = self._gen_constants(4, constant_vals) + + addmm_node = InternalTorchIRNode( + kind="addmm", inputs=["3", "1", "2", "0", "0"], outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops.addmm, addmm_node, output_name, constants=constants + ) + torch_linear = nn.Linear(in_features=in_features, out_features=out_features,) + expected_shape = tuple(torch_linear(input_data).shape) + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "height, width, kernel_size, stride, padding, dilation", + itertools.product([5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3]), + ) + def test_convolution2d( + self, + context, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups=1, + in_channels=1, + out_channels=2, + ): + test_input = torch.rand(1, in_channels, height, width) + constant_vals = [ + 1, # None argument + test_input, + np.random.rand( + out_channels, in_channels, kernel_size, kernel_size + ), # weights + np.random.rand(out_channels), # 
bias + np.array([stride, stride]), + np.array([padding, padding]), + np.array([dilation, dilation]), + False, # transposed + np.array([0, 0]), # output_pad + groups, + ] + constants, _, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + # For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's + # `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing. + # https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544 + conv_node = InternalTorchIRNode( + kind="_convolution", + inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"], + outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops._convolution, conv_node, output_name, constants=constants + ) + torch_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + expected_shape = tuple(torch_conv(test_input).shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "depth, height, width, kernel_size, stride, padding, dilation, groups", + itertools.product( + [5, 5], + [5, 6], + [5, 7], + [1, 3], + [(1, 1, 1), (3, 2, 1)], + [(1, 1, 1), (1, 3, 2)], + [(1, 1, 1), (1, 2, 3)], + [ + 1, + -1, + ], # -1 groups indicates it should be set to the number of input channels for depthwise convolution + ), + ) + def test_convolution3d( + self, + context, + depth, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups, + in_channels=2, + out_channels=4, + ): + if groups == -1: + groups = in_channels + test_input = torch.rand(1, in_channels, depth, height, width) + constant_vals = [ + 1, # None argument + test_input, + np.random.rand( + out_channels, + in_channels // groups, + kernel_size, + kernel_size, + kernel_size, + ), # weights + np.random.rand(out_channels), # bias + # PyTorch's Conv3d accepts either an int (for all dimensions) or a 3-tuple of ints (one per dimension) + np.array([stride[0], stride[1], stride[2]]), + np.array([padding[0], padding[1], padding[2]]), + np.array([dilation[0], dilation[1], dilation[2]]), + False, # transposed + np.array([0, 0, 0]), # out_pad + groups, + ] + constants, _, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + # For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's + # `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing. 
+ # https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544 + conv_node = InternalTorchIRNode( + kind="_convolution", + inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"], + outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops._convolution, conv_node, output_name, constants=constants + ) + torch_conv = nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + expected_result = torch_conv(test_input) + expected_shape = tuple(expected_result.shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "height, width, kernel_size, stride, padding, dilation", + itertools.product([5, 6], [5, 7], [1, 3], [2, 3], [0, 1], [1, 3]), + ) + def test_convolution_transpose2d( + self, + context, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups=1, + in_channels=1, + out_channels=2, + ): + test_input = torch.rand(1, in_channels, height, width) + + constant_vals = [ + np.random.rand( + in_channels, out_channels, kernel_size, kernel_size + ), # weights + np.random.rand(out_channels), # bias + np.array([stride, stride]), + np.array([padding, padding]), + np.array([dilation, dilation]), + True, # transposed, + np.array([0, 0]), # output_pad + groups, + False, + False, + False, + ] + graph_inputs = {"input": mb.placeholder(test_input.shape, dtype=types.float)} + + constants, input_list, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + conv_node = InternalTorchIRNode( + kind="_convolution", inputs=["input"] + input_list, outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, + ops._convolution, + conv_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + torch_conv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + expected_shape = tuple(torch_conv(test_input).shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product([(3, 20, 20), (1, 50, 50)], [0, 1, 2, [0, 2]], [True, False]), + ) + def test_mean(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + mean_node = InternalTorchIRNode( + kind="mean", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, mean_node, output_name, constants=constants + ) + expected_result = torch.mean(test_input, dim, keepdim) + assert np.allclose(expected_result, ssa.val) + + def test_mean_no_dims(self, context): + test_input = torch.rand((3, 20, 20)) + + constants, input_list, output_name = self._gen_constants(2, [test_input, None]) + mean_node = InternalTorchIRNode( + kind="mean", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, mean_node, output_name, constants=constants + ) + expected_result = torch.mean(test_input) + assert np.allclose(expected_result, ssa.val) + + def test_embedding(self, context): + EMBEDDING_DIMENSION = 10 + NUM_EMBEDDINGS = 20 + input_shape = (NUM_EMBEDDINGS, EMBEDDING_DIMENSION) + # size is arbitrary for indices + indices = 
np.random.randint(NUM_EMBEDDINGS, size=100) + test_input = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants( + 2, [test_input, indices] + ) + gather_node = InternalTorchIRNode( + kind="embedding", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.embedding, gather_node, output_name, constants=constants + ) + torch_embedding = nn.Embedding.from_pretrained(test_input) + expected_result = torch_embedding(torch.LongTensor(indices)) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "dim", [0, 1, 2, 3, 4], + ) + def test_size(self, context, dim): + test_input = torch.rand(1, 2, 3, 4, 5) + + graph_inputs = {"input": mb.placeholder(test_input.shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants(1, [dim]) + size_node = InternalTorchIRNode( + kind="size", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.size, + size_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + expected_result = test_input.shape[dim] + assert expected_result == ssa.val + + @pytest.mark.parametrize( + "dim", [0, 1], + ) + def test_size_symbolic(self, context, dim): + test_shape = (3, get_new_symbol()) + graph_inputs = {"input": mb.placeholder(shape=test_shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants(1, [dim]) + size_node = InternalTorchIRNode( + kind="size", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.size, + size_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + expected_result = test_shape[dim] + assert expected_result == ssa.sym_val + + @pytest.mark.parametrize( + "input_size, shape", + itertools.product([(5, 12), (1, 4, 15), (3, 5, 4)], [(3, 20), (-1, 6), (60,)],), + ) + def test_view(self, context, input_size, shape): + test_input = torch.rand(input_size) + + constants, input_list, output_name = self._gen_constants(2, [test_input, shape]) + view_node = InternalTorchIRNode( + kind="view", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.view, view_node, output_name, constants=constants + ) + expected_result = test_input.view(shape) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, output_shape", + itertools.product( + [(1, 3, 15, 15), (1, 1, 2, 2), (1, 3, 10, 10)], [(1, 1), (2, 2), (2, 1)], + ), + ) + def test_adaptive_avg_pool2d(self, context, input_shape, output_shape): + test_input = torch.rand(input_shape) + + constants, input_list, output_name = self._gen_constants( + 2, [test_input, output_shape] + ) + + adaptive_avg_pool2d_node = InternalTorchIRNode( + kind="adaptive_avg_pool2d", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.adaptive_avg_pool2d, + adaptive_avg_pool2d_node, + output_name, + constants=constants, + ) + expected_result = torch._adaptive_avg_pool2d(test_input, output_shape) + expected_shape = tuple(expected_result.shape) + assert expected_shape == ssa.shape + # We only expect numerical output when reducing to global average. + if output_shape == (1, 1): + assert np.allclose(expected_result, ssa.val) + + def test_adaptive_avg_pool2d_exception(self, context): + # For this test, the input tensor HW channels are dynamic. 
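+        # With symbolic H and W, a fixed pooling kernel cannot be derived for a
+        # non-(1, 1) output size, so conversion is expected to raise.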
+        input_shape = [1, 3, get_new_symbol(), get_new_symbol()]
+        graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)}
+        constants, input_list, output_name = self._gen_constants(1, [(2, 1)])
+        adaptive_avg_pool2d_node = InternalTorchIRNode(
+            kind="adaptive_avg_pool2d",
+            inputs=["input"] + input_list,
+            outputs=[output_name],
+        )
+        with pytest.raises(ValueError):
+            self._construct_test_graph(
+                context,
+                ops.adaptive_avg_pool2d,
+                adaptive_avg_pool2d_node,
+                output_name,
+                constants=constants,
+                graph_inputs=graph_inputs,
+            )
+
+    @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)])
+    def test_batch_norm(self, context, input_shape):
+        test_input = torch.rand(input_shape)
+        channels = input_shape[1]
+        constants, input_list, output_name = self._gen_constants(
+            9,
+            [
+                torch.rand(input_shape),  # input
+                torch.rand(channels),  # weight
+                torch.rand(channels),  # bias
+                torch.rand(channels),  # running mean
+                torch.rand(channels),  # running var
+                0,  # training
+                0.1,  # momentum
+                1e-6,  # eps
+                1,  # cudnn_enabled
+            ],
+        )
+
+        batch_norm_node = InternalTorchIRNode(
+            kind="batch_norm", inputs=input_list, outputs=[output_name]
+        )
+        ssa = self._construct_test_graph(
+            context, ops.batch_norm, batch_norm_node, output_name, constants=constants
+        )
+        assert ssa.val is None
+        assert ssa.shape == tuple(test_input.shape)
+
+    @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)])
+    def test_instance_norm(self, context, input_shape):
+        test_input = torch.rand(input_shape)
+        channels = input_shape[1]
+        constants, input_list, output_name = self._gen_constants(
+            9,
+            [
+                torch.rand(input_shape),  # input
+                torch.rand(channels),  # weight
+                torch.rand(channels),  # bias
+                torch.rand(channels),  # running mean
+                torch.rand(channels),  # running var
+                0,  # training
+                0.1,  # momentum
+                1e-6,  # eps
+                1,  # cudnn_enabled
+            ],
+        )
+
+        instance_norm_node = InternalTorchIRNode(
+            kind="instance_norm", inputs=input_list, outputs=[output_name]
+        )
+        ssa = self._construct_test_graph(
+            context, ops.instance_norm, instance_norm_node, output_name, constants=constants
+        )
+        assert ssa.val is None
+        assert ssa.shape == tuple(test_input.shape)
+
+    @pytest.mark.parametrize("axis", [1, 2, 3])
+    def test_cat(self, context, axis):
+        input_shape = (1, 3, 240, 320)
+
+        test_input1 = torch.rand(input_shape)
+        test_input2 = torch.rand(input_shape)
+        const_input = torch.rand(input_shape)
+
+        graph_inputs = {
+            "input1": mb.placeholder(input_shape, dtype=types.float),
+            "input2": mb.placeholder(input_shape, dtype=types.float),
+        }
+        dim_node = InternalTorchIRNode(
+            attr={"value": axis}, kind="constant", inputs=[], outputs=["0"],
+        )
+        const_tensor_node = InternalTorchIRNode(
+            attr={"value": const_input.numpy()},
+            kind="constant",
+            inputs=[],
+            outputs=["1"],
+        )
+        listconstruct_node = InternalTorchIRNode(
+            kind="listconstruct", inputs=["1", "input1", "input2"], outputs=["2"]
+        )
+        cat_node = InternalTorchIRNode(
+            kind="cat", inputs=["2", "0"], outputs=["output"]
+        )
+
+        with Function(inputs=graph_inputs) as ssa_func:
+            context.add(ssa_func.inputs["input1"])
+            context.add(ssa_func.inputs["input2"])
+            ops.constant(context, dim_node)
+            ops.constant(context, const_tensor_node)
+            ops.listconstruct(context, listconstruct_node)
+            ops.cat(context, cat_node)
+
+        ssa = context["output"]
+        expected_result = torch.cat(
+            (const_input, test_input1, test_input2), dim=axis
+        ).numpy()
+        assert np.allclose(expected_result.shape, ssa.shape)
+
+    @pytest.mark.parametrize("axis", [0, 1, 2, 3, 4])
+    def 
test_stack(self, context, axis): + input_shape = (1, 3, 240, 320) + + test_input1 = torch.rand(input_shape) + test_input2 = torch.rand(input_shape) + const_input = torch.rand(input_shape) + + graph_inputs = { + "input1": mb.placeholder(input_shape, dtype=types.float), + "input2": mb.placeholder(input_shape, dtype=types.float), + } + dim_node = InternalTorchIRNode( + attr={"value": axis}, kind="constant", inputs=[], outputs=["0"], + ) + const_tensor_node = InternalTorchIRNode( + attr={"value": const_input.numpy()}, + kind="constant", + inputs=[], + outputs=["1"], + ) + listconstruct_node = InternalTorchIRNode( + kind="listconstruct", inputs=["1", "input1", "input2"], outputs=["2"] + ) + stack_node = InternalTorchIRNode( + kind="stack", inputs=["2", "0"], outputs=["output"] + ) + + with Function(inputs=graph_inputs) as ssa_func: + context.add(ssa_func.inputs["input1"]) + context.add(ssa_func.inputs["input2"]) + ops.constant(context, dim_node) + ops.constant(context, const_tensor_node) + ops.listconstruct(context, listconstruct_node) + ops.stack(context, stack_node) + + ssa = context["output"] + expected_result = np.stack((const_input, test_input1, test_input2), axis=axis) + assert np.allclose(expected_result.shape, ssa.shape) + + def test_item(self, context): + const_val = 0 + constants, input_list, output_name = self._gen_constants(1, [const_val]) + item_node = InternalTorchIRNode( + kind="item", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.item, item_node, output_name, constants=constants + ) + assert ssa.val == const_val + + def test_item_exception(self, context): + const_val = [0, 1] + constants, input_list, output_name = self._gen_constants(1, [const_val]) + item_node = InternalTorchIRNode( + kind="item", inputs=input_list, outputs=[output_name] + ) + with pytest.raises(ValueError): + self._construct_test_graph( + context, ops.item, item_node, output_name, constants=constants, + ) + + @pytest.mark.parametrize("test_val", [1, 1.5, False]) + def test_bool(self, context, test_val): + self._test_cast(context, test_val, "bool", ops._bool, bool) + + @pytest.mark.parametrize("test_val", [1, 1.5, -0.3]) + def test_int(self, context, test_val): + self._test_cast(context, test_val, "int", ops._int, int) + + @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)]) + def test_layer_norm(self, context, input_shape): + graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants( + 5, + [ + input_shape, # normalized shape + torch.rand(*input_shape), # weight + torch.rand(*input_shape), # running bias + 1e-6, + 1, # cudnn enabled + ], + ) + + layer_norm_node = InternalTorchIRNode( + kind="layer_norm", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.layer_norm, + layer_norm_node, + output_name, + graph_inputs=graph_inputs, + constants=constants, + ) + assert ssa.val is None + assert ssa.shape == input_shape + + @pytest.mark.parametrize("shape", [(1, 2), (2, 3, 4, 5), (3, 4, 5),]) + def test_ones(self, context, shape): + constants, constant_input_list, output_name = self._gen_constants( + 6, [shape, 1, 1, 1, 1, 1] + ) + ones_node = InternalTorchIRNode( + kind="ones", inputs=constant_input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, ops.ones, ones_node, output_name, constants=constants, + ) + assert ssa.shape == shape + + @pytest.mark.parametrize("input_shape", [(1, 2), (2, 
3, 4, 5), (3, 4, 5),])
+    def test_ones_like(self, context, input_shape):
+        graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)}
+        constants, constant_input_list, output_name = self._gen_constants(5, 1)
+        ones_node = InternalTorchIRNode(
+            kind="ones_like",
+            inputs=["input"] + constant_input_list,
+            outputs=[output_name],
+        )
+        ssa = self._construct_test_graph(
+            context,
+            ops.ones_like,
+            ones_node,
+            output_name,
+            graph_inputs=graph_inputs,
+            constants=constants,
+        )
+        assert ssa.shape == input_shape
+
+    @pytest.mark.parametrize(
+        "input_size, dim, index",
+        itertools.product(
+            [(13, 43, 10), (39, 14, 11, 9)], [0, 1, 2], [0, 1, 3, 8, -1],
+        ),
+    )
+    def test_select(self, context, input_size, dim, index):
+        graph_inputs = {"input1": mb.placeholder(input_size, dtype=types.float)}
+        constants, constant_input_list, output_name = self._gen_constants(
+            2, [dim, index]
+        )
+        select_node = InternalTorchIRNode(
+            kind="select",
+            inputs=["input1"] + constant_input_list,
+            outputs=[output_name],
+        )
+        ssa = self._construct_test_graph(
+            context,
+            ops.select,
+            select_node,
+            output_name,
+            graph_inputs=graph_inputs,
+            constants=constants,
+        )
+        select_index = index
+        if index < 0:
+            select_index += input_size[dim]
+        expected_shape = tuple(
+            torch.rand(input_size)
+            .index_select(dim, torch.tensor([select_index]))
+            .squeeze(dim)
+            .shape
+        )
+        assert np.allclose(ssa.shape, expected_shape)
+
+    @pytest.mark.parametrize(
+        "dynamic, test_tuple", itertools.product([True, False], [True, False])
+    )
+    def test_tuple_and_list_unpack(self, context, dynamic, test_tuple):
+        """
+        If @dynamic is True, packs up a dynamic input.
+        If @test_tuple is True, tests TupleUnpack; otherwise tests ListUnpack.
+        """
+        if test_tuple:
+            construct_op = ops.tupleconstruct
+            construct_name = "TupleConstruct"
+            unpack_name = "TupleUnpack"
+        else:
+            construct_op = ops.listconstruct
+            construct_name = "ListConstruct"
+            unpack_name = "ListUnpack"
+
+        input_shape = (1, 2, 3)
+        constant_vals = [str(i) for i in range(1, 6)]
+        constants_unpacked = [str(i) for i in range(6, 11)]
+        constants, input_list, _ = self._gen_constants(5, constant_vals)
+        output_list = constants_unpacked[:]
+        graph_inputs = {}
+        if dynamic:
+            graph_input_name = "input1"
+            graph_inputs = {
+                graph_input_name: mb.placeholder(input_shape, dtype=types.float)
+            }
+            input_list += [graph_input_name]
+            output_list += [graph_input_name + "_out"]
+
+        construct_node = InternalTorchIRNode(
+            kind=construct_name, inputs=input_list, outputs=["construct"],
+        )
+        unpack_node = InternalTorchIRNode(
+            kind=unpack_name, inputs=["construct"], outputs=output_list
+        )
+        with Function(inputs=graph_inputs) as ssa_func:
+            if dynamic:
+                context.add(ssa_func.inputs["input1"])
+            for node in constants:
+                ops.constant(context, node)
+            construct_op(context, construct_node)
+            ops.tupleunpack(context, unpack_node)
+
+        ssa_constants = []
+        for name in constants_unpacked:
+            ssa_constants.append(context[name].val)
+        assert ssa_constants == constant_vals
+
+        if dynamic:
+            ssa_dynamic = context[graph_input_name + "_out"]
+            assert ssa_dynamic.val is None
+            assert ssa_dynamic.shape == input_shape
+
+    def _test_pool(
+        self, context, test_input, param_list, op_kind, op_func, expected_result
+    ):
+        constants, input_list, output_name = self._gen_constants(
+            len(param_list) + 1, [test_input] + param_list,
+        )
+
+        pool_node = InternalTorchIRNode(
+            kind=op_kind, inputs=input_list, outputs=[output_name]
+        )
+        ssa = self._construct_test_graph(
+            context, op_func, pool_node, 
output_name, constants=constants, + ) + expected_shape = tuple(expected_result.shape) + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, include_pad, ceil_mode", + itertools.product( + [(1, 3, 15), (1, 1, 7), (1, 3, 10)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + [False, True], + ), + ) + def test_avg_pool1d( + self, context, input_shape, kernel_size, stride, pad, include_pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.avg_pool1d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self._test_pool( + context, + test_input, + [[kernel_size], [stride], [pad], ceil_mode, not include_pad], + "avg_pool1d", + ops.avg_pool1d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, include_pad, ceil_mode", + itertools.product( + [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + [False, True], + ), + ) + def test_avg_pool2d( + self, context, input_shape, kernel_size, stride, pad, include_pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.avg_pool2d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self._test_pool( + context, + test_input, + [ + [kernel_size, kernel_size], + [stride, stride], + [pad, pad], + ceil_mode, + not include_pad, + None, + ], + "avg_pool2d", + ops.avg_pool2d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, ceil_mode", + itertools.product( + [(1, 3, 15), (1, 1, 7), (1, 3, 10)], [1, 3], [1, 2], [0, 1], [False, True] + ), + ) + def test_max_pool1d( + self, context, input_shape, kernel_size, stride, pad, ceil_mode + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.max_pool1d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + ) + self._test_pool( + context, + test_input, + [[kernel_size], [stride], [pad], [1], ceil_mode], + "max_pool1d", + ops.max_pool1d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, ceil_mode", + itertools.product( + [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)], + [1, 3], + [1, 2], + [0, 1], + [False, True], + ), + ) + def test_max_pool2d( + self, context, input_shape, kernel_size, stride, pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. 
rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.max_pool2d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + ) + self._test_pool( + context, + test_input, + [ + [kernel_size, kernel_size], + [stride, stride], + [pad, pad], + [1, 1,], # dilation + ceil_mode, + ], + "max_pool2d", + ops.max_pool2d, + expected_result, + ) + + @pytest.mark.parametrize( + "dim, start, end, step", + itertools.product([0, 1, 2], [0, 1, 2], [3, 4, 5, None], [1, 2]), + ) + def test_slice(self, context, dim, start, end, step): + test_input = torch.rand(5, 5, 5) + constants, input_list, output_name = self._gen_constants( + 5, [test_input, dim, start, end, step] + ) + node = InternalTorchIRNode( + kind="slice", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops._slice, node, output_name, constants=constants + ) + if end is None: + end = test_input.shape[dim] + expected_result = test_input.index_select( + dim, torch.LongTensor(range(start, end, step)) + ) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "split_sizes, dim, make_explicit", + itertools.product([2, 3], [0, 1, 2], [True, False]), + ) + def test_split(self, context, split_sizes, dim, make_explicit): + test_input = torch.rand(3, 4, 5) + if make_explicit: + # Explicitly provide the size of each split. This will be two + # splits, the given size and the remainder. + split_sizes = [split_sizes, test_input.shape[dim] - split_sizes] + constants, input_list, output_name = self._gen_constants( + 3, [test_input, split_sizes, dim] + ) + node = InternalTorchIRNode( + kind="split", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.split, node, output_name, constants=constants + ) + expected_result = torch.split(test_input, split_sizes, dim) + if not isinstance(ssa, list): + ssa = [ssa] + + for ex_res, ssa_res in zip(expected_result, ssa): + np.testing.assert_allclose(ex_res.numpy(), ssa_res.val, atol=1e-6) + + def test_floor(self, context): + test_input = torch.rand(1, 2, 3) * 10 + constants, input_list, output_name = self._gen_constants(1, test_input) + floor_node = InternalTorchIRNode( + kind="floor", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.floor, floor_node, output_name, constants=constants, + ) + expected_result = test_input.floor() + assert np.allclose(expected_result, ssa.val) + + def test_erf(self, context): + test_input = torch.rand(1, 2, 3, 4) + constants, input_list, output_name = self._gen_constants(1, test_input) + node = InternalTorchIRNode(kind="erf", inputs=input_list, outputs=[output_name]) + ssa = self._construct_test_graph( + context, ops.erf, node, output_name, constants=constants + ) + expected_result = test_input.erf() + assert np.allclose(expected_result, ssa.val) + + def test_implicittensortonum(self, context): + input_shape = (1,) + graph_input_name = "input1" + graph_inputs = { + graph_input_name: mb.placeholder(input_shape, dtype=types.float) + } + output_name = "1" + node = InternalTorchIRNode( + kind="implicittensortonum", inputs=["input1"], outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.implicittensortonum, + node, + output_name, + graph_inputs=graph_inputs, + ) + assert ssa.shape == () + + @pytest.mark.parametrize( + "chunks, dim", itertools.product([2, 3, 5], [0, 1, 2, 3]), + ) + def test_constantchunk(self, context, chunks, dim): + test_input = 
torch.rand(5, 8, 9, 11) + expected_result = test_input.chunk(chunks, dim=dim) + constants, input_list, first_output = self._gen_constants(1, [test_input]) + outputs = [str(int(first_output) + i) for i in range(len(expected_result))] + node = InternalTorchIRNode( + attr={"chunks": chunks, "dim": dim}, + kind="constantchunk", + inputs=input_list, + outputs=outputs, + ) + self._construct_test_graph( + context, ops.constantchunk, node, first_output, constants=constants + ) + actual_result = [context[name] for name in outputs] + + np.testing.assert_equal(len(expected_result), len(actual_result)) + for ex_res, ssa_res in zip(expected_result, actual_result): + np.testing.assert_allclose(ex_res.numpy(), ssa_res.val, atol=1e-6) + + @pytest.mark.parametrize( + "input_shape, shape", + [ + ((3, 1), (3, 4)), + ((3, 1), (-1, 4)), + ((3, 1, 1), (3, 4, 1)), + ((3, 1, 1), (3, -1, 5)), + ((3, 1, 1), (3, 4, 5)), + ((1, 3, 1, 1), (2, 3, -1, 1)), + ((1, 3, 4, 1), (2, 3, -1, 5)), + ], + ) + def test_expand(self, context, input_shape, shape): + test_input = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants(2, [test_input, shape]) + node = InternalTorchIRNode( + kind="expand", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.expand, node, output_name, constants=constants + ) + expected_result = test_input.expand(shape) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, other_shape", + [ + ((3, 1), (3, 4)), + ((3, 1, 1), (3, 4, 1)), + ((3, 1, 1), (3, 4, 5)), + ((1, 3, 1, 1), (2, 3, 4, 1)), + ((1, 3, 4, 1), (2, 3, 4, 5)), + ((1, 3, 4, 1), (1, 3, 4, 5)), + ], + ) + def test_expand_as(self, context, input_shape, other_shape): + test_input = torch.rand(input_shape) + other = torch.rand(other_shape) + constants, input_list, output_name = self._gen_constants(2, [test_input, other]) + node = InternalTorchIRNode( + kind="expand_as", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.expand_as, node, output_name, constants=constants + ) + expected_result = test_input.expand_as(other) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "start, end, step", + [x for x in itertools.product((None, 0, 2), (5, 10), (None,),)] + + [x for x in itertools.product((0, 2), (5, 10), (1, 2))], + ) + def test_arange(self, context, start, end, step): + # Arange can get [end], [start, end], or [start, end, step] + args = [x for x in [start, end, step] if x is not None] + args += [0, 0, 0, False] # Extra args needed but ignored by arange + constants, input_list, output_name = self._gen_constants(len(args), args) + node = InternalTorchIRNode( + kind="arange", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.arange, node, output_name, constants=constants + ) + kwargs = {"end": end} + if start is not None: + kwargs["start"] = start + if step is not None: + kwargs["step"] = step + expected_result = torch.arange(**kwargs) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, axis", + [((2, 3), 0), ((2, 3, 4), 1), ((2, 3, 4, 5), 0), ((2, 3, 4, 5), 2),], + ) + def test_masked_fill(self, context, input_shape, axis): + mask_shape = list(input_shape) + mask_shape[axis] = 1 + mask = torch.randint(0, 1, mask_shape, dtype=torch.bool) + input_data = torch.rand(input_shape) + value = -1.0 + constants, input_list, output_name = self._gen_constants( + 3, 
[input_data, mask, value] + ) + node = InternalTorchIRNode( + kind="masked_fill", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.masked_fill, node, output_name, constants=constants + ) + expected_result = input_data.masked_fill(mask, value) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "noop_kind", + ["dropout", "dropout_", "feature_dropout", "contiguous", "device", "detach"], + ) + def test_noops(self, context, noop_kind): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants( + 3, [test_input, "test", "test"] + ) + node = InternalTorchIRNode( + kind=noop_kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.noop, node, output_name, constants=constants + ) + assert np.allclose(test_input.numpy(), ssa.val) + + def test_tanh(self, context): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants(1, [test_input]) + node = InternalTorchIRNode( + kind="tanh", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.tanh, node, output_name, constants=constants + ) + expected_result = torch.tanh(test_input) + assert np.allclose(expected_result.numpy(), ssa.val) + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product([(3, 20, 20), (1, 50, 50)], [0, 1, 2], [True, False]), + ) + def test_argmax(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + node = InternalTorchIRNode( + kind="argmax", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.argmax, node, output_name, constants=constants + ) + expected_result = torch.argmax(test_input, dim, keepdim) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "size, dtype", itertools.product([(1, 2, 3, 4), (1,)], [11, 0, 1, 6]), + ) + def test_zeros(self, context, size, dtype): + layout = 0 # unused + device = 0 # unused + pin_memory = 0 # unused + constants, input_list, output_name = self._gen_constants( + 5, [size, dtype, layout, device, pin_memory] + ) + node = InternalTorchIRNode( + kind="zeros", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.zeros, node, output_name, constants=constants + ) + expected_result = torch.zeros(size, dtype=ops.NUM_TO_TORCH_DTYPE[dtype]) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize("input_size", [(1, 2, 3, 4), (1,)]) + def test_exp(self, context, input_size): + test_input = torch.rand(input_size) + constants, input_list, output_name = self._gen_constants(1, test_input) + node = InternalTorchIRNode(kind="exp", inputs=input_list, outputs=[output_name]) + ssa = self._construct_test_graph( + context, ops.exp, node, output_name, constants=constants + ) + expected_result = torch.exp(test_input) + np.testing.assert_allclose(expected_result, ssa.val, rtol=1e-06) + + @pytest.mark.parametrize( + "input_size, dim, keepdim", + itertools.product([(1, 2, 3, 4)], [0, 1, 2], [True, False]), + ) + def test_max(self, context, input_size, dim, keepdim): + test_input = torch.rand(input_size) + constants, input_list, _ = self._gen_constants(3, [test_input, dim, keepdim]) + node = InternalTorchIRNode( + kind="max", inputs=input_list, outputs=["out1", "out2"], + ) + 
self._construct_test_graph(context, ops.max, node, constants=constants) + expected_max, expected_index = torch.max(test_input, dim=dim, keepdim=keepdim) + max_result = context["out1"].val + index_result = context["out2"].val + np.testing.assert_allclose(expected_max, max_result) + np.testing.assert_allclose(expected_index, index_result) + + @pytest.mark.parametrize( + "input_size, dim, descending", + itertools.product([(2, 3, 4), (1, 2, 3, 4)], [0, 1, 2], [True, False]), + ) + def test_sort(self, context, input_size, dim, descending): + test_input = torch.rand(input_size) + constants, input_list, output_name = self._gen_constants( + 3, [test_input, dim, descending] + ) + node = InternalTorchIRNode( + kind="sort", inputs=input_list, outputs=["out1", "out2"], + ) + self._construct_test_graph(context, ops.sort, node, constants=constants) + expected_sort, expected_index = torch.sort( + test_input, dim=dim, descending=descending + ) + sort_result = context["out1"].val + index_result = context["out2"].val + np.testing.assert_allclose(expected_sort, sort_result) + np.testing.assert_allclose(expected_index, index_result) + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product( + [(3, 20, 20), (1, 50, 50)], + [[0], [1], [2], [0, 2]], + [True, False]), + ) + def test_sum(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + sum_node = InternalTorchIRNode( + kind="sum", inputs=input_list, outputs=[output_name] + ) + # ops.mean is registered for "sum" nodes as well and dispatches on node.kind. + ssa = self._construct_test_graph( + context, ops.mean, sum_node, output_name, constants=constants + ) + expected_result = torch.sum(test_input, dim, keepdim) + assert np.allclose(expected_result, ssa.val) + + def test_sum_no_dims(self, context): + test_input = torch.rand((3, 20, 20)) + + constants, input_list, output_name = self._gen_constants(2, [test_input, None]) + sum_node = InternalTorchIRNode( + kind="sum", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, sum_node, output_name, constants=constants + ) + expected_result = torch.sum(test_input) + assert np.allclose(expected_result, ssa.val) + + def test_neg(self, context): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants(1, [test_input]) + node = InternalTorchIRNode( + kind="neg", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.neg, node, output_name, constants=constants + ) + expected_result = torch.neg(test_input) + assert np.allclose(expected_result.numpy(), ssa.val) + + @pytest.mark.parametrize( + "input_shape, k, dim, largest", + itertools.product([(5, 10, 10), (10, 5, 5)], [0, 3, 5], [0, 1, 2], [True, False]), + ) + def test_topk(self, context, input_shape, k, dim, largest): + test_input = torch.tensor(random_gen(input_shape, allow_duplicate=False)) + + constants, input_list, output_name = self._gen_constants( + 6, [test_input, k, dim, largest, True, None] + ) + topk_node = InternalTorchIRNode( + kind="topk", inputs=input_list, outputs=["out1", "out2"] + ) + self._construct_test_graph( + context, ops.topk, topk_node, constants=constants + ) + topk_result = context["out1"].val + index_result = context["out2"].val + + expected_max, expected_indices = torch.topk(test_input, k, dim, largest) + np.testing.assert_allclose(expected_max.numpy(), topk_result) + np.testing.assert_allclose(expected_indices.numpy(), index_result) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py new
file mode 100644 index 00000000..423cedb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py @@ -0,0 +1,371 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from collections import OrderedDict + +import numpy as np +import pytest +import torch + +from ..internal_graph import ( + InternalTorchIRBlock, + InternalTorchIRGraph, + InternalTorchIRNode, +) + +from ..torchir_passes import ( + flatten_graph_input_values, + flatten_graph_output_values, + transform_inplace_ops, +) + + +def _build_flattening_test_graph(): + # This test graph is: + # graph( + # %1 : (Tensor[1, 1], (Tensor[1, 2], Tensor[1, 3])) + # ): + # %2, %3 = tupleunpack[](%1) + # %4, %5 = tupleunpack[](%3) + # %6 = tupleconstruct[](%2, %4) + # %7 = tupleconstruct[](%6, %5) + # return (%7) + # + # And if you were to run the graph it would turn + # (a, (b, c)) + # into + # ((a, b), c) + + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["1"] = ( + torch.rand(1, 1), + ( + torch.rand(1, 2), + torch.rand(1, 3), + ), + ) + graph_nodes = [ + InternalTorchIRNode( + inputs=["1"], + outputs=["2", "3"], + kind="tupleunpack", + ), + InternalTorchIRNode( + inputs=["3"], + outputs=["4", "5"], + kind="tupleunpack", + ), + InternalTorchIRNode( + inputs=["2", "4"], + outputs=["6"], + kind="tupleconstruct", + ), + InternalTorchIRNode( + inputs=["6", "5"], + outputs=["7"], + kind="tupleconstruct", + ), + ] + graph_outputs = ["7"] + + return InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + + +class TestTorchPasses: + """Class containing tests for InternalTorchIR optimization passes. + """ + + @pytest.fixture + def set_random_seeds(self): + torch.manual_seed(1) + np.random.seed(1) + + def test_flatten_input_values(self): + graph = _build_flattening_test_graph() + + flatten_graph_input_values(graph) + + # The graph input tuple should have been flattened. + np.testing.assert_equal(len(graph.inputs.keys()), 3) + # Tuple flattening should introduce two new ops. + np.testing.assert_equal(len(graph.nodes), 6) + # The new ops at the beginning of the graph should be tupleconstructs. + np.testing.assert_equal(graph.nodes[0].kind, "tupleconstruct") + np.testing.assert_equal(graph.nodes[1].kind, "tupleconstruct") + # The inputs to the tupleconstructs should be the new flattened inputs. + input_names = [k for k in graph.inputs.keys()] + np.testing.assert_equal(input_names[1:], graph.nodes[0].inputs) + np.testing.assert_equal(input_names[0], graph.nodes[1].inputs[0]) + np.testing.assert_equal(graph.nodes[0].outputs[0], graph.nodes[1].inputs[1]) + # The last inserted tuple construct should produce the input for the + # next op. + np.testing.assert_equal(graph.nodes[1].outputs[0], graph.nodes[2].inputs[0]) + + def test_flatten_output_values(self): + graph = _build_flattening_test_graph() + + flatten_graph_output_values(graph) + + # The graph output tuple should have been flattened. + np.testing.assert_equal(len(graph.outputs), 3) + # The outputs of the graph should come from intermediate ops.
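+ # (the pass traces the tupleconstruct chain and returns its leaf tensors instead)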
+ np.testing.assert_equal(graph.outputs[0], graph.nodes[0].outputs[0]) + np.testing.assert_equal(graph.outputs[1], graph.nodes[1].outputs[0]) + np.testing.assert_equal(graph.outputs[2], graph.nodes[1].outputs[1]) + + def test_transform_inplace_ops_graph(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=0]() + # %2 = constant[value=10]() + # %3 = listconstruct[](%1) + # %4 = append[](%3, %2) + # return (%3) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": 0}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 10}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["1"], + outputs=["3"], + kind="listconstruct", + ), + InternalTorchIRNode( + inputs=["3", "2"], + outputs=["4"], + kind="append", + ), + ] + graph_outputs = ["3"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + np.testing.assert_equal(len(graph.outputs), 1) + np.testing.assert_equal(graph.outputs[0], graph.nodes[-1].outputs[0]) + + def test_transform_inplace_ops_loop(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=True]() + # %2 = constant[value=-1]() + # %3 = constant[value=10]() + # %4 = listconstruct[](%2) + # = loop[](%3, %1) + # block(%i.1): + # %6 = append[](%4, %i.1) + # return (%1) + # return (%4) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + loop_block = InternalTorchIRBlock( + inputs=["i.1"], + outputs=["1"], + nodes=[ + InternalTorchIRNode( + inputs=["4", "i.1"], + outputs=["6"], + kind="append", + ), + ], + ) + loop_block.nodes[0].parent = loop_block + loop_node = InternalTorchIRNode( + inputs=["3", "1"], + outputs=[], + kind="loop", + blocks=[loop_block], + ) + loop_block.parent = loop_node + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": True}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": -1}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 10}, + outputs=["3"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["2"], + outputs=["4"], + kind="listconstruct", + ), + loop_node, + ] + graph_outputs = ["4"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + # There should be an additional input to the loop. + np.testing.assert_equal(len(loop_node.inputs), 3) + # That input should be the output of the previous op. + np.testing.assert_equal(loop_node.inputs[2], graph.nodes[3].outputs[0]) + # The loop block should have an additional input. + np.testing.assert_equal(len(loop_block.inputs), 2) + # The loop block's new input should be the input to append. + np.testing.assert_equal(loop_block.inputs[1], loop_block.nodes[0].inputs[0]) + # The loop block should have an additional output. + np.testing.assert_equal(len(loop_block.outputs), 2) + # Append's output should be returned from the loop block. + np.testing.assert_equal(loop_block.outputs[1], loop_block.nodes[0].outputs[0]) + # The loop should now have an output. 
+ np.testing.assert_equal(len(loop_node.outputs), 1) + # The loop's name should now be the name of its output. + np.testing.assert_equal(loop_node.name, loop_node.outputs[0]) + # The loop's output should now be the graph output. + np.testing.assert_equal(loop_node.outputs[0], graph.outputs[0]) + + @pytest.mark.xfail(reason="rdar://64235006") + def test_transform_inplace_ops_if(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=True]() + # %2 = constant[value=0]() + # %3 = constant[value=1]() + # %4 = listconstruct[](%2) + # = if[](%1) + # block0(): + # %5 = append[](%4, %3) + # return () + # block1(): + # %6 = append[](%4, %2) + # return () + # return (%4) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + if_true_block = InternalTorchIRBlock( + inputs=[], + outputs=[], + nodes=[ + InternalTorchIRNode( + inputs=["4", "3"], + outputs=["5"], + kind="append", + ), + ], + ) + if_true_block.nodes[0].parent = if_true_block + if_false_block = InternalTorchIRBlock( + inputs=[], + outputs=[], + nodes=[ + InternalTorchIRNode( + inputs=["4", "2"], + outputs=["6"], + kind="append", + ), + ], + ) + if_false_block.nodes[0].parent = if_false_block + if_node = InternalTorchIRNode( + inputs=["1"], + outputs=[], + kind="if", + blocks=[if_true_block, if_false_block], + ) + if_true_block.parent = if_node + if_false_block.parent = if_node + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": True}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 0}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 1}, + outputs=["3"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["2"], + outputs=["4"], + kind="listconstruct", + ), + if_node, + ] + graph_outputs = ["4"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + # The true block should now have an output. + np.testing.assert_equal(len(if_true_block.outputs), 1) + # The true block should output the result of the append op. + np.testing.assert_equal(if_true_block.outputs[0], if_true_block.nodes[0].outputs[0]) + # The false block should now have an output. + np.testing.assert_equal(len(if_false_block.outputs), 1) + # The false block should output the result of the append op. + np.testing.assert_equal(if_false_block.outputs[0], if_false_block.nodes[0].outputs[0]) + # The if op should have an additional output. + np.testing.assert_equal(len(if_node.outputs), 1) + # The if's name should now be the name of its output. + np.testing.assert_equal(if_node.name, if_node.outputs[0]) + # The graph output should be the if op output. + np.testing.assert_equal(if_node.outputs[0], graph.outputs[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py new file mode 100644 index 00000000..bd7d1ce1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py @@ -0,0 +1,1401 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import os + +import numpy as np +import pytest +from PIL import Image + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.frontend.torch.test.testing_utils import \ + _copy_input_data +from coremltools.converters.mil.testing_utils import ( + assert_cast_ops_count, assert_input_dtype, assert_ops_in_mil_program, + assert_output_dtype, assert_prog_input_type, assert_prog_output_type, + assert_spec_input_image_type, assert_spec_output_image_type, + verify_prediction) +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.test.api.test_api_examples import TestInputs as _TestInputs + +if _HAS_TORCH: + import torch + import torchvision + +################################################################################# +# Note: all tests are also used as examples in https://coremltools.readme.io/docs +# as a reference. +# Whenever any of the following tests fails, we should update the API documentation +################################################################################# + +@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) +class TestPyTorchConverterExamples: + @staticmethod + def test_convert_torch_vision_mobilenet_v2(tmpdir): + """ + In this example, we'll instantiate a PyTorch classification model and convert + it to Core ML. + """ + + """ + Here we instantiate our model. In a real use case this would be your trained + model. + """ + model = torchvision.models.mobilenet_v2() + + """ + The next thing we need to do is generate TorchScript for the model. The easiest + way to do this is by tracing it. + """ + + """ + It's important that a model be in evaluation mode (not training mode) when it's + traced. This makes sure things like dropout are disabled. + """ + model.eval() + + """ + Tracing takes an example input and traces its flow through the model. Here we + are creating an example image input. + + The rank and shape of the tensor will depend on your model use case. If your + model expects a fixed size input, use that size here. If it can accept a + variety of input sizes, it's generally best to keep the example input small to + shorten how long it takes to run a forward pass of your model. In all cases, + the rank of the tensor must be fixed. + """ + example_input = torch.rand(1, 3, 256, 256) + + """ + Now we actually trace the model. This will produce the TorchScript that the + CoreML converter needs. + """ + traced_model = torch.jit.trace(model, example_input) + + """ + Now with a TorchScript representation of the model, we can call the CoreML + converter. The converter also needs a description of the input to the model, + where we can give it a convenient name. + """ + mlmodel = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + ) + + """ + Now that the conversion is complete, we can save the MLModel and run inference. + """ + save_path = os.path.join(str(tmpdir), "mobilenet_v2.mlmodel") + mlmodel.save(save_path) + + """ + Running predict() is only supported on macOS.
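+ On other platforms the model can still be converted and saved; only the + predict() call below needs the ct.utils._is_macos() guard.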
+ """ + if ct.utils._is_macos(): + results = mlmodel.predict({"input": example_input.numpy()}) + assert isinstance(results, dict) + + @staticmethod + def test_convert_torch_traced_model_to_milinternal(tmpdir): + from torch import nn + class Network(nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = nn.Linear(100, 10) + self.output = nn.Linear(10, 2) + self.sigmoid = nn.Sigmoid() + self.softmax = nn.Softmax(dim=1) + + def forward(self, x): + x = self.hidden(x) + x = self.sigmoid(x) + x = self.output(x) + x = self.softmax(x) + return x + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 100) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to='milinternal' + ) + assert isinstance(model, ct.converters.mil.Program) + + @staticmethod + def test_torch_classifier(): + class Net(torch.nn.Module): + def __init__(self): + super(Net, self).__init__() + self.linear1 = torch.nn.Linear(28 * 28, 100) + self.linear2 = torch.nn.Linear(100, 50) + self.final = torch.nn.Linear(50, 10) + self.relu = torch.nn.ReLU() + + def forward(self, img): # convert + flatten + x = img.view(-1, 28 * 28) + x = self.relu(self.linear1(x)) + x = self.relu(self.linear2(x)) + x = self.final(x) + return x + model = Net() + model.eval() + example_input = torch.rand(1, 28 * 28, 1) + traced_model = torch.jit.trace(model, example_input) + traced_model.eval() + + def _test_classifier(traced_model, example_input, class_type, backend): + label = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + if class_type == "str": + label = list(map(lambda x: str(x), label)) + classifier_config = ct.ClassifierConfig(label) + mlmodel = ct.convert( + traced_model, + source='pytorch', + convert_to=backend, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + classifier_config=classifier_config + ) + if ct.utils._is_macos(): + coreml_out = mlmodel.predict({"input": example_input.detach().numpy()}) + assert "classLabel" in coreml_out + key_type = str if class_type == "str" else int + assert isinstance(coreml_out["classLabel"], key_type) + + for class_type in ("str", "int"): + _test_classifier(traced_model, example_input, class_type, "neuralnetwork") + if ct.utils._macos_version() >= (12, 0): + _test_classifier(traced_model, example_input, class_type, "mlprogram") + + @staticmethod + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_convert_to_argument_with_torch_model(tmpdir, convert_to): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = torch.nn.Linear(30, 5) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + return self.relu(x) + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 30) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to=convert_to + ) + assert isinstance(model, ct.models.MLModel) + spec = model.get_spec() + if convert_to == "mlprogram": + assert spec.WhichOneof('Type') == 'mlProgram' + else: + assert spec.WhichOneof('Type') == 'neuralNetwork' + + @staticmethod + def test_deployment_target_argument_with_torch_model(): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = 
torch.nn.Linear(30, 5) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + return self.relu(x) + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 30) + traced_model = torch.jit.trace(torch_model, example_input) + + # convert to 'neuralnetwork' by specifying an iOS13 target + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + minimum_deployment_target=ct.target.iOS13, + ) + assert isinstance(model, ct.models.MLModel) + assert model.get_spec().WhichOneof('Type') == 'neuralNetwork' + + # convert to 'mlprogram' by specifying an iOS15 target + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + minimum_deployment_target=ct.target.iOS15, + ) + assert isinstance(model, ct.models.MLModel) + assert model.get_spec().WhichOneof('Type') == 'mlProgram' + + # verify an error is raised when convert_to="neuralnetwork" and target is iOS15 + with pytest.raises(ValueError) as e: + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to="neuralnetwork", + minimum_deployment_target=ct.target.iOS15, + ) + expected_error = "If minimum deployment target is iOS15/macOS12/watchOS8/tvOS15 or higher, " \ + "then 'convert_to' cannot be neuralnetwork. It must be 'mlprogram'" + assert expected_error == str(e.value) + + # verify an error is raised when convert_to="mlprogram" and target is less than iOS15 + with pytest.raises(ValueError) as e: + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to="mlprogram", + minimum_deployment_target=ct.target.iOS14, + ) + expected_error = "When 'convert_to' is mlprogram, the minimum deployment target " \ + "must be at least iOS15/macOS12/watchOS8/tvOS15" + assert expected_error == str(e.value) + + @staticmethod + def test_get_milprogram_method_with_torch_model(): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = torch.nn.Linear(100, 10) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + x = self.relu(x) + return x + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 100) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to='mlprogram' + ) + assert isinstance(model._get_mil_internal(), ct.converters.mil.Program) + + @staticmethod + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason='Model produces specification 6.') + @pytest.mark.parametrize( + "convert_to, provide_prob_output_argument", + itertools.product( + ["neuralnetwork", "mlprogram"], + [False, True], + ) + ) + def test_classifier_from_torch_model(convert_to, provide_prob_output_argument): + torch_model = torch.nn.ReLU().eval() + traced_model = torch.jit.trace(torch_model, torch.rand(3,)) + variable_name = "var_2" + class_label_name = "class_label" + classifier_config = ct.ClassifierConfig( + class_labels=['a', 'b', 'c'], + predicted_feature_name=class_label_name, + predicted_probabilities_output=variable_name if provide_prob_output_argument else None, + ) + + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=(3,))], + classifier_config = classifier_config, + convert_to=convert_to, + ) + spec = model.get_spec() + input_name = spec.description.input[0].name + out_dict = model.predict({input_name : 
np.array([1.0, 2.0, 3.0])}) + + assert class_label_name in out_dict + assert out_dict[class_label_name] == 'c' + if convert_to == "neuralnetwork": + assert variable_name in out_dict + assert isinstance(out_dict[variable_name], dict) + else: + output_dict_feature_name = class_label_name + "_probs" + assert output_dict_feature_name in out_dict + assert isinstance(out_dict[output_dict_feature_name], dict) + +############################################################################### +# Note: Stress tests for PyTorch input / output types +############################################################################### + +@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.') +@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) +class TestTorchInputs(_TestInputs): + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions") + def test_torch_predict_input(): + TestTorchInputs._test_variant_input_type_prediction(torch.tensor) + + @staticmethod + def test_int64_inputs(): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, + embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + # running predict() is supported on macOS + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy()) + + # Duplicated inputs are invalid + with pytest.raises(ValueError, match=r"Duplicated inputs"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ], + ) + + # Outputs must be of type ct.ImageType or ct.TensorType + with pytest.raises(ValueError, match=r"must be a list of type ct.TensorType or ct.ImageType"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ], + outputs=["output"], + ) + + @staticmethod + def test_fully_dynamic_inputs(): + """ + All dims of the inputs are dynamic, and we write to a slice of one of the + inputs.
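+ Both inputs therefore declare every dimension as ct.RangeDim().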
+ """ + + class Model(torch.nn.Module): + def __init__(self, index): + super(Model, self).__init__() + self.index = index + + def forward(self, x, y): + x[:, int(self.index.item())] = 0.0 + y = y.unsqueeze(0) + return y, x + + model = Model(torch.tensor(3)) + scripted_model = torch.jit.script(model) + + mlmodel = ct.convert( + scripted_model, + inputs=[ + ct.TensorType("x", shape=(ct.RangeDim(), ct.RangeDim())), + ct.TensorType("y", shape=(ct.RangeDim(), ct.RangeDim())) + ], + ) + + # running predict() is supported on macOS + if ct.utils._is_macos(): + x, y = torch.rand(2, 4), torch.rand(1, 2) + torch_input = _copy_input_data([x, y]) + torch_res = model(*torch_input) + results = mlmodel.predict({"x": x.cpu().detach().numpy(), + "y": y.cpu().detach().numpy()}) + for i, name in enumerate(mlmodel.output_description): + np.testing.assert_allclose(torch_res[i], results[name]) + + x, y = torch.rand(1, 6), torch.rand(2, 3) + torch_input = _copy_input_data([x, y]) + torch_res = model(*torch_input) + results = mlmodel.predict({"x": x.cpu().detach().numpy(), + "y": y.cpu().detach().numpy()}) + for i, name in enumerate(mlmodel.output_description): + np.testing.assert_allclose(torch_res[i], results[name]) + + @staticmethod + def test_rank0_inputs_torch(): + """Similar to TestPyTorchConverterExamples::test_int64_inputs but + using rank-0 int input. + """ + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, + embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.tensor(1) + traced_model = torch.jit.trace(model, example_input) + with pytest.raises(ValueError, match=r"Rank-0"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + @staticmethod + @pytest.mark.parametrize("variable_length", [True, False]) + def test_torch_range_dim_lstm(variable_length): + """ + This example shows how to run LSTM with previous hidden / cell states + """ + + input_size = 3 + hidden_size = 2 + + class TestNet(torch.nn.Module): + def __init__(self): + super(TestNet, self).__init__() + self.lstm = torch.nn.LSTM(input_size, hidden_size, 1) + + def forward(self, x, hidden_state, cell_state): + # LSTM takes in previous hidden and cell states. The first + # invokation usually have zero vectors as initial states. + output, (new_hidden_state, new_cell_state) = \ + self.lstm(x, (hidden_state, cell_state)) + # LSTM hidden / cell states are returned to be managed by the + # caller (and is fed in as inputs in the next call). + return output, new_hidden_state, new_cell_state + + model = TestNet() + model.eval() + + seq_len = 2 # we'll make seq_len dynamic later + batch = 1 + input_shape = (seq_len, batch, input_size) + rand_input = torch.rand(*input_shape) + h_shape = (1, batch, hidden_size) + rand_h0 = torch.rand(*h_shape) + rand_c0 = torch.rand(*h_shape) + + traced_model = torch.jit.trace(model, (rand_input, rand_h0, rand_c0)) + + # ct.RangeDim() tells coremltools that this dimension can change for + # each inference example (aka "runtime-determined"). If the sequence + # length is always the same (e.g., 2 step LSTM would have seq_len == 2) + # Note that fixed-length models usually run slightly faster + # than variable length models. 
+ ct_seq_len = ct.RangeDim() if variable_length else seq_len + seq_input = ct.TensorType(shape=(ct_seq_len, batch, input_size), + name="seq_input") + h_input = ct.TensorType(shape=h_shape, name="h_input") + c_input = ct.TensorType(shape=h_shape, name="c_input") + + mlmodel = ct.convert( + traced_model, + inputs=[seq_input, h_input, c_input], + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"seq_input": rand_input.detach().numpy().astype(np.float32), + "h_input": rand_h0.detach().numpy().astype(np.float32), + "c_input": rand_c0.detach().numpy().astype(np.float32), + } + ) + + # Verify outputs + expected = model(rand_input, rand_h0, rand_c0) + names = list(result.keys()) + names.sort() + np.testing.assert_allclose(result[names[0]], + expected[0].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[1]], + expected[1].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[2]], + expected[2].detach().numpy(), atol=1e-4) + + # Try example of different length + if variable_length: + seq_len = 10 + input_shape = (seq_len, batch, input_size) + rand_input = torch.rand(*input_shape) + + result = mlmodel.predict( + {"seq_input": rand_input.detach().numpy().astype(np.float32), + "h_input": rand_h0.detach().numpy().astype(np.float32), + "c_input": rand_c0.detach().numpy().astype(np.float32), + } + ) + expected = model(rand_input, rand_h0, rand_c0) + names = list(result.keys()) + names.sort() + np.testing.assert_allclose(result[names[0]], + expected[0].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[1]], + expected[1].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[2]], + expected[2].detach().numpy(), atol=1e-4) + + @staticmethod + @pytest.mark.parametrize("use_symbol", [True, False]) + def test_torch_outofbound_range_dim(use_symbol): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(3,), + dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + if use_symbol: + seq_len_dim = ct.RangeDim(symbol='len', lower_bound=3, + upper_bound=5) + else: + # symbol is optional + seq_len_dim = ct.RangeDim(lower_bound=3, upper_bound=5) + seq_input = ct.TensorType(name="input", shape=(seq_len_dim,), + dtype=np.int64) + mlmodel = ct.convert( + traced_model, + inputs=[seq_input], + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy()) + + # seq_len below/above lower_bound/upper_bound + with pytest.raises(RuntimeError, + match=r"Size \(99\) of dimension \(0\) is not in allowed range \(3\.\.5\)"): + example_input2 = torch.randint(high=num_tokens, size=(99,), + dtype=torch.int64) + result = mlmodel.predict( + {"input": example_input2.detach().numpy().astype(np.float32)} + ) + + with pytest.raises(RuntimeError, + match=r"Size \(2\) of dimension \(0\) is not in allowed range \(3\.\.5\)"): + example_input2 = torch.randint(high=num_tokens, size=(2,), + dtype=torch.int64) + result = mlmodel.predict( + {"input": example_input2.detach().numpy().astype(np.float32)} + ) + + @staticmethod + def 
test_torch_enumerated_shapes(): + + in_channels = 3 + out_channels = 2 + kernel_size = 3 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.conv = torch.nn.Conv2d(in_channels, out_channels, + kernel_size) + + def forward(self, x): + return self.conv(x) + + model = TestModule() + model.eval() + + example_input = torch.randn(1, 3, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + shapes = [(1, 3, 28, 28), (1, 3, 56, 56)] + enumerated_shapes = ct.EnumeratedShapes(shapes=shapes) + tensor_input = ct.TensorType(name="input", shape=enumerated_shapes) + + mlmodel = ct.convert( + traced_model, + inputs=[tensor_input], + compute_units=ct.ComputeUnit.CPU_ONLY + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)}, + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy(), + rtol=1e-3, atol=1e-4) + + # Test (1, 3, 56, 56) shape (can't verify numerical parity with Torch + # which doesn't support enumerated shape) + test_input_x = np.random.rand(*shapes[1]).astype(np.float32) + mlmodel.predict({"input": test_input_x}) + + # Test with a wrong shape + with pytest.raises(RuntimeError, + match=r"MultiArray Shape \(1 x 3 x 29 x 29\) was not in enumerated set of allowed shapes"): + test_input_x = np.random.rand(1, 3, 29, 29).astype(np.float32) + mlmodel.predict({"input": test_input_x}) + + @staticmethod + def test_torch_image_enumerated_shapes(): + import torchvision + torch_model = torchvision.models.mobilenet_v2().features + torch_model.eval() + example_input = torch.rand(1, 3, 256, 256) + traced_model = torch.jit.trace(torch_model, example_input) + input_shapes = ct.EnumeratedShapes(shapes=[(1, 3, 256, 256), (1, 3, 224, 224)]) + image_input = ct.ImageType(shape=input_shapes, + bias=[-1, -1, -1], scale=1 / 127) + model = ct.convert(traced_model, inputs=[image_input]) + assert model is not None + spec = model.get_spec() + assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2 + + @staticmethod + def test_torch_optional_input(): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x, y): + return self.embedding(x) + y + + model = TestModule() + model.eval() + + example_input = [ + torch.randint(high=num_tokens, size=(2,), dtype=torch.int64), + torch.rand(1), + ] + traced_model = torch.jit.trace(model, example_input) + + required_input = ct.TensorType( + name="required_input", shape=(ct.RangeDim(),), dtype=np.int64) + default_value = np.array([3]).astype(np.float32) + optional_input = ct.TensorType(name="optional_input", shape=(1,), + default_value=default_value) + + for compute_units in ct.ComputeUnit: + if compute_units == ct.ComputeUnit.CPU_AND_NE and ct.utils._macos_version() < (13, 0): + continue + + mlmodel = ct.convert( + traced_model, + inputs=[required_input, optional_input], + compute_units=compute_units, + ) + + assert(mlmodel.compute_unit == compute_units) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"required_input": + example_input[0].detach().numpy().astype(np.float32)} + ) + + # Verify outputs + torch_default_value = torch.tensor([3]) + expected = model(example_input[0].detach(), torch_default_value) + name = list(result.keys())[0] + 
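# The Core ML prediction should match the torch reference computed with the default value. +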
np.testing.assert_allclose(result[name], expected.detach().numpy()) + + +@pytest.fixture +def int32_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.int32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def int64_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.int64) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_input_model_add_op(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_input_model_relu_ops(): + class Model(torch.nn.Module): + def forward(self, x): + x = torch.nn.ReLU()(x) + return torch.nn.ReLU()(x) + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_two_input_model(): + class Model(torch.nn.Module): + def forward(self, x, y): + return x + y + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), [example_input, example_input]) + +@pytest.fixture +def float32_two_output_model(): + class Model(torch.nn.Module): + def forward(self, x): + y = torch.nn.ReLU()(x) + out1 = torch.nn.ReLU()(y) + out2 = torch.nn.ReLU6()(x) + return out1, out2 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank3_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (1, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank4_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (1, 3, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank4_grayscale_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 10 + example_input = torch.randint(0, 100, (1, 1, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def linear_model(): + # this model will test the fuse_linear_bias pass + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(10, 15, bias=False) + self.constant_tensor = torch.ones((15,), dtype=torch.float32) + + def forward(self, x): + x = self.linear(x) + x = x - self.constant_tensor + x = torch.nn.ReLU()(x) + return x + example_input = torch.randint(0, 10, (1, 10), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestInputOutputConversionAPI: + + def test_input_dtype_default(self, int32_input_model): + #if dtype is not provided it defaults to float32 + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_input_shape_missing_error(self, float32_input_model_add_op): + with 
pytest.raises(ValueError, + match="'shape' must be provided in the 'inputs' argument for pytorch conversion"): + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + + def test_unsupported_input_dtype_in_torch_model(self, int64_input_model): + # test that no error is raised when no dtype is provided by the user, + # and the Torch model's input dtype is not supported. + # In this case, it will be mapped to the default dtype which is float32 + mlmodel = ct.convert(int64_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_input_dtype_user_provided(self, float32_input_model_add_op): + # test that provided dtype in the api is applied + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_invalid_input_dtype(self, int32_input_model): + with pytest.raises(TypeError, + match="is unsupported for inputs/outputs of the model" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.int16)], + minimum_deployment_target=ct.target.macOS12) + + with pytest.raises(TypeError, + match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12) + + def test_fp16_input_dtype(self, float32_input_model_add_op, float32_input_model_relu_ops, int32_input_model): + """ + Test that providing fp16 input dtype works with macOS13. + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + # Two consecutive relus are merged in the `merge_consecutive_relus` pass. 
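+ # Hence a single relu remains, followed by a cast back to the fp32 output.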
+ assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops, + int32_input_model): + """ + Same test as test_fp16_input_dtype, but with Float32 precision + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + """ + Although no FP16ComputePrecision is applied, the float16 input propagates through the network + """ + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + def test_input_name_specified_by_user(self, float32_input_model_relu_ops, + float32_two_input_model): + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), name="my_custom_input_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="my_custom_input_name") + + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), name="user_provided_name_1"), + ct.TensorType(shape=(10, 20), name="user_provided_name_2")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="user_provided_name_1", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="user_provided_name_2", index=1) + + def test_two_input_model(self, float32_two_input_model): + # test that error is raised if only 1 input is provided + with pytest.raises(ValueError): + ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + + + # test forcing 1st input to type int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32), + ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp32", index=1) + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test forcing both inputs to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32), + ct.TensorType(shape=(10, 20), dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", index=0) + 
assert_input_dtype(mlmodel, expected_type_str="int32", index=1) + assert_output_dtype(mlmodel, expected_type_str="int32") + + # test forcing both inputs to be float16 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16), + ct.TensorType(shape=(10, 20), dtype=np.float16), + ], + minimum_deployment_target=ct.target.macOS13) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp16", index=1) + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_output_name_specified_by_user(self, float32_input_model_relu_ops, float32_two_output_model): + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")], + outputs=[ct.TensorType(name="custom_output_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_input_name") + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output_name") + + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")], + outputs=[ct.TensorType(name="custom_output1_name"), + ct.TensorType(name="custom_output2_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_input_name") + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output1_name", index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output2_name", index=1) + + def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops): + # test output type: if not provided, it should be the default which is float32 + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test that the output dtype provided by the user is applied during conversion + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="int32") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "cast", "cast"]) + + # test that an error is raised when shape is provided for the output + with pytest.raises(ValueError): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + + # test that output dtype of float16 is rejected when deployment target is low + with pytest.raises(TypeError, + match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13" + ): + ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that output type float16 is applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + 
outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + + # test that input and output types float16 are applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, expected_op_list=["relu"]) + verify_prediction(mlmodel) + + def test_multi_output_model(self, float32_two_output_model): + # check that error is raised when only 1 output provided + with pytest.raises(ValueError, match="Number of outputs provided, 1, " + "do not match the number of outputs detected in the model, 2"): + ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType()], + minimum_deployment_target=ct.target.macOS12) + + # set 1 output to float16 and the other to float32 + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + outputs=[ct.TensorType(name="out1", dtype=np.float16), + ct.TensorType(name="out2", dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=1) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="out1", index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="out2", index=1) + verify_prediction(mlmodel) + + def test_color_input(self, rank4_input_model, rank3_input_model): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(shape=(1, 10, 20), color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS12, + ) + + def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model): + with pytest.raises(ValueError, match="must have rank 4"): + ct.convert(rank3_input_model, + inputs=[ct.ImageType(shape=(1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + # invalid shape + with pytest.raises(ValueError): + ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_prog_input_type(mlmodel._mil_program,
expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that grayscale_16 raises error when used with neural network + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_output(self, rank4_input_model, float32_input_model_add_op): + # check that an error is raised if the output shape is not of form (1, 3, H, W) + with pytest.raises(ValueError, match="must have rank 4. Instead it has rank 2"): + ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13) + + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), + color_layout=ct.colorlayout.BGR)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + # check neural network conversion + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), + color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + verify_prediction(mlmodel) + + def test_grayscale_output(self, rank4_grayscale_input_model): + with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.TensorType(shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + 
outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16") + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16") + verify_prediction(mlmodel) + + def test_linear_model(self, linear_model): + # this will test the fuse_linear_bias pass, when the inputs are of type float16 + mlmodel = ct.convert(linear_model, + inputs=[ct.TensorType(shape=(1, 10), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, ["linear", "relu"]) + verify_prediction(mlmodel) + + + def test_classifier(self): + torch_model = torch.nn.ReLU().eval() + traced_model = torch.jit.trace(torch_model, torch.rand(3,)) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=(3,), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + classifier_config = ct.ClassifierConfig(['a', 'b', 'c']), + convert_to='mlprogram', + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(model, expected_type_str="fp16") + assert_ops_in_mil_program(model, ["relu", "cast", "classify"]) + spec = model.get_spec() + input_name = spec.description.input[0].name + out_dict = model.predict({input_name : np.array([1.0, 2.0, 3.0])}) + assert 'classLabel' in out_dict + assert out_dict['classLabel'] == 'c' + assert len(spec.description.output) == 2 + assert "classLabel_probs" in out_dict + assert isinstance(out_dict["classLabel_probs"], dict) + + def test_prediction_with_fp16_io(self): + torch_model = torch.nn.Linear(30, 5).eval() + traced_model = torch.jit.trace(torch_model, torch.rand(1, 30)) + mlmodel = ct.convert(traced_model, + inputs=[ct.TensorType(name="input", shape=(1, 30), dtype=np.float32)], + outputs=[ct.TensorType(dtype=np.float32)], + 
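# I/O is pinned to float32 here; with an mlprogram deployment target the intermediate compute precision still defaults to float16
+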
minimum_deployment_target=ct.target.macOS13, + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + # test prediction + sample_input = np.random.rand(1, 30).astype(np.float32) * 10 + model_output = mlmodel.predict({"input": sample_input})[mlmodel._spec.description.output[0].name] + reference_output = traced_model(torch.from_numpy(sample_input)).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestGrayscaleImagePredictions: + + def test_grayscale_input_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(name="input_image", + shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.TensorType(name="output")], + minimum_deployment_target=ct.target.macOS13, + ) + sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20)) + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L') + model_output = mlmodel.predict({"input_image": img_input})['output'] + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input.astype(np.float32))).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + def test_grayscale_fp16_input_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(name="input_image", + shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(name="output")], + minimum_deployment_target=ct.target.macOS13, + ) + + # incorrect way to do prediction + with pytest.raises(TypeError, + match="must be of type PIL.Image.Image with mode=='F'", + ): + sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20)) + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L') + mlmodel.predict({"input_image": img_input}) + + # correct way to do prediction + sample_input = np.random.rand(1, 1, 10, 20) # in between [0, 1] + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.float32), 'F') + model_output = mlmodel.predict({"input_image": img_input})['output'] + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input.astype(np.float32))).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + def test_grayscale_output_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.TensorType(name="input", + shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(name="output_image", + color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + sample_input = np.random.randint(low=0, high=200, size=(1, 1, 10, 20)).astype(np.float32) + model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image'] + assert isinstance(model_output_pil_image, Image.Image) + assert model_output_pil_image.mode == "L" + model_output_as_numpy = np.array(model_output_pil_image) + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input)).detach().numpy() + reference_output = np.squeeze(reference_output) + np.testing.assert_allclose(reference_output, model_output_as_numpy, rtol=1e-2, atol=1e-2) + + def test_grayscale_fp16_output_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + 
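# tensor input, GRAYSCALE_FLOAT16 image output: predict() should return a PIL Image with mode "F" (asserted below)
+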
inputs=[ct.TensorType(name="input", + shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(name="output_image", + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + sample_input = np.random.randint(low=0, high=200, size=(1, 1, 10, 20)).astype(np.float32) + model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image'] + assert isinstance(model_output_pil_image, Image.Image) + assert model_output_pil_image.mode == "F" + model_output_as_numpy = np.array(model_output_pil_image) + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input)).detach().numpy() + reference_output = np.squeeze(reference_output) + np.testing.assert_allclose(reference_output, model_output_as_numpy, rtol=1e-2, atol=1e-2) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py new file mode 100644 index 00000000..d7416387 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py @@ -0,0 +1,8442 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform +from typing import List, Tuple +from unittest.mock import patch + +import numpy as np +import pytest +import torch.nn as nn +import torchvision + +import coremltools as ct +from coremltools import RangeDim, Shape, TensorType +from coremltools._deps import version_lt +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil.var import Var +from coremltools.converters.mil.testing_utils import einsum_equations, gen_input_shapes_einsum +from coremltools.models.utils import _macos_version, _python_version + +from .testing_utils import ModuleWrapper, TorchBaseTest, contains_op, generate_input_data + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +torch = pytest.importorskip("torch") +torch.manual_seed(30) +np.random.seed(30) + +# Set of common shapes for testing. 
Not all layers support 1D, so these two +# sets of shapes are kept separate +COMMON_SHAPES = [(1, 10), (1, 5, 6), (1, 3, 5, 6), (1, 3, 4, 5, 6)] +COMMON_SHAPES_ALL = [(1,)] + COMMON_SHAPES + + +class TestScriptedModels(TorchBaseTest): + + @staticmethod + def get_while_loop_model(): + class TestLayer(nn.Module): + def forward(self, x): + x = 0.5 * x + return x + + class TestNet(nn.Module): + input_size = (1,) + + def __init__(self): + super(TestNet, self).__init__() + layer = TestLayer() + self.layer = torch.jit.trace(layer, torch.rand(self.input_size)) + + def forward(self, x): + while x > 0.01: + x = self.layer(x) + return x + + return TestNet().eval() + + @staticmethod + def get_cond_model(): + class TestNet(nn.Module): + def forward(self, x): + if torch.squeeze(x) < 10.0: + return x * 10.0 + else: + return x * 2.0 + + return TestNet().eval() + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_while_loop(self, compute_unit, backend): + model = TestScriptedModels.get_while_loop_model() + self.run_compare_torch( + model.input_size, + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cond(self, compute_unit, backend): + torch_model = TestScriptedModels.get_cond_model() + + self.run_compare_torch( + torch.tensor([1.]), + torch_model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + self.run_compare_torch( + torch.tensor([11.]), + torch_model, + input_as_shape=False, + backend=backend, + 
compute_unit=compute_unit, + use_scripting=True, + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_conv(self, compute_unit, backend): + pytest.xfail( + "rdar://88194776 ([Converter] coremltools is not working with scripted torch convolution model)" + ) + model = torch.nn.Conv2d( + in_channels=2, + out_channels=3, + kernel_size=1, + padding="same", + stride=1, + dilation=1, + groups=1, + bias=False, + ) + self.run_compare_torch( + (1, 2, 4, 5), + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True, + ) + + +class TestMean(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_with_flexible_shape(self, compute_unit, backend): + if backend[0] == "mlprogram" and _macos_version() < (13, 0): + pytest.xfail( + "Issue fixed in iOS16/macOS13: https://github.com/apple/coremltools/issues/1420" + ) + + class Model(nn.Module): + def forward(self, x): + return torch.mean(x, dim=(2, 3), keepdim=True) + + model = Model() + shape = (1, 3, 256, 256) + converter_input_type = [ + TensorType(shape=Shape(shape=[1, 3, RangeDim(), RangeDim()], default=shape)) + ] + + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + @staticmethod + @pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), reason="Bug fixed in macOS13/iOS16" + ) + def test_flexible_shape_with_default_value(): + # test for bug reported in https://github.com/apple/coremltools/issues/1420 + class Network(torch.nn.Module): + def forward(self, x): + return torch.mean(x, dim=(2, 3), keepdim=True) + + model = Network() + x = torch.rand(1, 3, 256, 256) + traced_model = torch.jit.trace(model, x) + input_x = ct.TensorType( + shape=(1, 3, ct.RangeDim(default=256), ct.RangeDim(default=256)), + name="input", + ) + cml = ct.convert( + traced_model, + inputs=[input_x], + outputs=[ct.TensorType(name="out")], + convert_to="mlprogram", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + input_dict = {"input": np.random.rand(1, 3, 112, 112)} + + if ct.utils._is_macos(): + out = cml.predict(input_dict)["out"] + assert out.shape == (1, 3, 1, 1) + + +class TestAffineGrid(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "x_shape_and_target_size", + "sampling_mode", + "padding_mode", + "align_corners", + ] + ), + itertools.product( + compute_units, + backends, + [ + # shape format: (Batch, Channel, Height, Width) + [(1, 1, 3, 3), (1, 1, 3, 3)], # no size change + [(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling + [(3, 1, 6, 6), (3, 1, 8, 8)], # up-sampling + ], + ["bilinear"], + ["zeros"], + [True], + ), + ) + def test( + self, + compute_unit, + backend, + x_shape_and_target_size, + sampling_mode, + padding_mode, + align_corners, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape, target_size = x_shape_and_target_size + theta = torch.rand((x_shape[0], 2, 3)) + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.affine_grid = torch.nn.functional.affine_grid + self.grid_sample = torch.nn.functional.grid_sample + + def forward(self, x): + grid = self.affine_grid( + theta=theta, + size=target_size, + align_corners=align_corners, + ) + x = self.grid_sample( + x, + grid=grid, + mode=sampling_mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + return x + + 
model = TestModule() + self.run_compare_torch( + x_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGridSample(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, data_grid_shapes, mode, padding_mode, align_corners", + itertools.product( + compute_units, + backends, + [ + # Input shape format: (Batch, C, Hin, Win) + # Grid shape format: (Batch, Hout, Wout, 2) + [(1, 1, 3, 3), (1, 3, 3, 2)], # no size change + [(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling + [(3, 1, 6, 6), (3, 8, 8, 2)], # up-sampling + ], + ["bilinear", "nearest"], + ["zeros", "border", "reflection"], + [True, False], + ), + ) + def test( + self, + compute_unit, + backend, + data_grid_shapes, + mode, + padding_mode, + align_corners, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + params = { + "mode": mode, + "padding_mode": padding_mode, + "align_corners": align_corners, + } + model = ModuleWrapper(function=torch.nn.functional.grid_sample, kwargs=params) + self.run_compare_torch( + data_grid_shapes, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestFrac(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_frac(self, compute_unit, backend, shape): + model = ModuleWrapper(function=torch.frac) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + rand_range=(-10.0, 10.0), + ) + + +class TestNLLLoss(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, reduction", + itertools.product( + compute_units, + backends, + ["none", "sum", "mean"], + ), + ) + def test_nllloss( + self, + compute_unit, + backend, + reduction, + ): + class NLLLossModel(nn.Module): + def __init__(self): + super(NLLLossModel, self).__init__() + self.loss = nn.NLLLoss(reduction=reduction) + + def forward(self, x, target): + loss = self.loss(x, target) + return loss + + x = torch.randn(3, 5) + target = torch.tensor([1, 0, 4]) + inputs = (x, target) + + model = NLLLossModel() + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestArgSort(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, descending", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [True, False], + ), + ) + def test_argsort(self, compute_unit, backend, shape, axis, descending): + model = ModuleWrapper( + function=torch.argsort, kwargs={"dim": axis, "descending": descending} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestSort(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, descending", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [True, False], + ), + ) + def test_sort(self, compute_unit, backend, shape, axis, descending): + model = ModuleWrapper( + function=torch.sort, kwargs={"dim": axis, "descending": descending} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestSelu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, inplace", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_selu(self, compute_unit, backend, inplace): + x = 
torch.tensor([-6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0]) + model = torch.nn.SELU(inplace=inplace) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMv(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, matrix_shape", + itertools.product(compute_units, backends, [(2, 3), (10, 12), (10, 1), (1, 5)]), + ) + def test_mv(self, compute_unit, backend, matrix_shape): + model = ModuleWrapper(function=torch.mv) + + matrix = generate_input_data(matrix_shape) + vector_length = matrix_shape[-1] + vector = generate_input_data((vector_length,)) + + TorchBaseTest.run_compare_torch( + (matrix, vector), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +@pytest.mark.skip( + reason="rdar://100332029 ([PyTorch] cos_similarity unittest is failing stochastically)" +) +class TestCosineSimilarity(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim, eps, shape", + itertools.product( + compute_units, + backends, + [0, -1], + [0.1, 1e-5, 1e-8], + COMMON_SHAPES, + ), + ) + def test_cosine_similarity(self, compute_unit, backend, dim, eps, shape): + class CosineSimilarity(nn.Module): + def __init__(self, dim, eps): + super(CosineSimilarity, self).__init__() + self.cossim = torch.nn.CosineSimilarity(dim=dim, eps=eps) + + def forward(self, x, y): + out = self.cossim(x, y) + return out + + model = CosineSimilarity(dim, eps) + input1 = generate_input_data(shape) + input2 = generate_input_data(shape) + + TorchBaseTest.run_compare_torch( + [input1, input2], + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestDot(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, vector_length", + itertools.product(compute_units, backends, [1, 5, 11]), + ) + def test_dot(self, compute_unit, backend, vector_length): + model = ModuleWrapper(function=torch.dot) + + vector1 = generate_input_data((vector_length,)) + vector2 = generate_input_data((vector_length,)) + + TorchBaseTest.run_compare_torch( + (vector1, vector2), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestOuter(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_vector_length, y_vector_length", + itertools.product( + compute_units, + backends, + [1, 5], + [1, 3], + ), + ) + def test_outer(self, compute_unit, backend, x_vector_length, y_vector_length): + model = ModuleWrapper(function=torch.outer) + + vector1 = generate_input_data((x_vector_length,)) + vector2 = generate_input_data((y_vector_length,)) + + TorchBaseTest.run_compare_torch( + (vector1, vector2), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestCross(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape_dim", + itertools.product(compute_units, backends, [((3,), 0), ((4, 3, 2), 1)]), + ) + def test_cross(self, compute_unit, backend, shape_dim): + shape = shape_dim[0] + dim = shape_dim[1] + + class CrossModel(nn.Module): + def forward(self, x, y): + return torch.cross(x, y, dim) + + x = generate_input_data(shape) + y = generate_input_data(shape) + model = CrossModel().eval() + torch_out = model(x, y) + self.run_compare_torch( + (x, y), + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestNormalize(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + 
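# nn.functional.normalize defaults to an L2 norm along dim=1
+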
itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_normalize(self, compute_unit, backend, shape): + model = ModuleWrapper(function=nn.functional.normalize) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, keepdim", + itertools.product(compute_units, backends, COMMON_SHAPES, [True, False]), + ) + def test_frobenius_norm(self, compute_unit, backend, shape, keepdim): + num_dims = len(shape) + for dim in range(-num_dims, num_dims): + model = ModuleWrapper( + function=torch.norm, kwargs={"keepdim": keepdim, "dim": dim} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, p, keepdim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0, 1, 2, 3, np.inf, -np.inf], + [True, False], + ), + ) + def test_number_norm(self, compute_unit, backend, shape, p, keepdim): + for dim in (-1, 0, 1): + model = ModuleWrapper( + function=torch.norm, kwargs={"p": p, "keepdim": keepdim, "dim": dim} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestWeightNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, in_out_features", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 10), (20, 10)], + ), + ) + def test_linear(self, compute_unit, backend, in_out_features): + in_features, out_features = in_out_features + + for dim in (None, -2, -1, 0, 1): + model = nn.utils.weight_norm(nn.Linear(in_features, out_features), dim=dim) + TorchBaseTest.run_compare_torch( + (in_features,), + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-3, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_conv2d(self, compute_unit, backend): + x = torch.randn(20, 16, 50, 100) + + for dim in (None,) + tuple(range(-4, 4)): + model = nn.utils.weight_norm(nn.Conv2d(16, 33, 3), dim=dim) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + atol=1e-3, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_conv3d(self, compute_unit, backend): + x = torch.randn(20, 16, 5, 50, 100) + + for dim in (None,) + tuple(range(-5, 5)): + model = nn.utils.weight_norm(nn.Conv3d(16, 33, 3), dim=dim) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + atol=1e-3, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLinAlgNorms(TorchBaseTest): + def _is_valid_config(self, shape, order, dim): + if isinstance(dim, tuple): + if isinstance(order, int) and (order == 0 or order > 2): + return False + elif isinstance(dim, int): + if order == "fro": + return False + elif dim is None: + if order is not None: + if len(shape) > 2: + return False + elif ( + len(shape) == 2 + and not isinstance(order, str) + and (order == 0 or order > 2) + ): + return False + elif len(shape) == 1 and isinstance(order, str): + return False + return True + + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 0, 1, 2, 3, np.inf, -np.inf, "fro", None], + [True, False], + [-1, 0, 
1, (0, 1), (0, -1), None], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + if not self._is_valid_config(shape, order, dim): + pytest.skip() + if ( + isinstance(order, int) + and abs(order) == 2 + and ((dim is None and len(shape) == 2) or isinstance(dim, tuple)) + ): + pytest.xfail("Matrix norm for order 2 and -2 is not implemented") + model = ModuleWrapper( + function=torch.linalg.norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestLinAlgMatrixNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 1, 2, np.inf, -np.inf, "fro", "nuc"], + [True, False], + [(0, 1), (0, -1), (1, 2), (0, 2), (2, 3)], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + if dim[-1] > len(shape) - 1: + pytest.skip() + if order == "nuc" or (type(order) != str and abs(order) == 2): + pytest.xfail("Matrix norm for order 2, -2 and nuc is not implemented") + model = ModuleWrapper( + function=torch.linalg.matrix_norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, model, backend=backend, compute_unit=compute_unit, atol=1e-2 + ) + + +class TestLinAlgVectorNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 0, 1, 2, np.inf, -np.inf], + [True, False], + [-1, 0, 1, (0, 1), (0, -1), None], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + model = ModuleWrapper( + function=torch.linalg.vector_norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestHardswish(TorchBaseTest): + class HardswishModel(nn.Module): + def __init__(self, inplace=False): + super(TestHardswish.HardswishModel, self).__init__() + self.activation = nn.Hardswish(inplace=inplace) + + def forward(self, x): + return self.activation(x) + + def test_longer_range_input_element_values(self): + x = torch.tensor([-6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0]) + + model = TestHardswish.HardswishModel() + TorchBaseTest.run_compare_torch(x, model, input_as_shape=False) + + model = TestHardswish.HardswishModel(inplace=True) + TorchBaseTest.run_compare_torch(x, model, input_as_shape=False) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_additional_shapes_and_backends(self, compute_unit, backend, shape): + model = TestHardswish.HardswishModel() + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestBatchNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps, affine", + itertools.product( + compute_units, backends, [5, 3, 1], [0.1, 1e-05], [True, False] + ), + ) + def test_batchnorm(self, compute_unit, backend, num_features, eps, affine): + model = nn.BatchNorm2d(num_features, eps, affine=affine) + self.run_compare_torch( + (6, num_features, 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, affine", + 
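# affine toggles BatchNorm2d's learnable scale and shift parameters
+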
itertools.product(compute_units, backends, [True, False]), + ) + def test_batchnorm_2d_with_conv(self, compute_unit, backend, affine): + class CRNNBase(nn.Module): + def __init__(self, ch_in, ch_out, kernel_size=3): + super(CRNNBase, self).__init__() + self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size) + self.norm = nn.BatchNorm2d(ch_out, affine=affine) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + model = CRNNBase(ch_in=6, ch_out=16) + self.run_compare_torch( + (1, 6, 15, 30), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps, affine, dynamic_input", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [5, 1], + [0.1, 1e-05], + [True, False], + ["None", "Batch", "Height", "Width", "Depth", "All"], + ), + ) + def test_batchnorm_3d( + self, compute_unit, backend, num_features, eps, affine, dynamic_input + ): + model = nn.BatchNorm3d(num_features, eps, affine=affine) + input_shape = (6, num_features, 2, 3, 4) + if dynamic_input == "None": + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + else: + if dynamic_input == "Batch": + converter_input_type = [ + TensorType( + shape=(RangeDim(1, 10), num_features, 2, 3, 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Height": + converter_input_type = [ + TensorType( + shape=(6, num_features, RangeDim(1, 10), 3, 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Width": + converter_input_type = [ + TensorType( + shape=(6, num_features, 2, RangeDim(1, 10), 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Depth": + converter_input_type = [ + TensorType( + shape=(6, num_features, 2, 3, RangeDim(1, 10)), dtype=np.float32 + ) + ] + elif dynamic_input == "All": + converter_input_type = [ + TensorType( + shape=( + RangeDim(1, 10), + num_features, + RangeDim(1, 10), + RangeDim(1, 10), + RangeDim(1, 10), + ), + dtype=np.float32, + ) + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_features, eps, training", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [3, 4, 5], + [5, 1], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm_dynamic( + self, compute_unit, backend, rank, num_features, eps, training + ): + model = ModuleWrapper( + nn.functional.batch_norm, + { + "training": training, + "eps": eps, + }, + ) + input_shape = [6, num_features, 3, 4, 5] + input_shape = input_shape[:rank] + _input = torch.randn(*input_shape) + _mean = torch.randn(num_features) + _var = torch.randn(num_features) + + inputs = (_input, _mean, _var) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, has_weight, has_bias, has_running_mean, has_running_var", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [True, False], + ), + ) + def test_batchnorm_dynamic_stress( + self, + compute_unit, + backend, + has_weight, + has_bias, + has_running_mean, + has_running_var, + ): + num_features = 5 + input_shape = (3, num_features, 2) + + weight = torch.randn(num_features) if 
has_weight else None + bias = torch.randn(num_features) if has_bias else None + running_mean = torch.randn(num_features) if has_running_mean else None + running_var = torch.randn(num_features) if has_running_var else None + + class Model(torch.nn.Module): + def forward(self, x): + res = torch.nn.functional.batch_norm( + input=x, + running_mean=running_mean, + running_var=running_var, + weight=weight, + bias=bias, + training=True, + momentum=0.0, + eps=1e-05, + ) + return res + + self.run_compare_torch( + input_shape, + Model(), + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, affine", + itertools.product(compute_units, backends, [True, False]), + ) + def test_batchnorm_1d_with_conv(self, compute_unit, backend, affine): + class CRNNBase(nn.Module): + def __init__(self, ch_in, ch_out, kernel_size=3): + super(CRNNBase, self).__init__() + self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size) + self.norm = nn.BatchNorm1d(ch_out, affine=affine) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + model = CRNNBase(ch_in=6, ch_out=16) + self.run_compare_torch( + (1, 6, 15), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, eps, affine", + itertools.product( + compute_units, + backends, + [(1, 10), (4, 6), (10, 1)], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm1d_rank2(self, compute_unit, backend, shape, eps, affine): + N, C = shape + batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval() + self.run_compare_torch( + (N, C), + batchnorm, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, eps, affine", + itertools.product( + compute_units, + backends, + [(4, 8, 2), (1, 5, 3), (5, 10, 1), (6, 1, 4)], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm1d_rank3(self, compute_unit, backend, shape, eps, affine): + N, C, L = shape + batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval() + self.run_compare_torch( + (N, C, L), + batchnorm, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestInstanceNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps", + itertools.product(compute_units, backends, [5, 2, 1], [0.1, 1e-05]), + ) + def test_instancenorm(self, compute_unit, backend, num_features, eps): + model = nn.InstanceNorm2d(num_features, eps) + self.run_compare_torch( + (6, num_features, 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_features", + itertools.product(compute_units, backends, [5, 2, 1]), + ) + def test_instancenorm_1d(self, compute_unit, backend, num_features): + model = nn.InstanceNorm1d(num_features) + self.run_compare_torch( + (6, num_features, 10), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGroupNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, group_features, eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm(self, compute_unit, backend, group_features, eps, affine): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (6, group_features[1], 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, group_features, 
eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm_rank3_input( + self, compute_unit, backend, group_features, eps, affine + ): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (6, group_features[1], 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, group_features, eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm_rank2_input( + self, compute_unit, backend, group_features, eps, affine + ): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (4, group_features[1]), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLinear(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product( + compute_units, + backends, + [5], + [10], + [True, False], + ), + ) + def test_linear_rank1_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (in_features,), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10, 25], [3, 6], [True, False]), + ) + def test_linear_rank2_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (1, in_features), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10], [6], [True, False]), + ) + def test_linear_rank3_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (1, 3, in_features), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10], [6], [True, False]), + ) + def test_linear_rank4_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch((1, 5, 3, in_features), model, backend=backend) + + +class TestConv(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "length", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 0, 1], + [1, 2, 3], + [ + (5, 1, 1, 1, 1, True), + (3, 1, 1, 1, 3, False), + (4, 3, 3, 2, 1, True), + (7, 3, 3, 1, 1, False), + (5, 3, 3, 1, 1, True), + (3, 3, 3, 1, 1, False), + (3, 3, 3, 1, 3, True), + (7, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution1d( + self, + compute_unit, + backend, + padding, + stride, + length, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + # 
configuration not supported + return + model = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, length), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 1, 0], + [1, 2, 3], + [ + (5, 3, 1, 1, 1, 1, True), + (3, 3, 1, 1, 1, 3, False), + (4, 3, 3, 3, 2, 1, True), + (7, 3, 3, 3, 1, 1, False), + (5, 5, 3, 3, 1, 1, True), + (3, 5, 3, 3, 1, 1, False), + (3, 5, 3, 3, 1, 3, True), + (7, 5, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution2d( + self, + compute_unit, + backend, + padding, + stride, + height, + width, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + return + model = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, height, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "depth", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 1, 0], + [1, 2, 3], + [ + (5, 3, 2, 1, 1, 1, 1, True), + (3, 3, 1, 1, 1, 1, 3, False), + (4, 3, 3, 3, 3, 2, 1, True), + (7, 3, 4, 3, 3, 1, 1, False), + (5, 5, 3, 3, 3, 1, 1, True), + (3, 5, 1, 3, 3, 1, 1, False), + (3, 5, 4, 3, 3, 1, 3, True), + (7, 5, 6, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution3d( + self, + compute_unit, + backend, + padding, + stride, + depth, + height, + width, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + return + model = nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, depth, height, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestDynamicConv(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 1, 1, 1, 2, 1), + (3, 1, 1, 1, 2, 3), + (4, 3, 3, 1, 2, 1), + (7, 3, 3, 1, 3, 1), + (5, 3, 3, 2, 2, 1), + (3, 3, 3, 1, 3, 1), + (3, 3, 3, 1, 3, 3), + (7, 3, 3, 3, 1, 3), + ], + ) + ], + ) + def test_convolution1d( + self, + compute_unit, + backend, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + ): + class DynamicConv(nn.Module): + def forward(self, input_data, weights): + 
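# the kernel arrives as a runtime input rather than a stored parameter, i.e. a dynamic conv weight
+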
return nn.functional.conv1d( + input_data, weights, stride=stride, padding=padding + ) + + model = DynamicConv() + input_shape = [ + (1, in_channels, width), + (out_channels, int(in_channels / groups), kernel_size), + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 3, 1, 1, 1, 2, 0, 1), + (3, 3, 1, 1, 1, 2, 1, 3), + (4, 3, 3, 3, 1, 2, 0, 1), + (7, 3, 3, 3, 1, 3, 0, 1), + (5, 5, 3, 3, 2, 1, 0, 1), + (3, 5, 3, 3, 1, 3, 0, 1), + (3, 5, 3, 3, 1, 3, 1, 3), + (7, 5, 3, 3, 2, 3, 1, 3), + ], + ) + ], + ) + def test_convolution2d( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + class DynamicConv(nn.Module): + def forward(self, input_data, weights): + # pass the parametrized dilation through, so the dilation cases are actually exercised + return nn.functional.conv2d( + input_data, weights, stride=stride, padding=padding, dilation=dilation + ) + + model = DynamicConv() + + input_shape = [ + (1, in_channels, height, width), + (out_channels, int(in_channels / groups), kernel_size, kernel_size), + ] + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestConvTranspose(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (3, 1, 1, 1, 2, 0, 1), + (3, 1, 1, 1, 2, 1, 3), + (3, 3, 3, 1, 2, 0, 1), + (3, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 1, 3), + (5, 3, 3, 1, 3, 1, 3), + ], + ) + ], + ) + def test_convolution_transpose1d( + self, + compute_unit, + backend, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + model = nn.ConvTranspose1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + self.run_compare_torch( + (1, in_channels, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 5, 1, 1, 1, 2, 0, 1), + (5, 5, 1, 1, 1, 2, 1, 3), + (5, 5, 3, 3, 1, 2, 0, 1), + (5, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 1, 3), + (6, 5, 3, 3, 1, 3, 1, 3), + ], + ) + ], + ) + def test_convolution_transpose2d( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + self.run_compare_torch( + (1, in_channels, height, width), + model, + 
backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic_input", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_convolution_transpose2d_dynamic_input( + self, + compute_unit, + backend, + dynamic_input, + ): + in_channels = 5 + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=10, + kernel_size=3, + stride=2, + padding=1, + dilation=3, + ) + in_height = 256 + in_width = 512 + input_shape = (1, in_channels, in_height, in_width) + + if dynamic_input: + converter_input_type = [ + TensorType( + shape=(1, in_channels, RangeDim(256, -1), RangeDim(256, -1)), + dtype=np.float32, + ) + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + else: + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + "output_padding", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 5, 1, 1, 1, 2, 1, 1, 1), + (5, 5, 1, 1, 1, 2, 2, 3, 2), + (5, 5, 3, 3, 1, 2, 0, 1, 0), + (5, 5, 3, 3, 1, 3, 1, 1, 1), + (6, 5, 3, 3, 1, 3, 2, 1, 2), + (6, 5, 3, 3, 1, 3, 1, 1, 1), + (6, 5, 3, 3, 1, 3, 2, 3, 2), + (6, 5, 3, 3, 1, 3, 3, 3, 3), + ], + ) + ], + ) + def test_convolution_transpose2d_output_padding( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + output_padding, + groups=1, + ): + + # Output padding must be less than either stride or dilation + # Skip testing invalid combinations + if isinstance(output_padding, int): + if output_padding >= stride and output_padding >= dilation: + return + elif isinstance(output_padding, tuple): + for _output_padding in output_padding: + if _output_padding >= stride and _output_padding >= dilation: + return + + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + output_padding=output_padding, + ) + self.run_compare_torch((1, in_channels, height, width), model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "depth", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (3, 5, 5, 1, 1, 1, 2, 0, 1), + (3, 5, 5, 1, 1, 1, 2, 1, 3), + (3, 5, 5, 3, 3, 1, 2, 0, 1), + (3, 5, 5, 3, 3, 1, 1, 0, 2), + (4, 6, 5, 3, 3, 1, 3, 0, 1), + (4, 6, 5, 3, 3, 1, 3, 1, 2), + (4, 6, 5, 3, 3, 1, 3, 1, 3), + ], + ) + ] + + [ + pytest.param( + ct.ComputeUnit.CPU_ONLY, + "neuralnetwork", + 5, + 5, + 1, + 1, + 3, + 4, + 1, + 1, + 2, + marks=pytest.mark.xfail, + ), + pytest.param( + ct.ComputeUnit.CPU_ONLY, + "neuralnetwork", + 5, + 5, + 1, + 1, + 3, + 2, + 1, + 3, + 2, + marks=pytest.mark.xfail, + ), + ], + ) + def test_convolution_transpose3d( + self, + compute_unit, + backend, + depth, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + ): + model = nn.ConvTranspose3d( + in_channels=in_channels, + 
out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + self.run_compare_torch( + (1, in_channels, depth, height, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +def _is_float_value(x, threshold=0.001): + return x - np.floor(x) > threshold + + +class TestUpsample(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_size, align_corners", + itertools.product( + compute_units, + backends, + [1, 3, 10, 190], + [True, False], + ), + ) + def test_upsample_linear1d_with_output_size( + self, compute_unit, backend, output_size, align_corners + ): + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "size": output_size, + "mode": "linear", + "align_corners": align_corners, + }, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scale, align_corners, recompute_scale_factor", + itertools.product( + compute_units, backends, [2, 0.5, 5.3], [True, False], [True, False] + ), + ) + def test_upsample_linear1d_with_scales( + self, compute_unit, backend, scale, align_corners, recompute_scale_factor + ): + Height = 8 + input_shape = (1, 3, Height) + output_h = Height * scale + is_h_float = _is_float_value(output_h) + + if is_h_float and not align_corners and not recompute_scale_factor: + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": scale, + "mode": "linear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales, align_corners, recompute_scale_factor", + itertools.product( + compute_units, backends, [2, 0.7, 3.6], [True, False], [True, False] + ), + ) + def test_upsample_linear1d_with_scales_dynamic( + self, compute_unit, backend, scales, align_corners, recompute_scale_factor + ): + + is_float = _is_float_value(scales) + input_shape = (1, 3, 22) + + if is_float and not align_corners and not recompute_scale_factor: + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": scales, + "mode": "linear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + converter_input_type = [ + TensorType(shape=(1, 3, RangeDim(default=22)), dtype=np.float32) + ] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check that the scale factors are integers + if backend[0] == "neuralnetwork" and not is_float: + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, output_size", + itertools.product( + compute_units, + backends, + [10, 170], + ), + ) + def test_upsample_nearest1d_with_output_size( + self, compute_unit, backend, output_size + ): + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + {"size": output_size, "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + 
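# the scale-factor variants below skip fractional factors on the neuralnetwork backend (see the early returns)
+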
@pytest.mark.parametrize( + "compute_unit, backend, scales", + itertools.product(compute_units, backends, [2, 3, 4.5]), + ) + def test_upsample_nearest1d_with_scales(self, compute_unit, backend, scales): + if backend[0] == "neuralnetwork": + if isinstance(scales, float): + return # Skip fractional scale factor tests for neuralnetwork + + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + {"scale_factor": scales, "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales", + itertools.product(compute_units, backends, [2, 3]), + ) + def test_upsample_nearest1d_with_scales_dynamic( + self, compute_unit, backend, scales + ): + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": scales, + "mode": "nearest", + "recompute_scale_factor": True, + }, + ) + converter_input_type = [TensorType(shape=(1, 3, RangeDim()), dtype=np.float32)] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check that the scale factors are integers + if backend[0] == "neuralnetwork": + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, output_size, align_corners", + itertools.product( + compute_units, + backends, + [ + (10, 10), + # PyTorch has a bug for the following parameter: + # (1, 1), + # See: https://github.com/pytorch/pytorch/issues/71188 + (2, 3), + (190, 170), + ], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_output_size( + self, compute_unit, backend, output_size, align_corners + ): + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "size": output_size, + "mode": "bilinear", + "align_corners": align_corners, + }, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w, align_corners, recompute_scale_factor", + itertools.product( + compute_units, + backends, + [2, 0.5, 4.1], + [3, 0.5, 5.3], + [True, False], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_scales( + self, + compute_unit, + backend, + scales_h, + scales_w, + align_corners, + recompute_scale_factor, + ): + + Height = 8 + Width = 22 + input_shape = (1, 3, Height, Width) + output_h = Height * scales_h + output_w = Width * scales_w + is_h_float = _is_float_value(output_h) + is_w_float = _is_float_value(output_w) + + if ( + (is_h_float or is_w_float) + and not align_corners + and not recompute_scale_factor + ): + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "bilinear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, output_size", + itertools.product( + compute_units, + backends, + [(10, 10), (190, 170)], + ), + ) + def test_upsample_nearest2d_with_output_size( + self, compute_unit, backend, output_size + ): + input_shape = (1, 3, 10, 10) + model = 
ModuleWrapper( + nn.functional.interpolate, + {"size": output_size, "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w", + itertools.product(compute_units, backends, [2, 3, 4.5], [4, 5, 5.5]), + ) + def test_upsample_nearest2d_with_scales( + self, compute_unit, backend, scales_h, scales_w + ): + if backend[0] == "neuralnetwork": + if isinstance(scales_h, float) or isinstance(scales_w, float): + return # Skip fractional scale factor tests for neuralnetwork + + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + {"scale_factor": (scales_h, scales_w), "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w", + itertools.product(compute_units, backends, [2, 3], [4, 5]), + ) + def test_upsample_nearest2d_with_scales_dynamic( + self, compute_unit, backend, scales_h, scales_w + ): + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "nearest", + "recompute_scale_factor": True, + }, + ) + converter_input_type = [ + TensorType(shape=(1, 3, RangeDim(), RangeDim()), dtype=np.float32) + ] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check that the scale factors are integers + if backend[0] == "neuralnetwork": + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w, align_corners, recompute_scale_factor", + itertools.product( + compute_units, + backends, + [2, 3.6], + [4, 0.7], + [True, False], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_scales_dynamic( + self, + compute_unit, + backend, + scales_h, + scales_w, + align_corners, + recompute_scale_factor, + ): + is_h_float = _is_float_value(scales_h) + is_w_float = _is_float_value(scales_w) + input_shape = (1, 3, 9, 22) + + if ( + (is_h_float or is_w_float) + and not align_corners + and not recompute_scale_factor + ): + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "bilinear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + converter_input_type = [ + TensorType( + shape=(1, 3, RangeDim(default=9), RangeDim(default=22)), + dtype=np.float32, + ) + ] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check that the scale factors are integers + if backend[0] == "neuralnetwork" and not is_h_float and not is_w_float: + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + +class TestEmpty(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_empty_like(self, compute_unit, backend, shape): + class TestModel(nn.Module): + def forward(self, x): + y =
torch.empty_like(x) + # The value of y is nondeterministic, so return the length instead + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, TestModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_new_empty(self, compute_unit, backend, shape): + class TestModel(nn.Module): + def forward(self, _): + tensor = torch.ones(()) + y = tensor.new_empty(shape) + # The value of y is nondeterministic, so return the length instead + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, + TestModel(), + backend=backend, + compute_unit=compute_unit, + ) + + +class TestAvgPool(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 5), 1, 1, 0, True, True), + ((1, 3, 5), 3, 1, 0, False, True), + ((1, 3, 5), 1, 2, 1, False, False), + ((1, 3, 5), 3, 2, 1, False, True), + ((1, 3, 5), 1, 2, 0, False, True), + ((1, 3, 10), 1, 1, 1, False, False), + ((1, 3, 10), 3, 1, 0, False, False), + ((1, 3, 10), 1, 2, 1, True, True), + ((1, 3, 10), 3, 2, 0, True, False), + ((1, 3, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool1d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + model = nn.AvgPool1d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 5, 5), 1, 1, 0, True, True), + ((1, 3, 5, 5), 3, 1, 0, False, True), + ((1, 3, 5, 5), 1, 2, 1, False, False), + ((1, 3, 5, 5), 3, 2, 1, False, True), + ((1, 3, 5, 5), 1, 2, 0, False, True), + ((1, 3, 10, 10), 1, 1, 1, False, False), + ((1, 3, 10, 10), 3, 1, 0, False, False), + ((1, 3, 10, 10), 1, 2, 1, True, True), + ((1, 3, 10, 10), 3, 2, 0, True, False), + ((1, 3, 10, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool2d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + model = nn.AvgPool2d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 11, 5, 5), 1, 1, 0, True, True), + ((1, 3, 11, 5, 5), 3, 1, 0, False, True), + ((1, 3, 11, 5, 5), 1, 2, 1, False, False), + ((1, 3, 11, 5, 5), 3, 2, 1, False, True), + ((1, 3, 11, 5, 5), 1, 2, 0, False, True), + ((1, 3, 6, 10, 10), 1, 1, 1, False, False), +
((1, 3, 6, 10, 10), 3, 1, 0, False, False), + ((1, 3, 6, 10, 10), 1, 2, 1, True, True), + ((1, 3, 6, 10, 10), 3, 2, 0, True, False), + ((1, 3, 6, 10, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool3d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + if include_pad and ceil_mode and stride > 1: + # skip: MIL/CoreML does not support this configuration + pytest.xfail( + "rdar://73723194 (Support 3D Avg pooling with ceil_mode=True and include_pad = True, in MIL)" + ) + model = nn.AvgPool3d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestAdaptiveMaxPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_size, magnification, delta, depth, n", + itertools.product( + compute_units, + backends, + [(1, 1), (3, 2)], + [1, 2, 7], + [0, 11], + [1, 2, 3], + [1, 2], + ), + ) + def test_adaptive_max_pool2d( + self, compute_unit, backend, output_size, magnification, delta, depth, n + ): + # input_size = output_size * magnification + delta + input_size = ( + delta + magnification * output_size[0], + delta + magnification * output_size[1], + ) + in_shape = (n, depth) + input_size + model = nn.AdaptiveMaxPool2d(output_size) + self.run_compare_torch( + in_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestAdaptiveAvgPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_size, magnification, delta, depth, n", + itertools.product( + compute_units, + backends, + [(1, 1), (3, 2)], + [1, 2, 7], + [0, 11], + [1, 2, 3], + [1, 2], + ), + ) + def test_adaptive_avg_pool2d( + self, compute_unit, backend, output_size, magnification, delta, depth, n + ): + # input_size = output_size * magnification + delta + input_size = ( + delta + magnification * output_size[0], + delta + magnification * output_size[1], + ) + in_shape = (n, depth) + input_size + model = nn.AdaptiveAvgPool2d(output_size) + self.run_compare_torch( + in_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMaxPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 15), (1, 1, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool1d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + if input_shape[-1] % 2 == 0: + # TODO: is this a valid case? 
+ # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool1d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 15, 15), (1, 1, 7, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool2d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + for r in range(2, 4): + if input_shape[r] % 2 == 0: + # TODO: is this a valid case? + # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool2d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool3d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + for r in range(2, 5): + if input_shape[r] % 2 == 0: + # TODO: is this a valid case? 
+ # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool3d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestMaximumMinimum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes, mode", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 7, 3), (2, 5, 7, 3)], + [(3, 2, 9), (3, 2, 9)], + [(1, 2, 3), (1,)], + [(1,), (2, 5, 6, 7)], + [(1, 2, 1), (3, 4, 2, 5)], + ], + ["minimum", "maximum"], + ), + ) + def test_minimum_maximum(self, compute_unit, backend, input_shapes, mode): + class TestModel(torch.nn.Module): + def forward(self, x, y): + if mode == "minimum": + return torch.minimum(x, y) + elif mode == "maximum": + return torch.maximum(x, y) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + model = TestModel() + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + +class TestAMaxAMin(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes, mode, reduce_dim, keepdim", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 7, 3)], + [(3, 2, 9)], + [(1,)], + ], + ["minimum", "maximum"], + [0, 1, 2, 3, [0, 1], [0, 1, 2], [0, 1, 2, 3]], + [True, False], + ), + ) + def test_minimum_maximum(self, compute_unit, backend, input_shapes, mode, reduce_dim, keepdim): + class TestModel(torch.nn.Module): + def forward(self, input): + if type(reduce_dim) == int: + reduce_dim_clamped = min(input.dim() - 1, reduce_dim) + else: + reduce_dim_clamped = reduce_dim[:input.dim()] + if mode == "minimum": + return torch.amin(input, reduce_dim_clamped, keepdim) + elif mode == "maximum": + return torch.amax(input, reduce_dim_clamped, keepdim) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + model = TestModel() + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestPoolSymbolicInput(TorchBaseTest): + def test_max_pool(self): + model = nn.MaxPool2d( + kernel_size=1, + stride=2, + padding=0, + dilation=1, + ceil_mode=True, + ) + input_shape = (1, 1, 11, 11) + converter_input_type = [ + TensorType(shape=(1, 1, RangeDim(), RangeDim()), dtype=np.float32) + ] + self.run_compare_torch( + input_shape, + model, + backend=backends[0], + converter_input_type=converter_input_type, + ) + + def test_avg_pool(self): + model = nn.AvgPool2d( + kernel_size=2, + stride=2, + padding=1, + count_include_pad=True, + ceil_mode=True, + ) + input_shape = (1, 2, 15, 15) + converter_input_type = [ + TensorType(shape=(1, 2, RangeDim(), RangeDim()), dtype=np.float32) + ] + self.run_compare_torch( + input_shape, + model, + backend=backends[0], + converter_input_type=converter_input_type, + ) + + +class TestLSTM(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "dropout", + "bidirectional", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 0.3, True), + (1, 1, 1, False, True, 0.3, False), + (1, 1, 1, False, True, 0.3, True), + (3, 1, 5, True, False, 0.3, False), + (3, 1, 5, True, True, 0.3, True), + (3, 7, 5, True, False, 0.3, False), + (3, 7, 5, False, True, 0.3, True), + (3, 7, 5, 
False, True, 0.3, False), + ], + ) + ], + ) + def test_lstm( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + bidirectional, + ): + model = nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=bidirectional, + ) + SEQUENCE_LENGTH = 3 + BATCH_SIZE = 2 + model.eval() + + num_directions = int(bidirectional) + 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + + inputs = (_input, (h0, c0)) + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestRNN(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "dropout", + "activation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 0.3, "tanh"), + (1, 1, 1, False, True, 0.3, "relu"), + (1, 1, 1, False, True, 0.3, "tanh"), + (3, 1, 5, True, False, 0.3, "relu"), + (3, 1, 5, True, True, 0.3, "tanh"), + (3, 7, 5, True, False, 0.3, "relu"), + (3, 7, 5, False, True, 0.3, "relu"), + (3, 7, 5, False, True, 0.3, "tanh"), + ], + ) + ], + ) + def test_rnn( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + activation, + ): + SEQUENCE_LENGTH = 10 + BATCH_SIZE = 3 + model = nn.RNN( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + dropout=dropout, + nonlinearity=activation, + bidirectional=False, # bi-directional simple RNN not supported + ) + model.eval() + num_directions = 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + inputs = (_input, h0) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGRU(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "sequence_length", + "bidirectional", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 10, True), + (1, 1, 1, False, True, 10, True), + (1, 1, 1, False, True, 1, False), + (3, 1, 5, True, False, 10, False), + (3, 1, 5, True, True, 10, True), + (3, 7, 5, True, True, 10, False), + (3, 7, 5, False, True, 10, True), + (3, 7, 5, False, True, 1, True), + ], + ) + ], + ) + def test_gru( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + sequence_length, + bidirectional, + ): + DROPOUT = 0.3 + BATCH_SIZE = 3 + model = nn.GRU( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, +
dropout=DROPOUT, + bidirectional=bidirectional, + ) + model.eval() + num_directions = int(bidirectional) + 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, sequence_length, input_size) + else: + _input = torch.randn(sequence_length, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + + inputs = (_input, h0) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLSTMWithPackedSequence(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, pack_batch_first, pad_batch_first, LSTM_batch_first, pad_value", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [-1, 0], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + pack_batch_first, + pad_batch_first, + LSTM_batch_first, + pad_value, + ): + from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + + input_size = 4 + hidden_size = 6 + num_layers = 1 + + class Encoder(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=LSTM_batch_first, + bidirectional=False, + dropout=0.0, + ) + + def forward(self, batch_in, seq_lengths): + packed_input = pack_padded_sequence( + batch_in, seq_lengths, batch_first=pack_batch_first + ) + output_packed, (hidden, _) = self.lstm(packed_input) + output, _ = pad_packed_sequence( + output_packed, padding_value=pad_value, batch_first=pad_batch_first + ) + return output + + SEQUENCE_LENGTH = 10 + BATCH_SIZE = 3 + model = Encoder() + model.eval() + + if pack_batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + seq_lengths = torch.tensor([10, 5, 1], dtype=torch.int32) + + inputs = (_input, seq_lengths) + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +# Workaround for GitHub Issue #824 +# i.e. the return h_n/c_n for a converted BLSTM are mangled. +# Therefore, just look at output 'y' (for now) which is correct. 
+class StripCellAndHidden(nn.Module): + def __init__(self, flagReturnTuple_): + super(StripCellAndHidden, self).__init__() + self.flagReturnTuple = flagReturnTuple_ + + def forward(self, x): + # Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:" + # Pass tensor when we need input for LSTM #2 as part of nn.Sequential() + return tuple(x[0]) if self.flagReturnTuple else x[0] + + +# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True +class TestStackedBLSTM(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional", + itertools.product( + compute_units, + backends, + [7], + [5], + [2], + [True, False], + [True, False], + [0.3], + [True], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + bidirectional, + ): + model = nn.Sequential( + nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True, + ), + StripCellAndHidden(False), + nn.LSTM( + input_size=2 * hidden_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True, + ), + StripCellAndHidden(True), + ) + + SEQUENCE_LENGTH = 3 + BATCH_SIZE = 2 + + # (seq_len, batch, input_size) + if batch_first: + _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + # Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824 + expected_results = model(_input) + + self.run_compare_torch( + _input, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestConcat(TorchBaseTest): + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_cat_basic(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.cat((x, x), axis=1) + return x + + model = TestNet() + self.run_compare_torch( + (1, 2, 3), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cat_input_types_promotion(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.cat((x, y), axis=1) + + input_data_x = torch.randint(low=0, high=10, size=(2, 3), dtype=torch.int32) + input_data_y = torch.rand(2, 3) + self.run_compare_torch( + [input_data_x, input_data_y], + TestNet(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + # This tests an edge case where the list of tensors to concatenate only + # has one item. NN throws an error for this case, hence why we have to + # run through the full conversion process to test it. 
+ @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cat_single_input(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.cat((x,), axis=1) + return x + + model = TestNet() + self.run_compare_torch( + (1, 3, 16, 16), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_cat_const_fold(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.tensor([[[1, 2], [2, 3], [3, 4]]]) + return torch.cat((x, x), axis=1) + + model = TestNet() + mlmodel = self.run_compare_torch( + (1, 2, 3), + model, + backend=backend, + compute_unit=compute_unit, + ) + prog = mlmodel[1]._mil_program + # The `listconstruct` is folded into a single const. + assert len(prog.find_ops(op_type="const")) == 1 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the input with shape [1, 3, 2] const is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.op.op_type == "const" and var.rank == 3 + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The `listconstruct` is not folded so there are 3 const ops. + assert len(prog.find_ops(op_type="const")) == 3 + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_concat_alias(self, compute_unit, backend): + class Outer(torch.nn.Module): + def __init__(self, net): + super(Outer, self).__init__() + self.net = net + + def forward(self, x): + x = self.net(x) + return x + + class TestNet(nn.Module): + def forward(self, x): + x = torch.concat((x, x), axis=1) + return x + + # test passes without adding alias if `Outer` is not used + model = Outer(TestNet()) + self.run_compare_torch( + (1, 3, 16, 16), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestBitwiseNot(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int", "bool"], + ), + ) + def test_bitwise_not(self, compute_unit, backend, input_type): + class TestNet(nn.Module): + def forward(self, x): + return torch.bitwise_not(x) + + model = TestNet() + if input_type == "int": + torch_in = torch.tensor([1, 2, 3, -5, 0], dtype=torch.int32) + elif input_type == "bool": + torch_in = torch.tensor([True, False, True, False]) + self.run_compare_torch( + torch_in, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestFull(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_full_dynamic(self, compute_unit, backend, rank): + class FullDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.full(x.shape, fill_value=3.14) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = FullDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + 
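+ # Note: test_full_dynamic above feeds the target shape in as runtime data + # (input_as_shape=False), so the converter has to lower torch.full to a + # dynamic fill; the static variants below use fixed input shapes, which the + # converter may fold into a const.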
@pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + backends, + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_full_static(self, compute_unit, backend, shape_val): + shape, val = shape_val + + class FullStaticModel(nn.Module): + def forward(self, x): + return torch.full(x.shape, fill_value=val) + + self.run_compare_torch( + shape, FullStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_full_like(self, compute_unit, backend, shape_val): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target not available on macOS 13") + shape, val = shape_val + + class FullLikeModel(nn.Module): + def forward(self, x): + return torch.full_like(x, fill_value=val) + + self.run_compare_torch( + shape, + FullLikeModel().eval(), + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + +class TestDim(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1,), + (2, 3), + (1, 1, 2, 5, 1), + ], + ), + ) + def test_dim(self, compute_unit, backend, shape): + class DimModel(nn.Module): + def forward(self, x): + return torch.tensor([x.dim()]) + + self.run_compare_torch( + shape, DimModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestNewZeros(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_new_zeros_dynamic(self, compute_unit, backend, rank): + class ZerosDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return x.new_zeros(x.shape) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1,), + (2, 3), + (1, 1, 2, 5, 1), + ], + ), + ) + def test_new_zeros_static(self, compute_unit, backend, shape): + class ZerosStaticModel(nn.Module): + def __init__(self): + super(ZerosStaticModel, self).__init__() + + def forward(self, x): + return x.new_zeros(x.shape) + + self.run_compare_torch( + shape, ZerosStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestNewFull(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_new_full_dynamic(self, compute_unit, backend, rank): + class FullDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return 
x.new_full(x.shape, fill_value=3.14) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = FullDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + backends, + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_new_full_static(self, compute_unit, backend, shape_val): + shape, val = shape_val + + class FullStaticModel(nn.Module): + def forward(self, x): + return x.new_full(x.shape, fill_value=val) + + self.run_compare_torch( + shape, FullStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestEye(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, eye_type", + itertools.product( + compute_units, + backends, + ["single", "double"], + ), + ) + def test(self, compute_unit, backend, eye_type): + class Model(nn.Module): + def forward(self, x): + if eye_type == "single": + eye = torch.eye(3) + return x + eye + elif eye_type == "double": + eye = torch.eye(2, 3) + return x + eye + + input_shape = (3, 3) if eye_type == "single" else (2, 3) + model = Model().eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestOnes(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_ones_dynamic(self, compute_unit, backend, rank): + class OnesDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.ones(x.shape) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = OnesDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [(1,), (2, 3), (1, 1, 2, 5, 1)], + ), + ) + def test_ones_static(self, compute_unit, backend, shape): + class OnesStaticModel(nn.Module): + def forward(self, x): + return torch.ones(x.shape) + + self.run_compare_torch( + shape, OnesStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestRandint(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, low, high", + itertools.product( + compute_units, + backends, + [(1,), (2, 3)], + [-1, 2], + [3, 5], + ), + ) + def test_randint(self, compute_unit, backend, shape, low, high): + class TestModel(nn.Module): + def forward(self, x): + y = torch.randint(low, high, x.shape) + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, TestModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestTypeAs(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, type", + itertools.product(compute_units, backends, ["int32", "float32", "bool"]), + ) + def test_type_as(self, compute_unit, backend, type): + class TestNet(nn.Module): + def forward(self, x, y): + return x.type_as(y) + + model = TestNet() + type_map = { + "int32": torch.int32, + 
"float16": torch.float16, + "float32": torch.float32, + "bool": torch.bool, + } + input = [ + torch.Tensor([0, 1, 2, 3]).to(torch.float32), + torch.Tensor([2, 3]).to(type_map[type]), + ] + self.run_compare_torch( + input, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestReduction(TorchBaseTest): + class TestModel(nn.Module): + def __init__(self, mode, dim=None, keepdim=None): + super().__init__() + args = {"dim": dim, "keepdim": keepdim} + self.op_args = {k: v for k, v in args.items() if v is not None} + + if mode == "min": + self.op = torch.min + elif mode == "max": + self.op = torch.max + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + def forward(self, x, y=None): + if y is not None: + return self.op(x, y) + return self.op(x, **self.op_args) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, dim, keepdim, mode", + itertools.product( + compute_units, + backends, + [(2, 2), (1, 1)], + [0, 1, None], + [True, False, None], + ["min", "max"], + ), + ) + def test_min_max(self, compute_unit, backend, input_shape, dim, keepdim, mode): + if dim is None and keepdim is not None: + pytest.skip("invalid torch.min configuration") + + input_data = torch.rand(input_shape) + model = self.TestModel(mode, dim=dim, keepdim=keepdim) + + self.run_compare_torch( + input_data, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, mode", + itertools.product(compute_units, backends, [(2, 2), (1, 1)], ["min", "max"]), + ) + def test_min_max_with_no_arguments(self, compute_unit, backend, input_shape, mode): + self.run_compare_torch( + input_shape, + self.TestModel(mode), + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, dim, mode", + itertools.product( + compute_units, backends, [(2, 2), (1, 1)], [0, 1], ["min", "max"] + ), + ) + def test_min_max_no_keepdim(self, compute_unit, backend, input_shape, dim, mode): + input_data = torch.rand(input_shape) + model = self.TestModel(mode, dim=dim) + expected_results = model(input_data) + + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, mode", + itertools.product(compute_units, backends, [(2, 2), (1, 1)], ["min", "max"]), + ) + def test_min_max_two_tensors(self, compute_unit, backend, input_shape, mode): + model = self.TestModel(mode) + self.run_compare_torch( + [input_shape] * 2, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLayerNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, eps", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [(1, 3, 15, 15), (1, 1, 1, 1)], + [1e-5, 1e-7], + ), + ) + def test_layer_norm(self, compute_unit, backend, input_shape, eps): + model = nn.LayerNorm(input_shape, eps=eps) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestPixelShuffle(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch_size, CHW, r", + itertools.product( + compute_units, backends, [1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4] + ), + ) + def test_pixel_shuffle(self, compute_unit, backend, batch_size, CHW, r): + C, H, W = CHW + input_shape = (batch_size, C * r * r, H, W) + model = 
nn.PixelShuffle(upscale_factor=r) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +@pytest.mark.skipif( + _macos_version() < (13, 0), reason="New functionality in macOS13/iOS16" +) +class TestPixelUnshuffle(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch_size, CHW, r", + itertools.product( + compute_units, backends, [1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4] + ), + ) + def test_pixel_shuffle(self, compute_unit, backend, batch_size, CHW, r): + if backend[0] == "neuralnetwork": + pytest.skip("pixel_unshuffle only supported in mlprogram backend.") + + C, H, W = CHW + input_shape = (batch_size, C, H * r, W * r) + model = nn.PixelUnshuffle(downscale_factor=r) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestExpand(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (2, 2)], + [(3, 1), (-1, 4)], + [(1, 3, 4, 4), (3, 3, 4, 4)], + [(4,), (3, 4)], + [(3, 2), (1, 2, -1, 2)], + ], + ), + ) + def test_expand(self, compute_unit, backend, shapes): + input_shape, output_shape = shapes + + class TestModel(torch.nn.Module): + def forward(self, x): + return x.expand(*output_shape) + + model = TestModel() + + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape0(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[1], x.shape[1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[1, ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape1(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[0], 1, x.shape[-1], x.shape[-1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape2(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[-1], 1, x.shape[-1], x.shape[-1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[1, ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape3(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[0], 10) + + self.run_compare_torch( + torch.arange(20).reshape((20, 1)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape_from_another_input(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x, y): + return x.expand(int(y[0]), int(y[1])) + + self.run_compare_torch( + [torch.arange(20).reshape((20, 1)), torch.Tensor([20, 20])], + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), 1])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (2, 2)], + [(3, 1), (3, 4)], + [(1, 3, 4, 4), (3, 3, 4, 4)], + [(4,), (1, 3, 4)], + ], + ), + ) + def test_expand_as(self, compute_unit, backend, input_shapes): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return x.expand_as(y) + + model = TestModel() + + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestExpandDims(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for rank in range(1, 5) + for axis in range(-rank - 1, rank + 1) + ], + ), + ) + def test_unsqueeze(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = tuple(np.random.randint(low=2, high=10, size=rank)) + model = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLinspace(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_end, steps", + itertools.product( + compute_units, + backends, + [(-0.1, -0.7), (1, 10)], + [1, 3], + ), + ) + def test_linspace_static(self, compute_unit, backend, start_end, steps): + input_shape = tuple([steps]) + start, end = start_end + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(start, end, steps) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_linspace_static_large(self, compute_unit, backend): + input_shape = tuple([1]) + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(1, 2_000_000, 2_000_000) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, start_end, steps", + itertools.product( + compute_units, + backends, + [(-0.1, -0.7), (1, 10)], + [1, 2, 100], + ), + ) + def test_linspace_dynamic(self, compute_unit, backend, start_end, steps): + start, end = start_end + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(x[0], x[1], steps) + + model = Model() + inputs = [torch.Tensor([start, end])] + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_linspace_static_not_fold(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x): + return torch.linspace(0, 1, 100) + + model = Model() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The linspace op 
is folded to const, so there is no range_1d op. + assert len(prog.find_ops(op_type="const")) == 1 + assert len(prog.find_ops(op_type="range_1d")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the first param to linspace is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op + and var.op.op_type == "const" + and var.rank == 0 + and var.val == 0 + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The linspace op is not folded to const, but translated to range_1d instead. + assert len(prog.find_ops(op_type="range_1d")) == 1 + + +class TestArange(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_end_step", + itertools.product( + compute_units, + backends, + [ + (-0.1, -0.7, -0.07), + (3, 10, 0.3), + (1, 10, 100), + (1, 300000, 1), + (1, 10, 1e-6), + ], + ), + ) + def test_arange_static(self, compute_unit, backend, start_end_step): + if start_end_step == (1, 10, 1e-6): + pytest.xfail( + "rdar://88998831 (range_1d has numerical issue when the step is small)" + ) + input_shape = tuple( + [ + 1, + ] + ) + start, end, step = start_end_step + + class Model(nn.Module): + def forward(self, x): + return torch.arange(start, end, step) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, start_end_step", + itertools.product( + compute_units, + backends, + [ + (-0.1, -0.7, -0.07), + (3, 10, 0.3), + (1, 10, 100), + (1, 300000, 1), + ], + ), + ) + def test_arange_dynamic(self, compute_unit, backend, start_end_step): + start, end, step = start_end_step + + class Model(nn.Module): + def forward(self, x): + return torch.arange(x[0], x[1], x[2]) + + model = Model() + inputs = [torch.tensor([start, end, step])] + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + +class TestEinsum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, equation, reverse_input_order, dynamic", + itertools.product( + compute_units, + backends, + einsum_equations, + [False, True], + [False, True], + ), + ) + def test_einsum(self, compute_unit, backend, equation, reverse_input_order, dynamic): + class TestEinsum(nn.Module): + def forward(self, x, y): + return torch.einsum(equation, x, y) + if backend == ("mlprogram", "fp16"): + if equation in [ + "abc,cde->abde", + "abcd,cde->abe", + "iji,ji->j", + "jii,ijk->jk", + "ija,la->ijal", + "ia,ia->a", + "ai,ia->a", + "abi,abi->ab", + "iab,iab->ab", + "abi,bai->ba", + "ij,j->i", + "i,ij->j", + "ai,ija->aj", + "aibj,bi->jba", + "ij,jk->ik", + "abij,abjk->abik", + "aijb,bajk->abik", + "aij,aij->a", + "ija,ija->a", + "ija,jia->a", + "aijb,ajbi->ab", + "aibj,cdij->cadb", + "ijk,lmj->iklm", + "ijak,akl->aijl", + ] and dynamic: + pytest.xfail("rdar://106631543 ([Infra]Re-enable the unittests for torch einsum ops)") + + input_shapes, converter_input_type = gen_input_shapes_einsum(equation, dynamic) + + if reverse_input_order: + input_output_strings = equation.split("->") + input_strings = input_output_strings[0].split(",") + equation = ( + input_strings[1] + + "," + + input_strings[0] + + "->" + + input_output_strings[1] + ) + input_shapes = [input_shapes[1], input_shapes[0]] + if converter_input_type is not None: + converter_input_type = [converter_input_type[1], 
converter_input_type[0]] + + model = TestEinsum() + self.run_compare_torch( + input_shapes, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + converter_input_type=converter_input_type + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_einsum_with_same_input(self, compute_unit, backend): + class Einsum(nn.Module): + def forward(self, m1, m2, m3): + y1 = torch.einsum("bnhd,bdhm->bnhm", m1, m2) + y2 = torch.einsum("bnhd,bdhm->bnhm", m1, m3) + return y1, y2 + + m1 = torch.rand(1, 8, 8, 64) + m3 = torch.rand(1, 8, 128, 64).transpose(1, 3).transpose(2, 3) + m2 = m3.clone() + model = Einsum() + out = model(m1, m2, m3) + + self.run_compare_torch( + [m1, m2, m3], + Einsum(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + expected_results=out, + ) + + +class TestSqueeze(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (2, 1), + (2, 0), + (3, 1), + (3, None), + (4, None), + (4, 2), + (5, None), + (5, -1), + ], + ), + ) + def test_squeeze(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = list(np.random.randint(low=2, high=10, size=rank)) + if axis is not None: + input_shape[axis] = 1 + else: + input_shape[0] = 1 + input_shape = tuple(input_shape) + model = ModuleWrapper( + function=torch.squeeze, kwargs={"dim": axis} if axis is not None else {} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestCumSum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, axis", + itertools.product( + compute_units, + backends, + [-1, 0, 1, 2, 3], + ), + ) + def test_cumsum(self, compute_unit, backend, axis): + input_shape = list(np.random.randint(low=2, high=10, size=4)) + input_shape = tuple(input_shape) + model = ModuleWrapper(function=torch.cumsum, kwargs={"dim": axis}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestReshape(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_shape", + itertools.product( + compute_units, + backends, + [ + (3, 2), + (2, -1), + (2, 1, 1, 3), + ], + ), + ) + def test_reshape(self, compute_unit, backend, output_shape): + input_shape = (2, 3) + model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestReshapeAs(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_output_shape", + itertools.product( + compute_units, + backends, + [ + ((6, 1, 1), (3, 2)), + ((8,), (2, 1, 1, 2, 2)), + ], + ), + ) + def test_reshape_as(self, compute_unit, backend, input_output_shape): + class Model(nn.Module): + def forward(self, x, ref): + return x.reshape_as(ref) + + model = Model() + input_shape, output_shape = input_output_shape + self.run_compare_torch( + [input_shape, output_shape], + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestFlatten(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_dim, end_dim, is_dynamic", + itertools.product(compute_units, backends, [2, -2, 0], [3, -1], [False, True]), + ) + def test_flatten(self, compute_unit, backend, start_dim, end_dim, is_dynamic): + input_shape = (2, 3, 4, 5) + converter_input_type = None + if is_dynamic: + converter_input_type = [ +
TensorType( + shape=(2, 3, RangeDim(default=4), RangeDim(default=5)), + dtype=np.float32, + ) + ] + model = ModuleWrapper( + function=torch.flatten, kwargs={"start_dim": start_dim, "end_dim": end_dim} + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestGather(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, backends, [(i, j) for i in range(1, 6) for j in range(0, i)] + ), + ) + def test_gather_along_axis(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + params_shape = np.random.randint(low=2, high=5, size=rank) + indices_shape = np.copy(params_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + indices = np.random.randint(0, params_shape[axis], size=indices_shape) + params_shape, indices_shape = tuple(params_shape), tuple(indices_shape) + model = ModuleWrapper( + function=torch.gather, + kwargs={"dim": axis, "index": torch.from_numpy(indices)}, + ) + self.run_compare_torch( + [params_shape], model, backend=backend, compute_unit=compute_unit + ) + + +class TestActivation(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_relu(self, compute_unit, backend, shape): + model = nn.ReLU().eval() + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + model = ModuleWrapper(nn.functional.relu_) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_relu6(self, compute_unit, backend, shape): + model = nn.ReLU6().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, alpha, shape, single_alpha", + itertools.product( + compute_units, + backends, + [0.25, 2.0], + [(3,), (2, 6), (2, 3, 4), (2, 5, 6, 7), (2, 3, 4, 5, 6)], + [True, False], + ), + ) + def test_prelu(self, compute_unit, backend, alpha, shape, single_alpha): + if (backend[0] == "mlprogram" and backend[1] == "fp16") or len(shape) == 5: + pytest.xfail( + "rdar://92175249 ([MIL] TestActivation::test_prelu[backend=(mlprogram, fp16)] CI failure)" + ) + input_shape = shape + num_parameters = input_shape[1] if len(input_shape) >= 2 else 1 + if single_alpha: + num_parameters = 1 + model = nn.PReLU(num_parameters, alpha).eval() + mlmodel = self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # Since all these tests result in a prelu with a common leakage factor, the + # prelu_to_lrelu pass optimizes it into a leaky_relu instead.
+ assert len(prog.find_ops(op_type="leaky_relu")) == 1 + assert len(prog.find_ops(op_type="prelu")) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, shape, alpha", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL, [0.1, 2.0, 1.4]), + ) + def test_leaky_relu(self, compute_unit, backend, shape, alpha): + model = nn.LeakyReLU(negative_slope=alpha).eval() + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + model = ModuleWrapper(nn.functional.leaky_relu_, {"negative_slope": alpha}) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES_ALL, + ), + ) + def test_randomized_leaky_relu(self, compute_unit, backend, shape): + # RReLU samples the slope at train time, but in eval mode it uses the fixed + # mean slope, so the outputs are deterministic and comparable. + model = nn.RReLU(lower=0.01, upper=0.9).eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_softmax(self, compute_unit, backend, shape): + model = nn.Softmax().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, range_val", + itertools.product( + compute_units, backends, [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)] + ), + ) + def test_hardtanh(self, compute_unit, backend, range_val): + input_shape = (1, 10, 4, 5) + model = nn.Hardtanh(range_val[0], range_val[1]).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + model = ModuleWrapper( + nn.functional.hardtanh_, {"min_val": range_val[0], "max_val": range_val[1]} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, alpha", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL, [0.1, 2.0, 1.4]), + ) + def test_elu(self, compute_unit, backend, shape, alpha): + model = nn.ELU(alpha).eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_gelu(self, compute_unit, backend, shape): + model = nn.GELU().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_erf(self, compute_unit, backend, shape): + class ERFActivation(nn.Module): + def forward(self, x): + return torch.erf(x) + + model = ERFActivation().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, backends, [(1, 10), (1, 3, 5), (1, 5, 6, 7), (1, 3, 4, 5, 6)] + ), + ) + def test_sigmoid(self, compute_unit, backend, shape): + model = nn.Sigmoid().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_sigmoid_hard(self, compute_unit, backend, shape): + model = nn.Hardsigmoid().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + +
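+ # For reference: nn.Hardsigmoid above is the piecewise-linear approximation + # relu6(x + 3) / 6, i.e. 0 for x <= -3, 1 for x >= 3, and x / 6 + 1 / 2 in between.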
@pytest.mark.parametrize( + "compute_unit, backend, beta, threshold", + itertools.product(compute_units, backends, [1, 2, 5], [5, 10, 20]), + ) + @pytest.mark.skipif( + _macos_version() <= (10, 15), + reason="Parametric SoftPlus segfaults on macOS 10.15 and below.", + ) + def test_softplus(self, compute_unit, backend, beta, threshold): + input_shape = (1, 10, 5, 15) + model = nn.Softplus(beta, threshold).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES_ALL + ), + ) + def test_mish(self, compute_unit, backend, shape): + model = nn.Mish().eval() + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_softsign(self, compute_unit, backend, shape): + model = nn.Softsign().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.skipif( + condition=version_lt(torch, "1.7.0"), + reason="torch.nn.SiLU available only in PyTorch 1.7.0+", + ) + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(1, 10), (1, 3, 4), (1, 4, 5, 6)]), + ) + def test_silu(self, compute_unit, backend, shape): + model = ModuleWrapper(function=torch.nn.functional.silu) + self.run_compare_torch([shape], model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, rounding_mode", + itertools.product(compute_units, backends, [None, "floor", "trunc"]), + ) + def test_div(self, compute_unit, backend, rounding_mode): + model = ModuleWrapper( + function=torch.div, kwargs={"rounding_mode": rounding_mode} + ) + x1 = torch.from_numpy(np.array([2.3, 2.6, -3.6, -3.2], dtype=np.float32)) + x2 = torch.from_numpy(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)) + out = torch.div(x1, x2, rounding_mode=rounding_mode) + self.run_compare_torch( + [x1, x2], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + expected_results=out, + ) + + +class TestElementWiseUnary(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, op_string", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + "abs", + "acos", + "asin", + "atan", + "ceil", + "cos", + "cosh", + "exp", + "floor", + "round", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "sign", + ], + ), + ) + def test_elementwise_no_params(self, compute_unit, backend, shape, op_string): + if not contains_op(torch, op_string): + return + if op_string == "sqrt" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.skip("sqrt on GPU produces NaN.") + + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, clamp_range", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + (0.0, 1.0), + (-1.0, 0.5), + (0.2, 0.7), + (None, 4.0), + (-3.0, None), + (1, 2), + (1, 3.5), + (1, -1), + ], + ), + ) + def test_clamp(self, compute_unit, backend, shape, clamp_range): + params_dict = {} + if clamp_range[0] is not None: + params_dict["min"] = clamp_range[0] + if clamp_range[1] is not None: + params_dict["max"] = clamp_range[1] + + model = ModuleWrapper(torch.clamp,
params_dict) + self.run_compare_torch( + shape, model, backend=backend, compute_unit=compute_unit, rand_range=(-5, 5) + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_clamp_int_input(self, compute_unit, backend): + params_dict = {"min": -2, "max": 2} + input_data = torch.randint(low=-5, high=5, size=(2, 3, 4)) + model = ModuleWrapper(torch.clamp, params_dict) + self.run_compare_torch( + input_data, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + converter_input_type=[TensorType(shape=input_data.shape, dtype=np.int32)], + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, threshold", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)], + ), + ) + def test_threshold(self, compute_unit, backend, shape, threshold): + model = torch.nn.Threshold(threshold[0], threshold[1]).eval() + input_value = torch.rand(np.prod(shape)) + # make sure the values are not too close to the threshold + for i in range(len(input_value)): + if abs(input_value[i] - threshold[0]) < 0.005: + input_value[i] += 0.05 + input_value = torch.reshape(input_value, shape) + self.run_compare_torch( + input_value, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, op_string", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + "log", + "rsqrt", + "reciprocal", + ], + ), + ) + def test_elementwise_numerically_stable( + self, compute_unit, backend, shape, op_string + ): + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + rand_range=(20, 100), + ) + + +class TestAtan2(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + TorchBaseTest.run_compare_torch( + [input_shape, input_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_x0(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + y = generate_input_data(input_shape, rand_range=(-1.0, 1.0)) + x = torch.zeros(input_shape) + TorchBaseTest.run_compare_torch( + (y, x), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_y0x0(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + y = torch.zeros(input_shape) + x = torch.zeros(input_shape) + TorchBaseTest.run_compare_torch( + (y, x), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_broadcast(self, compute_unit, backend, rank): + model = 
ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + truncated_shape = list(input_shape) + while len(truncated_shape) > 1: + truncated_shape.pop(0) + TorchBaseTest.run_compare_torch( + [input_shape, truncated_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + TorchBaseTest.run_compare_torch( + [truncated_shape, input_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + + +class TestTriu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, diagonal", + itertools.product( + compute_units, + backends, + [(5, 5), (3, 4), (5, 1)], + [None, -1, 0, 2], + ), + ) + def test_triu(self, compute_unit, backend, shape, diagonal): + params_dict = {} + if diagonal is not None: + params_dict["diagonal"] = diagonal + model = ModuleWrapper(torch.triu, params_dict) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestTril(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, diagonal", + itertools.product( + compute_units, + backends, + [(5, 5), (3, 4), (5, 1)], + [None, -1, 0, 2], + ), + ) + def test_tril(self, compute_unit, backend, shape, diagonal): + params_dict = {} + if diagonal is not None: + params_dict["diagonal"] = diagonal + model = ModuleWrapper(torch.tril, params_dict) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMatMul(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bmm(self, compute_unit, backend): + shape_x, shape_y = (3, 4, 5), (3, 5, 6) + model = ModuleWrapper(function=torch.bmm) + self.run_compare_torch( + [shape_x, shape_y], model, backend=backend, compute_unit=compute_unit + ) + + +class TestNumel(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [(1,), (2, 3)], + ), + ) + def test_numel(self, compute_unit, backend, input_shape): + class TestModel(torch.nn.Module): + def forward(self, x): + res = torch.numel(x) + return x + res + + model = TestModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestSplit(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, split_size_or_sections, dim", + itertools.product(compute_units, backends, [1, 2, [1, 4]], [0, -2]), + ) + def test_split(self, compute_unit, backend, split_size_or_sections, dim): + input_shape = (5, 2) + model = ModuleWrapper( + function=torch.split, + kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, split_sizes, dim", + itertools.product(compute_units, backends, [[1, 4], [3, 2]], [-1, -2]), + ) + def test_split_with_sizes(self, compute_unit, backend, split_sizes, dim): + input_shape = (5, 5) + model = ModuleWrapper( + function=torch.split_with_sizes, + kwargs={"split_sizes": split_sizes, "dim": dim}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestUnbind(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim", + itertools.product(compute_units, backends, [0, 1, 2]), + ) + def test_unbind(self, compute_unit, backend, dim): + 
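# torch.unbind removes the given dimension and returns a tuple of all slices along it, so this model has input_shape[dim] outputs. +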
input_shape = (3, 3, 4) + model = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_unbind_one_dim_shape(self, compute_unit, backend): + input_shape = (1,) + dim = 0 + model = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestTranspose(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, dims", + itertools.product( + compute_units, backends, COMMON_SHAPES, [(0, 1), (-2, -1), (1, 0), (-1, -2)] + ), + ) + def test(self, compute_unit, backend, shape, dims): + model = ModuleWrapper( + function=torch.transpose, kwargs={"dim0": dims[0], "dim1": dims[1]} + ) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestTo(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_cast_bug(self, compute_unit, backend): + if _macos_version() < (13, 0) and backend[0] == "mlprogram": + pytest.xfail("Issue fixed in iOS16/macOS13") + + class TestModel(torch.nn.Module): + def forward(self, spans, embedding): + spans = spans.float().relu().int() + + max1, _ = torch.max(spans, dim=1, keepdim=False) + max1, _ = torch.max(max1, dim=1, keepdim=False) + max2, _ = torch.max(embedding, dim=1, keepdim=False) + max2, _ = torch.max(max2, dim=1, keepdim=False) + sigmoided_scores = max1 + max2 + return sigmoided_scores + + if ( + platform.machine() == "arm64" + and compute_unit != ct.ComputeUnit.CPU_ONLY + and backend[0] == "neuralnetwork" + ): + pytest.xfail( + "rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)" + ) + model = TestModel() + self.run_compare_torch( + [(1, 4, 2), (1, 6, 3)], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_to_uint8(self, compute_unit, backend): + class TestModel(torch.nn.Module): + def forward(self, input_data): + input_data = input_data + input_data + return input_data.to(torch.uint8) + + inputs = [TensorType(name="input_data", shape=(1, 2, 3), dtype=np.int32)] + self.run_compare_torch( + inputs, TestModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + [np.float32, np.float16, np.int32], + ), + ) + def test_to_no_param(self, compute_unit, backend: Tuple[str], input_type): + if input_type == np.float16 and backend[0] == "neuralnetwork": + pytest.skip( + "Input float16 needs target >= iOS16, which doesn't support neuralnetwork." + ) + if input_type == np.float16 and _macos_version() < (13, 0): + pytest.skip("Input float16 needs target >= iOS16, which is not available until macOS 13.") + + class TestModel(torch.nn.Module): + def forward(self, input_data): + return input_data.to() + + inputs = [TensorType(name="input_data", shape=(1, 2, 3), dtype=input_type)] + # The float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13. 
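+ # so request iOS16 only for the float16 parametrization and keep the converter default otherwise.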
+ minimum_deployment_target = ( + ct.target.iOS16 if input_type == np.float16 else None + ) + self.run_compare_torch( + inputs, + TestModel(), + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_fold_const(self, compute_unit: ct.ComputeUnit, backend: Tuple[str, str]): + class TestModel(torch.nn.Module): + def forward(self, x): + return torch.arange(0, 3).float() + + model = TestModel() + + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The range_1d op translated from `torch.arange` is folded to const. + assert len(prog.find_ops(op_type="range_1d")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that only the range_1d op is not replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and "range_1d" in var.op.op_type + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The range_1d op translated from `torch.arange` shouldn't be folded. + assert len(prog.find_ops(op_type="range_1d")) == 1 + + +class TestSlice(TorchBaseTest): + @pytest.mark.skipif(_python_version() < (3, 6), reason="requires Python 3.6") + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_dynamic_slice(self, compute_unit, backend): + class DynamicSlicer(torch.nn.Module): + def forward(self, x, context_length): + return x[context_length:, :, :] + + class Model(torch.nn.Module): + def __init__(self): + super(Model, self).__init__() + self.tokens_embedding = torch.nn.Embedding(10, 10, 0) + self.context_embedding = torch.nn.Embedding(10, 10, 0) + self.dynamic_slicer = DynamicSlicer() + + def forward(self, tokens, context, context_length): + # Core ML requires inputs of rank 1 to 5, so we pass + # context_length as a rank-1 tensor + tokens_embeddings = self.tokens_embedding(tokens) + context_embeddings = self.context_embedding(context) + embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0) + embeddings = self.dynamic_slicer( + embeddings, torch.squeeze(context_length) + ) + + return embeddings + + model = Model() + batch_size = 5 + inputs = [ + TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64), + TensorType(name="context", shape=(3, batch_size), dtype=np.int64), + TensorType(name="context_length", shape=(1,), dtype=np.int32), + ] + self.run_compare_torch( + inputs, model, rand_range=(0, 8), backend=backend, compute_unit=compute_unit + ) + + +class TestRepeat(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_repeat(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=2, high=6, size=rank) + repeats = np.random.randint(low=2, high=4, size=rank) + input_shape = tuple(input_shape) + + model = ModuleWrapper(function=lambda x: x.repeat(*repeats)) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, (1, 2)), + ) + def test_repeats_with_extra_dimensions(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=2,
high=6, size=rank) + + for num_extra_dims in (1, 2): + repeats = np.random.randint(low=2, high=4, size=rank + num_extra_dims) + model = ModuleWrapper(function=lambda x: x.repeat(*repeats)) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_enumerated_shape_case1(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + reps = x.size(0) + return y.repeat(reps) + + enumerated_shapes = ct.EnumeratedShapes(shapes=[(1, 1), (2, 1)]) + module = Model() + inputs = [torch.tensor([[1]]), torch.tensor([2])] + + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=enumerated_shapes), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_enumerated_shape_case2(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + return y.repeat(x.size(0), x.size(1)) + + enumerated_shapes = ct.EnumeratedShapes(shapes=[(1, 1), (2, 1)]) + module = Model() + inputs = [torch.tensor([[1], [2]]), torch.tensor([2])] + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=enumerated_shapes), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_symbolic_shape(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + return y.repeat([x.shape[-1], 1, x.shape[0]]) + + module = Model() + inputs = [torch.tensor([[1], [2]]), torch.tensor([2])] + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim())), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + +class TestStd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, unbiased", + itertools.product(compute_units, backends, [True, False]), + ) + def test_std_2_inputs(self, compute_unit, backend, unbiased): + model = ModuleWrapper(function=torch.std, kwargs={"unbiased": unbiased}) + x = torch.randn(1, 5, 10) * 3 + out = torch.std(x, unbiased=unbiased).unsqueeze(0) + self.run_compare_torch( + x, + model, + expected_results=out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, unbiased, dim, keepdim", + itertools.product( + compute_units, backends, [True, False], [[0, 2], [1], [2]], [True, False] + ), + ) + def test_std_4_inputs(self, compute_unit, backend, unbiased, dim, keepdim): + model = ModuleWrapper( + function=torch.std, + kwargs={"unbiased": unbiased, "dim": dim, "keepdim": keepdim}, + ) + input_shape = (2, 5, 10) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestOnesLike(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_ones_like_static(self, compute_unit, backend, rank): + class OnesLikeStaticModel(nn.Module): + def forward(self, x): + return torch.ones_like(x) + + 
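# Draw a random static shape with each dimension in [2, 6); the converted model must return a ones tensor of the same shape. +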
input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = OnesLikeStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [1, 3], + ), + ) + def test_ones_like_dynamic(self, compute_unit, backend, rank): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target is not available before macOS 13") + + class OnesLikeDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.ones_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape) + model = OnesLikeDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + +class TestZeros(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_like_static(self, compute_unit, backend, rank): + class ZerosLikeStaticModel(nn.Module): + def forward(self, x): + return torch.zeros_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = ZerosLikeStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [1, 3], + ), + ) + def test_zeros_like_dynamic(self, compute_unit, backend, rank): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target is not available before macOS 13") + + class ZerosLikeDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.zeros_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosLikeDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_zeros_like_static_fold_to_const(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + x = torch.arange(0, 3) + return torch.zeros_like(x) + + model = TestModel() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros_like op is folded to const, so there is no fill nor fill_like
op. + assert len(prog.find_ops(op_type="fill")) + len(prog.find_ops(op_type="fill_like")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that only shape op is not replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.op.op_type == "shape" + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is not folded to const. + assert len(prog.find_ops(op_type="fill")) + len(prog.find_ops(op_type="fill_like")) == 1 + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_static(self, compute_unit, backend, rank): + class ZerosStaticModel(nn.Module): + def forward(self, x): + if rank == 1: + return torch.zeros(1) + elif rank == 3: + return torch.zeros(2, 3, 5) + + input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = ZerosStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_dynamic(self, compute_unit, backend, rank): + class ZerosDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return x + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_zeros_static_fold_to_const(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return torch.zeros(2, 3, 5) + + model = TestModel() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros op is folded to const. + assert len(prog.find_ops(op_type="fill")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the size parameter to torch.zeros is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.rank == 1 and np.all(var.val == [2, 3, 5]) + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros op is not folded to const. 
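+ # Exactly one fill op should therefore survive in the final program.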
+ assert len(prog.find_ops(op_type="fill")) == 1 + + +class TestTopk(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, largest, sort, dynamic, shape_dim_k", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [((4, 6, 7, 3), -1, 2), ((10, 3, 4), 2, 2), ((5,), 0, 2)], + ), + ) + def test_topk(self, compute_unit, backend, largest, sort, shape_dim_k, dynamic): + if not sort and backend[0] == "neuralnetwork": + pytest.xfail("iOS16 version topk needed for sort = False") + if not sort and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + input_shape = shape_dim_k[0] + dim = shape_dim_k[1] + k = shape_dim_k[2] + + class TopkModel(nn.Module): + def forward(self, x, y): + if dynamic: + nonlocal k + k = torch.min(y) + topk = torch.topk(x, k, dim=dim, largest=largest, sorted=sort) + values, indices = topk.values, topk.indices + if not sort: + values, _ = torch.sort(values, dim=dim) + indices, _ = torch.sort(indices, dim=dim) + return values, indices, y + 1 + + input_data = torch.rand(input_shape) + k_list = torch.tensor([k + 1, k, k + 2]) + + model = TopkModel() + expected_results = model(input_data, k_list) + self.run_compare_torch( + [input_data, k_list], + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=ct.target.iOS16 if not sort else None, + ) + + +class TestLog10(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_log10(self, compute_unit, backend, rank): + class Log10Model(nn.Module): + def forward(self, x): + return torch.log10(x) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = Log10Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLog2(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_log2(self, compute_unit, backend, rank): + class Log2Model(nn.Module): + def __init__(self): + super(Log2Model, self).__init__() + + def forward(self, x): + return torch.log2(x) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = Log2Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestFlip(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_dim", + itertools.product( + compute_units, + backends, + [(1, [0]), (2, [0, 1]), (3, [1]), (4, [0, 1, 2, 3])], + ), + ) + def test_flip(self, compute_unit, backend, rank_dim): + rank, dim = rank_dim + + class FlipModel(nn.Module): + def forward(self, x): + return torch.flip(x, dim) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = FlipModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestBitWiseLogical(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y, op_string", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([[True, False], [True, False]], [[1, 0], [2, 1]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + [ + "eq", + "ne", + ], + ), + ) + def 
test_bitwise_logical(self, compute_unit, backend, x_y, op_string): + if not contains_op(torch, op_string): + return + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalAnd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_and(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_and(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalOr(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_or(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_or(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalXor(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_xor(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_xor(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestWhere(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(2, 6), (3, 4, 5)]), + ) + def test_where_test1(self, compute_unit, backend, shape): + class WhereModel(nn.Module): + def forward(self, x, y): + return torch.where(x > 0.5, x, y) + + input_shape = [shape, shape] + model = WhereModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(2, 6), (3, 4, 5)]), + ) + def test_where_test2(self, compute_unit, backend, shape): + class WhereModel(nn.Module): + def forward(self, cond, x, y): + return torch.where(cond, x, y) + + cond = torch.rand(*shape) > 0.5 + inputs = [cond, torch.rand(*shape), torch.rand(*shape)] + model = WhereModel() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + backend=backend, + 
compute_unit=compute_unit, + expected_results=expected_results, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(1, 2), (1, 2), (1, 1)], + [(1, 2, 3), (1, 1, 1), (1, 1, 3)], + ], + ), + ) + def test_where_test3(self, compute_unit, backend, shapes): + class WhereModel(nn.Module): + def forward(self, cond, x, y): + return torch.where(cond, x, y) + + cond_shape, x_shape, y_shape = shapes + cond = torch.rand(*cond_shape) > 0.5 + inputs = [cond, torch.rand(*x_shape), torch.rand(*y_shape)] + model = WhereModel() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + expected_results=expected_results, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES + [(10,)]), + ) + def test_where_single_param(self, compute_unit, backend, shape): + class WhereModelSingleParam(nn.Module): + def forward(self, x): + return torch.where(x) + + # Create a tensor of given shape with ~90% zero entries + x = np.zeros(shape) + all_indices = list(zip(*np.where(x == 0))) + num_indices = len(all_indices) + random_picks = np.random.choice( + np.arange(num_indices), size=num_indices // 10, replace=False + ) + for i in random_picks: + x[all_indices[i]] = np.random.choice([-1, 12, 100]) + x = torch.Tensor(x) + + self.run_compare_torch( + x, + WhereModelSingleParam(), + backend=backend, + input_as_shape=False, + compute_unit=compute_unit, + ) + + +class TestSelect(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim_index", + itertools.product( + compute_units, + backends, + [ + [0, 0], + [1, 1], + [-1, -1], + ], + ), + ) + def test_select(self, compute_unit, backend, dim_index): + dim, index = dim_index + + class SelectModel(nn.Module): + def forward(self, x): + return x.select(dim, index) + + input_shape = (1, 2, 3) + model = SelectModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestNonZero(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, as_tuple", + itertools.product( + compute_units, + backends, + [1, 3], + [False, True], + ), + ) + def test_non_zero(self, compute_unit, backend, rank, as_tuple): + + if rank == 1: + input_shape = 10 + zeros_indices = np.array([1, 4, 7, 9]) + elif rank == 3: + input_shape = (2, 7, 3) + zeros_indices = np.array([1, 12, 33, 40]) + + input = np.arange(np.prod(input_shape)).astype(np.float32) + input[zeros_indices] = 0 + input = np.reshape(input, input_shape) + input = torch.tensor(input) + + model = ModuleWrapper( + torch.nonzero, + {"as_tuple": as_tuple}, + ) + + self.run_compare_torch( + input, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTorchTensor(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [0, 1, 2, 3, 4, 5], + ), + ) + def test_torch_tensor(self, compute_unit, backend, rank): + class Model(nn.Module): + def __init__(self, rank): + super(Model, self).__init__() + self.rank = rank + + def forward(self, x): + with torch.no_grad(): + if self.rank == 0: + res = self.generate_tensor_rank_0(x) + return torch.unsqueeze(res, 0) + if self.rank == 1: + return self.generate_tensor_rank_1(x) + if self.rank == 2: + return self.generate_tensor_rank_2(x) + if self.rank == 3: + 
return self.generate_tensor_rank_3(x) + if self.rank == 4: + return self.generate_tensor_rank_4(x) + if self.rank == 5: + return self.generate_tensor_rank_5(x) + + @torch.jit.script + def generate_tensor_rank_0(x): + _, _, _, w = x.shape + return torch.tensor(w, dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_1(x): + _, _, h, w = x.shape + return torch.tensor([h, w, 0, 1], dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_2(x): + _, _, h, w = x.shape + return torch.tensor([[0, h], [h, w], [w, w]], dtype=torch.float32) + + @torch.jit.script + def generate_tensor_rank_3(x): + _, _, h, w = x.shape + return torch.tensor([[[h, 1]], [[3, w]]], dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_4(x): + _, _, h, w = x.shape + return torch.tensor( + [ + [[[h, h], [h, w]], [[w, w], [w, 1]]], + [[[0, 0], [1, 1]], [[0, h], [h, w]]], + ], + dtype=torch.float32, + ) + + @torch.jit.script + def generate_tensor_rank_5(x): + _, _, h, w = x.shape + return torch.tensor( + [[[[[h, w], [w, w]], [[1, 1], [0, h]]]]], dtype=torch.float32 + ) + + shape = (1, 1, 3, 4) + model = Model(rank) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, torch_op", + itertools.product( + compute_units, + backends, + [ + torch.abs, + torch.acos, + torch.asin, + torch.atan, + torch.atanh, + torch.ceil, + torch.cos, + torch.cosh, + torch.exp, + torch.exp2, + torch.floor, + torch.round, + torch.rsqrt, + torch.sign, + torch.sin, + torch.sinh, + torch.sqrt, + torch.square, + torch.tan, + torch.tanh, + ], + ), + ) + def test_torch_rank0_tensor(self, compute_unit, backend, torch_op): + class Model(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch_op(torch.tensor(0.1)) + + model = Model() + self.run_compare_torch( + torch.tensor([1.0, 2.0, 3.0]), + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTensorAssign(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_1(self, compute_unit, backend): + # single dimension assignment for a 1D tensor + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[0] = 0 + x[1] = 1 + y = x + 1 + x[1] = 2 * y[1] + return x, y + + shape = (5,) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_2(self, compute_unit, backend): + # single dimension assignment for two 1D tensors + class TensorAssignModel(torch.nn.Module): + def forward(self, x, y): + x[0] = 0 + y[1] = 2 + y = x + y + x = 2 * y + y[3] = x[1] + 5 + y[0] = x[0] * 10 + z = x + y + return z, x, y + + shape = (5,) + model = TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (5, 4), + (5, 4, 3), + ], + ), + ) + def test_tensor_assign_case_3(self, compute_unit, backend, shape): + # broadcast assignment for two n-D tensors + class TensorAssignModel(torch.nn.Module): + def __init__(self): + super(TensorAssignModel, self).__init__() + + def forward(self, x, y): + x[0] = 0 + x[3] = 1 + y[2] = 2 + return x + + model = 
TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_4(self, compute_unit, backend): + # single dimension assignment for two n-D tensors + class TensorAssignModel(torch.nn.Module): + def forward(self, x, y): + x[0] = torch.tensor([1.0, 2.0, 3.0, 4.0]) + x[3] = 1 + y[0] = x[0] + return x, y + + shape = (5, 4) + model = TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_5(self, compute_unit, backend): + # slice dimension assignment + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1] = torch.tensor([1.0, 2.0]) + return x + + shape = (2, 10) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_6(self, compute_unit, backend): + # a more complicated slice dimension assignment + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1, :] = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).view(2, 3) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_tensor_assign_case_7(self, compute_unit, backend, dynamic): + # general case + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:1, 1, :1] = torch.tensor([1.0]).view(1, 1) + x[0, 1, 2] = 6.
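+ # The strided assignments below (step-2 slices) exercise non-contiguous tensor updates.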
+ x[:2, 2:8:2, 1:2] = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).view(2, 3, 1) + x[:, 1:10:8, 1:3] = torch.tensor([1.0, 2.0, 3.0, 4.0]).view(2, 1, 2) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + if dynamic: + converter_input_type = [ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim(), ct.RangeDim()))] + else: + converter_input_type = None + self.run_compare_torch( + shape, + model, + converter_input_type=converter_input_type, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_tensor_assign_case_8(self, compute_unit, backend, dynamic): + # general case with dynamic begin and end + class TensorAssignModel(torch.nn.Module): + def forward(self, x, begin_0, begin_1, end_1): + x[:1, begin_0:begin_0+5:2, 2] = torch.tensor([1.0, 2.0, 3.0]).view(1, 3) + x[:, 4, begin_1:end_1] = torch.tensor([1.0]).view(1, 1) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + if dynamic: + converter_input_type = [ + ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim(), ct.RangeDim())), + ct.TensorType(shape=(1,), dtype=np.int32), + ct.TensorType(shape=(1,), dtype=np.int32), + ct.TensorType(shape=(1,), dtype=np.int32), + ] + else: + converter_input_type = None + + inputs = [ + torch.rand(*shape), + torch.as_tensor([1], dtype=torch.int32), + torch.as_tensor([1], dtype=torch.int32), + torch.as_tensor([2], dtype=torch.int32), + ] + + torch_inputs = [torch.clone(x) for x in inputs] + expected_results = model(*torch_inputs) + + self.run_compare_torch( + inputs, + model, + expected_results=expected_results, + input_as_shape=False, + converter_input_type=converter_input_type, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_type_compatibility(self, compute_unit, backend): + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1] = torch.tensor([1, 2], dtype=torch.int32) + return x + + shape = (2, 3) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestIndexPut(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_index_put_case_1(self, compute_unit, backend): + class IndexPutModel(torch.nn.Module): + def forward(self, x, y): + y = x + 1 + mask = torch.tensor([True, False, False, False, True, True]).view(3, 2) + x[mask] = y[mask] + return x + + shape = (3, 2) + model = IndexPutModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [0, 1], + ), + ) + def test_index_put_case_2(self, compute_unit, backend, rank): + class IndexPutModel(torch.nn.Module): + def forward(self, x): + mask = torch.tensor([True, False, False, False, True, True]).view(3, 2) + if rank == 0: + x[mask] = 0.0 + if rank == 1: + x[mask] = torch.tensor([1.0]) + return x + + shape = (3, 2) + model = IndexPutModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_index_put_case_3(self, compute_unit, backend): + if _macos_version() < (13, 
0): + pytest.skip("Issue fixed in iOS16/macOS13") + + class IndexPutModel(torch.nn.Module): + def forward(self, x, y): + mask = y > 1 + x[mask] = 0.0 + return x + + inputs = [ + torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6]), + torch.Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + ] + model = IndexPutModel() + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, accumulate", + itertools.product(compute_units, backends, [1, 2], [True, False]), + ) + def test_index_put_case_4(self, compute_unit, backend, rank, accumulate): + class IndexPutModel(torch.nn.Module): + def forward(self, x, indices, values): + x.index_put_(tuple(indices.t()), values, accumulate=accumulate) + return x + + if rank == 1: + inputs = [ + torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6]), + torch.LongTensor([[0], [4]]), + torch.Tensor([3.0, 7.0]), + ] + elif rank == 2: + inputs = [ + torch.ones([3, 4]), + torch.LongTensor([[0, 1], [1, 2], [2, 2]]), + torch.Tensor([1.0, 5.0, 8.0]), + ] + + model = IndexPutModel() + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestIndex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (10,), + (3, 4, 5, 6), + ], + ), + ) + def test_index_bool_indices(self, compute_unit, backend, shape): + rank = len(shape) + + class IndexModel(torch.nn.Module): + def __init__(self, axis): + super().__init__() + self.axis = axis + + def forward(self, x, y): + index = y > 0.5 + if self.axis == 0: + return x[index] + elif self.axis == 1: + return x[:, index] + elif self.axis == 2: + return x[:, :, index] + else: + assert self.axis == 3 + return x[:, :, :, index] + + for index_rank in range(1, rank + 1): + for axis in range(rank + 1 - index_rank): + input_data = torch.randn(*shape, dtype=torch.float32) + ref_data_shape = shape[axis:axis+index_rank] + ref_data = torch.rand(ref_data_shape) + # Set the first element to 0.6 so that at least one element is selected + # and no empty tensors are produced.
+ ref_data[0] = 0.6 + + model = IndexModel(axis=axis) + self.run_compare_torch( + [input_data, ref_data], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_1(self, compute_unit, backend, shape): + # all elements are selected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + return x[:, :] + elif len(shape) == 4: + return x[:] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_2(self, compute_unit, backend, shape): + # only one axis is sliced + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index = torch.tensor([0]) + return x[index, :] + elif len(shape) == 4: + index = torch.tensor([1, 2]) + return x[:, :, index] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_3(self, compute_unit, backend, shape): + # only two axes are sliced, and connected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + return x[index_1, index_2, :] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1]) + index_2 = torch.tensor([2, 1, 0]) + return x[:, index_1, index_2, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_4(self, compute_unit, backend, shape): + # only two axes are sliced, and not connected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + return x[index_1, :, index_2] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1]) + index_2 = torch.tensor([3, 3, 4]) + return x[index_1, :, :, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_5(self, compute_unit, backend, shape): + # all axes are sliced + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + index_3 = torch.tensor([2]) + return x[index_1, index_2, index_3] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1, 0, 0]) + index_2 = torch.tensor([1, 2, 0, 0, 0]) + index_3 = torch.tensor([0, 1, 2, 3, 3]) + index_4 = torch.tensor([2, 1, 0, 4, 4]) + return x[index_1, index_2, index_3, index_4] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + 
itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_6(self, compute_unit, backend, shape): + # only one axis is sliced, in n-D index mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index = torch.tensor([0, 0, 0, 0, 0, 0]) + index = index.view(2, 3) + return x[index, :] + elif len(shape) == 4: + index = torch.tensor([0, 1, 2, 3, 0, 1]) + index = index.view(3, 2) + return x[:, index] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_7(self, compute_unit, backend, shape): + # two axes are sliced and connected, in n-D index mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0]).view(4, 2) + index_2 = torch.tensor([1, 0, 0, 0, 1, 1, 1, 1]).view(4, 2) + return x[index_1, index_2, :] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 0, 2, 2, 1, 1, 2, 0]).view(2, 4) + index_2 = torch.tensor([0, 1, 2, 3, 0, 1, 2, 3]).view(2, 4) + return x[:, index_1, index_2, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_8(self, compute_unit, backend, shape): + # two axes are sliced and not connected, in n-D index mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0]).view(2, 4) + index_2 = torch.tensor([1, 0, 0, 2, 2, 1, 1, 1]).view(2, 4) + return x[index_1, :, index_2] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1, 1, 1, 1, 0, 0]).view(4, 2) + index_2 = torch.tensor([0, 1, 2, 3, 4, 0, 1, 2]).view(4, 2) + return x[index_1, :, :, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_9(self, compute_unit, backend, shape): + # one axis is sliced through a bool mask + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + return x[:, [True, False], :] + + elif len(shape) == 4: + return x[[True, False], :, :, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_10(self, compute_unit, backend, shape): + # multiple axes are sliced through bool masks with possible broadcasting + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + return x[[True], [True, False], [False, True, False]] + + else: + assert len(shape) == 4 + # This is a non-broadcastable case, where the number of `True`s in each dimension is the same + output_1 = x[ + [True, True], + :, + [True, True, False, False], + [True, False, False, True, False], + ] + # This is a broadcastable case + output_2 = x[ + [True, True], + :, + [False,
False, True, False], + [True, False, False, True, False], + ] + return output_1, output_2 + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (3, 4), + (3, 4, 5, 6) + ], + ), + ) + def test_index_int_index_case_11(self, compute_unit, backend, shape): + # broadcastable indices + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0]) + return x[index_1, index_2] + else: + assert len(shape) == 4 + index_1 = torch.tensor([0, 1, 1, 1, 1, 1, 0, 0]).view(4, 2) + index_2 = torch.tensor([0, 1, 2, 3]).view(4, 1) + index_3 = torch.tensor([2]).view(1,) + return x[index_1, :, index_3, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_12(self, compute_unit, backend, shape): + # Another broadcastable indices test case + class IndexModel(torch.nn.Module): + def forward(self, x): + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0]) + return ( + x[:, index_1, index_2] + if len(shape) == 3 + else x[:, index_1, index_2, :] + ) + + self.run_compare_torch( + shape, IndexModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestLoss(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, reduction", + itertools.product( + compute_units, backends, range(1, 4), ["none", "mean", "sum"] + ), + ) + def test_mse_loss(self, compute_unit, backend, rank: int, reduction: str): + input_shape = tuple(np.random.randint(low=1, high=5, size=rank)) + + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.loss = nn.MSELoss(reduction=reduction) + + def forward(self, x, y): + return self.loss(x, y) + + input_shapes = [input_shape, input_shape] + + self.run_compare_torch( + input_shapes, Model(), backend=backend, compute_unit=compute_unit + ) + + +class TestPad(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode", + itertools.product( + compute_units, backends, range(3, 5), ["reflect", "replicate"] + ), + ) + def test_pad_reflect_replicate(self, compute_unit, backend, rank: int, mode: str): + if rank == 3: + pad_len = 2 + input_shape = (5, 10, 10) + elif rank == 4: + pad_len = 4 + input_shape = (10, 5, 5, 10) + else: + raise NotImplementedError( + "Only 3D and 4D inputs are supported for non-constant padding for now" + ) + max_pad = min(input_shape[-1], input_shape[-2]) + pad = list(np.random.randint(low=0, high=max_pad, size=pad_len)) + model = ModuleWrapper( + function=torch.nn.functional.pad, kwargs={"pad": pad, "mode": mode} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_pad_constant(self, compute_unit, backend, rank: int): + if rank > 5: + raise NotImplementedError("Constant padding only supports inputs of rank < 6") + val = float(np.random.random(1)) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + pad_dims = np.random.randint(low=1, high=rank + 1) + pad = list(np.random.randint(low=0, high=10, size=pad_dims * 2)) + model =
+ +class TestLoss(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, reduction", + itertools.product( + compute_units, backends, range(1, 4), ["none", "mean", "sum"] + ), + ) + def test_mse_loss(self, compute_unit, backend, rank: int, reduction: str): + input_shape = tuple(np.random.randint(low=1, high=5, size=rank)) + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.loss = nn.MSELoss(reduction=reduction) + + def forward(self, x, y): + return self.loss(x, y) + + input_shapes = [input_shape, input_shape] + + self.run_compare_torch( + input_shapes, Model(), backend=backend, compute_unit=compute_unit + ) + + +class TestPad(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode", + itertools.product( + compute_units, backends, range(3, 5), ["reflect", "replicate"] + ), + ) + def test_pad_reflect_replicate(self, compute_unit, backend, rank: int, mode: str): + if rank == 3: + pad_len = 2 + input_shape = (5, 10, 10) + elif rank == 4: + pad_len = 4 + input_shape = (10, 5, 5, 10) + else: + raise NotImplementedError( + "Only 3D and 4D inputs with non-constant padding modes are supported for now" + ) + max_pad = min(input_shape[-1], input_shape[-2]) + pad = list(np.random.randint(low=0, high=max_pad, size=pad_len)) + model = ModuleWrapper( + function=torch.nn.functional.pad, kwargs={"pad": pad, "mode": mode} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_pad_constant(self, compute_unit, backend, rank: int): + if rank > 5: + raise NotImplementedError("Only supports < 6D constant padding") + val = float(np.random.random(1)) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + pad_dims = np.random.randint(low=1, high=rank + 1) + pad = list(np.random.randint(low=0, high=10, size=pad_dims * 2)) + model = ModuleWrapper( + function=torch.nn.functional.pad, + kwargs={"pad": pad, "mode": "constant", "value": val}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_1d(self, compute_unit, backend): + input_shape = (3, 4, 5) + model = torch.nn.ConstantPad1d((5, 6), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_2d(self, compute_unit, backend): + input_shape = (3, 4, 5, 6) + model = torch.nn.ConstantPad2d((5, 6, 3, 8), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_3d(self, compute_unit, backend): + input_shape = (3, 4, 5, 6, 2) + model = torch.nn.ConstantPad3d((5, 6, 3, 8, 2, 4), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestMeshgrid(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x, y, z, dtype, inp_mode, indexing", + itertools.product( + compute_units, + backends, + [1, 2], + [3, 4], + [5, 6], + [torch.int, torch.float], + ["norm", "list"], + [None, "ij", "xy"], + ), + ) + def test_meshgrid( + self, + compute_unit, + backend, + x, + y, + z, + dtype, + inp_mode, + indexing, + ): + class TestModel(nn.Module): + def forward(self, x, y, z): + if inp_mode == "norm": + return torch.meshgrid(x, y, z, indexing=indexing) + elif inp_mode == "list": + return torch.meshgrid([x, y, z], indexing=indexing) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=inp_mode)) + + inputs = ( + torch.arange(start=0, end=x, step=1, dtype=dtype), + torch.arange(start=0, end=y, step=1, dtype=dtype), + torch.arange(start=0, end=z, step=1, dtype=dtype), + ) + model = TestModel().eval() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestScatter(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(dim, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + )
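+ # Note (illustration): scatter_ writes `source` into the input along `dim` at the positions given by `index`; + # e.g. torch.zeros(3).scatter_(0, torch.tensor([0, 2]), torch.tensor([1., 2.])) gives tensor([1., 0., 2.]).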
+ @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter_with_scalar_source(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = 1.0 + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(dim, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims, mode", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ["add", "multiply"], + ), + ) + def test_scatter_with_reduce(self, compute_unit, backend, shapes_dims, mode): + class TestModel(nn.Module): + def __init__(self, dim, shapes, mode): + super(TestModel, self).__init__() + self.dim = dim + self.mode = mode + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source, reduce=self.mode) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(dim, shapes, mode) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter_add(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_add_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(dim, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + +class TestBroadcastTensors(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(1,), (1, 2)], + ), + ) + def test_one_tensor(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a): + return torch.broadcast_tensors(a) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (1, 3)], + [(5, 1, 4, 1), (3, 1, 1)], + [(1,), (3, 1, 7)], + [(2, 1), (4, 3, 2, 1)], + ], + ), + ) + def test_two_tensors(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a, b): + return torch.broadcast_tensors(a, b) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (1, 3), (1,), (1, 1)], + [(5, 1, 4, 1), (3, 1, 1), (1,), (4, 8)], + [(1,), (2, 1), (3, 2, 1), (5, 4, 3, 2, 1)], + ], + ), + ) + def test_four_tensors(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a, b, c, d): + return torch.broadcast_tensors(a, b, c, d) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestEmbedding(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, + backends, + [np.int32, np.float32], + ), + ) + def test_embedding(self, compute_unit,
backend, input_dtype): + num_embeddings = 4 + embedding_size = 10 + B = 2 + dim = 5 + converter_input_type = [TensorType(shape=(B, dim), dtype=input_dtype)] + + # input shape: (B, dim) + # output shape : (B, dim, embedding_size) + # shape of weights : (num_embeddings, embedding_size) + class EmbeddingModel(nn.Module): + def __init__(self): + super(EmbeddingModel, self).__init__() + self.embedding = torch.nn.Embedding(num_embeddings, embedding_size) + + def forward(self, x): + return self.embedding(x) + + input_data = np.random.randint(low=0, high=num_embeddings, size=(B, dim)) + input_data = torch.from_numpy(input_data) + model = EmbeddingModel() + expected_results = model(input_data) + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestDuplicateOutputTensors(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, + backends, + [np.int32, np.float32], + ), + ) + # Test case for rdar://100138064 (Duplicate output tensors trigger ops removal errors). + def test_duplicate_output_not_raise_errors( + self, compute_unit, backend, input_dtype + ): + if backend[0] == "neuralnetwork": + pytest.skip( + "rdar://100243127 ([PyTorch] Duplicate Output Tensor Doesn't work for neuralnetwork)" + ) + + class DuplicateTensorsModel(torch.nn.Module): + def forward(self, x): + return x, x + + input_data = torch.rand(2, 2, 1, 1) + converter_input_type = [ct.TensorType(shape=input_data.shape)] + model = DuplicateTensorsModel() + expected_results = model(input_data) + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestBaddbmm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(2, 4, 6, 8), (4, 12, 6, 16)], + ), + ) + def test_baddbmm(self, compute_unit, backend, shapes): + B, N, M, P = shapes + + # input shape: any shape broadcastable to (B, N, P) + # batch1 shape: (B, N, M) + # batch2 shape: (B, M, P) + # output shape : (B, N, P) + class BaddbmmModel(nn.Module): + def __init__(self): + super(BaddbmmModel, self).__init__() + self.batch1 = torch.randn(B, N, M) + self.batch2 = torch.randn(B, M, P) + + def forward(self, x): + return torch.baddbmm(x, self.batch1, self.batch2) + + model = BaddbmmModel() + # Makes it broadcastable to (B, N, P). + for input_shape in [(1, N, P), (B, 1, P), (1, P)]: + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestGlu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(2, 4, 6, 8), (6, 2, 10)], + ), + ) + def test_glu(self, compute_unit, backend, shapes): + # The dim specified for GLU shouldn't exceed the max dim in input. + glu_dim_list = [-1] + [i for i in range(len(shapes))] + for glu_dim in glu_dim_list: + model = torch.nn.GLU(glu_dim) + self.run_compare_torch( + shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestHstack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 4, 6), (2, 4, 6)], + [(1, 4, 5), (1, 2, 5)], + [(1,), (3,)], + ], # Test 1-D tensors. 
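+ # torch.hstack concatenates along dim 1 (columns); 1-D inputs are concatenated along dim 0.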
+ ), + ) + def test_hstack(self, compute_unit, backend, shapes): + class HstackModel(nn.Module): + def forward(self, *tensors): + return torch.hstack(tensors) + + self.run_compare_torch( + shapes, HstackModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [[(2, 4, 6), (2, 4, 6)]], + ), + ) + def test_hstack_with_parameter_out(self, compute_unit, backend, shapes): + class HstackModel(nn.Module): + def forward(self, *tensors): + output_tensor = torch.tensor([]) + torch.hstack(tensors, out=output_tensor) + return output_tensor + + self.run_compare_torch( + shapes, HstackModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestRemainder(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 4, 6), (2, 4, 6)], + [(2, 4, 6), (4, 6)], # broadcastable tensors + [(2, 4, 6), (2, 1, 6)], + ], + ), + ) + def test_remainder(self, compute_unit, backend, shapes): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + return torch.remainder(dividend, divisor) + + self.run_compare_torch( + shapes, RemainderModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [[(2, 4, 6), (2, 4, 6)]], + ), + ) + def test_remainder_with_parameter_out(self, compute_unit, backend, shapes): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + output_tensor = torch.tensor([]) + torch.remainder(dividend, divisor, out=output_tensor) + return output_tensor + + self.run_compare_torch( + shapes, RemainderModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_remainder_input_types_promotion(self, compute_unit, backend): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + return torch.remainder(dividend, divisor) + + input_dividend = torch.randint(low=0, high=10, size=(2, 3), dtype=torch.int32) + input_divisor = torch.rand(2, 3) + self.run_compare_torch( + [input_dividend, input_divisor], + RemainderModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestSum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, backends, [torch.int32, torch.float32, torch.bool] + ), + ) + def test_sum(self, compute_unit, backend, input_dtype): + model = ModuleWrapper(function=torch.sum) + + input_data = torch.zeros(2, 3).to(input_dtype) + expected_results = model(input_data) + + TorchBaseTest.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLogsumexp(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [0, -1], + ), + ) + def test_logsumexp(self, compute_unit, backend, shape, dim): + params = {"dim": dim} + model = ModuleWrapper( + function=torch.logsumexp, + kwargs=params, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestHannWindow(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, window_length, 
periodic", + itertools.product( + compute_units, + backends, + [1, 3, 6, 10, 12], + [True, False], + ), + ) + def test_hann_window(self, compute_unit, backend, window_length, periodic): + class HannWindowModel(nn.Module): + def forward(self, x): + return torch.hann_window(window_length, periodic) + + input_shape = np.random.randint(low=1, high=10, size=(window_length,)) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = HannWindowModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTrace(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 4), (4, 3), (5, 5)], + ), + ) + def test_trace(self, compute_unit, backend, shape): + model = ModuleWrapper(torch.trace) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestRoll(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, shifts", + itertools.product( + compute_units, + backends, + [(5,), (2, 4), (4, 2, 3)], + [0, 1, 3], + ), + ) + def test_roll(self, compute_unit, backend, shape, shifts): + model = ModuleWrapper(torch.roll, kwargs={"shifts": shifts}) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, shifts_dims", + itertools.product( + compute_units, + backends, + [(4, 2, 3)], + [ + [0, 0], + [4, 0], + [9, 0], + [[0, 1], [0, 1]], + # Shifts exceeeds dimension + [[89, 93, 102], [0, 1, 2]], + # Negative shifts + [[-9, -1], [1, 2]], + # Duplicate dims + [[8, 10, -8], [0, 1, 0]] + ], + ), + ) + def test_roll_with_dims(self, compute_unit, backend, shape, shifts_dims): + shifts, dims = shifts_dims + model = ModuleWrapper(torch.roll, kwargs={"shifts": shifts, "dims": dims}) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + +class TestArgmax(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, input_dtype", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [np.float32, np.int32, np.int64], + ), + ) + def test_argmax( + self, + compute_unit, + backend: Tuple[str, str], + shape: Tuple[int], + axis: int, + input_dtype: np.dtype, + ): + input_data = ( + torch.rand(*shape) + if input_dtype == np.float32 + else torch.randint(10, shape) + ) + converter_input_type = [ + ct.TensorType(shape=input_data.shape, dtype=input_dtype) + ] + model = ModuleWrapper(function=torch.argmax, kwargs={"dim": axis}) + expected_results = model(input_data) + TorchBaseTest.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + +class TestStack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num", + itertools.product( + compute_units, + backends, + [1, 3], + [1, 3], + ), + ) + def test_stack(self, compute_unit, backend, rank, num): + input_shape = np.random.randint(low=1, high=6, size=rank) + for dim in [None] + list(range(rank + 1)): + print("dim", dim) + + class StackModel(torch.nn.Module): + def forward(self, *inputs): + if dim is None: + return torch.stack(inputs) + else: + return torch.stack(inputs, dim=dim) + + TorchBaseTest.run_compare_torch( + [input_shape] * 
num, + StackModel(), + backend=backend, + compute_unit=compute_unit, + ) + + +class TestComplex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + real_part = x + 1 + imag_part = -x + complex_data = torch.complex(real_part, imag_part) + return torch.stack([complex_data.real, complex_data.imag], dim=1) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_real_imag_same_input(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x, x).real + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_input_error(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x.real, x.imag) + + input_data = torch.tensor([1 + 0j, 2 + 3j], dtype=torch.complex64) + with pytest.raises( + TypeError, + match="dtype= is unsupported for inputs/outputs of the model", + ): + converter_input_type = [ + ct.TensorType(shape=input_data.shape, dtype=np.complex64) + ] + TorchBaseTest.run_compare_torch( + input_data, + ComplexModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + converter_input_type=converter_input_type, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_output_error(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x, x) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestReal(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_real_real_input(self, compute_unit: ct.ComputeUnit, backend): + class RealModel(torch.nn.Module): + def forward(self, x): + return torch.real(x) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), RealModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_real_complex_input(self, compute_unit: ct.ComputeUnit, backend): + class RealModel(torch.nn.Module): + def forward(self, x): + return torch.real(torch.complex(x, x)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), RealModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestImag(TorchBaseTest): + # torch.imag only support complex input, so we don't need to test real number input. 
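+ # (Calling torch.imag on a real-dtype tensor raises a RuntimeError in PyTorch.)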
+ @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_imag_complex_input(self, compute_unit: ct.ComputeUnit, backend): + class ImagModel(torch.nn.Module): + def forward(self, x): + return torch.imag(torch.complex(x, x)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ImagModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestFft(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_directly_use_fft_complex_output_error( + self, compute_unit: ct.ComputeUnit, backend + ): + class FftModel(torch.nn.Module): + def forward(self, x): + return torch.fft.fft(x) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, fft_variant", + itertools.product( + compute_units, + backends, + [(1,), (2, 3), (3, 1, 2)], + ["fft", "rfft", "ifft", "irfft"], + ), + ) + def test_fft_basic_no_param( + self, compute_unit: ct.ComputeUnit, backend, input_shape, fft_variant + ): + if input_shape == (1,) and fft_variant == "irfft": + pytest.skip("PyTorch doesn't support length-1 input (1,) for irfft.") + + class FftModel(torch.nn.Module): + def forward(self, x): + if fft_variant == "fft": + return torch.fft.fft(x).real + elif fft_variant == "rfft": + return torch.fft.rfft(x).real + elif fft_variant == "ifft": + x = torch.complex(x, x) + return torch.fft.ifft(x).real + elif fft_variant == "irfft": + x = torch.complex(x, x) + return torch.fft.irfft(x) + else: + raise ValueError(f"Invalid fft_variant {fft_variant}.") + + TorchBaseTest.run_compare_torch( + input_shape, FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, fft_variant, n, dim, norm", + itertools.product( + compute_units, + backends, + ["fft", "rfft", "ifft", "irfft"], + [None, 1, 5], + [0, 1, -1], + [None, "forward", "backward", "ortho"], + ), + ) + def test_fft_basic( + self, compute_unit: ct.ComputeUnit, backend, fft_variant, n, dim, norm + ): + class FftModel(torch.nn.Module): + def forward(self, x): + if fft_variant == "fft": + fft_res = torch.fft.fft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "rfft": + fft_res = torch.fft.rfft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "ifft": + x = torch.complex(x, x) + fft_res = torch.fft.ifft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "irfft": + x = torch.complex(x, x) + return torch.fft.irfft(x, n=n, dim=dim, norm=norm) + else: + raise ValueError(f"Invalid fft_variant {fft_variant}.") + return torch.stack([fft_res.real, fft_res.imag], dim=0) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_fft_nested(self, compute_unit: ct.ComputeUnit, backend): + class FftModel(torch.nn.Module): + def forward(self, x): + fft_1 = torch.fft.fft(x, dim=2, norm="forward") + fft_2 = torch.fft.fft(fft_1, dim=0, norm="backward") + fft_3 = torch.fft.fft(fft_2, dim=1, norm="ortho") + return torch.real(fft_3) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, fftn_variant, 
shapes_and_dims, norm", + itertools.product( + compute_units, + backends, + ["fftn", "rfftn", "ifftn", "irfftn"], + [ + (None, None), + (None, [1, 0]), + ([2], None), + ([5], [0]), + ([1, 4], [1, 2]), + ([1, 3, 5], [1, -1, 0]), + ], + [None, "forward", "backward", "ortho"], + ), + ) + def test_fftn( + self, compute_unit: ct.ComputeUnit, backend, fftn_variant, shapes_and_dims, norm + ): + shapes, dims = shapes_and_dims + + class FftnModel(torch.nn.Module): + def forward(self, x): + if fftn_variant == "fftn": + fftn_res = torch.fft.fftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "rfftn": + fftn_res = torch.fft.rfftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "ifftn": + x = torch.complex(x, x) + fftn_res = torch.fft.ifftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "irfftn": + x = torch.complex(x, x) + return torch.fft.irfftn(x, s=shapes, dim=dims, norm=norm) + else: + raise ValueError(f"Invalid fftn_variant {fftn_variant}.") + return torch.stack([torch.real(fftn_res), torch.imag(fftn_res)], dim=0) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftnModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_dims_specify_by_shapes(self, compute_unit: ct.ComputeUnit, backend): + class FftnModel(torch.nn.Module): + def forward(self, x): + x = torch.complex(x, x) + return torch.fft.irfftn(x, s=x.shape[-3:], dim=(-3, -2, -1)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftnModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestNms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, box_num, iou_threshold, dynamic_input", + itertools.product( + compute_units, + backends, + [1, 5, 20, 1000], + [0.0, 0.2, 0.8], + [True, False], + ), + ) + def test_nms( + self, + compute_unit, + backend: Tuple[str, str], + box_num: int, + iou_threshold: float, + dynamic_input: bool, + ): + if box_num >= 1000 and backend == ("mlprogram", "fp16"): + pytest.xfail( + "rdar://103891349 ([TensorFlow] [PyTorch] NMS discrepancy in Fp16 when " + "number of boxes is large)" + ) + + class NmsModel(torch.nn.Module): + def forward(self, boxes, scores): + return torchvision.ops.nms(boxes, scores, iou_threshold=iou_threshold) + + input_boxes = torch.randint( + low=0, high=box_num, size=(box_num, 4), dtype=torch.float32 + ) + # When two boxes have IOU exactly equal to iou_threshold (>0.0), it will hit the corner case as shown in + # `test_nms_corner_case`, which has a discrepancy between CoreML and PyTorch. To avoid this situation, we keep + # regenerating the input boxes at most _MAX_REGEN times until there is no corner case in the generated boxes. + _MAX_REGEN = 3 + regen_count = 0 + while regen_count < _MAX_REGEN and iou_threshold > 0.0 and iou_threshold in torchvision.ops.box_iou( + input_boxes, input_boxes): + input_boxes = torch.randint( + low=0, high=box_num, size=(box_num, 4), dtype=torch.float32 + ) + regen_count += 1 + + # When the input score is too close, the returned index order is not guaranteed (same + # behaviour as PyTorch). So instead of generating random scores by torch.rand, use shuffle. 
+ input_scores = np.arange(box_num) + np.random.shuffle(input_scores) + input_scores = torch.tensor(input_scores, dtype=torch.float32) + + if dynamic_input: + converter_input_type = [ + ct.TensorType(shape=(RangeDim(1, -1), 4)), + ct.TensorType(shape=(RangeDim(1, -1),)), + ] + else: + converter_input_type = [ + ct.TensorType(shape=input_boxes.shape), + ct.TensorType(shape=input_scores.shape), + ] + + nms_model = NmsModel() + nms_model.eval() + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_nms_corner_case_iou_equal_threshold( + self, + compute_unit, + backend: Tuple[str, str], + ): + class NmsModel(torch.nn.Module): + def forward(self, boxes, scores): + return torchvision.ops.nms(boxes, scores, iou_threshold=0.2) + + input_boxes = torch.tensor([[3., 2., 3., 0.], + [0., 0., 2., 2.], + [1., 3., 2., 1.], + [0., 2., 1., 3.], + [1., 1., 2., 3.]], dtype=torch.float32) + input_scores = torch.tensor([3., 2., 0., 1., 4.], dtype=torch.float32) + converter_input_type = [ + ct.TensorType(shape=input_boxes.shape), + ct.TensorType(shape=input_scores.shape), + ] + + nms_model = NmsModel() + nms_model.eval() + expected_results = nms_model(input_boxes, input_scores) + with pytest.raises(AssertionError, match="Items are not equal"): + # TODO: rdar://104966206 ([PyTorch] Re-enable NMS Corner Case Tests After PyTorch Fixes Bugs). + # This is because the IOU between the last box ([1., 1., 2., 3.]) and the second box ([0., 0., 2., 2.]) is + # exactly 0.2 (IOU threshold), which leads to a corner case that PyTorch will remove the second box while + # CoreML keeps it. According to PyTorch's doc, only boxes with `greater than iou_threshold` should be + # removed, so it's a bug in PyTorch's side. + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + # Change the last input box to make IOU slightly larger than 0.2, the output of CoreML will match PyTorch. + input_boxes[-1][-1] = 2.999 + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + # Change the last input box to make IOU slightly smaller than 0.2, the output of CoreML will match PyTorch. 
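+ # (Worked numbers: the enlarged last box [1., 1., 2., 3.0001] still intersects [0., 0., 2., 2.] with area 1.0, + # while the union grows to 4 + 2.0001 - 1 = 5.0001, so IoU ~= 0.19999 < 0.2.)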
+ input_boxes[-1][-1] = 3.0001 + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + +class TestTensorSize(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_tensor_size(self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]]): + class TestModel(torch.nn.Module): + def forward(self, x): + return x.size() + + self.run_compare_torch( + [(1, 2, 3)], + TestModel(), + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + [('mlprogram', "fp16")], + ) + ) + def test_tensor_size_with_dim(self, compute_unit: ct.ComputeUnit.CPU_ONLY, + backend: List[Tuple[str]]): + class TestModel(torch.nn.Module): + def forward(self, x): + return x.size(dim=-1) + + model = TestModel() + + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is folded to const. + assert len(prog.find_ops(op_type="shape")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that shape op is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and "shape" in var.op.op_type + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is not folded to const. + assert len(prog.find_ops(op_type="shape")) == 1 + + +class TestBitwiseAnd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bitwise_and( + self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]] + ): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return torch.bitwise_and(x, y) + + input_shape = (2, 3) + input_data_x = torch.rand(*input_shape) > 0.2 + input_data_y = torch.rand(*input_shape) < 0.8 + self.run_compare_torch( + [input_data_x, input_data_y], + TestModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bitwise_and_unsupport_input( + self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]] + ): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return torch.bitwise_and(x, y) + + input_shape = (2, 3) + input_data_x = torch.randint( + low=0, high=10, size=input_shape, dtype=torch.int32 + ) + input_data_y = torch.randint( + low=0, high=10, size=input_shape, dtype=torch.int32 + ) + with pytest.raises( + NotImplementedError, + match="The `bitwise_and` op only supports boolean input", + ): + self.run_compare_torch( + [input_data_x, input_data_y], + TestModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestUnfold(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, padding, stride", + itertools.product( + compute_units, + backends, + [(1, 1, 10, 11), (5, 3, 12, 13)], + [(2, 3)], + [0, 1, 8, (1, 3), (2, 6), (0, 5)], + [1, 2, 7, (2, 3), (5, 4)], + ), + ) + def 
test_unfold(self, compute_unit, backend, input_shape, kernel_size, padding, stride): + class UnfoldModel(nn.Module): + def forward(self, x): + return torch.nn.functional.unfold( + input=x, kernel_size=kernel_size, padding=padding, stride=stride + ) + + self.run_compare_torch( + input_shape, UnfoldModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestTupleUnpack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tuple_unpack(self, compute_unit, backend): + class ReturnTupleModel(nn.Module): + def forward(self, x): + return x * 3, x * 4, x * 5 + + class TestModel(nn.Module): + def __init__(self): + super().__init__() + self.return_tuple_layer = ReturnTupleModel() + + def forward(self, x): + out1, out2, out3 = self.return_tuple_layer(x) + return out1.relu(), out2.sigmoid(), out3.softmax(1) + + self.run_compare_torch((1, 2, 3), TestModel(), backend=backend, compute_unit=compute_unit) + + +class TestTupleIndex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends,), + ) + def test_tuple_index(self, compute_unit, backend): + class InnerModel(nn.Module): + def forward(self,x): + return (torch.tensor([0]), torch.tensor([1])) + + class OuterModel(nn.Module): + def __init__(self): + super().__init__() + self.innermodel = torch.jit.trace(InnerModel().eval(), x) + + def forward(self, x): + inner = self.innermodel(x) + return inner[0] + + x = torch.rand(1, 3, 640, 640) + self.run_compare_torch(x, OuterModel(), + input_as_shape=False, use_scripting=True, + backend=backend, compute_unit=compute_unit) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py new file mode 100644 index 00000000..f50a587d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py @@ -0,0 +1,259 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest +import torch +import torch.nn as nn + +import coremltools as ct +import coremltools.models.utils as coremltoolsutils +from coremltools import RangeDim, TensorType +from coremltools._deps import _IS_MACOS +from coremltools.converters.mil.mil.types.type_mapping import \ + nptype_from_builtin +from coremltools.converters.mil.testing_utils import ct_convert + +from ..converter import torch_to_mil_types + + +class ModuleWrapper(nn.Module): + """ + Helper class to transform torch function into torch nn module. + This helps to keep the testing interface same for torch functional api. 
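+ For example, ModuleWrapper(torch.clamp, {"min": 0.0}) behaves like a module whose forward + computes torch.clamp(x, min=0.0).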
+ """ + def __init__(self, function, kwargs=None): + super(ModuleWrapper, self).__init__() + self.function = function + self.kwargs = kwargs if kwargs else {} + + def forward(self, *args): + return self.function(*args, **self.kwargs) + + +np.random.seed(1984) + + +def _flatten(objects): + flattened_list = [] + for item in objects: + if isinstance(item, (list, tuple)): + flattened_list.extend(_flatten(item)) + else: + flattened_list.append(item) + return flattened_list + + +def _copy_input_data(input_data): + if isinstance(input_data, (list, tuple)): + return [_copy_input_data(x) for x in input_data] + return input_data.clone().detach() + + +def contains_op(torch, op_string): + return hasattr(torch, op_string) + + +def convert_to_coreml_inputs(input_description, inputs): + """ + Convenience function to combine a CoreML model's input description and + set of raw inputs into the format expected by the model's predict function. + """ + flattened_inputs = _flatten(inputs) + coreml_inputs = { + str(x): inp.numpy().astype(np.float32) for x, inp in zip(input_description, flattened_inputs) + } + + for k, v in coreml_inputs.items(): + if isinstance(v, np.ndarray) and v.ndim == 0: + coreml_inputs[k] = np.expand_dims(v, axis=-1) + + return coreml_inputs + + +def convert_to_mlmodel(model_spec, tensor_inputs, backend=("neuralnetwork", "fp32"), + converter_input_type=None, compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None): + def _convert_to_inputtype(inputs): + if isinstance(inputs, list): + return [_convert_to_inputtype(x) for x in inputs] + elif isinstance(inputs, tuple): + return tuple([_convert_to_inputtype(x) for x in inputs]) + elif isinstance(inputs, TensorType): + return inputs + elif isinstance(inputs, torch.Tensor): + return TensorType(shape=inputs.shape, dtype=torch_to_mil_types[inputs.dtype]) + else: + raise ValueError( + "Unable to parse type {} into InputType.".format(type(inputs)) + ) + + if converter_input_type is None: + inputs = list(_convert_to_inputtype(tensor_inputs)) + else: + inputs = converter_input_type + + return ct_convert(model_spec, inputs=inputs, convert_to=backend, + source="pytorch", compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target) + + +def generate_input_data(input_size, rand_range=(0, 1)): + r1, r2 = rand_range + + def random_data(spec): + if isinstance(spec, TensorType): + spec_shape = spec.shape.shape + dtype = nptype_from_builtin(spec.dtype) + else: + spec_shape = spec + dtype = np.float32 + + static_shape = tuple([np.random.randint(dim.lower_bound, dim.upper_bound if dim.upper_bound > 0 else 10) + if isinstance(dim, RangeDim) else dim for dim in spec_shape]) + + data = np.random.rand(*static_shape) if static_shape != () else np.random.rand() + data = (r1 - r2) * data + r2 + return torch.from_numpy(np.array(data).astype(dtype)) + + if isinstance(input_size, list): + return [random_data(size) for size in input_size] + else: + return random_data(input_size) + + +def trace_model(model, input_data): + model.eval() + if isinstance(input_data, list): + input_data = tuple(input_data) + torch_model = torch.jit.trace(model, input_data) + return torch_model + + +def flatten_and_detach_torch_results(torch_results): + if isinstance(torch_results, (list, tuple)): + return [x.detach().numpy() for x in _flatten(torch_results) if x is not None] + # Do not need to flatten + return [torch_results.detach().numpy()] + + +def convert_and_compare( + input_data, + model_spec, + expected_results=None, + atol=1e-4, + rtol=1e-05, + 
backend=("neuralnetwork", "fp32"), + converter_input_type=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None +): + """ + If expected results is not set, it will by default + be set to the flattened output of the torch model. + + Inputs: + + - input_data: torch.tensor or list[torch.tensor] + """ + if isinstance(model_spec, str): + torch_model = torch.jit.load(model_spec) + else: + torch_model = model_spec + + if not isinstance(input_data, (list, tuple)): + input_data = [input_data] + + if expected_results is None: + torch_input = _copy_input_data(input_data) + expected_results = torch_model(*torch_input) + expected_results = flatten_and_detach_torch_results(expected_results) + mlmodel = convert_to_mlmodel(model_spec, input_data, backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target,) + + coreml_inputs = convert_to_coreml_inputs(mlmodel.input_description, input_data) + + if not _IS_MACOS or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)): + return model_spec, mlmodel, coreml_inputs, None + + _, dtype = backend + if mlmodel.compute_unit != ct.ComputeUnit.CPU_ONLY or (dtype == "fp16"): + atol = max(atol * 100.0, 5e-1) + rtol = max(rtol * 100.0, 5e-2) + + if not coremltoolsutils._has_custom_layer(mlmodel._spec): + coreml_preds = mlmodel.predict(coreml_inputs) + coreml_outputs = mlmodel._spec.description.output + coreml_results = [ + coreml_preds[output.name] for output in coreml_outputs + ] + for torch_result, coreml_result in zip(expected_results, + coreml_results): + + if torch_result.shape == (): + torch_result = np.array([torch_result]) + np.testing.assert_equal(coreml_result.shape, torch_result.shape) + np.testing.assert_allclose(coreml_result, torch_result, atol=atol, rtol=rtol) + return model_spec, mlmodel, coreml_inputs, coreml_preds + + +class TorchBaseTest: + testclassname = '' + testmodelname = '' + + @pytest.fixture(autouse=True) + def store_testname_with_args(self, request): + TorchBaseTest.testclassname = type(self).__name__ + TorchBaseTest.testmodelname = request.node.name + + @staticmethod + def run_compare_torch( + input_data, + model, + expected_results=None, + atol=1e-04, + rtol=1e-05, + input_as_shape=True, + backend=("neuralnetwork", "fp32"), + rand_range=(-1.0, 1.0), + use_scripting=False, + converter_input_type=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None, + ): + """ + Traces a model and runs a numerical test. + Args: + input_as_shape : If true generates random input data with shape. + expected_results : Expected result from running pytorch model. + converter_input_type: If not None, then pass it to the "inputs" argument to the + ct.convert() call. 
+ """ + model.eval() + if input_as_shape: + input_data = generate_input_data(input_data, rand_range) + + if use_scripting: + model_spec = torch.jit.script(model) + else: + model_spec = trace_model(model, _copy_input_data(input_data)) + + model_spec, mlmodel, coreml_inputs, coreml_results = \ + convert_and_compare( + input_data, + model_spec, + expected_results=expected_results, + atol=atol, + rtol=rtol, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + return model_spec, mlmodel, coreml_inputs, coreml_results, \ + TorchBaseTest.testclassname, TorchBaseTest.testmodelname diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py new file mode 100644 index 00000000..128fdd5a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py @@ -0,0 +1,58 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +_TORCH_OPS_REGISTRY = {} + + +def register_torch_op(_func=None, torch_alias=None, override=False): + """ + Registration routine for PyTorch operators + _func: (PyTorch conversion function) [Default=None] + PyTorch conversion function to register + + torch_alias: (List of string) [Default=None] + All other PyTorch operators that should also be mapped to + current conversion routine. + e.g. Sort aliased with SortV1, SortV2 + All provided alias operators must not be registered previously. + + "In place" alias are looked up automatically and do not need to + be registered. PyTorch uses an underscore suffix to denote the + in place version, e.g. "sum_" is the in place version of "sum". + + override: (Boolean) [Default=False] + If True, overrides earlier registration i.e. specified + operator and alias will start pointing to current conversion + function. + Otherwise, duplicate registration will error out. + """ + + def func_wrapper(func): + f_name = func.__name__ + + if f_name.endswith("_"): + raise Exception( + "Attempting to register \"{}\" op. Do not register inplace ops. (inplace torch ops" + " end in a \"_\"). Instead register the normal op version: \"{}\". The inplace" + " version will be supported automatically.".format(f_name, f_name[:-1]) + ) + if not override and f_name in _TORCH_OPS_REGISTRY: + raise ValueError("Torch op {} already registered.".format(f_name)) + + _TORCH_OPS_REGISTRY[f_name] = func + + if torch_alias is not None: + for name in torch_alias: + if not override and name in _TORCH_OPS_REGISTRY: + msg = "Torch op alias {} already registered." + raise ValueError(msg.format(name)) + _TORCH_OPS_REGISTRY[name] = func + + return func + + if _func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(_func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py new file mode 100644 index 00000000..c3f4298b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py @@ -0,0 +1,322 @@ +# Copyright (c) 2021, Apple Inc. 
All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from collections import OrderedDict, defaultdict + +from coremltools import _logger as logger + +from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode + + +def generate_tensor_assignment_ops(graph): + """ + This graph pass handles in-place tensor assignments; specifically, it handles + `torch.Tensor.copy_` and `torch.Tensor.fill_`. Many other in-place tensor + assignments are currently not handled. + + For instance: + + def forward(self, x): # x is a tensor with shape [4, 10] + x[:2, 4] = [[1],[3]] + return x + + In PyTorch, this is represented by a sequence of slice / select ops followed by a copy op: + + input -> %x + %1 = slice(%x, dim=0, begin=0, end=2, stride=1) # the slice for dimension 0 + %2 = select(%1, dim=1, index=4) # the select for dimension 1 + %3 = copy_(%2, value=[[1], [3]]) + output -> %x + + This graph pass fuses the sequences into a single InternalTorchIRNode of a new kind, which is defined as `_internal_op_tensor_inplace_copy`. + + input -> %x + %nodes_to_fuse = [slice(%x, begin=0, end=2, stride=1), select(%1, dim=1, index=4)] + %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=[[1],[3]], nodes_to_fuse=nodes_to_fuse) + output -> x_internal_tensor_assign_1 + + The `_internal_op_tensor_inplace_copy` op takes an additional internal data member nodes_to_fuse, + which is a list of select / slice InternalTorchIRNodes that need to be fused. + Here is a more complicated example: + + def forward(self, x): # x is a tensor with shape [4, 10] + x[0, 0] = 1 + x[1:2, 1:2] = [[0]] + return x + + Input graph: + input -> %x + %1 = select(%x, dim=0, index=0) + %2 = select(%1, dim=0, index=0) + %3 = copy_(%2, value=1) + %4 = slice(%x, dim=0, begin=1, end=2, stride=1) + %5 = slice(%4, dim=1, begin=1, end=2, stride=1) + %6 = copy_(%5, value=[[0]]) + output -> %x + + Output graph: + input -> %x + %nodes_to_fuse_1 = [select(%x, dim=0, index=0), select(%1, dim=0, index=0)] + %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=1, nodes_to_fuse=nodes_to_fuse_1) + %nodes_to_fuse_2 = [slice(%x, dim=0, begin=1, end=2, stride=1), slice(%4, dim=1, begin=1, end=2, stride=1)] + %x_internal_tensor_assign_2 = _internal_op_tensor_inplace_copy(%x_internal_tensor_assign_1, value=[[0]], nodes_to_fuse=nodes_to_fuse_2) + output -> x_internal_tensor_assign_2 + + torch.Tensor.fill_ works in a similar way, except that the resulting InternalTorchIRNode kind is `_internal_op_tensor_inplace_fill`. + + A fill_ operator is generated from the following forward pass: + + def forward(self, x): # x is a tensor with shape [5, 4] + x[2] = 9 + return x + """ + + TENSOR_ASSIGMENT_PREFIX = "_internal_tensor_assign_" + + def _get_updated_name(name, updated_tensor_count): + if name in updated_tensor_count: + return name + TENSOR_ASSIGMENT_PREFIX + str(updated_tensor_count[name]) + return name + + def _construct_nodes_to_fuse_inputs(nodes_to_fuse): + inputs = [] + for node in nodes_to_fuse: + if node.kind == "select": + inputs += [node.inputs[2], None, None] + if node.kind == "slice": + inputs += [node.inputs[2], node.inputs[3], node.inputs[4]] + return inputs + + tensor_to_node_sequence_mapping = {} + updated_tensor_count = defaultdict(lambda: 0) + + for i in range(len(graph.nodes)): + node = graph.nodes[i] + + for idx in range(len(node.inputs)): + input_name = node.inputs[idx] + node.inputs[idx] = _get_updated_name(input_name, updated_tensor_count) + + if node.kind in ("empty", "select", "slice"): + node_input = node.inputs[0] + node_output = node.outputs[0] + node_sequence = tensor_to_node_sequence_mapping.get(node_input, []) + if len(node_sequence) > 0: + tensor_to_node_sequence_mapping.pop(node_input) + node_sequence.append(node) + tensor_to_node_sequence_mapping[node_output] = node_sequence + + if node.kind in ("copy_", "fill_"): + node_input = node.inputs[0] + if node_input not in tensor_to_node_sequence_mapping: + raise ValueError("No matching select or slice.") + + if node.kind == "copy_": + kind = "_internal_op_tensor_inplace_copy" + else: + kind = "_internal_op_tensor_inplace_fill" + + nodes_to_fuse = tensor_to_node_sequence_mapping[node_input] + source_tensor = nodes_to_fuse[0].inputs[0] + origin_name = source_tensor.split(TENSOR_ASSIGMENT_PREFIX)[0] + + updated_tensor_count[origin_name] += 1 + + outputs = [_get_updated_name(origin_name, updated_tensor_count)] + + update_value = node.inputs[1] + nodes_to_fuse_inputs = _construct_nodes_to_fuse_inputs(nodes_to_fuse) + tensor_assign_node = InternalTorchIRNode( + node=None, + inputs=[source_tensor, update_value] + nodes_to_fuse_inputs, + outputs=outputs, + kind=kind, + blocks=[], + ) + graph.nodes[i] = tensor_assign_node + + # modify the graph outputs if they are affected by this graph pass + for idx in range(len(graph.outputs)): + output = graph.outputs[idx] + if output in updated_tensor_count: + graph.outputs[idx] = _get_updated_name(output, updated_tensor_count) + + +def remove_getattr_nodes(graph): + """ + Remove the getattr nodes from the graph + """ + + getattr_nodes = [] + new_nodes = [] + + for node in graph.nodes: + + for block in node.blocks: + remove_getattr_nodes(block) + + if node.kind == "getattr": + getattr_nodes.append(node) + else: + new_nodes.append(node) + + # check that no getattr node is among the graph outputs + for node in getattr_nodes: + if node.name in graph.outputs: + raise RuntimeError("{} should not be in the graph outputs.".format(node.name)) + + # remove the getattr nodes + graph.nodes = new_nodes + + +def transform_inplace_ops(graph, name_remap_dict=None): + + # As we modify ops, we'll need to remap symbols. + if name_remap_dict is None: + name_remap_dict = {} + + for node in graph.nodes: + for k, v in name_remap_dict.items(): + node.replace_name(k, v) + + if node.kind == "append": + if isinstance(node.parent, InternalTorchIRGraph): + # If append appears in a graph (outer block), replace + # subsequent uses of its input symbol with its output symbol.
+ name_remap_dict[node.inputs[0]] = node.outputs[0] + elif node.parent.parent.kind == "loop": + # If append appears in a loop block, add its inputs to the block + # inputs and loop inputs, and its outputs to the block outputs + # and loop outputs. + + # This is the global input to append. We need to add it to the + # loop's input list, and replace any uses after the node with + # @global_output below. + global_input = node.inputs[0] + # This will be the name of the input to append within the + # block. We need to add it to the block inputs. + local_input = node.parent.parent.name + ".0" + # This is the output of append. We need to add it to the list + # of block outputs. + local_output = node.outputs[0] + # This is the name of the new output from the loop. It should + # replace any uses of @global_input after the loop op. + global_output = local_output + ".out" + name_remap_dict[global_input] = global_output + + node.parent.parent.inputs.append(global_input) + node.parent.inputs.append(local_input) + node.replace_name(global_input, local_input) + node.parent.outputs.append(local_output) + node.parent.parent.outputs.append(global_output) + node.parent.parent.name = node.parent.parent.outputs[0] + elif node.parent.parent.kind == "if": + # If append appears in an if/else block, add its outputs to the + # block outputs and loop outputs. + # Note that we can't assume the append appears in both blocks. + raise NotImplementedError( + "inplace_ops pass doesn't yet support append op inside conditional" + ) + + for block in node.blocks: + transform_inplace_ops(block, name_remap_dict) + + # Replace names in graph outputs + for k, v in name_remap_dict.items(): + try: + idx = graph.outputs.index(k) + except ValueError: + pass + else: + graph.outputs[idx] = v + + +def flatten_graph_input_values(graph): + """ CoreML can't handle nested iterables of tensors, so we flatten the + inputs of any graph that expects them. + """ + new_graph_inputs = graph.inputs + all_new_nodes = [] + changed = True + notified = False + + while changed: + old_graph_inputs = new_graph_inputs + new_graph_inputs = OrderedDict() + new_nodes = [] + changed = False + for _input_name, _input_val in old_graph_inputs.items(): + if isinstance(_input_val, (tuple, list)): + changed = True + if not notified: + notified = True + logger.warning( + "Tuple detected at graph input. This will be flattened in the converted model." + ) + # If this input to the graph is a tuple, we want to replace it + # with a flattened version and add an op to construct the tuple. + node_inputs = [] + for idx, item in enumerate(_input_val): + name = _input_name + "_{}".format(idx) + new_graph_inputs[name] = item + node_inputs.append(name) + new_nodes.append( + InternalTorchIRNode( + inputs=node_inputs, + outputs=[_input_name], + kind="tupleconstruct", + ) + ) + else: + # This input isn't a tuple, keep it as is. + new_graph_inputs[_input_name] = _input_val + all_new_nodes = new_nodes + all_new_nodes + graph.inputs = new_graph_inputs + graph.nodes = all_new_nodes + graph.nodes + + +def flatten_graph_output_values(graph): + """ + CoreML can't handle nested iterables of tensors, so we flatten the + outputs of any graph that produces them. + """ + node_names = [node.name for node in graph.nodes] + new_graph_outputs = graph.outputs + changed = True + notified = False + + while changed: + old_graph_outputs = new_graph_outputs + new_graph_outputs = [] + changed = False + for outp in old_graph_outputs: + # Find the node that generates this output var. 
+ # It is possible that the output var is not found in the list of node + # names, since nodes are named after their first output. In that + # case, the output var comes from a node that returns + # multiple outputs, which means that node cannot be a construct op. + try: + node_idx = node_names.index(outp) + except ValueError: + # @outp doesn't come from a construct op + new_graph_outputs.append(outp) + continue + if graph.nodes[node_idx].kind in [ + "tupleconstruct", + "listconstruct", + ]: + # Since this output came from a construct op, we can replace it + # with the inputs to the op. + new_graph_outputs.extend(graph.nodes[node_idx].inputs) + changed = True + if not notified: + notified = True + logger.warning( + "Tuple detected at graph output. This will be flattened in the converted model." + ) + else: + new_graph_outputs.append(outp) + # Note: if we flattened outputs, there are likely to be construct ops + # that are no longer needed. These will be removed in a later DCE pass. + graph.outputs = new_graph_outputs diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py new file mode 100644 index 00000000..0a06d4d7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py @@ -0,0 +1,492 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from enum import Enum + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types.symbolic import is_symbolic +from coremltools.converters.mil.mil.types.type_mapping import ( + is_builtin, numpy_type_to_builtin_type) + + + +class ColorLayout(Enum): + RGB = "RGB" + BGR = "BGR" + GRAYSCALE = "G" + GRAYSCALE_FLOAT16 = "G_FLOAT16" + + +class ClassifierConfig: + def __init__( + self, + class_labels, + predicted_feature_name="classLabel", + predicted_probabilities_output=None, + ): + """ + Configuration for classifier models. + + Parameters + ---------- + class_labels: str / list of int / list of str + If a ``list`` is provided, the ``list`` maps the index of the output of a + neural network to labels in a classifier. + + If a ``str`` is provided, the ``str`` points to a file which maps the index + to labels in a classifier. + + predicted_feature_name: str + Name of the output feature for the class labels exposed in the + Core ML neural network classifier. Default: ``'classLabel'``. + + predicted_probabilities_output: str + If provided, then this is the name of the neural network blob which + generates the probabilities for each class label (typically the output + of a softmax layer). + + If not provided, then the last output layer is assumed. + """ + self.class_labels = class_labels + self.predicted_feature_name = predicted_feature_name + self.predicted_probabilities_output = predicted_probabilities_output + + +class InputType: + def __init__(self, name=None, shape=None, dtype=None): + """ + The input type for inputs fed into the model. + + Parameters + ---------- + name: (str) + The name of the input. + + shape: list, tuple, Shape object, EnumeratedShapes object, or None + The shape(s) that are valid for this input. + + If set to ``None``, the shape will be inferred from the model itself.
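+ + dtype: (mil type or numpy dtype or None) + The dtype of this input. When set to ``None``, subclasses such as + ``TensorType`` and ``ImageType`` fill it in or derive it.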
+        """
+
+        self.name = name
+        if shape is not None:
+            self.shape = _get_shaping_class(shape)
+        else:
+            self.shape = None
+        self.dtype = dtype
+
+
+class ImageType(InputType):
+    def __init__(
+        self,
+        name=None,
+        shape=None,
+        scale=1.0,
+        bias=None,
+        color_layout=ColorLayout.RGB,
+        channel_first=None,
+    ):
+        """
+        Configuration class used for image inputs in Core ML.
+
+        Parameters
+        ----------
+        scale: float or list of floats
+            The scaling factor for all values in the image channels.
+
+        bias: float or list of floats
+            * If ``color_layout`` is ``ct.colorlayout.GRAYSCALE`` or
+              ``ct.colorlayout.GRAYSCALE_FLOAT16``, bias would be a ``float``.
+            * If ``color_layout`` is ``ct.colorlayout.RGB`` or ``ct.colorlayout.BGR``,
+              bias would be a list of ``float``.
+
+        color_layout: string or enumeration of type ``ct.colorlayout``
+            Color layout of the image. Valid values are as follows:
+
+            Enumeration (recommended):
+                * ``ct.colorlayout.RGB``
+                * ``ct.colorlayout.BGR``
+                * ``ct.colorlayout.GRAYSCALE``
+                * ``ct.colorlayout.GRAYSCALE_FLOAT16``
+
+            String values (older way to specify):
+                * ``'G'``: Grayscale (maps to ``ct.colorlayout.GRAYSCALE``)
+                * ``'RGB'``: [Red, Green, Blue] (maps to ``ct.colorlayout.RGB``)
+                * ``'BGR'``: [Blue, Green, Red] (maps to ``ct.colorlayout.BGR``)
+
+        channel_first: (bool) or None
+            Set to ``True`` if input format is channel first.
+
+            Default format:
+                * For TensorFlow: channel last (``channel_first=False``).
+                * For PyTorch: channel first (``channel_first=True``).
+        """
+        super(ImageType, self).__init__(name, shape)
+        self.scale = scale
+        msg = "color_layout should be an enum of type ct.colorlayout, i.e. one of: " \
+            "{ct.colorlayout.RGB, ct.colorlayout.BGR, " \
+            "ct.colorlayout.GRAYSCALE, ct.colorlayout.GRAYSCALE_FLOAT16}"
+        if not (isinstance(color_layout, str) or isinstance(color_layout, ColorLayout)):
+            raise ValueError(msg)
+        if isinstance(color_layout, str):
+            if color_layout not in ("G", "RGB", "BGR"):
+                raise ValueError(msg)
+            color_layout = ColorLayout(color_layout)
+
+        self.color_layout = color_layout
+        if color_layout == ColorLayout.GRAYSCALE_FLOAT16:
+            self.dtype = types.fp16
+        if bias is None:
+            if color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16):
+                self.bias = 0.0
+            else:
+                self.bias = [0.0, 0.0, 0.0]
+        else:
+            self.bias = bias
+        self.channel_first = channel_first
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __str__(self):
+        str_repr = 'ImageType[name={}, shape={}, scale={}, bias={}, ' +\
+            'color_layout={}, channel_first={}]'
+        return str_repr.format(self.name, self.shape, self.scale, self.bias,
+                               self.color_layout, self.channel_first)
+
+
+class TensorType(InputType):
+    def __init__(self, name=None, shape=None, dtype=None, default_value=None):
+        """
+        Specify a (dense) tensor input.
+
+        Parameters
+        ----------
+        name: str
+            Input name. Must match an input name in the model (usually the
+            Placeholder name for TensorFlow or the input name for PyTorch).
+
+            The ``name`` is required except for a TensorFlow model in which there is
+            exactly one input Placeholder.
+
+        shape: (1) list of positive int or RangeDim, or (2) EnumeratedShapes
+            The shape of the input.
+
+            For TensorFlow:
+                * The ``shape`` is optional. If omitted, the shape is inferred from
+                  TensorFlow graph's Placeholder shape.
+
+            For PyTorch:
+                * The ``shape`` is required.
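+
+            For example (a sketch, assuming the public ``ct`` aliases):
+                * Fixed shape: ``ct.TensorType(name="x", shape=(1, 3, 224, 224))``
+                * Flexible shape: ``ct.TensorType(name="x", shape=(1, 3, ct.RangeDim(64, 512), 64))``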
+
+        dtype: np.generic or mil.type type
+            For example, ``np.int32`` or ``coremltools.converters.mil.mil.types.fp32``
+
+        default_value: np.ndarray
+            If provided, the input is considered optional. At runtime, if the
+            input is not provided, ``default_value`` is used.
+
+            Limitations:
+                * If ``default_value`` is ``np.ndarray``, all
+                  elements are required to have the same value.
+
+                * The ``default_value`` may not be specified if ``shape`` is
+                  ``EnumeratedShapes``.
+
+        Examples
+        --------
+        * ``ct.TensorType(name="input", shape=(1, 2, 3))`` implies ``dtype ==
+          np.float32``
+
+        * ``ct.TensorType(name="input", shape=(1, 2, 3), dtype=np.int32)``
+
+        * ``ct.TensorType(name="input", shape=(1, 2, 3),
+          dtype=ct.converters.mil.types.fp32)``
+        """
+        super(TensorType, self).__init__(name, shape)
+        if dtype is not None:
+            if is_builtin(dtype):
+                self.dtype = dtype
+                if dtype not in (types.fp16, types.fp32, types.fp64, types.int32, types.int64, types.bool):
+                    raise TypeError("dtype={} is unsupported for inputs/outputs of the model".format(dtype))
+            else:
+                # Assume dtype is a numpy type
+                try:
+                    self.dtype = numpy_type_to_builtin_type(dtype)
+                except TypeError:
+                    raise TypeError("dtype={} is unsupported".format(dtype))
+                if dtype not in (np.float16, np.float32, np.float64, float,
+                                 np.int32, np.int64, int,
+                                 bool, np.bool_):
+                    raise TypeError("dtype={} is unsupported for inputs/outputs of the model".format(dtype))
+
+        if default_value is not None:
+            if isinstance(shape, EnumeratedShapes):
+                msg = 'TensorType input {} has EnumeratedShapes and ' +\
+                    'may not be optional'
+                raise ValueError(msg.format(name))
+            if not isinstance(default_value, np.ndarray):
+                msg = 'TensorType {} default_value is not np.ndarray'
+                raise ValueError(msg.format(name))
+            default_fill_val = default_value.flatten()[0]
+            if not np.all(default_value == default_fill_val):
+                msg = 'TensorType {} default_value must have all ' +\
+                    'entries equal'
+                raise ValueError(msg.format(name))
+            if not self.shape.has_symbolic and \
+                list(default_value.shape) != list(self.shape.symbolic_shape):
+                msg = 'TensorType {} default_value shape {} != ' +\
+                    'TensorType.shape {}'
+                raise ValueError(msg.format(name, default_value.shape,
+                                            self.shape.to_list()))
+            if self.dtype is not None and \
+                numpy_type_to_builtin_type(default_value.dtype) != self.dtype:
+                msg = 'TensorType {} default_value dtype {} != ' +\
+                    'TensorType.dtype {}'
+                raise ValueError(msg.format(name, default_value.dtype,
+                                            self.dtype.__type_info__()))
+            else:
+                self.dtype = numpy_type_to_builtin_type(default_value.dtype)
+
+        self.default_value = default_value
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __str__(self):
+        return 'TensorType[name={}, shape={}, dtype={}]'.format(self.name,
+                                                                self.shape,
+                                                                self.dtype)
+
+
+class RangeDim:
+    def __init__(self, lower_bound=1, upper_bound=-1, default=None,
+                 symbol=None):
+        """
+        A class for providing a range of accepted shapes.
+
+        Parameters
+        ----------
+        lower_bound: (int)
+            The minimum valid value for the shape.
+
+        upper_bound: (int)
+            The maximum valid value for the shape.
+
+            Set to ``-1`` if there is no upper limit.
+
+        default: (int) or None
+            The default value that is used for initiating the model, and set in the input shape field of the model file.
+
+            If set to ``None``, ``lower_bound`` would be used as default.
+
+        symbol: (str)
+            Optional symbol name for the dim. A symbol name is autogenerated
+            if not specified.
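+
+        Examples
+        --------
+        A sketch using the public ``ct`` aliases:
+
+        >>> import coremltools as ct
+        >>> # The batch dimension may be anywhere from 1 to 64 at runtime.
+        >>> batch = ct.RangeDim(lower_bound=1, upper_bound=64, default=1)
+        >>> input_type = ct.TensorType(name="x", shape=(batch, 3, 224, 224))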
+ """ + if symbol is None: + from coremltools.converters.mil.mil import get_new_symbol + self.symbol = get_new_symbol() + else: + from coremltools.converters.mil.mil import Symbol + self.symbol = Symbol(symbol) + self.lower_bound = lower_bound + self.upper_bound = upper_bound + if default is None: + self.default = lower_bound + else: + if default < lower_bound: + raise ValueError( + "Default value {} is less than minimum value ({}) for range".format( + default, lower_bound + ) + ) + if upper_bound > 0 and default > upper_bound: + raise ValueError( + "Default value {} is greater than maximum value ({}) for range".format( + default, upper_bound + ) + ) + self.default = default + + def __repr__(self): + return self.__str__() + + def __str__(self): + return 'RangeDim(lower_bound={}, upper_bound={}, default={}, symbol="{}")'.format( + self.lower_bound, self.upper_bound, self.default, self.symbol) + + +class Shape: + def __init__(self, shape, default=None): + """ + The basic shape class to be set in InputType. + + Parameters + ---------- + shape: list of (int), symbolic values, RangeDim object + The valid shape of the input. + + default: tuple of int or None + The default shape that is used for initiating the model, and set in + the metadata of the model file. + + If None, then ``shape`` is used. + """ + from coremltools.converters.mil.mil import get_new_symbol + + if not isinstance(shape, (list, tuple)): + msg = "Shape should be list or tuple, got type {} instead" + raise ValueError(msg.format(type(shape))) + self.symbolic_shape = [] + shape = list(shape) + for idx, s in enumerate(shape): + if s is None or s == -1: + msg = 'Dimension cannot be None or -1. Use ' +\ + 'ct.RangeDim for runtime determined dimension. ' +\ + 'Dim {}: {} ' +\ + 'See https://coremltools.readme.io/docs/flexible-inputs' + raise ValueError(msg.format(idx, s)) + if isinstance(s, RangeDim): + sym = s.symbol + self.symbolic_shape.append(sym) + elif isinstance(s, (np.generic, int)) or is_symbolic(s): + self.symbolic_shape.append(s) + else: + raise ValueError( + "Unknown type {} to build symbolic shape.".format(type(s)) + ) + + self.shape = tuple(shape) + if default is not None: + if not isinstance(default, (list, tuple)): + raise ValueError( + "Default shape should be list or tuple, got type {} instead".format( + type(default) + ) + ) + for idx, s in enumerate(default): + if not isinstance( + s, (np.generic, int) + ) and not is_symbolic(s): + raise ValueError( + "Default shape invalid, got error at index {} which is {}".format( + idx, s + ) + ) + else: + default = [] + for idx, s in enumerate(self.shape): + if isinstance(s, RangeDim): + default.append(s.default) + elif s is None or s == -1: + default.append(self.symbolic_shape[idx]) + else: + default.append(s) + self.default = tuple(default) + + @property + def has_symbolic(self): + return any(is_symbolic(s) for s in self.symbolic_shape) + + def to_list(self, allow_symbolic=False): + if not allow_symbolic and self.has_symbolic: + return None + return self.symbolic_shape + + +class EnumeratedShapes: + def __init__(self, shapes, default=None): + """ + A shape class for setting multiple valid shapes in InputType. + + Parameters + ---------- + shapes: list of Shape objects, or Shape-compatible lists. + The valid shapes of the inputs. + + If input provided is not a Shape object, but can be converted to a Shape, + the Shape object would be stored in ``shapes`` instead. 
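+
+            For example (a sketch, assuming the public ``ct`` alias):
+            ``ct.EnumeratedShapes(shapes=[(1, 3, 224, 224), (1, 3, 448, 448)])``.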
+
+        default: tuple of int or None
+            The default shape that is used for initiating the model, and set in
+            the metadata of the model file.
+
+            If None, then the first element in ``shapes`` is used.
+        """
+        from coremltools.converters.mil.mil import get_new_symbol
+
+        if not isinstance(shapes, (list, tuple)):
+            raise ValueError(
+                "EnumeratedShapes should be list or tuple of shape, got type {} instead".format(
+                    type(shapes)
+                )
+            )
+        if len(shapes) < 2:
+            raise ValueError(
+                "EnumeratedShapes should take a list or tuple with len >= 2, got {} instead".format(
+                    len(shapes)
+                )
+            )
+
+        self.shapes = []
+        for idx, s in enumerate(shapes):
+            if isinstance(s, Shape):
+                self.shapes.append(s)
+            else:
+                self.shapes.append(Shape(s))
+
+        self.symbolic_shape = self.shapes[0].symbolic_shape
+        for shape in self.shapes:
+            for idx, s in enumerate(shape.symbolic_shape):
+                if is_symbolic(self.symbolic_shape[idx]):
+                    continue
+                elif is_symbolic(s):
+                    self.symbolic_shape[idx] = s
+                elif s != self.symbolic_shape[idx]:
+                    self.symbolic_shape[idx] = get_new_symbol()
+
+        if default is not None:
+            if not isinstance(default, (list, tuple)):
+                raise ValueError(
+                    "Default shape should be list or tuple, got type {} instead".format(
+                        type(default)
+                    )
+                )
+            for idx, s in enumerate(default):
+                if not isinstance(
+                    s, (np.generic, int)
+                ) and not is_symbolic(s):
+                    raise ValueError(
+                        "Default shape invalid, got error at index {} which is {}".format(
+                            idx, s
+                        )
+                    )
+        else:
+            default = self.shapes[0].default
+        self.default = default
+
+
+def _get_shaping_class(shape):
+    """
+    Returns a Shape class or EnumeratedShapes class for `shape`
+    where `shape` could be lists/tuple/Shape/EnumeratedShapes/etc.
+    """
+    if isinstance(shape, (Shape, EnumeratedShapes)):
+        return shape
+
+    try:
+        enum_shape = EnumeratedShapes(shape)
+        return enum_shape
+    except ValueError:
+        pass
+    try:
+        shape = Shape(shape)
+        return shape
+    except ValueError:
+        pass
+    raise ValueError("Can't convert to CoreML shaping class from {}.".format(shape))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py
new file mode 100644
index 00000000..15f4c03b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+SPACES = " "
+
+from .block import Block, Function, curr_block
+from .builder import Builder
+from .input_type import (SUPPORT_FLOAT_TYPES, SUPPORT_INT_TYPES, DefaultInputs,
+                         InputSpec, InternalVar, ListInputType,
+                         PyFunctionInputType, TensorInputType, TupleInputType)
+from .operation import Operation, mil_list, precondition
+from .program import (InputType, Placeholder, Program, Symbol,
+                      get_existing_symbol, get_new_symbol,
+                      get_new_variadic_symbol)
+from .var import ListVar, Var
+from .ops.defs._op_reqs import register_op
+
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py
new file mode 100644
index 00000000..b9759959
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py
@@ -0,0 +1,894 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +from collections import Counter, OrderedDict + +from coremltools import _OPSET, _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as _target + +from . import SPACES, types +from .types.symbolic import is_symbolic, k_used_symbols +from .var import ComplexVar, InternalVar, Var +from .visitors.dot_visitor import DotVisitor + +# BLOCK_STACK[-1] is the current block +BLOCK_STACK = [] +DEBUG = False + +def curr_block(): + if len(BLOCK_STACK) == 0: + raise ValueError("Must call Builder inside an Function" + " or Block") + return BLOCK_STACK[-1] + +def curr_opset_version(): + block = curr_block() + while not isinstance(block, Function): + block = block.outer_op.enclosing_block + return block.opset_version + +def is_current_opset_version_compatible_with(opset_version): + if curr_opset_version() is None: + return opset_version <= _target.iOS13 + return curr_opset_version() >= opset_version + + +class InvalidBlockStateError(Exception): + pass + + +class Block: + __slots__ = [ + "name", + "_block_inputs", + "_outputs", + "operations", + "_internal_vars", + "outer_op", + ] + + counter = 0 + + @classmethod + def _get_new_name(cls): + curr_val = cls.counter + cls.counter += 1 + return "block" + str(curr_val) + + def __init__(self, block_inputs=None, outer_op=None, name=None): + """ + Inputs: + + block_inputs: python tuple[Var]. + + block_inputs is None except when the block represents loop. By + convention block_inputs should have name ending in '.x', and the + Variable are not produced by any op (block_inputs[i]._op is None). + + Ex: + + # main(%a: (1, 2, fp32), + # %b: (1, 2, fp32), + # %c: (1, 2, fp32)) { + # block0() { + # %const1: (1, fp32) = const(...) + # %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \ + # while_loop(loop_vars=(%a, %b)) + # loop_cond(%a.x, %b.x) { + # %blah: (bool) = some_op(x=%a.x, y=%b.x) + # %cond_var: (bool) = some_op2(x=%a.x, y=%blah) + # } -> (%cond_var) + # loop_body(%a.x, %b.x) { + # %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x) + # } -> (%add_0, %b.x) + # %linear: (1, fp32) = linear(...) + # } -> (%loop:0, %loop:1) + # } + + %a.x, %b.x are block_inputs. + + `some_op` in `loop_cond` block can access %a, %b, %a.x, %b.x. + `some_op`, however, cannot take %linear as input. + + outer_op: Operation + The enclosing op. None iff this Block is an Function. + + function_inputs: tuple[Var] + function_inputs are always visible for this block and all blocks + nested within. If function_inputs is None, get it from + `outer_op.block` + """ + self.name = name + if self.name is None: + self.name = Block._get_new_name() + + # list[Operation]. Topologically sorted. + self.operations = [] + + # Must be set before self.validate() + self.outer_op = outer_op + + self._block_inputs = block_inputs + if self._block_inputs is None: + self._block_inputs = tuple() + + # list[Var]. This is converted to str when generating MIL proto. + self._outputs = [] + + # If we create const, whose inputs (mode, val) cannot be const + # (infinite recursion). They must be considered as always visible. 
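+        # (They are tracked as InternalVar objects; see add_internal_var.)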
+ self._internal_vars = set() + + if self.outer_op is None and not isinstance(self, Function): + msg = "Block {} is not Function and thus outer_op cannot be None" + raise ValueError(msg.format(self.name)) + + self.validate() + + def validate(self): + """ + Basic validation to protect against some invalid state. + """ + if not DEBUG: + return + + for op in self.operations: + for b in op.blocks: + b.validate() + if op.outputs is None: + raise InvalidBlockStateError() + + # Check the input output relationships + # from outputs -> inputs + for ov in op.outputs: + child_op_count = Counter(ov.child_ops) + for next_op, c in child_op_count.items(): + c_actual = next_op.get_flattened_inputs().count(ov) + if c_actual != c: + msg = ( + "Var {} should be consumed by op {} {}" + + " times, but op {} uses it {} times.\n{}" + ) + raise InvalidBlockStateError( + msg.format( + ov.name, + next_op.name, + c, + next_op.name, + c_actual, + next_op, + ) + ) + + # from inputs -> outputs + input_var_count = Counter(op.get_flattened_inputs()) + for iv, c in input_var_count.items(): + c_actual = iv.child_ops.count(op) + if c_actual != c: + msg = ( + "Var {} should be consumed by op {} {}" + + " times, but op {} uses it {} times.\n{}" + ) + raise InvalidBlockStateError( + msg.format(iv.name, op.name, c_actual, op.name, c, op) + ) + + # 1 to 1 mapping between Block outputs and Var.consuming_blocks + for op in self.operations: + for ov in op.outputs: + for b in ov.consuming_blocks: + if ov not in b.outputs: + msg = "Var {} should be output of block {}: {}" + raise ValueError(msg.format(ov.name, b.name, b)) + + for v in self.outputs: + if self not in v.consuming_blocks: + msg = "Var {} should be output of block {}: {}" + raise ValueError(msg.format(ov.name, b.name, b)) + + def remove_inputs(self, curr_input_vars): + """ + curr_input_vars: list[Var], whose elements must be in + self._block_inputs. + """ + self.validate() + remove_idx = [self._block_inputs.index(v) for v in curr_input_vars] + self._block_inputs = [ + v for i, v in enumerate(self._block_inputs) if i not in remove_idx + ] + + def find_ops(self, prefix=None, op_type=None): + """ + Return list of ops with name matching `prefix` if specified and + op_type, if specified. At least one of {prefix, op_type} must be specified. + + prefix: str + + Return list[Operation]. Empty list if no op satisfies. + """ + if prefix is None and op_type is None: + raise ValueError("Must specify one of {prefix, op_type}") + found_ops = [] + for op in self.operations: + prefix_match = prefix is None or op.name[: len(prefix)] == prefix + op_type_match = op_type is None or op.op_type == op_type + if prefix_match and op_type_match: + found_ops.append(op) + for b in op.blocks: + found_ops.extend(b.find_ops(prefix=prefix, op_type=op_type)) + return found_ops + + def add_internal_var(self, internal_var): + if not isinstance(internal_var, InternalVar): + raise ValueError("Only InternalVar can be manually added to Block.") + self._internal_vars.add(internal_var) + + @property + def inputs(self): + return self._block_inputs + + @property + def outputs(self): + return self._outputs + + def is_var_visible_in_block(self, var, upto_op_with_id=None): + """ + Checks if a var is visible to ops starting from id=`upto_op_with_id` inside the block. 
+ + Var is visible if + - It is the output of a const op, or + - It is the output of "preceding" operations in that block, or + - It is visible in the enclosing block, or + - It is either a block or a function input + + If upto_op_with_id is None, outputs of all operations inside the block are visible to + that block. + """ + + if var in self._internal_vars: + return True + + inputs = self.function_inputs if isinstance(self, Function) else self.inputs + if var in inputs: + return True + + idx = len(self.operations) if upto_op_with_id is None else upto_op_with_id + + for i in range(idx-1, -1, -1): + op_outputs = self.operations[i].outputs + if op_outputs is not None and var in op_outputs: + return True + + if self.outer_op is not None: + enclosing_block = self.outer_op.enclosing_block + outer_op_id = enclosing_block.find_op_id_in_block(self.outer_op) + if enclosing_block.is_var_visible_in_block(var, upto_op_with_id=outer_op_id): + return True + + return False + + def find_op_id_in_block(self, target_op): + try: + idx = self.operations.index(target_op) + except ValueError: + raise ValueError("Op {} not found in {}: {}".format(target_op.name, self.name, self)) + return idx + + def set_outputs(self, outputs): + """ + outputs: list[Var] + """ + if not isinstance(outputs, list): + raise ValueError("Outputs must be list of Vars") + + self.validate() + for ov in outputs: + if not self.is_var_visible_in_block(ov): + msg = ( + "Var {} is not visible in block {} and thus cannot " + + "be a block output.\n{}" + ) + raise ValueError(msg.format(ov.name, self.name, self)) + + # For duplicate vars in self._outputs, only remove block once. + for ov in set(self._outputs): + ov.consuming_blocks.remove(self) + + # Need to copy, or block's output would be completely tied to a var's + # output and we cannot replace a block output with another var's + # output. + self._outputs = copy.copy(outputs) + # For duplicate vars in outputs, only add consuming_blocks once. + for ov in set(outputs): + ov.consuming_blocks.append(self) + + def __enter__(self): + global BLOCK_STACK + BLOCK_STACK.append(self) + return self + + def __exit__(self, type, value, traceback): + self._propagate_nonreplaceable_vars() + global BLOCK_STACK + BLOCK_STACK = BLOCK_STACK[:-1] + + def _insert_op_before(self, new_op, before_op=None): + """ + A private API used by builder. Please use `builder.YOUR_OP(...,before_op)`. + + new_op's outputs are not used (not input to any other op) after + this call. All inputs to new_op must be visible at or before + the before_op (i.e., new_op must be added in topologically sorted + order). Note that this is more restrictive than MIL, whose Block + supports lexical scoping and thus an op can reference Var in enclosing + scopes. new_op.name must be unique in the block. + + before_op=None to append new_op at the end of self.operations. + + Given: %2 = op0(%1, %1) + %4 = op2(%1) + %6 = op3(%4, %4) + + Execute: insert_op_before(op1, before_op=op2), + where %3 = op1(%1, %2) + + Result: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%4, %4) + + Comment: We assume op1 has been constructed outside the block with + %1, %2 as inputs. Typically it's builder's job to create an op and + insert into the current block. + + Comment: insert_op_before(op1, before_op=op0) would error as %2 (an input to op1) + is not visible before op0. 
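+
+        Comment: in practice this method is reached through the builder; a
+        sketch such as ``mb.relu(x=var, before_op=op2)`` inserts the new op
+        here rather than calling _insert_op_before directly.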
+        """
+        self.validate()

+        idx = len(self.operations) if before_op is None else self.find_op_id_in_block(before_op)
+
+        # check inputs are visible
+        for k, v in new_op.inputs.items():
+            if not isinstance(v, (Var, tuple)):
+                continue
+            vs = [v] if isinstance(v, Var) else v
+            for s in vs:
+                if not self.is_var_visible_in_block(s, upto_op_with_id=idx):
+                    before_op_name = before_op.name if before_op is not None else "None"
+                    msg = "Op '{}' input {}={} is not in scope of {} before {}"
+                    raise ValueError(
+                        msg.format(new_op.name, k, s.name, self.name, before_op_name)
+                    )
+
+        # add new_op
+        if before_op is None:
+            self.operations.append(new_op)
+        else:
+            self.operations.insert(idx, new_op)
+
+    def _replace_var(
+        self,
+        old_var,
+        new_var,
+        start=0,
+        end_id=-1,
+        no_check_var_types=False,
+    ):
+        """
+        Helper function for replace_uses_of_var_after_op
+        """
+        num_ops_affected = 0
+
+        if end_id == -1:
+            op_list = self.operations[start:]
+        else:
+            op_list = self.operations[start : end_id + 1]
+
+        for op in op_list:
+            new_inputs = {}
+            affected = False
+            for k, v in op.inputs.items():
+                if isinstance(v, (list, tuple)) and old_var in v:
+                    new_inputs[k] = tuple(new_var if vv == old_var else vv for vv in v)
+                    affected = True
+                elif v == old_var:
+                    new_inputs[k] = new_var
+                    affected = True
+                else:
+                    new_inputs[k] = v
+            if affected:
+                num_ops_affected += 1
+                op.set_inputs(no_check_var_types=no_check_var_types,
+                              **new_inputs)
+
+            # Replace recursively.
+            for b in op.blocks:
+                num_ops_affected += b._replace_var(old_var, new_var)
+
+        if end_id != -1 and old_var.op not in op_list:
+            return num_ops_affected
+
+        if old_var in self._block_inputs:
+            idx = self._block_inputs.index(old_var)
+            self._block_inputs = list(self._block_inputs)
+            self._block_inputs[idx] = new_var
+            self._block_inputs = tuple(self._block_inputs)
+
+        # If old_var is block's output, replace as well.
+        self.replace_block_output_var(old_var, new_var)
+
+        return num_ops_affected
+
+    def replace_block_output_var(
+        self,
+        old_var,
+        new_var,
+    ):
+        """
+        If old_var is in the list of block's outputs,
+        replace old_var with the new_var.
+        """
+        found_old_var_in_output = False
+        # There could be multiple matched `old_var` in output when the program has duplicate vars
+        # in the output.
+        for idx, output_var in enumerate(self._outputs):
+            if old_var == output_var:
+                found_old_var_in_output = True
+                self._outputs[idx] = new_var
+        if found_old_var_in_output:
+            new_var.consuming_blocks.append(self)
+            # This block no longer uses `old_var` as its outputs
+            old_var.consuming_blocks.remove(self)
+            # Ensure output name is consistent
+            if isinstance(self, Function):
+                new_var.name = old_var.name
+
+    def try_replace_uses_of_var_after_op(
+        self,
+        anchor_op,
+        old_var,
+        new_var,
+        no_check_var_types=False,
+        no_check_var_visibility=False,
+    ):
+        """
+        :param anchor_op: Operation
+        :param old_var: Var
+        :param new_var: Var
+        :param no_check_var_types: bool
+        :param no_check_var_visibility: bool
+        :return: True if the old_var can be replaced by new_var. False otherwise.
+
+        This helper function guards the replace_uses_of_var_after_op function,
+        by first checking if the old_var could be replaced by the new_var.
+
+        1. If old_var can be replaced by new_var, replace_uses_of_var_after_op
+           is called and True is returned.
+        2. False is returned if the replacement is not allowed.
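+
+        A usage sketch (op and var names here are illustrative):
+
+        >>> ok = block.try_replace_uses_of_var_after_op(
+        ...     anchor_op=conv_op, old_var=conv_out, new_var=fused_out)
+        >>> # When ok is False, the graph is left untouched and the caller
+        >>> # may fall back to a different rewrite.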
+ """ + if not old_var.can_be_replaced_by_var(new_var): + return False + + self.replace_uses_of_var_after_op( + anchor_op=anchor_op, + old_var=old_var, + new_var=new_var, + no_check_var_types=no_check_var_types, + no_check_var_visibility=no_check_var_visibility, + ) + return True + + def replace_uses_of_var_after_op( + self, + anchor_op, + old_var, + new_var, + no_check_var_visibility=False, + end_op=None, + no_check_var_types=False, + force_replace=False, + ): + """ + Replace all uses of `old_var` with `new_var` after `anchor_op`, + and before `end_op` (inclusive). + + That is all the ops that use `old_var` will now use `new_var`. + The op that produces the `old_var` will continue to produce it, its output + won't be replaced by `new_var`. + + If `anchor_op` is None, replace all input occurrences of `old_var` in the block. If + `end_op` is None, all occurrences of `old_var` are replaced in the block starting from + the op just after `anchor_op` + + no_check_var_visibility: True to disable the check ensuring new_var is visible + (visibility requirement depends on anchor_op). + + no_check_var_types: An error will be raised if the type of new_var is not same as the + old_var, unless `no_check_var_types` is set to True. Normally type inference is + re-invoked for all the child ops of `old_var` after updating it to `new_var`. However, + this is skipped if `no_check_var_types` is set to True. + + old_var, new_var must meet the following conditions: + + - old_var, new_var both existing within the block. This implies that + the op generating new_var must be inserted prior to this + replacement. + + - Affected ops (i.e., Operation after anchor_op that take old_var as + input) must generate the same type inference results as before. + + - new_var must be visible at or before anchor_op in the order of + self.operations. + + Given: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%4, %4) + + Execute: replace_uses_of_var_after_op(op2, %4, %3) + + Result: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%3, %3) # type inference check against %6 + + + Comment: Execute: replace_uses_of_var_after_op(op1, %4, %3) would lead to + identical results, as op2 does not take %4 as input. + + Comment: replace_uses_of_var_after_op(op0, %4, %3) would cause error as %3 is + after op0 + + Comment: To avoid clutter, we drop the names of arguments and return + Var in the illustration above. + + + Another example, usage of "end_op": + + Given: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %2) + %5 = op3(%2) + + if execute replace_uses_of_var_after_op(anchor_op=op0, old_var=%2, new_var=%3) + + Result: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %3) + %5 = op3(%3) + + if execute replace_uses_of_var_after_op(anchor_op=op0, old_var=%2, new_var=%3, end_op=op2) + + Result: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %3) # %2 is replaced with %3 till here + %5 = op3(%2) # will continue using %2 + + """ + if not force_replace and old_var.op is not None and new_var.op is not None: + if not old_var.can_be_replaced_by_var(new_var): + old_nonreplaceable_vars = old_var.nonreplaceable_vars_upstream + new_nonreplaceable_vars = new_var.nonreplaceable_vars_upstream + err_var = None + for _var in old_nonreplaceable_vars: + if _var not in new_nonreplaceable_vars: + err_var = _var + break + msg = ( + "var {} cannot be replaced by {}. Since the nonreplaceable var {} might " + "potentially " + "be removed during the replacement of those vars." 
+                ).format(old_var, new_var, err_var)
+                raise ValueError(msg)
+
+        start = self.find_op_id_in_block(anchor_op) + 1 if anchor_op is not None else 0
+        end_id = self.find_op_id_in_block(end_op) if end_op is not None else -1
+
+        if not no_check_var_visibility:
+            self.validate()
+
+            idx = start if anchor_op is not None else len(self.operations)
+            visibility_error_msg = (
+                "new_var '{}' is not visible in block '{}' at or before "
+                + "anchor_op '{}'"
+            )
+            anchor_op_name = "None" if anchor_op is None else anchor_op.name
+
+            if isinstance(new_var, ComplexVar):
+                # For ComplexVar, as it's just a temp wrapper to transit the real and imag data, we
+                # check the visibility of its real and imaginary Var instead.
+                if not self.is_var_visible_in_block(new_var.real, upto_op_with_id=idx):
+                    raise ValueError(
+                        visibility_error_msg.format(
+                            new_var.real.name, self.name, anchor_op_name
+                        )
+                    )
+                if not self.is_var_visible_in_block(new_var.imag, upto_op_with_id=idx):
+                    raise ValueError(
+                        visibility_error_msg.format(
+                            new_var.imag.name, self.name, anchor_op_name
+                        )
+                    )
+            else:
+                if not self.is_var_visible_in_block(new_var, upto_op_with_id=idx):
+                    raise ValueError(
+                        visibility_error_msg.format(
+                            new_var.name, self.name, anchor_op_name
+                        )
+                    )
+
+        if end_id != -1 and end_id < start:
+            msg = "end_op '{}' comes before the anchor_op '{}'"
+            raise ValueError(msg.format(end_op.name, anchor_op.name))
+
+        num_ops_affected = self._replace_var(
+            old_var,
+            new_var,
+            start=start,
+            end_id=end_id,
+            no_check_var_types=no_check_var_types,
+        )
+
+        logger.debug("Num ops affected in replacing var: {}".format(num_ops_affected))
+
+    def remove_ops(self, existing_ops):
+        """
+        Remove ops in `existing_ops`.
+
+        Args: existing_ops: List[Operation]. All ops in this list must be pre-existing in the
+            block. It allows duplicated ops, but duplicated ops will only be removed once.
+
+        Raises:
+            ValueError if any `op` in `existing_ops` meets any of the following conditions:
+            - `op` is not found in the block
+            - any other op in the block uses output Vars of `op`
+            - the output var is block's output
+        """
+        self.validate()
+
+        # Dedup ops because each op can only be deleted once.
+        existing_ops_set = set(existing_ops)
+        existing_ops = list(existing_ops_set)
+        # Find the idx of each to-be-removed op, and raise errors if any op couldn't be found.
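+        # e.g. with self.operations == [opA, opB, opC] and
+        # existing_ops == [opC, opA], idxs becomes [2, 0].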
+ idxs = [-1] * len(existing_ops) + for i, op in enumerate(self.operations): + if op in existing_ops_set: + idxs[existing_ops.index(op)] = i + if -1 in idxs: + not_found = [] + for i, op in zip(idxs, existing_ops): + if i == -1: + not_found.append(op.name) + raise ValueError( + "Ops {} not found in block {}".format(not_found, self.name) + ) + + # Remove ops in reverse topological order + pairs = list(zip(idxs, existing_ops)) + pairs.sort(key=lambda x: x[0], reverse=True) + + for idx, op in pairs: + for i, v in enumerate(op.outputs): + # Check that no ops depend on op's outputs + if len(v.child_ops) > 0: + child_op_names = [s.name for s in v.child_ops] + msg = ( + "Cannot delete op '{}' with active output at id {}: '{}' " + + "used by ops {}" + ) + raise ValueError(msg.format(op.name, i, v.name, child_op_names)) + # Check that the output Var isn't block's output + if v in self._outputs: + msg = ( + "cannot delete op {} with output {}: {} " + + "that's block {}'s output" + ) + raise ValueError(msg.format(op.name, i, v.name, self.name)) + + for b in op.blocks: + b.set_outputs([]) + b.remove_ops(b.operations) + + # Remove the op (in reverse topological order) + self.operations.pop(idx) + op.enclosing_block = None + + for v in op.inputs.values(): + if isinstance(v, (tuple, list)): + for vv in v: + vv.remove_child_op(op) + else: + v.remove_child_op(op) + + def operations_for_vars(self, end_vs): + """ + Inputs: + + end_vs: list[Operation]. + + Return: + + list[Operation] which are subset of self.operations that are ancestors + of `end_vs`. Also do recursion into nested blocks. + """ + used_vars = set(end_vs) + used_ops = [] + for op in reversed(self.operations): + # if none of op's output is used, delete op + if not set(op.outputs).intersection(used_vars): + continue + + used_ops.append(op) # append in reverse topological order + + # recursively search for nested blocks + ops_to_check = [] + for b in op.blocks: + ops_to_check += b.operations_for_vars(b.outputs) + ops_to_check.append(op) + + # mark used vars + for op_to_check in ops_to_check: + # mark all op's inputs to used + for _, input_var in op_to_check.inputs.items(): + if isinstance(input_var, (tuple, list)): + used_vars.update(list(input_var)) + else: + used_vars.add(input_var) + + return used_ops[::-1] + + def _propagate_nonreplaceable_vars(self): + def propagate_nonreplaceable_vars_block(block): + for op in list(block.operations): + for b in op.blocks: + propagate_nonreplaceable_vars_block(b) + if op.outputs is None: + continue + for o in op.outputs: + o._reset_nonreplaceable_vars_upstream() + o._set_nonreplaceable_vars_upstream() + propagate_nonreplaceable_vars_block(self) + + def indented_str(self, indent=None): + if indent is None: + indent = "" + s = ( + indent + + self.name + + "(" + + ", ".join([str(var) for var in self._block_inputs]) + ) + s += ") {\n" + for op in self.operations: + s += op.indented_str(indent + SPACES * 1) + s += indent + "} -> (" + if self._outputs is not None: + s += ", ".join(["%" + v.name for v in self._outputs]) + s += ")\n" + return s + + def __repr__(self): + return self.__str__() + + def __str__(self): + return self.indented_str() + + def get_dot_string( + self, + function_name="main", + prefix_id=0, + highlight_debug_op_types=None, + highlight_debug_op_names=None, + ): + """ + Return the dot string that can be used to show the block + with dot. Const ops are not added to the dot string. 
+ + * Input vars : yellow + * output vars : goldenrod2 + * op names that user wants to highlight, provided in "highlight_debug_op_names": cyan + * op types that user wants to highlight, provided in "highlight_debug_op_types": green + + Examples + -------- + >>> import graphviz + >>> graphviz.Source(block.get_dot_string()).view() + >>> # OR + >>> graphviz.Source(block.get_dot_string()).view(filename='graph.pdf') + """ + if highlight_debug_op_types is None: + highlight_debug_op_types = [] + if highlight_debug_op_names is None: + highlight_debug_op_names = [] + + dotstring = "digraph g {\n" + "\tcompound=true;\n" + + input_var_names = list(self.inputs.keys()) + output_var_names = [v.name for v in self.outputs] + + debug_op_types = [] + if len(highlight_debug_op_types) > 0: + for op in self.operations: + if op.op_type in highlight_debug_op_types: + debug_op_types.append(op.name) + + vis = DotVisitor() + vis.highlight_nodes(input_var_names, "yellow").highlight_nodes( + output_var_names, "goldenrod2" + ).highlight_nodes(highlight_debug_op_names, "cyan").highlight_nodes( + debug_op_types, "green" + ) + + vis.visit_all(self, nodename_prefix=str(prefix_id)) + res = vis.get_result("subgraph", "cluster_" + function_name.replace("/", "_")) + dotstring += "\n".join("\t" + r for r in res.split("\n")) + "\n" + dotstring += "}" + return dotstring + + +class Function(Block): + def __init__(self, inputs, opset_version=None): + """ + inputs: str -> placeholder + opset_version: AvailableTarget enum. Describes the opset version of the function + """ + self.placeholder_inputs = inputs + self.opset_version = opset_version + + # str -> Var + self._input_dict = OrderedDict() + for k, v in self.placeholder_inputs.items(): + v.set_name(k) # set to user input name + self._input_dict[k] = v.outputs[0] + self.function_inputs = tuple(self._input_dict.values()) + + global k_used_symbols + global k_num_internal_syms + for inp in self.function_inputs: + if types.is_tensor(inp.dtype): + shapes = inp.dtype.get_shape() + for s in shapes: + if is_symbolic(s): + k_used_symbols.add(s) + super().__init__() + + # Override Block's input + @property + def inputs(self): + return self._input_dict + + @property + def opset_version(self): + return self._opset_version + + @opset_version.setter + def opset_version(self, version): + if not ( + isinstance(version, _target) or + version is None + ): + raise ValueError("opset_version must be type of coremltools.AvailableTarget") + self._opset_version = version + + def __repr__(self): + return self.__str__() + + def __str__(self): + return self.to_str("function") + + def to_str(self, func_name="function"): + func_name = func_name + "[{}]".format(_OPSET[self.opset_version]) + if len(self._input_dict) == 0: + s = func_name + "()" + else: + inputs = [(in_name, ph) for in_name, ph in self._input_dict.items()] + s = func_name + "(" + str(inputs[0][1]) + for in_name, ph in inputs[1:]: + s += ",\n" + " " * (len(func_name) + 1) + str(ph) + s += ") {\n" + s += self.indented_str(SPACES) + s += "}\n" + return s diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py new file mode 100644 index 00000000..2f782c27 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py @@ -0,0 +1,246 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numbers +from collections import defaultdict + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + +from .block import Function, curr_block +from .input_type import (InternalInputType, ListOrTensorInputType, + TensorInputType, TupleInputType) +from .program import Placeholder, Program +from .var import InternalVar, Var + + +def is_python_value(val): + return ( + isinstance(val, (np.generic, np.ndarray)) + or isinstance(val, numbers.Number) + or isinstance(val, str) + or isinstance(val, bool) + or (isinstance(val, (tuple, list)) and all(is_python_value(v) for v in val)) + ) + + +class Builder: + """ + This class is a singleton builder to construct a MIL program. For more + information, see `Create a MIL program `_. + + Importing ``.ops`` triggers the installation of all MIL ops into the Builder. + For details on each op, see `MIL ops `_. + + Examples + -------- + + >>> from coremltools.converters.mil.mil import Builder as mb + >>> from coremltools.converters.mil.mil import Program, Function + + >>> prog = Program() + >>> func_inputs = {"x": mb.placeholder(shape=[2,3]), + >>> "y": mb.placeholder(shape=[2,3])} + >>> with Function(func_inputs) as ssa_fun: + >>> x, y = ssa_fun.inputs['x'], ssa_fun.inputs['y'] + >>> res_var = mb.add(x=x, y=y) # created within ssa_fun block + >>> ssa_fun.set_outputs([res_var]) + >>> prog.add_function("main", ssa_fun) + + >>> # Importing ops triggers installation of all ops into Builder. + >>> from .ops import defs as _ops + + """ + + name_count = defaultdict(int) + + @classmethod + def _get_free_name(cls, name): + new_name = name + "_" + str(cls.name_count[name]) + cls.name_count[name] += 1 + return new_name + + @classmethod + def _maybe_set_name(cls, kwargs, op_type): + if "name" not in kwargs: + kwargs["name"] = cls._get_free_name(op_type) + return kwargs + + @classmethod + def _add_const(cls, val, name, before_op): + if not is_python_value(val): + raise ValueError("Cannot add const {}".format(val)) + if any_symbolic(val): + msg = ( + "Python native vals (list, tuple), np.array that are" + + "operation inputs cannot have symbolic values. Consider feeding" + + "symbolic shape in through placeholder and use mb.shape() " + + "operator. 
Input {}: {}" + ) + raise ValueError(msg.format(name, val)) + const_name = cls._get_free_name(name) + logger.debug("Adding const op '{}'".format(const_name)) + output_var = cls.const(val=val, name=const_name, + before_op=before_op) + return output_var + + + @classmethod + def _create_vars(cls, input_spec, op_name, before_op, + candidate_kv): + """ + For each key K in `candidate_kv`, create a Var if the + followings are satisfied: + + - K exists in input_spec and is not an InternalInputType + - candidate_kv[K] is not already a Var + + Inputs + ------ + - candidate_kv: Dict[str, Any] + Key-values may be inputs to an op (whose inputs is defined by + input_spec) + + Returns + ------- + - var_kv: Dict[str, Var] + For the K satisfying the above, var_kv[K] is the newly + created Var + """ + update_dict = {} + for k, val in candidate_kv.items(): + if isinstance(val, Var): + continue # already a Var + + if k not in input_spec.input_types: + continue # k is not an op input + + in_type = input_spec.input_types[k] + if isinstance(in_type, InternalInputType): + new_var_name = op_name + "_" + k + var = InternalVar(val, name=new_var_name) + curr_block().add_internal_var(var) + update_dict[k] = var + continue # Not a regular Var + + new_var_name = op_name + "_" + k + if isinstance(in_type, TupleInputType): + var = [] + for i, v in enumerate(val): + if isinstance(v, Var): + var.append(v) + continue + var.append( + cls._add_const(v, new_var_name + str(i), + before_op) + ) + update_dict[k] = var + continue + + if isinstance(in_type, (TensorInputType, ListOrTensorInputType)): + var = cls._add_const(val, new_var_name, before_op) + update_dict[k] = var + + return update_dict + + @classmethod + def _add_op(cls, op_cls, **kwargs): + """ + Add an op of type `op_cls` (e.g., convolution) to current block. + """ + kwargs = cls._maybe_set_name(kwargs, op_cls.__name__) + logger.info( + "Adding op '{}' of type {}".format(kwargs["name"], op_cls.__name__) + ) + before_op = kwargs.get("before_op", None) + # Shallow copy list inputs to ensure op inputs are immutable + kwargs = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in kwargs.items() if v is not None} + kwargs.update(cls._create_vars( + input_spec=op_cls.input_spec, + op_name=kwargs["name"], before_op=before_op, + candidate_kv=kwargs)) + new_op = op_cls(**kwargs) + + # Initialize optional input Vars if it wasn't in kwargs + default_inputs = new_op.default_inputs() + # Shallow copy list inputs to ensure op inputs are immutable + missing_optional_vals = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in default_inputs.items() + if k not in kwargs and v is not None} + missing_optional_vars = cls._create_vars( + input_spec=op_cls.input_spec, + op_name=kwargs["name"], before_op=before_op, + candidate_kv=missing_optional_vals) + new_op.set_inputs(type_inference=False, + **missing_optional_vars) + + curr_block()._insert_op_before(new_op, before_op=before_op) + new_op.build_nested_blocks() + new_op.type_value_inference() + if len(new_op.outputs) == 1: + return new_op.outputs[0] + return new_op.outputs + + @staticmethod + def placeholder(shape, dtype=None, allow_rank0_input=False): + return Placeholder(shape, dtype, allow_rank0_input=allow_rank0_input) + + @staticmethod + def TensorSpec(shape, dtype=None): + return Placeholder(shape, dtype) + + @staticmethod + def program(input_specs=None, opset_version=None): + """ + + The ``mb.program`` decorator creates a MIL program with a single + function (``main``). The input to ``main`` is a tensor. 
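+        With several ``input_specs``, ``main`` receives one tensor argument
+        per spec.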
+ + Parameters + ---------- + + input_specs: TensorSpec + Describes a tensor. + + opset_version: AvailableTarget enum + Describes the opset version of the program + + + Examples + -------- + >>> import coremltools as ct + >>> @mb.program(input_specs=[mb.TensorSpec(shape=(1,2))], opset_version=ct.target.iOS16) + >>> def prog(a): + >>> return mb.add(x=a, y=2) + + """ + if input_specs is None: + input_specs = [] + + def wrapper(main_block): + program = Program() + num_args = main_block.__code__.co_argcount + arg_names = list(main_block.__code__.co_varnames)[:num_args] + if len(input_specs) != num_args: + msg = "{} expects {} inputs: {}. Got {} input_specs." + raise ValueError( + msg.format( + main_block.__name__, num_args, arg_names, len(input_specs) + ) + ) + input_spec_dict = {k: v for k, v in zip(arg_names, input_specs)} + with Function(input_spec_dict, opset_version) as func: + input_vars = [func.inputs[a] for a in arg_names] + outputs = main_block(*input_vars) + if isinstance(outputs, tuple): + outputs = list(outputs) + elif not isinstance(outputs, list): + outputs = [outputs] + func.set_outputs(outputs) + program.add_function("main", func) + return program + + return wrapper diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py new file mode 100644 index 00000000..8721d927 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py @@ -0,0 +1,382 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from collections import OrderedDict + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.var import InternalVar + +SUPPORT_FLOAT_TYPES = [ + types.fp16, + types.fp32, + types.fp64, +] + +SUPPORT_INT_TYPES = [ + types.uint8, + types.uint16, + types.uint32, + types.uint64, + types.int8, + types.int16, + types.int32, + types.int64, +] + +SUPPORT_COMPLEX_TYPES = [ + types.complex64, + types.complex128, +] + +_SUPPORT_TYPES = ( + SUPPORT_FLOAT_TYPES + + SUPPORT_INT_TYPES + + SUPPORT_COMPLEX_TYPES + + [types.bool, types.str] +) + + +class DefaultInputs: + def __init__(self, **kwargs): + # Since python 3.6, kwargs preserves the input order. See + # https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep468 + self._default_inputs = [(k, v) for k, v in kwargs.items()] + self._ordered_dict = OrderedDict() + for k, v in self._default_inputs: + self._ordered_dict[k] = v + + def items(self): + return self._ordered_dict.items() + + def __add__(self, default_inputs): + new_order_dict = {k: v for k, v in self._ordered_dict.items()} + for k, v in default_inputs._default_inputs: + new_order_dict[k] = v + return DefaultInputs(**new_order_dict) + + +class InputSpec: + def __init__(self, **kwargs): + # Since python 3.6, kwargs preserves the input order. 
See
+        # https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep468
+        self._input_types = [(k, v) for k, v in kwargs.items()]
+        self._ordered_dict = OrderedDict()
+        for k, v in self._input_types:
+            self._ordered_dict[k] = v
+
+    def __add__(self, input_spec):
+        new_order_dict = {k: v for k, v in self._ordered_dict.items()}
+        for k, v in input_spec._input_types:
+            new_order_dict[k] = v
+        return InputSpec(**new_order_dict)
+
+
+    @property
+    def input_types(self):
+        """
+        Ordered dict[str, _InputType] (name, input_type)
+        """
+        return self._ordered_dict
+
+    def validate_inputs(self, op_name, op_type, candidate_kvs):
+        """
+        For each key K in `candidate_kvs`, if K is found in
+        self.input_types, perform the following:
+
+        - check that candidate_kvs[K] is a Var and satisfies
+          requirements in InputType (const, types)
+        - Place K, candidate_kvs[K] in output (list of (name, var) pairs).
+
+        Note that this does not ensure the presence of all required
+        input_spec (optional == False).
+
+        Parameters
+        ----------
+        - op_name: str
+
+        - op_type: str
+
+        - candidate_kvs: Dict[str, Var]
+          Values cannot be None
+
+        Return
+        ------
+        None
+
+        Raises:
+            ValueError if value type is incompatible
+        """
+        msg_prefix = 'Op \"{}\" (op_type: {}) '.format(op_name, op_type)
+
+        # check vars sharing the same type_domain_id have the same dtype
+        type_domain_group = {}
+        var_to_input_name = {}
+        for name, var in candidate_kvs.items():
+            input_type = self.input_types[name]
+            if isinstance(input_type, TensorInputType) and input_type.type_domain_id is not None:
+                type_domain_id = input_type.type_domain_id
+                if type_domain_id in type_domain_group:
+                    type_domain_group[type_domain_id].append(var)
+                else:
+                    type_domain_group[type_domain_id] = [var]
+                var_to_input_name[var] = name
+
+        for type_domain_id, vars in type_domain_group.items():
+            expected_dtype = vars[0].dtype
+            ref_name = var_to_input_name[vars[0]]
+            for var in vars:
+                name = var_to_input_name[var]
+                if not var.dtype == expected_dtype:
+                    msg = (
+                        "In op of type {}, named {}, the named input `{}` must have the same data type "
+                        "as the named input `{}`. However, {} has dtype {} whereas {} has dtype {}."
+                    ).format(op_type, op_name, name, ref_name, name,
+                             var.dtype.__type_info__(), ref_name, expected_dtype.__type_info__())
+                    raise ValueError(msg)
+
+        # Ensure candidate_kvs doesn't contain None
+        for name, var in candidate_kvs.items():
+            if var is None:
+                raise ValueError(msg_prefix + 'Input {} is None'.format(name))
+
+            if name not in self.input_types:
+                raise ValueError(msg_prefix + \
+                    'Unrecognized input {}'.format(name))
+
+            input_type = self.input_types[name]
+            # Check constness
+            # Don't check InternalInputType (so _const_symbolic can work)
+            if input_type.const and \
+                not isinstance(input_type, InternalInputType) \
+                and var.val is None:
+                msg = msg_prefix + \
+                    'Input {} must be const at compile time'
+                raise ValueError(msg.format(name), name, var.name)
+
+            if not isinstance(var, InternalVar) and \
+                not input_type.is_compatible(var):
+                msg = msg_prefix + "Input {}=\"{}\" expects " +\
+                    "{} but got {}"
+                raise ValueError(msg.format(name, var.name, input_type.type_str,
+                                            var.sym_type.__type_info__()))
+
+
+class _InputType:
+    """
+    (Untyped) input containing fundamental properties of all inputs to an
+    Operation:
+    """
+
+    def __init__(self, const=False, optional=False):
+        """
+        const (bool):
+            True if the InputType has to be constant / materialized at compile time.
+            Const InputType is semantically equivalent to an attribute. By
+            default False.
Read-only.
+
+        optional (bool):
+            True to allow user not to specify this input and rely on default
+            values (defined in default_inputs).
+
+        Note: _InputType should not be directly instantiated. Only its subclasses may
+        be instantiated.
+        """
+        self.const = const
+        self.optional = optional
+
+    def is_compatible(self, v):
+        """
+        Return True if (possibly symbolic) value `v` is compatible. False
+        otherwise.
+
+        Inputs:
+
+        v (Var | ListVar | native python function): input
+
+        Comment: Define is_compatible as instance method to call proper subclass
+        methods.
+        """
+        return self._is_compatible(v)
+
+    def _is_compatible(self, v):
+        return True
+
+    def _get_predefined_datatype(self):
+        """
+        Override this function if datatype can be known without `_default` or
+        `_val`.
+        """
+        return None
+
+    def __str__(self):
+        return type(self).__name__
+
+    @property
+    def type_str(self):
+        """Descriptive string describing expected mil types"""
+        return self.__str__()
+
+
+class TensorInputType(_InputType):
+    """
+    TensorInputType specifies the generic tensor inputs.
+    The `type_domain` validates data type constraints, and it can be either
+    (1) An object / tuple of builtin types:
+        This puts a constraint on the allowed input data types.
+        For example:
+
+        ```
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain=types.int32),
+        )
+        ```
+        only allows input `x` to have int32 dtype.
+
+        ```
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain=(types.int32, types.fp16)),
+        )
+        ```
+        allows input `x` to be either int32 or float16.
+
+    (2) string:
+        Verifies that different input parameters bound to the same `type_domain`
+        have the same data type.
+        This additional check is done by defining a `type_domains` dictionary in the Operation class.
+        For example:
+
+        ```
+        class conv(Operation):
+            input_spec = InputSpec(
+                x=TensorInputType(type_domain="T"),
+                weight=TensorInputType(type_domain="T"),
+            )
+
+            type_domains = {
+                "T": (types.fp16, types.fp32),
+            }
+        ```
+        would verify:
+        (i) `x` and `weight` are one of the float16 or float32 type.
+        (ii) `x` and `weight` are the same type.
+ + """ + def __init__(self, type_domain, **kwargs): + self._type_domain = () + self._type_domain_id = None + + if isinstance(type_domain, str): + self.type_domain_id = type_domain + else: + if isinstance(type_domain, type): + type_domain = (type_domain,) + self.type_domain = type_domain + super().__init__(**kwargs) + + def _is_compatible(self, v): + result = types.is_scalar(v.dtype) or types.is_tensor(v.dtype) + result = result and (v.dtype in self.type_domain) + return result + + @property + def type_domain(self): + return self._type_domain + + @type_domain.setter + def type_domain(self, val): + msg = "type_domain must be a tuple of builtin types" + if not isinstance(val, tuple) or any(map(lambda t: t not in _SUPPORT_TYPES, val)): + raise ValueError(msg) + self._type_domain = val + + @property + def type_domain_id(self): + return self._type_domain_id + + @type_domain_id.setter + def type_domain_id(self, val): + if not isinstance(val, str): + raise ValueError("type_domain_id must be type of str") + self._type_domain_id = val + + @property + def type_str(self): + return 'tensor or scalar of dtype from type domain ' + str([types.builtin_to_string(v) for v in self.type_domain]) + +class ListInputType(_InputType): + """ + ListInputType allows inputs of type types.list + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return types.is_list(v.sym_type) + + @property + def type_str(self): + return 'list' + + +class ListOrTensorInputType(_InputType): + """ + ListOrTensorInputType allows inputs of + (1) MIL tensor + (2) python list/tuple of MIL tensors + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return ( + types.is_list(v.sym_type) + or types.is_scalar(v.dtype) + or types.is_tensor(v.dtype) + ) + + @property + def type_str(self): + return 'list, tensor, or scalar' + + +class TupleInputType(_InputType): + """ + TupleInputType specifies input types of python list/tuple of MIL tensors. + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + # We don't check the detail types within the tuple. + return isinstance(v, (tuple, list)) + + @property + def type_str(self): + return 'tuple' + + +class InternalInputType(_InputType): + """ + InternalInputType specifies input types outside of Program's type system. + It allows ops to take, for example, python primitive types, instead of + only the builtin types. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return True # skip type check by default for InternalInputType. + + +class PyFunctionInputType(InternalInputType): + """ + Native python function. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return callable(v.val) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py new file mode 100644 index 00000000..8b8888e2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py @@ -0,0 +1,603 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from typing import Any, Dict, Tuple
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.types import is_compatible_type
+from coremltools.converters.mil.mil.types.symbolic import (any_symbolic,
+                                                           is_symbolic)
+
+from . import SPACES
+from .block import curr_block
+from .input_type import DefaultInputs, TensorInputType, TupleInputType
+from .var import ComplexVar, InternalVar, ListVar, Var
+
+VALUE = 1
+SYMBOL = 2
+NONE = 4
+ALL = 7
+
+
+def _is_compatible_symbolic_array(a, b):
+    """
+    A helper function that checks whether two numpy arrays containing symbolic
+    values are compatible.
+    For instance, a = np.array([is0, is2])
+                  b = np.array([is1, 1])
+    are considered compatible.
+                  a = np.array([is0, 1])
+                  b = np.array([is1, -1])
+    are not.
+    """
+    if not a.shape == b.shape:
+        return False
+    a = a.flatten()
+    b = b.flatten()
+    for t, v in zip(a, b):
+        if not is_symbolic(t) and not is_symbolic(v):
+            if t != v:
+                return False
+    return True
+
+
+def precondition(allow=ALL):
+    """
+    A helper decorator for the value_inference method.
+    Decorate value_inference with parameter VALUE/SYMBOL/NONE or ALL.
+    Combine VALUE/SYMBOL/NONE with bitwise or ( | ) to allow multiple kinds.
+    Note that:
+    1. ALL == VALUE | SYMBOL | NONE
+    2. The chosen flags (some or all of VALUE/SYMBOL/NONE) must be satisfied
+       by EVERY input for the precondition to be satisfied.
+
+    The meaning of each flag is:
+    VALUE: a value that can be materialized at compile time
+    SYMBOL: a value that cannot be materialized at compile time but exists as
+            a symbolic value
+    NONE: a None value
+
+    Usage:
+    @precondition(allow=VALUE|SYMBOL)
+    def value_inference(self):
+        '''some value_inference implementation'''
+    """
+    ALLOW_VALUE = allow & VALUE
+    ALLOW_SYMBOL = allow & SYMBOL
+    ALLOW_NONE = allow & NONE
+
+    def process(v, has_value, has_symbol, has_none):
+        """
+        v: Var
+
+        Return updated has_value, has_symbol, has_none
+        """
+        if any_symbolic(v.sym_val):
+            return has_value, True, has_none
+        elif v.val is None:
+            return has_value, has_symbol, True
+        return True, has_symbol, has_none
+
+    def decorator(func):
+        def wrapper(self):
+            HAS_VALUE = False
+            HAS_SYMBOL = False
+            HAS_NONE = False
+            for in_name, in_type in self._input_types.items():
+                if in_type.optional:
+                    # Optional inputs are not required to invoke value_inference()
+                    continue
+
+                if isinstance(in_type, TupleInputType):
+                    for v in self._input_vars[in_name]:
+                        HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
+                            v, HAS_VALUE, HAS_SYMBOL, HAS_NONE
+                        )
+                else:
+                    HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
+                        self._input_vars[in_name], HAS_VALUE, HAS_SYMBOL, HAS_NONE
+                    )
+
+            if HAS_VALUE and not ALLOW_VALUE:
+                msg = "Implementation of value_inference() for op {} doesn't support input with VALUE"
+                raise NotImplementedError(msg.format(self.op_type))
+            elif HAS_SYMBOL and not ALLOW_SYMBOL:
+                msg = "Implementation of value_inference() for op {} doesn't support input with SYMBOL"
+                raise NotImplementedError(msg.format(self.op_type))
+            elif HAS_NONE and not ALLOW_NONE:
+                msg = "Implementation of value_inference() for op {} doesn't support input with NONE"
+                raise NotImplementedError(msg.format(self.op_type))
+            else:
+                return func(self)
+
+        return wrapper
+
+    return decorator
+
+
+def is_internal_input(arg_name):
+    return arg_name[0] == "_"
+
+
+class mil_list:
+    """
+    A wrapper around python list
+    """
+
+    def __init__(self, ls=None):
+        self.ls
= ls if ls is not None else [] + if not isinstance(self.ls, list): + raise TypeError("Type of 'ls' must be list in the 'mil_list' class") + + +class Operation: + """ + Represents Operation in MIL. + + # Properties + name (str): + The name of the operation + + input_types (InputSpec, class attr): + Read-only named input types from all subclasses. Input types are used + to validate `inputs`. + + inputs [_input_vars] (dict of str --> Var): + An Operation (subclass of Operation) only has access to input Var, + which is already validated against `input_spec`. + + outputs [_output_vars] (list of Var): + List of output var based on type inference. Read-only + """ + + # Map from type domain id to a tuple of accepted types. + type_domains: Dict[str, Tuple[Any]] = dict() + + def __init__(self, **kwargs): + self._input_types = self.input_spec.input_types + self._type_domains = self.type_domains + self.name = kwargs.get("name", None) + + self._output_vars = None + self._input_vars = {} + self.blocks = [] + self.enclosing_block = curr_block() + + # Initialize inputs as object attributes (all None) + for k in self._input_types.keys(): + setattr(self, k, None) + self._input_vars[k] = None + + self._check_expected_inputs(kwargs) + + # Populate type_domains into input types + for v in self._input_types.values(): + if not isinstance(v, TensorInputType): + continue + if len(v.type_domain) == 0: + if v.type_domain_id not in self._type_domains: + raise ValueError("type_domain {} not defined.".format(v.type_domain_id)) + v.type_domain = self._type_domains[v.type_domain_id] + + # Set inputs from kwargs + input_kv = {k: v for k, v in kwargs.items() + if k in self._input_types and v is not None} + self._validate_and_set_inputs(input_kv) + self._ensure_required_inputs() + + def _check_expected_inputs(self, kwargs): + """ + Check that all kwargs are one of the following: + + - system inputs (non-attributes) + - op inputs (self._input_types.keys()) + """ + non_attributes = [ + "name", + "symbolic_datatype", + "datatype", + "symbolic_value", + "value", + "version", + "before_op", + "no_check_var_visibility", # no_check_var_visibility==True to deviate from SSA + "no_check_var_types", + # no_check_var_types==True to force set inputs, even if type does not match with earlier ones + ] + for k in kwargs.keys(): + if k not in non_attributes and k not in self._input_types: + raise ValueError( + "Unknown input '{}' for op '{}'".format(k, self.op_type) + ) + + def set_inputs(self, no_check_var_types=False, type_inference=False, **input_kvs): + """ + Parameters + ---------- + - input_kvs: Dict[str, Var] + Value cannot be None + + - type_inference: bool + True to perform type inference and recreate output Var. + """ + self._validate_and_set_inputs(input_kvs, no_check_var_types=no_check_var_types) + if type_inference and not no_check_var_types: + self.type_value_inference() + self._ensure_required_inputs() + + def get_flattened_inputs(self): + """ + Returns: + list[Var]. Flatten all tuple inputs + """ + flat_inputs = [] + for v in self.inputs.values(): + if isinstance(v, (list, tuple)): + flat_inputs.extend(v) + else: + flat_inputs.append(v) + return flat_inputs + + def type_value_inference(self, overwrite_output=False): + """ + Perform type inference and auto_val computation based on new input Vars + in kwargs. If self._output_vars is None then we generate _output_vars; + otherwise no new Var is created, but type inference result is verified + against existing _output_vars, if overwrite_output is False. 
+ + If overwrite_output is True, then the type inference result overwrites the + existing _output_vars + """ + output_types = self.type_inference() + if not isinstance(output_types, tuple): + output_types = (output_types,) + output_vals = self._auto_val(output_types) + try: + output_names = self.output_names() + if not isinstance(output_names, tuple): + output_names = (output_names,) + except NotImplementedError: + if len(output_types) > 1: + output_names = tuple(str(i) for i, _ in enumerate(output_types)) + else: + output_names = ("",) # output name same as op name. + + # Combine (output_names, output_types, output_vals) to create output + # Vars. + if self._output_vars is None: + self._output_vars = [] + for i, (n, sym_type, sym_val) in enumerate( + zip(output_names, output_types, output_vals) + ): + name = self.name + "_" + n if n != "" else self.name + if types.is_list(sym_type): + new_var = ListVar( + name, + elem_type=sym_type.T[0], + init_length=sym_type.T[1], + dynamic_length=sym_type.T[2], + sym_val=sym_val + if (sym_val is not None and isinstance(sym_val.val, list)) + else None, + op=self, + op_output_idx=i, + ) + elem_shape = new_var.elem_shape + if elem_shape is not None and len(elem_shape) >= 5: + msg = ( + "Core ML only supports list of elements with rank <= 4. " + 'Layer "{}", with type "{}", outputs a list of rank {} tensors.' + ).format(self.name, self.op_type, len(elem_shape)) + raise ValueError(msg) + else: + if types.is_tensor(sym_type) and types.is_complex(sym_type.T[0]): + # Only `complex` op needs to maintain the real/imag data in the ComplexVar. + # For other ops, this ComplexVar is just a placeholder here, which will be + # replaced by a newly created ComplexVar during complex ops lowering pass. + real_data = ( + self.real_data if self.op_type == "complex" else None + ) + imag_data = ( + self.imag_data if self.op_type == "complex" else None + ) + new_var = ComplexVar( + name, + sym_type, + sym_val, + op=self, + op_output_idx=i, + real=real_data, + imag=imag_data, + ) + else: + new_var = Var(name, sym_type, sym_val, op=self, op_output_idx=i) + self._output_vars.append(new_var) + else: + # Check new inference result against existing self._output_vars. + for i, (sym_type, sym_val) in enumerate(zip(output_types, output_vals)): + out_var = self._output_vars[i] + # Check type inference + if overwrite_output: + out_var._sym_type = sym_type + elif not is_compatible_type(sym_type, out_var.sym_type): + msg = "Output Var {} in op {} type changes with new input Vars" + raise ValueError(msg.format(out_var.name, self.name)) + + # Check value inference + if overwrite_output: + out_var._sym_val = sym_val + + if sym_val is not None and out_var.sym_val is not None: + if np.any(sym_val.val != out_var.sym_val): + if overwrite_output: + out_var._sym_val = sym_val + else: + msg = 'value_inference differs for var {} in op {}' + if not _is_compatible_symbolic_array(sym_val.val, out_var.sym_val): + raise ValueError(msg.format(out_var.name, self.name)) + + for o in self.outputs: + o._set_nonreplaceable_vars_upstream() + + def _auto_val(self, output_types): + """ + # Evaluation is two stage: + # + # Stage 1: Check whether the method value_inference() is implemented + # + # Stage 2: Check if there's an value_inference() implementation + # for given input types. 
+        #
+        # Suppose inputs are all SYMBOL:
+        # Case 1: No value_inference() implemented => fail at stage 1
+        # Case 2: If value_inference() implemented, but requires all VALUE not
+        #         SYMBOL => fail at stage 2
+        # Case 3: If value_inference() implemented, and has no restriction on
+        #         input types => Success
+        #
+        # If either stage fails, outputs[i].val is None.
+        # Otherwise, output[i].sym_val is not None.
+
+        output_types: tuple of builtin types
+
+        Returns:
+            output_vals: tuple of builtin types with values, or tuple of None
+        """
+        do_auto_val = True
+
+        if do_auto_val:
+            # Is self.value_inference implemented for the corresponding input?
+            try:
+                vals = self.value_inference()
+            except NotImplementedError:
+                do_auto_val = False
+
+        if not do_auto_val:
+            # No auto_val possible.
+            return tuple(None for _ in output_types)
+
+        if not isinstance(vals, (tuple, list)):
+            vals = (vals,)
+        for val in vals:
+            if val is None:
+                do_auto_val = False
+        if not do_auto_val:
+            # No auto_val possible.
+            return tuple(None for _ in output_types)
+
+        auto_val = []
+        for t, v in zip(output_types, vals):
+            builtin_val = t()
+            if isinstance(v, mil_list):
+                builtin_val.val = v.ls
+            else:
+                builtin_val.val = v
+            auto_val.append(builtin_val)
+        return auto_val
+
+    def value_inference(self):
+        """
+        Optional Python implementation of the op based on (materialized)
+        values in `self._input_vars`. Return a builtin value (single output)
+        or a tuple of builtin values (multi-output) of the same length as
+        returned by `type_inference`.
+        """
+        msg = "value_inference() is not implemented by op {}"
+        raise NotImplementedError(msg.format(self.op_type))
+
+    def default_inputs(self):
+        """
+        Optional. Returns default values for optional inputs. The
+        function is guaranteed to have access to all required inputs and
+        possibly some optional inputs should the user supply them.
+        They may be used to construct default values, such as
+        `strides=[1]*num_spatial_dims` in conv, where
+        `num_spatial_dims` may be inferred from the rank of
+        required inputs.
+        """
+        return DefaultInputs()
+
+    def output_names(self):
+        """
+        Optional. If implemented, we set the name of output var i to
+        self.name + "_" + output_names[i]
+
+        Returns a string (single output) or tuple of strings.
+        """
+        msg = "output_names() is not implemented by op {}"
+        raise NotImplementedError(msg.format(self.op_type))
+
+    def type_inference(self):
+        """
+        Return (builtin_type, builtin_val) pair from type inference.
+        builtin_val may be None if the symbolic value is not attainable at
+        compile time.
+        """
+        raise NotImplementedError("This function must be implemented by each op")
+
+    def build_nested_blocks(self):
+        """
+        Build nested blocks (for cond, while_loop, and other composite
+        blocks).
+        """
+        pass
+
+    def _ensure_required_inputs(self):
+        """
+        Raises ValueError if required inputs are not present.
+        """
+        for name, input_type in self._input_types.items():
+            if not input_type.optional and self._input_vars[name] is None:
+                msg_prefix = 'Op "{}" (op_type: {}) '.format(self.name, self.op_type)
+                raise ValueError(
+                    msg_prefix + "Required input {} is missing".format(name)
+                )
+
+    def _validate_and_set_inputs(self, input_kvs, no_check_var_types=False):
+        """
+        For each k, v in `input_kvs`, perform the following:
+
+        - Check that k exists in `self.input_spec`
+        - Check that v satisfies the corresponding `InputType`
+        - Set the input, possibly replacing an existing input.
+
+        Note that it does not ensure all required inputs are satisfied.
+        Use _ensure_required_inputs() for that.
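+
+        A usage sketch (hypothetical vars, shown for intuition only; this is
+        what set_inputs above ultimately funnels into):
+
+            # Rebind input "x" of an existing op to new_var: the old var drops
+            # this op from its child_ops, new_var gains it, and (optionally)
+            # output types are re-inferred.
+            op.set_inputs(x=new_var, type_inference=True)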
+ + Parameters + ---------- + - input_kvs: Dict[str, Var] + Each key in input_kvs must exist in `self.input_specs`. Its values + must be a Var. + + - no_check_var_types: bool + True to check var types against input_specs only, but not + enforcing new input vars to be a subtype of existing input vars + """ + for key in input_kvs.keys(): + if key not in self._input_types: + raise RuntimeError( + "Unknown input '{}' for op '{}'".format(key, self.op_type) + ) + + def check_and_detach(v_new, v_old, op, no_check_var_types): + # Check new var's sym_type is compatible with the + # existing's sym_type. + if ( + not is_compatible_type(v_new.sym_type, v_old.sym_type) + and not no_check_var_types + ): + msg = "New var type {} not a subtype of " + "existing var type {}" + raise ValueError(msg.format(v_new.sym_type, v_old.sym_type)) + v_old.remove_child_op(op, no_check_var_types) + + self.input_spec.validate_inputs(self.name, self.op_type, input_kvs) + + for name, var in input_kvs.items(): + # Remove this operation itself from existing input + # Var's child_ops + existing_input_var = self._input_vars[name] + if existing_input_var is not None: + if isinstance(existing_input_var, (list, tuple)): + for v_old, v_new in zip(existing_input_var, var): + check_and_detach(v_new, v_old, self, no_check_var_types) + else: + check_and_detach( + var, existing_input_var, self, no_check_var_types + ) + + # Set var as input_var + if isinstance(var, Var): + # TODO: the child op of complex op's input might get lost, as the complex op will + # be lowered. Maybe should add child op here and take care of it in lowering pass. + var.add_child_op(self) + elif isinstance(var, (tuple, list)): + for v in var: + v.add_child_op(self) + # ignore function inputs + self._input_vars[name] = var + setattr(self, name, var) + + @property + def inputs(self): + """ + Returns + ------- + - inputs: Dict[str, Union[Var, Tuple[Var]]] + """ + # Filter out InternalVar + return { + k: v + for k, v in self._input_vars.items() + if not isinstance(v, InternalVar) and v is not None + } + + @property + def outputs(self): + return self._output_vars + + @property + def op_type(self): + return type(self).__name__ + + @property + def opset_version(self): + op_variants = type(self)._op_variants + opset_versions = sorted(list(op_variants.keys())) + for i in opset_versions: + if op_variants[i] == type(self): + return i + + def remove_from_block(self): + """ + Remove / detach itself from the enclosing block. See Block.remove_ops + for details. + """ + self.enclosing_block.remove_ops([self]) + + @staticmethod + def var_to_str(v): + if isinstance(v, (tuple, list)): + return "(" + ", ".join(["%" + s.name for s in v]) + ")" + elif v.op and v.op.op_type == "const": + val = v.op.val.sym_val + if isinstance(val, (np.generic, np.ndarray)): + # for small tensors, serialize as string; skip large tensors. 
+ if val.size <= 10: + return str(val.tolist()) + else: + # other types are small enough they can be serialized + return ( + '"' + val + '"' + if isinstance(val, str) + else str(val) + ) + + return "%" + v.name + + def indented_str(self, indent=""): + if self.op_type == "const": + return "" + s = indent + if self.outputs is not None: + s += ", ".join([str(o) for o in self.outputs]) + s += " = " + self.op_type + "(" + s += ", ".join( + [ + k + "=" + Operation.var_to_str(self.inputs[k]) + for k in self._input_types.keys() + if k in self.inputs and not is_internal_input(k) + ] + ) + s += ', name="{}")\n'.format(self.name) + for b in self.blocks: + s += b.indented_str(indent=indent + SPACES) + return s + + def __repr__(self): + return str(self) + + def __str__(self): + return self.indented_str(SPACES) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py new file mode 100644 index 00000000..f62e29b9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import complex_dialect_ops, iOS15, iOS16 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py new file mode 100644 index 00000000..e5a3b0cd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py @@ -0,0 +1,8 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil.mil.ops.registry import \ + SSAOpRegistry as _SSAOpRegistry + +register_op = _SSAOpRegistry.register_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py new file mode 100644 index 00000000..d971ffb8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py @@ -0,0 +1,548 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math +import numbers +from typing import List, Tuple + + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Var, get_new_symbol, types +from coremltools.converters.mil.mil.ops.defs.iOS15.elementwise_unary import ( + cast as cast_op_class, +) +from coremltools.converters.mil.mil.types import builtin_to_string, promote_dtypes +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + +MAX_SIZE_CONSTANT_FOLDING = 1024 * 1024 / 4 # When a fp32 const takes over 1MB, we won't create a const op for that + +def broadcast_shapes(shape_x, shape_y): + """ + Check and broadcast given input shapes. + :param shape_x: tuple of int or symbols + Shape of the first tensor (possibly symbolic). + :param shape_y: tuple of int or symbols + Shape of the second tensor (possibly symbolic). + :return: tuple of int or symbols + Result from broadcast. + """ + + def raise_incompatible_dim_exception(): + raise ValueError( + "Incompatible dim {} in shapes {} vs. {}".format( + i, shape_x, shape_y + ) + ) + + shape_x = tuple(shape_x) + shape_y = tuple(shape_y) + if len(shape_x) < len(shape_y): + shape_x = tuple([1] * (len(shape_y) - len(shape_x))) + shape_x + if len(shape_y) < len(shape_x): + shape_y = tuple([1] * (len(shape_x) - len(shape_y))) + shape_y + + ret_shapes = list() + for i in range(len(shape_x)): + if shape_x[i] == shape_y[i]: + ret_shapes.append(shape_x[i]) + else: + is_x_unknown = is_symbolic(shape_x[i]) + is_y_unknown = is_symbolic(shape_y[i]) + if shape_x[i] == 1: + ret_shapes.append(shape_y[i]) + elif shape_y[i] == 1: + ret_shapes.append(shape_x[i]) + elif not is_y_unknown and shape_y[i] > 1: + if not is_x_unknown and shape_x[i] != shape_y[i]: + raise_incompatible_dim_exception() + ret_shapes.append(shape_y[i]) + elif not is_x_unknown and shape_x[i] > 1: + if not is_y_unknown and shape_x[i] != shape_y[i]: + raise_incompatible_dim_exception() + ret_shapes.append(shape_x[i]) + elif is_x_unknown or is_y_unknown: + ret_shapes.append(get_new_symbol()) + else: + raise_incompatible_dim_exception() + + return tuple(ret_shapes) + + +def promoted_primitive_type(type1, type2): + """ + Given a pair of tensor or primitive types, find the smallest type that can store an instance + of their primitive type. + """ + ptype1 = type1.get_primitive() if types.is_tensor(type1) else type1 + ptype2 = type2.get_primitive() if types.is_tensor(type2) else type2 + return types.promote_types(ptype1, ptype2) + + +def effective_kernel(kernel_shape, dilations): + """ + + Args: + kernel_shape: tuple[int] representing the kernel shape in each + given dimension. + dilations: tuple[int] representing the dilation of the kernel + in each given dimension. Must be the same length as + kernel_shape, and is assumed to give the dimensions in + the same order as kernel_shape + + Returns: tuple[int] representing the effective shape of the kernel + in each given dimension, with each dimension in the order given, + taking into account dilation. + See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions + Note that a dilation of 1 is equivalent to having no dilation. 
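+
+    A quick worked example of the formula (plain ints, no converter state):
+
+        effective size per dim = (k - 1) * d + 1
+        effective_kernel([3, 3], [1, 1])  ->  [3, 3]   # dilation 1: unchanged
+        effective_kernel([3, 3], [2, 2])  ->  [5, 5]   # 3x3 kernel, dilation 2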
+ + """ + if len(kernel_shape) != len(dilations): + raise ValueError( + f"kernel_shape ({len(kernel_shape)}) and dilations ({len(dilations)}) " + f"must be the same length" + ) + return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)] + + +def aggregated_pad( + pad_type, + kernel_shape, + input_shape=None, + strides=None, + dilations=None, + custom_pad=None, +): + """ + Args + pad_type: string. Must be one of ('same', 'same_lower', 'valid', 'custom') + + kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) + + input_shape: [iH, iW, ...]: spatial input dims (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + strides: [sH, sW, ...]: spatial strides (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + dilations: [dH, dW, ...]: dilations (excluding channels) + If not provided, defaults to [1, 1, ...], effectively no dilation. + + custom_pad: Required iff pad_type == 'custom'. + custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding + for spatial dim i. + + + Returns: + A list of total (before + after) padding for each spatial dimension in kernel_shape. + """ + num_spatial_dims = len(kernel_shape) + if dilations is None: + dilations = [1] * num_spatial_dims + elif len(dilations) != num_spatial_dims: + raise ValueError( + f"dilations must have same length as kernel_shape " + f"({num_spatial_dims}, but got {len(dilations)})" + ) + if pad_type in ["same", "same_lower"]: + if input_shape is None or len(input_shape) != num_spatial_dims: + raise ValueError( + "For SAME padding input_shape must not be None and must have " + "same length as kernel_shape ({}, but got {})".format( + num_spatial_dims, + len(input_shape) if input_shape is not None else "None", + ) + ) + if strides is None or len(strides) != num_spatial_dims: + raise ValueError( + "For SAME padding strides must not be None and must have " + "same length as kernel_shape ({}, but got {})".format( + num_spatial_dims, len(strides) if strides is not None else "None" + ) + ) + effective_ks = effective_kernel(kernel_shape, dilations) + return [ + int(max(0, s * math.ceil(float(i) / float(s)) - i + k - s)) + if not is_symbolic(i) else get_new_symbol() + for i, k, s in zip(input_shape, effective_ks, strides) + ] + if pad_type == "valid": + return [0] * num_spatial_dims + if pad_type == "custom": + if custom_pad is None or len(custom_pad) != 2 * num_spatial_dims: + raise ValueError("Invalid custom_pad.") + return [ + custom_pad[2 * d] + custom_pad[2 * d + 1] for d in range(num_spatial_dims) + ] + raise ValueError('Invalid padding pad_type "{}"'.format(pad_type)) + + +def spatial_dimensions_out_shape( + pad_type, input_shape, kernel_shape, strides, dilations=None, custom_pad=None, ceil_mode=False, +): + """ + Args + pad_type: string. Must be one of ('same', 'same_lower', 'valid', 'custom') + + input_shape: [iH, iW, ...]: spatial input dims (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) + + strides: [sH, sW, ...]: spatial strides (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + dilations: [dH, dW, ...]: dilations (excluding channels) + If not provided, defaults to [1, 1, ...], effectively no dilation. + + custom_pad: Required iff pad_type == 'custom'. + custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding + for spatial dim i. + + ceil_mode: determines the padding and output shape. 
+ When ceil mode is True: + out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1 + if (out_dim-1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0): + out_dim = out_dim - 1 + When ceil mode is False: + out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1 + + Returns: + A list of spatial output sizes for each spatial dimension of kernel_shape. + + """ + num_spatial_dims = len(kernel_shape) + if dilations is None: + dilations = [1] * num_spatial_dims + if custom_pad is None: + custom_pad = [0] * num_spatial_dims * 2 + if not ( + len(input_shape) + == len(kernel_shape) + == len(strides) + == len(dilations) + == len(custom_pad) / 2 + ): + raise ValueError( + f"input_shape (length {len(input_shape)}), " + f"kernel_shape (length {len(kernel_shape)}), " + f"strides (length {len(strides)}), " + f"dilations (length {len(dilations)}), " + f"and custom_pad (length {len(custom_pad)}) divided by two " + "must all be the same length" + ) + + pad = aggregated_pad( + pad_type=pad_type, + kernel_shape=kernel_shape, + input_shape=input_shape, + strides=strides, + dilations=dilations, + custom_pad=custom_pad, + ) + effective_ks = effective_kernel(kernel_shape, dilations) + out_shape = [] + for r in range(num_spatial_dims): + # only check if `input_shape` (spatial part of the input image) is symbolic, because: + # * `input_shape` can be symbolic + # * `pad` (aggregated from `input_shape` + ...) is symbolic only if `input_shape` is symbolic + # * `effective_ks` (effective kernel size, determined from kernel size + dilations) cannot be symbolic + # * strides cannot be symbolic + if is_symbolic(input_shape[r]): + out_shape.append(get_new_symbol()) + else: + out_dim = 0 + if not ceil_mode: + out_dim = math.floor((input_shape[r] + pad[r] - effective_ks[r]) / strides[r] + 1) + else: + out_dim = math.floor((input_shape[r] + pad[r] - effective_ks[r] + strides[r] - 1) / strides[r] + 1) + if (out_dim - 1) * strides[r] >= input_shape[r] + pad[r]/2 and pad[r] > 0: + out_dim = out_dim - 1 + if out_dim <= 0: + raise ValueError(f"spatial dimension {r} has invalid output size {out_dim}") + out_shape.append(out_dim) + return out_shape + + +def parse_einsum_equation(equation: str) -> Tuple[List]: + """ + Args + equation : str + + parse the equation in the following manner: + (running example: "nchw,nwhr->nchr") + + step 1: split the equation with delimiter "->" + e.g.: this will give "nchw,nwhr" and "nchr" + + step 2: split the LHS equation string with delimiter "," + e.g.: this will give input1 : "nchw", input2: "nwhr" + + step 3: map each character to a unique integer, which is incremented. + Iterate over input1, input2 and output, in that order. + e.g.: input 1, i.e., "nchw" will give vector {0,1,2,3} + input 2, i.e, "nwhr" will produce {0,3,2,4} + output , i.e. 
"nchr" will produce {0,1,2,4} + + return vectors corresponding to the 2 inputs and the output + """ + input_output_str = equation.split('->') + assert len(input_output_str) == 2, "unsupported einsum equation {}".format(equation) + input_str = input_output_str[0] + output_str = input_output_str[1] + + inputs = input_str.split(',') + assert len(inputs) == 2, "unsupported einsum equation {}".format(equation) + input1_str = inputs[0] + input2_str = inputs[1] + + input1_vec = [-1 for i in range(len(input1_str))] + input2_vec = [-1 for i in range(len(input2_str))] + output_vec = [-1 for i in range(len(output_str))] + map_char_to_int = {} + + def _update_vec(str, vec, map_char_to_int, index): + for i, s in enumerate(str): + if s not in map_char_to_int: + map_char_to_int[s] = index + index += 1 + vec[i] = map_char_to_int[s] + return index + + index = _update_vec(input1_str, input1_vec, map_char_to_int, 0) + index = _update_vec(input2_str, input2_vec, map_char_to_int, index) + _update_vec(output_str, output_vec, map_char_to_int, index) + + return input1_vec, input2_vec, output_vec + +def compute_gather(params, indices, axis, batch_dims): + """ + This utility function computes the gather operation with batch_dims supported. + """ + def compute_gather_helper(params, indices, axis): + scalar_indices = isinstance(indices, numbers.Integral) + if scalar_indices: + res = np.take(params, [indices], axis) + res2 = np.squeeze(res, axis=axis) + if isinstance(res2, np.ndarray) and len(res2.shape) == 0: + # The `res2` is a numpy 0-d array (after doing np.squeeze on a 1-d array). + # For 0-d array in numpy, we need to extract the scalar value by first converting + # it back to 1-d array. + # Notice that .item() doesn't work because it returns a built-in type instead of + # np.generic type, which will fail the downstream var value setter. + return np.atleast_1d(res2)[0] + return res2 + return np.take(params, indices, axis) + + if batch_dims == 0: + return compute_gather_helper(params, indices, axis) + + params_shape = params.shape + indices_shape = indices.shape + batch_shape = params_shape[:batch_dims] + + params_new_shape = [np.prod(batch_shape)] + list(params_shape[batch_dims:]) + indices_new_shape = [np.prod(batch_shape)] + list(indices_shape[batch_dims:]) + params_reshape = np.reshape(params, params_new_shape) + indices_reshape = np.reshape(indices, indices_new_shape) + + res = [] + for p, i in zip(params_reshape, indices_reshape): + res.append(compute_gather_helper(p, i, axis - batch_dims)) + res = np.stack(res) + res_new_shape = tuple(batch_shape) + tuple(res.shape[1:]) + return np.reshape(res, res_new_shape) + +def promote_input_dtypes(input_vars): + """ + This utility function promotes all input variables to the same data type. + It is used to homogenize inputs to an op such as matmul / elementwise_binary, + and not the inputs to a function itself. 
+ """ + def _is_same_dtype(dtype1, dtype2): + return builtin_to_string(dtype1) == builtin_to_string(dtype2) + + def _promoted_var(var, promoted_dtype): + if var.val is None: + x = mb.cast( + x=var, dtype=builtin_to_string(promoted_dtype), name=var.name + "_promoted") + else: + const_value_after_cast = cast_op_class.get_cast_value(var, builtin_to_string(promoted_dtype)) + x = mb.const(val=const_value_after_cast, name=var.name + "_promoted") + return x + + for i, var in enumerate(input_vars): + if not isinstance(var, Var): + input_vars[i] = mb.const(val=var) + + promoted_dtype = promote_dtypes([var.dtype for var in input_vars]) + + for i, var in enumerate(input_vars): + if not _is_same_dtype(var.dtype, promoted_dtype): + input_vars[i] = _promoted_var(var, promoted_dtype) + + return input_vars + + +def solve_slice_by_index_shape(x_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask): + """ + Helper function to solve the shape of tensor slicing. + """ + ret_shape = [] + + if begin is None or len(begin) == 0: + begin = [None] * len(x_shape) + if end is None or len(end) == 0: + end = [None] * len(x_shape) + + if len(begin) != len(x_shape): + raise TypeError( + "slice_by_index op: size of 'begin', {}, is not equal to the rank of input, which is {}".format( + len(begin), len(x_shape) + ) + ) + if len(end) != len(x_shape): + raise TypeError( + "slice_by_index op: size of 'end', {}, is not equal to the rank of input, which is {}".format( + len(end), len(x_shape) + ) + ) + + # solve for shape inference + for idx in range(len(x_shape)): + # skip if we want to squeeze the dimension + if squeeze_mask[idx]: + continue + + # for those a[:] cases + if begin_mask[idx] and end_mask[idx]: + if is_symbolic(x_shape[idx]): + if stride[idx] == -1 or stride[idx] == 1: + ret_shape.append(x_shape[idx]) + else: + ret_shape.append(get_new_symbol()) + else: + num = np.ceil(float(x_shape[idx]) / abs(stride[idx])).astype( + np.int32 + ) + ret_shape.append(num) + continue + + """ + We first deal with those cases, where the output size is a deterministic number, even if the input dimension + is unknown (i.e. 
symbolic) + """ + if ( + not begin_mask[idx] + and not end_mask[idx] + and begin[idx] is not None + and end[idx] is not None + ): + # in this case the slice is from "begin" to "end", where both these boundary points are known + # we can find the size of the slice in this case, unless one of them is positive and other is negative + # as in that case, we would need to know the size of the full input dimension + if begin[idx] >= 0 and end[idx] >= 0 and stride[idx] > 0: + if end[idx] < begin[idx]: + raise ValueError( + "slice_by_index op: unsupported values in for dimension {}, " + "(begin, end, stride) : ({}, {}, {})".format( + idx, begin[idx], end[idx], stride[idx] + ) + ) + ret_shape.append( + np.arange(end[idx] - begin[idx])[ + slice(0, end[idx] - begin[idx], stride[idx]) + ].size + ) + continue + if begin[idx] < 0 and end[idx] < 0 and stride[idx] < 0: + if begin[idx] < end[idx]: + raise ValueError( + "slice_by_index op: unsupported values in for dimension {}, " + "(begin, end, stride) : ({}, {}, {})".format( + idx, begin[idx], end[idx], stride[idx] + ) + ) + ret_shape.append( + np.arange(begin[idx] - end[idx])[ + slice(-1, end[idx] - begin[idx] - 1, stride[idx]) + ].size + ) + continue + + if begin_mask[idx] and not end_mask[idx] and end[idx] is not None: + # in this case we know that the slice is [0, end] or [-1, end], depending on the sign of stride, + # and the value of end is known + if end[idx] > 0 and stride[idx] > 0: + ret_shape.append( + np.arange(end[idx])[slice(None, end[idx], stride[idx])].size + ) + continue + if end[idx] < 0 and stride[idx] < 0: + ret_shape.append( + np.arange(abs(end[idx]))[slice(None, end[idx], stride[idx])].size + ) + continue + + if not begin_mask[idx] and end_mask[idx] and begin[idx] is not None: + # in this case we know the value of begin, and since end_mask is True, we know that the slice + # is till the right most edge + if begin[idx] > 0 and stride[idx] < 0: + ret_shape.append( + np.arange(begin[idx] + 1)[slice(begin[idx], None, stride[idx])].size + ) + continue + if begin[idx] < 0 and stride[idx] > 0: + ret_shape.append( + np.arange(abs(begin[idx]))[ + slice(begin[idx], None, stride[idx]) + ].size + ) + continue + + # for symbolic case + if is_symbolic(x_shape[idx]): + ret_shape.append(get_new_symbol()) + continue + + # for single-element extraction case + if x_shape[idx] == 1: + ret_shape.append(1) + continue + + # when begin and end are not determined + if begin[idx] is None and not begin_mask[idx]: + ret_shape.append(get_new_symbol()) + continue + if end[idx] is None and not end_mask[idx]: + ret_shape.append(get_new_symbol()) + continue + + # parse negative dimension + if begin[idx] is not None and begin[idx] < 0: + begin[idx] = max(0, begin[idx] + x_shape[idx]) + if end[idx] is not None and end[idx] < 0: + end[idx] = max(0, end[idx] + x_shape[idx]) + + # compute shape + low, high = [0, x_shape[idx]] if stride[idx] > 0 else [-1, x_shape[idx] - 1] + begin_idx, end_idx = ( + [begin[idx], end[idx]] if stride[idx] > 0 else [end[idx], begin[idx]] + ) + is_begin_mask, is_end_mask = ( + [begin_mask[idx], end_mask[idx]] + if stride[idx] > 0 + else [end_mask[idx], begin_mask[idx]] + ) + if is_begin_mask: + begin_idx = low + end_idx = high if is_end_mask else min(end_idx, high) + num = np.ceil(float(end_idx - begin_idx) / abs(stride[idx])).astype( + np.int32 + ) + ret_shape.append(max(0, num)) + + return ret_shape diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py
new file mode 100644
index 00000000..2a12f029
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py
@@ -0,0 +1,744 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+This file contains the dialect ops for handling complex numbers.
+
+For example, torch.fft.fft accepts complex input and produces complex output, which is not
+supported by Core ML. However, we can break the calculation into its real and imaginary parts
+to work around the restriction.
+The dialect ops provided by this file can be used by any frontend (PyTorch, TensorFlow, etc.).
+For example, during torch frontend translation, torch's fft_fft op could be translated to
+    def fft_fft(context, nodes):
+        input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+        fft_res = mb.complex_fft(data=input_data, n=n, dim=dim, norm=norm)
+        context.add(fft_res, node.name)
+and the fft dialect op will then be lowered into core ops by calculating the real and imaginary
+parts separately.
+
+There are mainly three types of complex dialect ops:
+- Ops where the real and imag data interact (such as fft).
+- Ops where the real and imag data go through the non-complex version of the op separately (such as add).
+- Ops where only one of the real/imag data goes through the non-complex version (such as shape).
+
+All dialect ops in this file will be lowered into core ops by the `lower_complex_dialect_ops` pass.
+For adding a new dialect op, see the steps in the file docstring of `lower_complex_dialect_ops.py`.
+Notice that all dialect ops have `complex_` as a prefix, which is required by setting
+`namespace="complex"` in `register_op`.
+"""
+
+from typing import Optional, Tuple
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    TensorInputType,
+)
+from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic
+from coremltools.converters.mil.mil.types.type_mapping import (
+    infer_complex_dtype,
+    infer_fp_dtype_from_complex,
+)
+from coremltools.converters.mil.mil.var import ComplexVar, Var
+
+register_op = SSAOpRegistry.register_op
+
+_FFT_VALID_NORMS = {"forward", "backward", "ortho"}
+
+
+def fft_canonicalize_length_dim(
+    input_data: Var, length: Optional[Var], dim: Optional[Var], c2r: bool = False
+) -> Tuple[int, int]:
+    """
+    Canonicalize length and dim for 1-D FFT (based on PyTorch's fft documentation):
+    - length: Signal length. If given, the input will either be zero-padded or trimmed to this
+          length before computing the FFT.
+    - dim: The dimension along which to take the one dimensional FFT.
+    - c2r: Used for "complex to real" ops, such as irfft, which take complex input and output
+          real data.
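+
+    A standalone sketch of these rules with plain ints (hypothetical helper for
+    intuition only; the real function operates on Vars):
+
+        def _sketch(input_shape, length=None, dim=-1, c2r=False):
+            if length is None:
+                length = input_shape[dim]
+                if c2r:
+                    # one-sided (rfft) spectrum: restore the full signal length
+                    length = 2 * (length - 1)
+            return length, dim
+
+        _sketch((4, 10))           # -> (10, -1): fft keeps the size by default
+        _sketch((4, 6), c2r=True)  # -> (10, -1): irfft expands 2 * (6 - 1)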
+ """ + shapes, dims = fft_canonicalize_shapes_dims(input_data, length, dim, c2r) + return shapes[0], dims[0] + + +def fft_canonicalize_shapes_dims( + input_data: Var, shapes: Optional[Var], dims: Optional[Var], c2r: bool = False +) -> Tuple[Tuple[int], Tuple[int]]: + """ + Canonicalize shapes and dims for N-D FFT (based on PyTorch's fftn documentation): + - shapes: Signal size in the transformed dimensions. If given, each dimension dims[i] will + either be zero-padded or trimmed to the length s[i] before computing the FFT. If a + length -1 is specified, no padding is done in that dimension. + Default: s = [input.size(d) for d in dims] + - dims: Dimensions to be transformed. Default: all dimensions, or the last len(s) dimensions if + s is given. + - c2r: Use for "complex to real", such as irfftn, which takes complex and output real data. + """ + if shapes is not None: + shapes = shapes.val + if isinstance(shapes, np.integer): + shapes = (shapes,) + if dims is not None: + dims = dims.val + if isinstance(dims, np.integer): + dims = (dims,) + + # Input validation. + input_rank = input_data.rank + if dims is not None: + for dim in dims: + if dim < -input_rank or dim >= input_rank: + raise ValueError(f"Invalid dim {dim} in `dims`.") + if shapes is not None: + for shape in shapes: + if shape <= 0: + raise ValueError(f"Invalid shape {shape} in `shapes`.") + + # Determine if the last dim specified in dims need to be expanded. For IRFFTN, the input is + # interpreted as a one-sided Hermitian signal in the Fourier domain, as produced by rfftn(), so + # we need to expand the dim back to the full matrix (with conjugate part not pruned). + last_dim_expand: bool = shapes is None and c2r + + if shapes is not None: + if dims is None: + # Has shape, no dim. + # Default is last len(s) dimensions. + dims = tuple(range(input_rank - len(shapes), input_rank)) + else: + # Has shape, has dim. + if len(shapes) != len(dims): + raise ValueError( + "shapes and dims must have the same number of elements." + ) + shapes = tuple( + shape if shape != -1 else input_data.shape[dim] + for (shape, dim) in zip(shapes, dims) + ) + elif dims is None: + # No shape, no dim. + dims = tuple(range(input_rank)) + shapes = tuple(input_data.shape) + else: + # No shape, has dim. + shapes = tuple(input_data.shape[dim] for dim in dims) + + # In RFFTN, the output is trimmed (because FFT of real-value input is Hermitian-symmetric, the + # conjugate part is removed) to ``original_dim // 2 + 1``, so here we do the reverse + # ``2 * (trimmed_dim - 1)`` to restore the original shape. + if last_dim_expand: + target_last_dim_shape = 2 * (input_data.shape[dims[-1]] - 1) + shapes = shapes[:-1] + (target_last_dim_shape,) + + if len(shapes) != len(dims): + raise ValueError( + f"shape ({len(shapes)}) and dim ({len(dims)}) should have same number of elements." + ) + + return shapes, dims + + +@register_op(namespace="complex") +class complex(Operation): + """ + Dialect op for constructing a complex data from real and imaginary data. + """ + + input_spec = InputSpec( + real_data=TensorInputType(type_domain="T"), + imag_data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp32,), + } + + def type_inference(self): + if self.real_data.shape != self.imag_data.shape: + raise ValueError( + f"The shape of real_data ({self.real_data.shape}) and imag_data " + f"({self.imag_data.shape}) must match to construct complex data." 
+ ) + return types.tensor( + infer_complex_dtype(self.real_data.dtype, self.imag_data.dtype), + self.real_data.shape, + ) + + +@register_op(namespace="complex") +class complex_real(Operation): + """Dialect op for extracting real part of complex data.""" + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + return types.tensor( + infer_fp_dtype_from_complex(self.data.dtype), self.data.shape + ) + + +@register_op(namespace="complex") +class complex_imag(Operation): + """Dialect op for extracting imaginary part of complex data.""" + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + return types.tensor( + infer_fp_dtype_from_complex(self.data.dtype), self.data.shape + ) + + +@register_op(namespace="complex") +class complex_fft(Operation): + """ + Dialect op for 1-D FFT. As PyTorch's FFT API has a much more fine-grained control than + TensorFlow's, the parameters of this dialect op mainly follows `torch.fft.fft`. + + Parameters + ---------- + data: tensor<\*D, T> (Required) + * The input tensor. + n: const i32 (Optional. Default=None) + * Signal length. If given, the input will either be zero-padded or trimmed to this length + before computing the FFT. + dim: const i32 (Optional. Default=``-1``) + * The dimension along which to take the one dimensional FFT. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the forward transform (fft()), these correspond to: + * "forward" - normalize by 1/n + * "backward" - no normalization + * "ortho" - normalize by 1/sqrt(n) (making the FFT orthonormal) + * Calling the backward transform (ifft()) with the same normalization mode will apply an + overall normalization of 1/n between the two transforms. This is required to make ifft() + the exact inverse. + * Default is "backward" (no normalization). + + Returns + ------- + tensor<\*V, complex64> + * A complex tensor where real and imag parts have the same shape. + * If ``n`` is None, real's and imag's shapes are same as the input. + * If ``n`` is specified, shape is ``V[dim]=n``. + + Attributes + ---------- + T: fp32, complex64 + + References + ---------- + See `torch.fft.fft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32, types.complex64), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + if self.norm.val not in _FFT_VALID_NORMS: + raise ValueError( + f"Invalid norm param. Valid options are {_FFT_VALID_NORMS}" + ) + output_type = ( + self.data.dtype if types.is_complex(self.data.dtype) else types.complex64 + ) + # The shape of FFT output is determined by `n` and `dim`. + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_fftn(Operation): + """ + Dialect op for N-D FFT. As PyTorch's FFT API has a much more fine-grained control than + TensorFlow's, the parameters of this dialect op mainly follows `torch.fft.fftn`. 
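+
+    A shape-bookkeeping sketch (mirrors the type_inference below, with plain
+    ints instead of Vars):
+
+        out = [2, 8, 8]             # input shape
+        shapes, dims = (16,), (2,)  # dims defaults to the last len(shapes) axes
+        for s, d in zip(shapes, dims):
+            out[d] = s
+        # out == [2, 8, 16]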
+ + Parameters + ---------- + data: tensor<\*D, T> (Required) + * The input tensor. + shapes: const tensor (Optional. Default=None) + * Signal size in the transformed dimensions. If given, each dimension ``dims[i]`` will + either be zero-padded or trimmed to the length ``shapes[i]`` before computing the FFT. If + a length ``-1`` is specified, no padding is done in that dimension. If not specified, it's + equivalent to ``shapes = [data.size(dim) for dim in dims]``. + dims: const tensor (Optional. Default=None) + * Dimensions to be transformed. If not specified, it's equivalent to all dimensions, or the + last ``len(shapes)`` dimensions if ``shapes`` is given. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the forward transform (fftn()), these correspond to: + * "forward" - normalize by 1/n + * "backward" - no normalization + * "ortho" - normalize by 1/sqrt(n) (making the FFT orthonormal) + where ``n = prod(shapes)`` is the logical FFT size. Calling the backward transform + (ifftn()) with the same normalization mode will apply an overall normalization of 1/n + between the two transforms. This is required to make ifftn() the exact inverse. + * Default is "backward" (no normalization). + + Returns + ------- + tensor<\*V, complex64> + * A complex tensor where real and imag parts have the same shape. + * If ``shapes`` and ``dims`` are both None, real's and imag's shapes are same as the input. + * If ``shapes`` or ``dims`` is specified, shape is ``V[dim]=shapes[dim] for dim in dims``. + + Attributes + ---------- + T: fp32, complex64 + + References + ---------- + See `torch.fft.fftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32, types.complex64), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + if self.norm.val not in _FFT_VALID_NORMS: + raise ValueError( + f"Invalid norm param. Valid options are {_FFT_VALID_NORMS}" + ) + output_type = ( + self.data.dtype if types.is_complex(self.data.dtype) else types.complex64 + ) + # The shape of FFT output is determined by `shapes` and `dims`. + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + output_shape = list(self.data.shape) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_rfft(Operation): + """ + Dialect op for 1-D RFFT. It's similar to 1-D FFT, but the input is real number. The FFT of a + real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``, so the output contains only the + positive frequencies below the Nyquist frequency. To compute the full output, use FFT. + + Parameters + ---------- + See the ``complex_fft`` op. + + Returns + ------- + tensor<\*V, complex64> + * Based on the output of FFT, further remove the redundant conjugate part, which means + ``V[dim] = V[dim] // 2 + 1``. + + Attributes + ---------- + T: fp32 + + References + ---------- + See `torch.fft.rfft `_. 
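+
+    A numpy cross-check of the one-sided size (numpy is used here only as an
+    illustration; the dialect op itself is lowered rather than executed):
+
+        import numpy as np
+        np.fft.rfft(np.zeros(10)).shape  # (6,) == (10 // 2 + 1,)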
+ """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + if types.is_complex(self.data.dtype): + raise ValueError( + "RFFT requires real-value input. For complex input, please use FFT." + ) + output_type = infer_complex_dtype(self.data.dtype, self.data.dtype) + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + # The shape of RFFT output is FFT after removing redundant conjugate part. + output_shape[self.dim.val] = output_shape[self.dim.val] // 2 + 1 + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_rfftn(Operation): + """ + Dialect op for N-D RFFT (rfftn). The FFT of a real signal is Hermitian-symmetric, + X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n]) so the full ``complex_fftn`` output contains + redundant information. ``complex_rfftn`` omits the negative frequencies in the last dimension. + + Parameters + ---------- + See the ``complex_fftn`` op. + + Returns + ------- + tensor<\*V, complex64> + * Based on the output of N-D FFT, further remove the redundant conjugate part in last dim, + which means ``V[dims[-1]] = V[dims[-1]] // 2 + 1``. + + Attributes + ---------- + T: fp32 + + References + ---------- + See `torch.fft.rfftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = infer_complex_dtype(self.data.dtype, self.data.dtype) + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + # The last dim's shape is after removing the redundant conjugate part. + output_shape[dims[-1]] = output_shape[dims[-1]] // 2 + 1 + + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_ifft(Operation): + """ + Dialect op for IFFT. Computes the one dimensional inverse discrete Fourier transform of input. + + Parameters + ---------- + All parameters except ``norm`` are same as the ``complex_fft`` op. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the backward transform (ifft()), these correspond to: + * "forward" - no normalization + * "backward" - normalize by 1/n + * "ortho" - normalize by 1/sqrt(n) (making the IFFT orthonormal) + * Calling the forward transform (fft()) with the same normalization mode will apply an + overall normalization of 1/n between the two transforms. This is required to make ifft() + the exact inverse. + * Default is "backward" (normalize by 1/n). + + Returns + ------- + tensor<\*V, T> + * A complex tensor where real and imag parts have the same shape. 
The shape is the same as + the input except for the ``dim``: + * If ``n`` is None, the shape is same as the input. + * If ``n`` is specified, the shape at the `dim` is ``V[dim]=n``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.ifft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + output_type = self.data.dtype + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_ifftn(Operation): + """ + Dialect op for N-D IFFT (ifftn). + + Parameters + ---------- + All parameters except ``norm`` are same as the ``complex_fftn`` op. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the backward transform (ifftn()), these correspond to: + * "forward" - no normalization + * "backward" - normalize by 1/n + * "ortho" - normalize by 1/sqrt(n) (making the IFFT orthonormal) + where n = prod(s) is the logical IFFT size. Calling the forward transform (fftn()) with + the same normalization mode will apply an overall normalization of 1/n between the two + transforms. This is required to make ifftn() the exact inverse. + * Default is "backward" (normalize by 1/n). + + Returns + ------- + tensor<\*V, T> + * A complex tensor where real and imag parts have the same shape. The shape is the same as + the input except for the ``dim`` in ``dims``: + * If ``shapes`` and ``dims`` are both None, the shape is same as the input. + * If ``shapes`` or ``dims`` is specified, shape at ``dim`` is ``shapes[dim]``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.ifftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = self.data.dtype + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_irfft(Operation): + """ + Dialect op for IRFFT. Computes the inverse of RFFT. The input is interpreted as a one-sided + Hermitian signal in the Fourier domain, as produced by rfft(). By the Hermitian property, the + output will be real-valued. + + Parameters + ---------- + See the ``complex_ifft`` op for details. + + Returns + ------- + tensor<\*V, fp32> + * The shape is the same as the input except for the ``dim``: + * If ``n`` is None, the shape at the `dim` is ``V[dim] = 2 * (D[dim] - 1)``. 
+ * If ``n`` is specified, the shape at the `dim` is ``V[dim]=n``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.irfft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + output_type = infer_fp_dtype_from_complex(self.data.dtype) + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim, c2r=True) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_irfftn(Operation): + """ + Dialect op for N-D IRFFT (irfftn). + + Parameters + ---------- + See the ``complex_ifftn`` op for details. + + Returns + ------- + tensor<\*V, fp32> + * The shape is the same as the input except for: + * If ``shapes`` and ``dims`` are both None, shape at the last dim ``V[-1]`` is + ``2 * (D[-1] - 1)``. + * If ``shapes`` or ``dims`` is specified, shape at ``dim`` is ``shapes[dim]``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.irfftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = infer_fp_dtype_from_complex(self.data.dtype) + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims( + self.data, self.shapes, self.dims, c2r=True + ) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_shape(Operation): + """ + Returns a 1-dimensional tensor with the shape of the input complex tensor. + + Parameters + ---------- + x: tensor<[*?], T> (Required) + * Input tensor. + + Returns + ------- + tensor + * Shape of the input tensor. + * ``K = x.real.rank``. 
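+
+        For example, a complex input of shape ``(2, 3, 5)`` (an illustrative
+        shape) yields the int32 tensor ``[2, 3, 5]``.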
+ + Attributes + ---------- + T: complex64 + """ + + input_spec = InputSpec(x=TensorInputType(type_domain="T")) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + if not isinstance(self.x, ComplexVar): + raise ValueError("x must be a ComplexVar.") + input_rank = self.x.real.rank + return types.tensor(types.int32, tuple([input_rank])) + + def value_inference(self): + if any_symbolic(self.x.real.shape): + # convert elements in shape to int32 + res = [x if is_symbolic(x) else np.int32(x) for x in self.x.real.shape] + return np.array(res) + else: + return np.array(self.x.real.shape).astype(np.int32) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py new file mode 100644 index 00000000..9e7f6b89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py @@ -0,0 +1,52 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target + +_IOS15_TARGET = target.iOS15 + +from .activation import (clamped_relu, elu, gelu, leaky_relu, + linear_activation, prelu, relu, relu6, scaled_tanh, + sigmoid, sigmoid_hard, silu, softmax, softplus, + softplus_parametric, softsign, thresholded_relu) +from .classify import classify +from .control_flow import (cond, const, list_gather, list_length, list_read, + list_scatter, list_write, make_list, select, + while_loop) +from .conv import conv, conv_quantized, conv_transpose +from .elementwise_binary import (add, elementwise_binary, equal, floor_div, + greater, greater_equal, less, less_equal, + logical_and, logical_or, logical_xor, maximum, + minimum, mod, mul, not_equal, pow, real_div, + sub) +from .elementwise_unary import (abs, acos, asin, atan, atanh, cast, ceil, clip, + cos, cosh, erf, exp, exp2, floor, inverse, log, + logical_not, round, rsqrt, sign, sin, sinh, + sqrt, square, tan, tanh, threshold) +from .image_resizing import (affine, crop, crop_resize, resample, + resize_bilinear, resize_nearest_neighbor, + upsample_bilinear, upsample_nearest_neighbor) +from .linear import einsum, linear, matmul +from .normalization import (batch_norm, instance_norm, l2_norm, layer_norm, + local_response_norm) +from .pool import avg_pool, l2_pool, max_pool +from .random import (random_bernoulli, random_categorical, random_normal, + random_uniform) +from .recurrent import gru, lstm, rnn +from .reduction import (reduce_argmax, reduce_argmin, reduce_l1_norm, + reduce_l2_norm, reduce_log_sum, reduce_log_sum_exp, + reduce_max, reduce_mean, reduce_min, reduce_prod, + reduce_sum, reduce_sum_square) +from .scatter_gather import (gather, gather_along_axis, gather_nd, scatter, + scatter_along_axis, scatter_nd) +from .tensor_operation import (argsort, band_part, concat, cumsum, fill, + flatten2d, identity, non_maximum_suppression, + non_zero, one_hot, pad, range_1d, shape, split, + stack, tile, topk) +from .tensor_transformation import (depth_to_space, expand_dims, pixel_shuffle, + reshape, reverse, reverse_sequence, + slice_by_index, slice_by_size, + sliding_windows, space_to_batch, + space_to_depth, squeeze, transpose) diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py new file mode 100644 index 00000000..0df819f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py @@ -0,0 +1,616 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (VALUE, Operation, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op + +from .elementwise_unary import elementwise_unary + + +class activation_with_alpha(Operation): + """ + Activation with Alpha Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + +class activation_with_alpha_and_beta(Operation): + """ + Activation with Alpha Beta Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + beta=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + +@register_op +class clamped_relu(activation_with_alpha_and_beta): + """ + If ``x >= 0`` return elementwise ``min(beta, x)``, otherwise return + ``min(beta, alpha * x)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same type and shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + x = np.minimum(np.maximum(self.x.val, 0), self.beta.val) + y = np.minimum(np.minimum(self.x.val, 0) * self.alpha.val, self.beta.val) + return x + y + + +@register_op +class elu(activation_with_alpha): + """ + If ``x > 0`` return elementwise ``x``, otherwise return ``alpha * (e^x - 1)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + b = np.copy(self.x.val) + b[b < 0] = self.alpha.val * (np.exp(b[b < 0]) - 1) + return b + + +@register_op +class gelu(Operation): + """ + Return the elementwise Gaussian error linear unit activation function for ``x``. + + You can use ``EXACT``, ``TANH_APPROXIMATION``, or ``SIGMOID_APPROXIMATION`` values + based on the following formulas: + + * ``EXACT``: + + .. math:: + f(x) = 0.5x\\left ( 1+\\rm{erf}\\left ( \\frac{x}{\\sqrt{2}} \\right ) \\right ) + + * ``TANH_APPROXIMATION``: + + .. math:: + f(x) = 0.5x\\left ( 1+\\rm{tanh}\\left ( \\sqrt{2/\\pi}\\left ( x + 0.044715x^3 \\right ) \\right ) \\right ) + + * ``SIGMOID_APPROXIMATION``: + + .. 
math::
+        f(x) = x*\\rm{sigmoid}(1.702x)
+
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+
+    mode: const str (Optional)
+        * Use ``'EXACT'``, ``'TANH_APPROXIMATION'``, or ``'SIGMOID_APPROXIMATION'`` for ``str``.
+        * Default is ``'EXACT'``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="EXACT",
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        if self.mode.val == "TANH_APPROXIMATION":
+            a = np.sqrt(2 / np.pi) * (self.x.val + 0.044715 * np.power(self.x.val, 3))
+            return 0.5 * self.x.val * (1 + np.tanh(a))
+        elif self.mode.val == "SIGMOID_APPROXIMATION":
+            return self.x.val * (1 / (1 + np.exp(-(1.702 * self.x.val))))
+        else:
+            square_root_of_2 = np.sqrt(2)
+            vfunc = np.vectorize(lambda x: 0.5 * x * (1 + math.erf(x / square_root_of_2)))
+            return vfunc(self.x.val)
+
+    def type_inference(self):
+        allowed_values = {"EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"}
+        if self.mode.val not in allowed_values:
+            msg = '"gelu" op: unrecognized value of mode: "{}". Allowed values are {}'
+            raise ValueError(msg.format(self.mode.val, allowed_values))
+        return self.x.sym_type
+
+
+@register_op
+class leaky_relu(activation_with_alpha):
+    """
+    If ``x >= 0`` apply ``x`` elementwise, otherwise apply ``alpha * x`` elementwise.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+    alpha: const T (Required)
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        b = np.copy(self.x.val)
+        b[b < 0] *= self.alpha.val
+        return b
+
+
+@register_op
+class linear_activation(activation_with_alpha_and_beta):
+    """
+    Apply elementwise ``x * alpha + beta``.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+    alpha: const T (Required)
+    beta: const T (Required)
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return self.alpha.val * self.x.val + self.beta.val
+
+
+@register_op
+class prelu(activation_with_alpha):
+    """
+    Where ``i = 1 ... C``, if ``x_i > 0``, return ``x_i``, otherwise return ``alpha_i * x_i``.
+
+    Parameters
+    ----------
+    x: tensor<[B, C, 1..3], T> (Required)
+        * x must have rank 3, 4, or 5; that is, a shape of (B,C,H), (B,C,H,W), or (B,C,D,H,W).
+    alpha: const tensor<[C], T> (Required)
+        * The length of alpha must match the second dimension of x (the channel dimension).
+
+    Returns
+    -------
+    tensor<[B, C, 1..3], T>
+        * A tensor of the same shape as ``x``.
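+
+    As an illustration (with made-up numbers), for ``x`` of shape ``(1, 2, 1, 1)``
+    with per-channel values ``-1`` and ``2`` and ``alpha = [0.5, 0.9]``, the output
+    channels are ``-0.5`` (``0.5 * -1``) and ``2`` (positive values pass through).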
+ + Attributes + ---------- + T: fp32, fp16 + """ + + @precondition(allow=VALUE) + def value_inference(self): + alpha_br = self.alpha.val + for i in range(1, len(self.x.shape)): + alpha_br = np.expand_dims(alpha_br, i) + x_pos = np.maximum(self.x.val, 0) + b = np.minimum(self.x.val, 0) + return x_pos + b * alpha_br + + def type_inference(self): + if self.x.rank not in (3, 4, 5): + raise ValueError( + "prelu op: x must be rank 3 or 4 or 5, instead it is of rank {}".format( + len(self.x.shape) + ) + ) + if len(self.alpha.val.shape) != 1: + raise ValueError("alpha should be rank 1") + if self.x.shape[1] != self.alpha.val.shape[0]: + raise ValueError( + "Size of dimension 1 of alpha should be the same as " + + "the size of dimension 1 of x." + ) + if self.x.rank in (3, 5): + # check whether all alpha values are the same or not + are_values_same = ( + np.where(np.abs(self.alpha.val - self.alpha.val[0]) > 1e-5)[0].size == 0 + ) + if not are_values_same: + raise ValueError( + "prelu op: rank 3 or rank 5 input is only supported when all the values of alpha are same," + "which is not the case here" + ) + return self.x.sym_type + + +@register_op +class relu(elementwise_unary): + """ + Return elementwise-applied rectified linear activation: ``max(x, 0)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.maximum(self.x.val, 0) + + +@register_op +class relu6(elementwise_unary): + """ + Return elementwise-applied rectified linear activation: ``min(max(x, 0), 6)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum(np.maximum(self.x.val, 0), 6) + + +@register_op +class scaled_tanh(activation_with_alpha_and_beta): + """ + Return ``alpha * tanh(beta * x)`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input range is ``(-inf, inf)``. + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return self.alpha.val * np.tanh(self.x.val * self.beta.val) + + +@register_op +class sigmoid(elementwise_unary): + """ + Return ``sigmoid(x)`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return 1 / (1 + np.exp(-self.x.val)) + + +@register_op +class sigmoid_hard(activation_with_alpha_and_beta): + """ + Return ``min( max( alpha * x + beta, 0 ), 1 )`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. 
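+
+    For example, the widely used "hard sigmoid" corresponds to ``alpha = 0.2`` and
+    ``beta = 0.5``: it maps ``x = 0`` to ``0.5`` and saturates outside ``[-2.5, 2.5]``.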
+ + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum( + np.maximum((self.alpha.val * self.x.val) + self.beta.val, 0), 1 + ) + + +@register_op +class silu(elementwise_unary): + """ + Sigmoid Linear Unit, elementwise apply the SiLU or Swish operation ``x * sigmoid(x)``. + + Parameters + ---------- + x: tensor<\*, T> + + Returns + ------- + tensor<\*, T> + + Attributes + ---------- + T: fp16, fp32 + """ + + pass + + +@register_op +class softplus(elementwise_unary): + """ + Return ``log( 1 + e^x )`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.log(1 + np.exp(-np.abs(self.x.val))) + np.maximum(self.x.val, 0) + + +@register_op +class softplus_parametric(activation_with_alpha_and_beta): + """ + Return ``alpha_i * log( 1 + e^( beta_i * x_i ) )``, where ``i = 1 ... C``. + + Parameters + ---------- + x: tensor<[b, C, n, m], T> (Required) + alpha: const tensor<[C], T> (Required) + beta: const tensor<[C], T> (Required) + + Returns + ------- + tensor<[b, C, n, m], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + alpha_br = np.copy(self.alpha.val) + beta_br = np.copy(self.beta.val) + for i in range(1, len(self.x.val.shape)): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + return alpha_br * np.log(1 + np.exp(self.x.val * beta_br)) + + def type_inference(self): + if len(self.x.shape) < 3: + raise ValueError("x should be at least rank 3") + if len(self.alpha.val.shape) != 1: + raise ValueError("alpha should be rank 1") + if self.x.shape[1] != self.alpha.val.shape[0]: + raise ValueError( + "Size of dimension 0 of alpha should be the same as " + + "the size of dimension 1 of x." + ) + if len(self.beta.val.shape) != 1: + raise ValueError("beta should be rank 1") + if self.x.shape[1] != self.beta.val.shape[0]: + raise ValueError( + "Size of dimension 0 of beta should be the same as " + + "the size of dimension 1 of x." + ) + return self.x.sym_type + + +@register_op +class softmax(Operation): + """ + Return ``exp(x) / tf.reduce_sum(tf.exp(x), axis)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + axis: const i32 (Optional) + * Default is ``-1``. + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + axis=-1, + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + x = self.x.val + axis = self.axis.val + max_vals = np.max(x, axis=axis, keepdims=True) + temp = np.exp(x - max_vals) + return temp / np.sum(temp, axis=axis, keepdims=True) + + +@register_op +class softsign(elementwise_unary): + """ + Return ``x / ( 1 + |x| )`` applied elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. 
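+
+    For example, ``softsign(1) = 0.5`` and ``softsign(-3) = -0.75``; outputs always
+    lie in the open interval ``(-1, 1)``.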
+ + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return self.x.val / (1 + np.abs(self.x.val)) + + +@register_op +class thresholded_relu(activation_with_alpha): + """ + Return ``x`` if ``x >= alpha``, otherwise return ``0``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + + Returns + ------- + tensor<\*, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + y = self.x.val + y[y < self.alpha.val] = 0 + return y diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py new file mode 100644 index 00000000..29d819fd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py @@ -0,0 +1,76 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.input_type import (InputSpec, + ListInputType, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +@register_op +class classify(Operation): + """ + The presence of this op indicates that the model is of type classifier. The op + constructs the model output accordingly; that is, the predicted class label + and the output probability dictionary. The parameters of this op are set + based on the attributes set for the + `coremltools.ClassifierConfig `_ class + by the user. The outputs of this op cannot be used by another op. + + Parameters + ---------- + probabilities: tensor<[\* , ProbT]> (Required) + A tensor in the graph, which is used to compute the classifier output(s). This + is the tensor whose values are mapped to the class labels and used for constructing + the predicted class label and the output dictionary of class names and values. + + classes: list<\*, ClassT> (Required) + List of classes. + + Returns + ------- + + Dict[classT, probT] + + + Attributes + ---------- + ProbT: fp32 + ClassT: i64, str + """ + + input_spec = InputSpec( + probabilities=TensorInputType(type_domain=types.fp32), + classes=ListInputType(const=True), + ) + + def type_inference(self): + # check the type of "classes" + if not types.is_list(self.classes.sym_type): + msg = "'classes' in the op 'classify' must be of type list. Instead it is {}." + raise ValueError(msg.format(self.classes.sym_type.__type_info__())) + + # check the type of "probabilities" + if self.probabilities.dtype != types.fp32: + msg = "classify op: input probabilities must be of type fp32. Instead it is of type {}" + raise TypeError(msg.format(self.probabilities.sym_type.get_primitive().__type_info__())) + + classes_elem_type = self.classes.elem_type + if classes_elem_type not in {types.str, types.int64}: + msg = "Type of elements in 'classes' in the op 'classify' must be either str or int64. Instead it is {}." 
+ raise ValueError(msg.format(classes_elem_type.__type_info__())) + + # check that the size of "classes" is compatible with the size of "probabilities" + if not any_symbolic(self.probabilities.shape): + size = np.prod(self.probabilities.shape) + if len(self.classes.val) != size: + msg = "In op 'classify', number of classes must match the size of the tensor corresponding to 'probabilities'." + raise ValueError(msg) + + return classes_elem_type, types.dict(classes_elem_type, types.double) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py new file mode 100644 index 00000000..621ddf05 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py @@ -0,0 +1,828 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import (Block, get_existing_symbol, + get_new_symbol, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + InternalInputType, + ListInputType, + PyFunctionInputType, + TensorInputType, + TupleInputType) +from coremltools.converters.mil.mil.operation import (NONE, SYMBOL, VALUE, + Operation, mil_list, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types import is_compatible_type +from coremltools.converters.mil.mil.types.type_mapping import ( + builtin_to_string, is_subtype, numpy_type_to_builtin_type, + numpy_val_to_builtin_val) + + +@register_op +class cond(Operation): + """ + Perform a conditional execution. The return types must be identical + between the true and false branches. + + Parameters + ---------- + pred: tensor<[], bool> (Required) + * 0-D tensor (scalar) predicate to switch between true and false branches. + + _true_fn: function (Required) + * A Python function that executes if ``pred`` evaluates to ``True``. + * It must take zero input (i.e, no input), and return one or more values whose type becomes + the operation's return type. + + _false_fn: function (Required) + * A Python function that executes if ``pred`` evaluates to ``False``. + * It must take zero input (i.e. no input), and have return types that match those of the + ``if`` branch. + + _existing_blocks: list[Block] (Optional) + * Python list of ``Block``. + * For internal use only. When converting a milproto, we already got existing blocks, + and the ``build_nested_blocks`` function can use them directly. + * When ``_existing_blocks`` is set, ``_true_fn`` and ``_false_fn`` must be dummy functions which returns ``None``. + + Returns + ------- + tuple + * Python tuple of ``Variables`` from one of the branches. + """ + + input_spec = InputSpec( + pred=TensorInputType(type_domain=types.bool), + _true_fn=PyFunctionInputType(), + _false_fn=PyFunctionInputType(), + _existing_blocks=InternalInputType(optional=True), + ) + + def build_nested_blocks(self): + # If the front end is milproto, we already have the well constructed cond/body block. + # For this case, we set self.blocks directly. + # We also check that _cond and _body are both dummy functions (return None). 
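+        # Illustrative usage sketch (not part of this op): a frontend typically
+        # supplies the branch functions as closures over already-built Vars,
+        # for example:
+        #     mb.cond(pred=p,
+        #             _true_fn=lambda: mb.add(x=a, y=b),
+        #             _false_fn=lambda: mb.sub(x=a, y=b))
+        # Here `mb` is the MIL Builder and `p`, `a`, `b` are placeholder Vars.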
+ if self._existing_blocks is not None and self._existing_blocks.val is not None: + assert self._true_fn.val([]) is None + assert self._false_fn.val([]) is None + self.blocks = self._existing_blocks.val + return + + # Cond block + true_block_name = self.name + "_true" + with Block(name=true_block_name, outer_op=self) as true_block: + true_func = self._true_fn.val + true_ret_vars = true_func() + if isinstance(true_ret_vars, tuple): + true_ret_vars = list(true_ret_vars) + if not isinstance(true_ret_vars, list): + true_ret_vars = [true_ret_vars] + true_block.set_outputs(true_ret_vars) + self.blocks.append(true_block) + + false_block_name = self.name + "_false" + with Block(name=false_block_name, outer_op=self) as false_block: + false_func = self._false_fn.val + false_ret_vars = false_func() + if isinstance(false_ret_vars, tuple): + false_ret_vars = list(false_ret_vars) + if not isinstance(false_ret_vars, list): + false_ret_vars = [false_ret_vars] + false_block.set_outputs(false_ret_vars) + self.blocks.append(false_block) + + def type_inference(self): + true_ret_vars = self.blocks[0].outputs + false_ret_vars = self.blocks[1].outputs + # Verify true_ret_vars has the same types as false_ret_vars + for i, (vt, vf) in enumerate(zip(true_ret_vars, false_ret_vars)): + if not is_compatible_type(vt.sym_type, vf.sym_type): + msg = ( + "true branch output {} type {} mismatch false branch" + + " output type {}" + ) + raise ValueError(msg.format(vt.name, + vt.sym_type.__type_info__(), vf.sym_type.__type_info__())) + + return tuple(v.sym_type for v in true_ret_vars) + + def value_inference(self): + if self.pred.val is None: + raise NotImplementedError() + if self.pred.val: + return [v.val for v in self.blocks[0].outputs] + return [v.val for v in self.blocks[1].outputs] + + +class Const(Operation): + """ + A base class that returns constant values. + + Parameters + ---------- + mode: immediate_value, file_value (Optional) + * Determines how the constant value is stored in the internal MIL format. + * For large constants such as convolution weights, use ``file_value``. + * For smaller-size constants such as values of a stride, use ``immediate_value``. 
+
+    val: const<\*,T> (Required)
+
+    Returns
+    -------
+    const<\*,T>
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, str, bool
+    """
+
+    input_spec = InputSpec(
+        val=InternalInputType(const=True),
+    )
+
+    def type_inference(self):
+        builtin_type, _ = self._get_type_val(self.val.val)
+        return builtin_type
+
+    def value_inference(self):
+        _, val = self._get_type_val(self.val.val)
+        return val
+
+    def _get_type_val(self, value):
+
+        if isinstance(value, (float, np.float64)):
+            value = np.float32(value)
+        elif isinstance(value, bool):
+            pass
+        elif isinstance(value, (int, np.int64)):
+            value = np.int32(value)
+        elif isinstance(value, (tuple, list, np.ndarray)):
+            value = np.array(value) if isinstance(value, (tuple, list)) else value
+
+            # For the int type, we use int32 by default
+            if value.dtype in [np.uint16, np.int16, np.uint64, np.int64]:
+                if value.dtype in [np.uint64, np.int64]:
+                    msg = "Downcast const op {} data ".format(self.name) + \
+                        builtin_to_string(numpy_type_to_builtin_type(value.dtype)) + " as int32"
+                    logger.debug(msg)
+                value = value.astype(np.int32)
+
+            # For the float type, we use float32 by default
+            elif value.dtype == np.float64:
+                msg = "Downcast const op {} data fp64 as fp32".format(self.name)
+                logger.debug(msg)
+                value = value.astype(np.float32)
+
+        elif isinstance(value, mil_list):
+            # if val that was passed in is of type mil_list, which is just a
+            # wrapper on top of a python list, then construct the list type
+            list_value = value.ls
+            if len(list_value) == 0:
+                raise ValueError("'mil_list' points to an empty list")
+            builtin_elem_type, _ = self._get_type_val(list_value[0])
+            # mil_list is a special case where we want to preserve the int64 element type
+            if isinstance(list_value[0], np.int64):
+                builtin_elem_type = types.int64
+            from coremltools.converters.mil.mil.types.type_list import \
+                list as types_list
+            builtin_type = types_list(builtin_elem_type, init_length=len(list_value), dynamic_length=False)
+            return builtin_type, value
+
+        if not isinstance(value, (np.generic, np.ndarray, str, bool, mil_list)):
+            raise ValueError("Unknown value for constant: {}".format(value))
+
+        _, builtin_type = numpy_val_to_builtin_val(value)
+        return builtin_type, value
+
+
+@register_op
+class const(Const):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+
+# Internal const can have symbolic value (for testing purpose)
+@register_op
+class _const_symbolic(const):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def type_inference(self):
+        builtin_type, _ = self._get_type_val(self.val.sym_val)
+        return builtin_type
+
+    def value_inference(self):
+        # We allow symbolic values in _const_symbolic
+        _, val = self._get_type_val(self.val.sym_val)
+        return val
+
+
+@register_op
+class select(Operation):
+    """
+    Return the elements selected from either ``a`` or ``b`` depending on the ``cond``.
+
+    The shape of ``cond``, ``a``, and ``b`` must be broadcastable.
+    You must provide ``a`` and ``b`` together, or provide neither.
+    If you provide neither, the operation returns the indices
+    of ``cond`` that are ``True``.
+
+    Parameters
+    ----------
+    cond: tensor<[\*D1], B> (Required)
+        * Tensor. When ``True``, select element from ``a``, otherwise ``b``.
+
+    a: tensor<[\*D2], T> (Optional)
+        * Values selected at indices where ``cond`` is ``True``.
+        * Default is ``None``.
+
+    b: tensor<[\*D3], T> (Optional)
+        * Values selected at indices where ``cond`` is ``False``.
+        * Default is ``None``.
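+
+    For example (with illustrative values), ``cond = [True, False]``, ``a = [1, 2]``,
+    and ``b = [3, 4]`` yield ``[1, 4]``, matching ``numpy.where(cond, a, b)``.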
+ + Returns + ------- + tensor<[\*D_out], T> or tensor<[n, len(D1)], int32> + * If ``a, b`` are both provided, the return shape is based on broadcast rules + from ``cond, a, b``. + * If ``a, b`` are ``None``, the return shape is 2-D, where the first dimension + ``n`` is the number of matching indices in ``cond``, and ``len(D1)`` is the + rank of ``cond``. + + Attributes + ---------- + B: bool + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + cond=TensorInputType(type_domain=types.bool), + a=TensorInputType(type_domain="T"), + b=TensorInputType(type_domain="T") + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + a_type = self.a.sym_type + b_type = self.b.sym_type + if all([a_type, b_type]): + compatible, ret_type = types.is_tensor_and_is_compatible_general_shape( + a_type, b_type + ) + if compatible: + return ret_type + elif a_type == b_type: + return a_type + else: + raise ValueError("Type mismatch {} vs. {}".format(a_type, b_type)) + return a_type if a_type is not None else b_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.where(self.cond.val, self.a.val, self.b.val) + + +@register_op +class while_loop(Operation): + """ + Perform the body repeatedly while the condition ``cond`` is true. + + Parameters + ---------- + _cond: function (Required) + * A Python function that takes ``loop_vars`` as positional arguments. + * The function must return a ``bool`` ``Var``. + + _body: function (Required) + * A Python function that takes ``loop_vars`` as positional arguments. + * The function must return the same number of output vars as ``loop_vars`` + with the same types. + + loop_vars: tuple (Required) + * Python tuple of ``Variables``. + + _existing_blocks: list[Block] (Optional) + * Python list of ``Block``. + * For internal use only. When converting a milproto, we already got existing blocks, + and the ``build_nested_blocks`` function can use them directly. + * When ``_existing_blocks`` is set, ``_cond`` and ``_body`` must be dummy functions which returns ``None``. + + Returns + ------- + tuple + * Python tuple (same type as ``loop_vars``). + """ + + input_spec = InputSpec( + # arg name with underscore prefix won't be printed. 
+ _cond=PyFunctionInputType(), + _body=PyFunctionInputType(), + loop_vars=TupleInputType(), + _existing_blocks=InternalInputType(optional=True), + ) + + @staticmethod + def _check_equal_value(val1, val2): + if val1 is None and val2 is None: + return True + if val1 is None or val2 is None: + return False + if isinstance(val1, np.ndarray) and isinstance(val2, np.ndarray): + return np.array_equal(val1, val2) + return val1 == val2 + + @staticmethod + def _clean_up_child_ops(block): + for op in list(block.operations): + + for b in op.blocks: + while_loop._clean_up_child_ops(b) + + inputs = op.get_flattened_inputs() + for in_var in inputs: + in_var.remove_child_op(op) + + def _build_block(self, block_inputs): + # Cond block: + block_name = self.name + '_cond_block' + with Block(block_inputs=block_inputs, outer_op=self, + name=block_name) as cond_block: + + cond_func = self._cond.val + cond_var = cond_func(*cond_block.inputs) + cond_vars = cond_var if isinstance(cond_var, list) else [cond_var] + cond_block.set_outputs(cond_vars) + + # Body block + block_name = self.name + '_body_block' + with Block(block_inputs=block_inputs, outer_op=self, + name=block_name) as body_block: + body_func = self._body.val + exit_vars = body_func(*body_block.inputs) + exit_vars = list(exit_vars) if isinstance(exit_vars, (list, tuple)) \ + else [exit_vars] + body_block.set_outputs(exit_vars) + + return cond_block, body_block, exit_vars + + def build_nested_blocks(self): + # self.loop_vars is python tuple of Vars. + + # block_inputs Var are not produced by any op. + # We assume block_inputs have the same types as self.loop_var. If not + # (e.g., when certain dimensions change shape during iterate), we'd + # adjust later. + + # We assume that sym_val is unchanging across the block iterate. If it + # changes, we rebuild the block and rerun type and value inference. + + # Design notes on two blocks (cond and body): + # + # - Observe that two blocks can always be represented as a single + # block that contains both cond and body logic, which would return + # [loop_cond] + loop_carries. `loop_cond` is a bool. + # + # - Observe that single block implies a do-while logic, + # in which the first iterate is always executed. It's possible to add + # a cond input to while_loop to modify do-while behavior: + # + # %first_cond = cond_logic(...) + # while_loop(cond=%first_cond, loop_vars=(...)) + # + # and we enter the first iterate only if cond is True. But this would + # require caller to execute cond logic outside of while_loop first + # (which also needs to be duplicated within the loop), + # resulting in duplicated code / ops. + # + # - Thus, single block is unnatural for the natural execution order, + # in which we execute the cond block first to get the loop_cond. Only + # if `loop_cond` is True do we execute the body block. This is the + # semantics of tf.while_loop. + + # If the front end is milproto, we already have the well constructed cond/body block. + # For this case, we set self.blocks directly. + # We also check that _cond and _body are both dummy functions (return None). 
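+        # Illustrative usage sketch (not part of this op): counting up to `n`
+        # with the MIL Builder, where `i0` and `n` are placeholder Vars:
+        #     mb.while_loop(_cond=lambda i: mb.less(x=i, y=n),
+        #                   _body=lambda i: mb.add(x=i, y=1),
+        #                   loop_vars=(i0,))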
+ if self._existing_blocks is not None and self._existing_blocks.val is not None: + assert self._cond.val([]) is None + assert self._body.val([]) is None + self.blocks = self._existing_blocks.val + return + + block_inputs = tuple(copy.copy(v) for v in self.loop_vars) + name_count = {v.name: 0 for v in block_inputs} + for v in block_inputs: + v._op = None + v.op_output_idx = None + v._child_ops = list() + + # Get unique name + + old_v_name = v.name + v.name = v.name + "_x" + str(name_count[v.name]) + name_count[old_v_name] += 1 + + v._sym_val = v._sym_val + v.consuming_blocks = list() + + cond_block, body_block, exit_vars = self._build_block(block_inputs) + + # Verify exit_vars has the same types as loop_vars + block_input_type_change = False + for i, (v_in, v_out) in enumerate(zip(block_inputs, exit_vars)): + if not is_subtype(v_out.sym_type, v_in.sym_type): + compat_shape = while_loop.get_compat_shape(v_out.sym_type, + v_in.sym_type) + if compat_shape is None: + msg = "loop_vars '{}' changes in the body of " \ + "while_loop '{}':\n {} -> {}" + raise ValueError(msg.format( + v_in.name, self.name, + v_in.sym_type, v_out.sym_type)) + else: + block_inputs[i]._sym_type = types.tensor( + v_in.dtype, compat_shape) + block_input_type_change = True + if not while_loop._check_equal_value(v_out.sym_val, v_in.sym_val): + block_inputs[i]._sym_val = None + block_input_type_change = True + + if block_input_type_change: + # Since we are going to build the block again, we first need to remove ops + # in the block from vars's _child_ops. + while_loop._clean_up_child_ops(cond_block) + while_loop._clean_up_child_ops(body_block) + + # Rebuild our block to invoke type inference. + cond_block, body_block, exit_vars = self._build_block(block_inputs) + for i, (v_in, v_out) in enumerate(zip(block_inputs, exit_vars)): + if not is_subtype(v_out.sym_type, v_in.sym_type): + msg = 'Block output {}: {} is not a subtype of ' +\ + 'block input {}: {} after factoring shape changes' + raise ValueError(msg.format(v_out.name, v_out.sym_type.__name__, + v_in.name, v_in.sym_type.__name__)) + if not while_loop._check_equal_value(v_out.sym_val, v_in.sym_val): + msg = 'Block output {}: {} is not equal to ' +\ + 'block input {}: {} after value changes' + raise ValueError(msg.format(v_out.name, v.sym_val, + v_in.name, v_in.sym_val)) + self.blocks.append(cond_block) + self.blocks.append(body_block) + + @staticmethod + def get_compat_shape(type1, type2): + """ + For tensor types `type1`, `type2` that are of the same rank, return + compat_shape (python list) where compat_shape[i] is integer iff type1 + and type2 have the same integer shape on dim i. compat_shape[i] is + symbolic otherwise. + + Return None if `type1`, `type2` have different rank or non-tensor + type. + """ + if not types.is_tensor(type1) or not types.is_tensor(type2): + return None + + s1 = type1.get_shape() + s2 = type2.get_shape() + + if len(s1) != len(s2): + return None + + compat_shape = [] + for d1, d2 in zip(s1, s2): + if d1 != d2: + compat_shape.append(get_new_symbol()) + else: + compat_shape.append(d1) + return compat_shape + + def type_inference(self): + # Skip the conditional var + return tuple(v.sym_type for v in self.blocks[1].outputs) + + +@register_op +class make_list(Operation): + """ + Create a list of tensor elements. The elements should have the same shape. + The list is similar to an auto-resizing array. + + Parameters + ---------- + init_length: (Optional, Default=1) + * Initial length for the list. 
+ * If ``dynamic_length`` is ``False``, + ``init_length`` is the fixed length of the list throughout runtime. + + dynamic_length: (Optional, Default is True) + + elem_shape: Tuple[const] (Required) + * 1-D vector denoting the shape of elements. + * If ``T = int32``, the element shape is known at compile time. + * ``T = string`` denotes the symbolic shape, in which the shape is determined + at runtime. + * If not provided, the resulting ``List`` won’t have the elementary shape + info, which may cause backend errors. Remedy this with SSA passes. + + dtype: const (Optional, Default is fp32) + * Possible values: ``{"bool", "fp16", "fp32", "int32"}`` + * Element tensor’s ``dtype``. + + Returns + ------- + List[*] + + Attributes + ---------- + T: i32, string + """ + + input_spec = InputSpec( + init_length=TensorInputType(optional=True, type_domain=types.int32), + dynamic_length=TensorInputType(const=True, optional=True, type_domain=types.bool), + elem_shape=TupleInputType(), + dtype=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + def default_inputs(self): + return DefaultInputs( + init_length=1, + dynamic_length=True, + dtype="fp32", + ) + + def type_inference(self): + builtin_dtype = types.string_to_builtin(self.dtype.val) + if builtin_dtype is None: + raise ValueError("Unsupported dtype {}".format(self.dtype.val)) + # Replace string with symbol + elem_shape_sym = [] + for s_var in self.elem_shape: + # s is str or int + s = s_var.val + if s is None: + msg = 'make_list elem_shape must be tuple of const. ' +\ + 'Tuple elem {} is not' + raise ValueError(msg.format(s_var.name)) + + if isinstance(s, str): + try: + symbol = get_existing_symbol(s) + except ValueError: + # Must be a new symbol + symbol = get_new_symbol(s) + elem_shape_sym.append(symbol) + else: + elem_shape_sym.append(s) + elem_type = types.tensor(builtin_dtype, elem_shape_sym) + return types.list( + elem_type, + init_length=self.init_length.val, + dynamic_length=self.dynamic_length.val, + ) + + +@register_op +class list_length(Operation): + """ + Return the length of ``ls``. + + Parameters + ---------- + ls: List[*] (Required) + + Returns + ------- + + * Length of ``ls``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec(ls=ListInputType(),) + + def type_inference(self): + return types.int32 + + @precondition(allow=VALUE | SYMBOL | NONE) + def value_inference(self): + if not self.ls.dynamic_length: + return self.ls.init_length + raise NotImplementedError() + + +@register_op +class list_write(Operation): + """ + Write a value into index ``index`` of ``ls``. + + Parameters + ---------- + ls: List (Required) + + index: (Required) + * Size of the list. + + value: <*,T> (Optional) + * Element value to write, which must match the element shape of ``ls``. + * Default is ``None``. + + Returns + ------- + List[*] + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + index=TensorInputType(type_domain=types.int32), + value=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + list_elem_type = self.ls.elem_type + value_type = self.value.sym_type + dynamic_length = self.ls.dynamic_length + init_length = self.ls.init_length + + if list_elem_type is None: + # fill in the elem type using value's type info. 
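+            # (A list whose element type is still unknown adopts the type of
+            #  the first value written to it; later writes must then be
+            #  subtypes of that element type, as checked below.)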
+ return types.list( + value_type, init_length=init_length, dynamic_length=dynamic_length + ) + if list_elem_type == types.unknown: + msg = "Input ls elem type unknown. Override with {}" + logger.warning(msg.format(value_type)) + return types.list( + value_type, init_length=init_length, dynamic_length=dynamic_length + ) + if not types.is_subtype(value_type, list_elem_type): + msg = "Elem type mismatch: ls elem type {} vs " + "value type {}" + raise ValueError(msg.format(list_elem_type.__type_info__(), + value_type.__type_info__())) + return self.ls.sym_type + + +@register_op +class list_read(Operation): + """ + Read the value at location ``index`` of ``ls``. + + Parameters + ---------- + ls: List[\*] (Required) + + index: (Required) + * Size of the list. + + Returns + ------- + <\*,T> + * The element's value. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + index=TensorInputType(type_domain=types.int32), + ) + + def type_inference(self): + list_elem_type = self.ls.elem_type + if list_elem_type is None: + msg = ( + "Unknown element type. The List might not have been " + + "written to ({})" + ) + raise ValueError(msg.format(self.name)) + return list_elem_type + + +@register_op +class list_gather(Operation): + """ + Return selected values in ``ls`` as a packed ``Tensor``. + + Parameters + ---------- + ls: List[\*] (Required) + + indices: (Required) + * Gather from indices, whose element must be in ``[0, ls.length)`` at runtime. + + Returns + ------- + <\*K,T> + * Selected tensors packed into a ``len(ls.elem_shape)+1`` rank tensor. + * ``K[0] == len(indices)``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + indices=TensorInputType(type_domain=types.int32), + ) + + def type_inference(self): + list_elem_type = self.ls.elem_type + if list_elem_type == types.unknown: + msg = ( + "Unknown element type. The List might not have been " + + "written to ({})" + ) + raise ValueError(msg.format(self.name)) + elem_shape = list_elem_type.get_shape() + dtype = list_elem_type.get_primitive() + ret_shape = [self.indices.shape[0]] + list(elem_shape) + return types.tensor(dtype, tuple(ret_shape)) + + +@register_op +class list_scatter(Operation): + """ + Scatter ``values`` to ``ls`` at locations ``indices``. + + Parameters + ---------- + ls: List[*] (Required) + + indices: tensor (Required) + * Indices of ``ls`` to scatter to. + * Elements of ``indices`` must be in ``[0, ls.length)`` at runtime. + * If indices are greater than or equal to the list length, the list is + dynamically resized. + + value: <*,T> (Optional) + * Element value to write, which must match the element shape of ``ls``. + * Default is ``None``. + + Returns + ------- + List[*] + * Updated list. 
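+
+    For example (with illustrative values), ``indices = [0, 2]`` and a ``value`` of
+    shape ``(2, *elem_shape)`` write ``value[0]`` to list position 0 and
+    ``value[1]`` to list position 2.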
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + indices=TensorInputType(type_domain=types.int32), + value=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + num_indices = self.indices.shape[0] + num_values = self.value.shape[0] + if num_values != num_indices: + raise ValueError( + "Cannot scatter {} values to {} indices".format(num_values, num_indices) + ) + list_elem_type = self.ls.elem_type + value_type = self.value.sym_type + dynamic_length = self.ls.dynamic_length + init_length = self.ls.init_length + + elem_type = types.tensor(value_type.get_primitive(), value_type.get_shape()[1:]) + if list_elem_type == types.unknown: + # fill in the elem type using value's type info. + return types.list( + elem_type, dynamic_length=dynamic_length, init_length=init_length + ) + if not types.is_subtype(elem_type, list_elem_type): + msg = "Elem type mismatch: ls elem type {} vs " + "value type {}" + raise ValueError(msg.format(list_elem_type, elem_type)) + return self.ls.sym_type diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py new file mode 100644 index 00000000..ee0ffd80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py @@ -0,0 +1,428 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.block import curr_opset_version +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + spatial_dimensions_out_shape +from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET + + +@register_op +class conv(Operation): + """ + Perform convolution over input. Supports 1-D, 2-D, and 3-D convolution. + + Parameters + ---------- + x: tensor<[n, C_in, \*d_in], T> (Required) + + * ``d_in`` are (possibly runtime-determined) spatial dimensions. For example, + ``d_in = [224, 224]`` for 2D convolution. + * ``1 <= len(d_in) <= 3``. + * ``C_in`` is the number of input channels or depth dimensions. + * ``n`` is the batch dimension. + + weight: tensor<[C_out, C_in/groups, \*K], T> (Required) + + * Filter weights. + * ``C_in`` is the number of input channels. + * ``C_in`` must be divisible by ``groups``. + * ``K`` are kernel sizes. For example, ``K = [KH, KW]`` for 2-D convolution. + * When ``dilations`` is not all ``1``, ``weight`` has to be ``const`` + at compile time + + strides: const tensor<[S], i32> (Optional) + + * Default to one vector of length equal to the number of spatial dimensions. + * Strides along each of the spatial dimensions. + * ``S == len(d_in)``. + + pad_type: const str (Required) + + Must be one of the following: + + * ``valid``: No padding. This is equivalent to custom pad with + ``pad[2*i] == pad[2*i+1] == 0, for i=0,...,len(d_in)-1``. + * ``custom``: Specify custom padding in the parameter ``pad``. 
+        * ``same``: Input is padded such that out spatial shapes are
+          ``d_out[i] = ceil(d_in[i] / strides[i])``.
+        * ``same_lower``: Similar to ``same`` but the padding
+          will place extra rows/cols on the top/left if the padding amount is odd.
+
+        Specifically, for ``i = 0, ..., len(d_in)-1``, the equivalent paddings are
+        calculated as follows:
+
+        * ``dilated_kernel = (K[i] - 1) * dilations[i] + 1``
+        * If ``dilated_kernel`` is odd,
+          ``padding[2*i] = padding[2*i+1] = floor(dilated_kernel / 2)``
+        * Otherwise:
+          ``padding[2*i] = ceil((dilated_kernel - 1) / 2)``,
+          ``padding[2*i+1] = floor((dilated_kernel - 1) / 2)``
+
+    pad: const tensor<[P], i32> (Optional. Default to all zeros)
+
+        * ``len(P) = 2 * len(d_in)``
+        * ``pad`` should be specified if and only if ``pad_type == custom``,
+          otherwise errors occur.
+        * ``pad`` represents the number of elements to pad before and after each
+          dimension. Specifically, ``pad[0], pad[1]`` are the pad size before / after
+          spatial dimension 0, ``pad[2], pad[3]`` are the pad size before / after
+          spatial dimension 1, etc.
+
+    dilations: const tensor<[S], i32> (Optional. Default to all 1s)
+
+        * Dilation value along each spatial dimension in ``d_in``.
+          See `visualization `_.
+        * ``S == len(d_in)``.
+
+    groups: const tensor<[], i32> (Optional, default to 1)
+
+        * Input and output channels are split by ``groups``.
+        * ``C_in`` must be divisible by ``groups``.
+        * Maximum value for group is ``C_in``, in which case it is a depthwise
+          convolution.
+
+        For examples (assuming ``C_in = 16, C_out = 32``):
+
+        * ``groups == 1``, ``weight`` has shape ``[32, 16, KH, KW]``: All input
+          channels are convolved with the ``weight`` kernel to produce all output
+          channels.
+        * ``groups == 2``, ``weight`` has shape ``[32, 8, KH, KW]``: Input
+          channels 0~7 are convolved with half of the ``weight`` kernel to produce
+          output channels 0~15. Similarly, input channels 8~15 are convolved with
+          the other half of ``weight`` to produce output channels 16~31.
+        * ``groups == C_in``, ``weight`` has shape ``[32, 1, KH, KW]``: Each input
+          channel is convolved with its own set of filters and each produces
+          ``C_out / C_in = 2`` channels. This is equivalent to depthwise
+          convolution.
+
+    bias: const tensor<[C_out],T> (Optional, default to all 0)
+        * Bias along output channels.
+
+    Returns
+    -------
+    tensor<[n, C_out, \*d_out], T>
+        * Output activation has the same rank and spatial dimension as the input.
+          That is, ``len(d_out) == len(d_in)``.
+        * For ``i = 0, ..., len(d_in)-1``,
+          ``d_out[i] = floor((d_in[i] + pad[2*i] + pad[2*i+1] - (K[i]-1)*dilations[i] - 1) / strides[i]) + 1``.
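+
+        As a worked example (sizes made up for illustration): ``d_in[i] = 224``,
+        ``K[i] = 3``, ``strides[i] = 2``, ``dilations[i] = 1``, and ``pad = (1, 1)``
+        give ``d_out[i] = floor((224 + 1 + 1 - 2 - 1) / 2) + 1 = 112``.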
+ + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + conv_transpose + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + weight=TensorInputType(type_domain="T"), + bias=TensorInputType(const=True, optional=True, type_domain="T"), + strides=TensorInputType(const=True, optional=True, type_domain=types.int32), + pad_type=TensorInputType(const=True, optional=True, type_domain=types.str), + pad=TensorInputType(const=True, optional=True, type_domain=types.int32), + dilations=TensorInputType(const=True, optional=True, type_domain=types.int32), + groups=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + num_spatial_dims = self.x.rank - 2 + return DefaultInputs( + bias=None, + strides=[1]*num_spatial_dims, + pad_type="valid", + pad=[0]*num_spatial_dims*2, + dilations=[1]*num_spatial_dims, + groups=1, + ) + + def type_inference(self): + inshape = self.x.shape + f_shape = self.weight.shape + kernel_shape = f_shape[2:] + C_out = f_shape[0] + C_in = self.x.shape[1] + groups = self.groups.val + + if self.bias is not None and \ + (len(self.bias.val.shape) > 1 or self.bias.val.shape[0] != C_out): + msg = "# of bias values {} not equal to # output channels {}" + raise ValueError(msg.format(self.bias.val.shape[0], C_out)) + if C_in % groups != 0: + msg = "# of input channels {} not divisible by groups {}" + raise ValueError(msg.format(C_in, groups)) + if C_in // groups != self.weight.shape[1]: + msg = "C_in / groups = {}/{} != weight[1] ({})" + raise ValueError(msg.format(C_in, groups, self.weight.shape[1])) + + strides = self.strides.val + dilations = self.dilations.val + + # The same_lower padding is not supported in iOS15 + if curr_opset_version() == _IOS15_TARGET and self.pad_type.val == "same_lower": + msg = "iOS15 version of conv does not support pad_type = `same_lower`" + raise ValueError(msg) + + # Ignore self.pad if pad_type != custom + custom_pad = None if self.pad_type.val != 'custom' else self.pad.val + + if self.weight.val is None and any([True if d > 1 else False for d in dilations]): + raise ValueError("Convolution with dynamic weights does not support dilations!") + + N = inshape[0] + C_out = f_shape[0] + # spatial dimensions + d_out_shape = spatial_dimensions_out_shape( + pad_type=self.pad_type.val, + input_shape=inshape[2:], + kernel_shape=kernel_shape, + strides=strides, + dilations=dilations, + custom_pad=custom_pad, + ) + retshape = [N, C_out] + d_out_shape + return types.tensor(self.x.dtype, tuple(retshape)) + + +@register_op +class conv_quantized(conv): + """ + Note: This is experimental and may change in the future. + Supports weight quantization for parameters while performing convolution over input. + ``W_float = W_quantized * scale + bias``. + + Parameters + ---------- + In addition to convolutional layer parameters, the following additional parameters + are required. + + quantization_type: const str (Required) + * One of ``linear``, or ``lut``. + + nbits: const tensor<[], i32> (Optional. Default to 8) + * Denotes the bit-width of the quantization. ``1 <= nbits <= 8``. + + quant_scale: tensor<*?, T> (Required) + * Denotes the scale of quantization. + + quant_bias: tensor<*?, T> (Required) + * Denotes the bias that is used to quantize/dequantize. + + Returns + ------- + tensor<[n, C_out, *d_out], T> + * Output activation has the same rank and spatial dimension as the input. + That is, ``len(d_out) == len(d_in)``. 
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        weight=TensorInputType(type_domain="U"),
+        bias=TensorInputType(const=True, optional=True, type_domain="U"),
+        quantization_type=TensorInputType(const=True, type_domain=types.str),
+        nbits=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        quant_scale=TensorInputType(const=True, type_domain="T"),
+        quant_bias=TensorInputType(const=True, type_domain="T"),
+        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        pad_type=TensorInputType(const=True, optional=True, type_domain=types.str),
+        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        dilations=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        groups=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp32, types.fp16),
+        "U": (types.uint8,),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                nbits=8,
+            )
+
+@register_op
+class conv_transpose(Operation):
+    """
+    Perform transposed convolution (also known as deconvolution and fractionally
+    strided convolution) over input. ``conv_transpose`` can also be used to compute
+    the gradient of conv. Supports 1-D, 2-D, and 3-D convolution.
+
+    Parameters
+    ----------
+
+    x: tensor<[n,C_in,*D_in],T> (Required)
+        * Input data.
+        * ``D_in`` are spatial dimensions.
+        * ``1 <= len(D_in) <= 3``.
+        * ``C_in`` is the number of input channels.
+
+    weight: const tensor<[C_in,C_out/groups,*K], T> (Required)
+        * Filter weights. ``C_in, C_out`` are the number of input and output channels
+          respectively.
+        * ``K`` are kernel sizes. ``1 <= len(K) <= 3``.
+
+    bias: const tensor<[C_out],T> (Optional, default to all 0)
+        * Bias added along output channels.
+
+    pad: const tensor<[P],i32> (Optional, default to all 0s)
+        * Number of elements to pad before and after each dimension.
+        * ``P == 2 * len(D_in)``.
+        * ``pad[2*i], pad[2*i+1]`` are pad sizes before and after
+          dimension ``i``, where ``0 <= i < len(D_in)``.
+
+    output_shape: const tensor<[P],i32> (Optional, default None)
+        * Expected output shape. The first two dimensions must be ``[n, C_out]``.
+        * The output shape of ``conv_transpose`` is underdetermined in general,
+          because ``conv`` can map multiple input shapes to a single output shape.
+          For example, for ``same`` padding mode, ``conv_out = ceil(conv_in/stride)``.
+          Hence we need ``output_shape`` when this occurs.
+
+    pad_type: const str (Optional, default ``valid``)
+        * One of ``same``, ``valid``, or ``custom``.
+
+    strides: const tensor<[S],i32> (Optional. Default to all 1s)
+        * Stride along each of the spatial dimensions.
+        * ``S == len(D_in)``.
+
+    dilations: const tensor<[S],i32> (Optional. Default to all 1s)
+        * Dilation value along each spatial dimension in ``d_in``. See ``conv``.
+        * ``S == len(D_in)``.
+
+    groups: const tensor<[], i32> (Optional. Default to 1)
+        * Input and output channels are separated into ``groups``.
+        * ``C_in`` and ``C_out`` must be divisible by the number of groups.
+          See ``conv`` for examples.
+
+    Returns
+    -------
+    tensor<[n,C_out,*D_out],T>
+        * If ``output_shape`` is not ``None``:
+
+          ``Dout = output_shape``
+
+        * If ``pad_type == "custom"``:
+
+          ``Dout[i] = (D_in[i]-1)*stride[i] + (K[i]-1) * dilation[i] + 1 - pad[2*i] - pad[2*i+1]``
+
+        * If ``pad_type == "valid"``:
+
+          ``Dout[i] = (D_in[i]-1)*stride[i] + (K[i]-1) * dilation[i] + 1``
+
+        * If ``pad_type == "same"``:
+
+          ``Dout[i] = D_in[i] * stride[i]``
+
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    conv
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),  # [n, C_in, spatial_dims]
+        weight=TensorInputType(const=True, type_domain="T"),  # [C_in, C_out/groups, spatial_dims]
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        output_shape=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        pad_type=TensorInputType(const=True, optional=True, type_domain=types.str),
+        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        dilations=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        groups=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        num_spatial_dims = self.x.rank - 2
+        return DefaultInputs(
+            bias=None,
+            pad=[0]*2*num_spatial_dims,
+            output_shape=None,
+            pad_type="valid",
+            strides=[1]*num_spatial_dims,
+            dilations=[1]*num_spatial_dims,
+            groups=1,
+        )
+
+    def type_inference(self):
+        # Input shape is [n, C_in, spatial_dims]
+        in_shape = self.x.shape
+        # Weight shape is [C_in, C_out/groups, spatial_dims]
+        f_shape = self.weight.shape
+        kernel_shape = f_shape[2:]
+        spatial_dim_rank = len(in_shape) - 2
+        N = in_shape[0]
+        C_in = in_shape[1]
+        groups = self.groups.val
+        C_out = f_shape[1] * groups
+
+        if self.bias is not None and self.bias.val.shape[0] != C_out:
+            msg = "# of bias values {} not equal to # output channels {}"
+            raise ValueError(msg.format(self.bias.val.shape[0], C_out))
+        if C_in % groups != 0:
+            msg = "# of input channels {} not divisible by groups {}"
+            raise ValueError(msg.format(C_in, groups))
+
+        # If output shape is given, return it
+        if self.output_shape is not None:
+            output_shape = self.output_shape.val
+            assert output_shape[0] == N
+            assert output_shape[1] == C_out
+            return types.tensor(
+                self.x.dtype, tuple(output_shape)
+            )
+
+        strides = self.strides.val
+        dilations = self.dilations.val
+        kernel_shape = [
+            (kernel_shape[r] - 1) * dilations[r] + 1 for r in range(spatial_dim_rank)
+        ]
+
+        D_in = in_shape[2:]  # spatial dimensions
+
+        # Deconv's output shape is underdetermined; we follow TF's shape logic here.
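+        # For reference, the branches below compute (kernel_shape has already
+        # been dilated at this point):
+        #   same:   D_out[r] = D_in[r] * strides[r]
+        #   valid:  D_out[r] = (D_in[r] - 1) * strides[r] + kernel_shape[r]
+        #   custom: the "valid" size, minus pad[2*r] and pad[2*r + 1]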
+        if self.pad_type.val == "same":
+            d_out_shape = [strides[r] * D_in[r] for r in range(spatial_dim_rank)]
+        elif self.pad_type.val == "valid":
+            d_out_shape = [
+                strides[r] * (D_in[r]-1) + kernel_shape[r]
+                for r in range(spatial_dim_rank)
+            ]
+        elif self.pad_type.val == "custom":
+            if self.pad is None:
+                raise ValueError("self.pad must exist if pad_type is custom")
+            pad = self.pad.val
+            d_out_shape = [
+                strides[r] * (D_in[r] - 1)
+                + kernel_shape[r]
+                - pad[2 * r]
+                - pad[2 * r + 1]
+                for r in range(spatial_dim_rank)
+            ]
+        else:
+            raise ValueError(
+                'conv_transpose: unrecognized pad_type "{}"'.format(self.pad_type.val)
+            )
+
+        retshape = [N, C_out] + d_out_shape
+        return types.tensor(self.x.dtype, tuple(retshape))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py
new file mode 100644
index 00000000..1f89facb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py
@@ -0,0 +1,638 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+import operator
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (InputSpec, Operation,
+                                            TensorInputType, precondition,
+                                            types)
+from coremltools.converters.mil.mil.operation import VALUE
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import (
+    broadcast_shapes, promoted_primitive_type)
+
+
+class elementwise_binary(Operation):
+    """
+    Elementwise Binary Op Superclass
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        y=TensorInputType(type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        typea = self.x.sym_type
+        typeb = self.y.sym_type
+        primitive_type = promoted_primitive_type(typea, typeb)
+        if primitive_type is None:
+            raise ValueError("Incompatible primitive types in broadcast operation")
+        primitive_type = self.get_dtype(primitive_type)
+
+        # broadcast
+        if not types.is_tensor(typea) and not types.is_tensor(typeb):
+            # both typea and typeb are not tensors
+            return primitive_type
+        if types.is_tensor(typea) and not types.is_tensor(typeb):
+            # a is tensor, b is not
+            return types.tensor(primitive_type, typea.get_shape())
+        if not types.is_tensor(typea) and types.is_tensor(typeb):
+            # a is not tensor, b is
+            return types.tensor(primitive_type, typeb.get_shape())
+
+        # both a, b are tensors
+        shapea = list(typea.get_shape())
+        shapeb = list(typeb.get_shape())
+        ret_shape = broadcast_shapes(shapea, shapeb)
+        return types.tensor(primitive_type, ret_shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return self._cast_check_value_inference(self.x.val, self.y.val)
+
+    def get_operator(self):
+        """
+        All subclasses have to implement this.
+        """
+        raise NotImplementedError()
+
+    def get_dtype(self, promoted_dtype):
+        """
+        Override if the output primitive type differs from the input types
+        (e.g., less, greater).
+        """
+        return promoted_dtype
+
+    def _cast_check_value_inference(self, a, b):
+        """
+        If one of the inputs is a tensor, cast the result to a tensor.
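+        For example, for ``add``, inputs ``1`` and ``np.array([2, 3])``
+        produce ``np.array([3, 4])`` rather than a Python scalar or list.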
+ """ + to_cast = any([isinstance(x, np.ndarray) for x in [a, b]]) + result = self.get_operator()(a, b) + return result if not to_cast else np.array(result) + + +class elementwise_binary_logical(elementwise_binary): + """ + Elementwise Binary Logical Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + y=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.bool,), + } + + +""" +Elementwise Binary Op Implementation(s) +""" + + +@register_op +class add(elementwise_binary): + """ + Return ``x + y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: <\*,T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: <\*,T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + <\*,T> + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.add + + +@register_op +class equal(elementwise_binary): + """ + Return the truth value of ``x == y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: <\*,T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: <\*,T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + <\*, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.equal + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class floor_div(elementwise_binary): + """ + Return ``x / y`` element-wise with + `broadcasting `_, + rounded towards negative infinity. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*, T> + * A tensor of the same type and shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.floordiv + + +@register_op +class greater(elementwise_binary): + """ + Return the truth value of ``x > y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.gt + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class greater_equal(elementwise_binary): + """ + Return the truth value of ``x >= y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. 
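+        * For example, inputs ``[1, 2]`` and ``[2, 2]`` produce ``[False, True]``.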
+ + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.ge + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class less(elementwise_binary): + """ + Return the truth value of ``x < y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.lt + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class less_equal(elementwise_binary): + """ + Return the truth value of ``x <= y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.le + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_and(elementwise_binary_logical): + """ + Return the truth value of ``x AND y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_and + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_or(elementwise_binary_logical): + """ + Return the truth value of ``x OR y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_or + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_xor(elementwise_binary_logical): + """ + Return the truth value of ``x XOR y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_xor + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class maximum(elementwise_binary): + """ + Return ``x > y ? x : y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. 
+ + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.maximum + + +@register_op +class minimum(elementwise_binary): + """ + Return ``x > y ? y : x`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.minimum + + +@register_op +class mod(elementwise_binary): + """ + Return ``x % y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.mod + + +@register_op +class mul(elementwise_binary): + """ + Return ``x * y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.mul + + +@register_op +class not_equal(elementwise_binary): + """ + Return the truth value of ``x != y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the broadcasted shape from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.ne + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class real_div(elementwise_binary): + """ + Return ``x / y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.truediv + + +@register_op +class pow(elementwise_binary): + """ + Return ``x ^ y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. 
+ + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.pow + + +@register_op +class sub(elementwise_binary): + """ + Return ``x - y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.sub diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py new file mode 100644 index 00000000..1ef87516 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py @@ -0,0 +1,898 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE, Operation, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types import nptype_from_builtin +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +def _maintain_shape(x, y): + # numpy converts rank 0 tensors to scalars + if x.ndim == 0: + # convert back to rank 0 tensor + return np.array(y) + return y + + +class elementwise_unary(Operation): + """ + Elementwise Unary Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + +class elementwise_unary_with_int(Operation): + """ + Elementwise Unary Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def type_inference(self): + return self.x.sym_type + +""" +Elementwise unary op implementation(s) +""" + +@register_op +class abs(elementwise_unary_with_int): + """ + Return the absolute values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.abs(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class acos(elementwise_unary): + """ + Return the inverse cosine values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. 
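+        * During constant folding, inputs outside ``[-1, 1]`` produce ``nan``
+          (``np.arccos`` semantics).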
+ + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arccos(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class asin(elementwise_unary): + """ + Return the inverse sine of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arcsin(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class atan(elementwise_unary): + """ + Return the inverse tangent of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arctan(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class atanh(elementwise_unary): + """ + Return the inverse hyperbolic tangent values of the input + ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arctanh(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class ceil(elementwise_unary): + """ + Return the ceil values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.ceil(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class clip(Operation): + """ + Clip the values in the input ``x`` to ``[alpha, beta]``, element-wise. + Any values less than ``alpha`` are set to ``alpha``, and any values greater + than ``beta`` are set to ``beta``. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + beta=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum(np.maximum(self.x.val, self.alpha.val), self.beta.val) + + +@register_op +class cos(elementwise_unary): + """ + Return cosine of ``x`` element-wise. Input domain is ``(-inf, inf)`` and + output range is ``[-1,1]``. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.cos(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class cosh(elementwise_unary): + """ + Return hyperbolic cosine of the input ``x``, element-wise. 
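+    That is, ``cosh(x) = (exp(x) + exp(-x)) / 2``.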
+ + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.cosh(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class erf(elementwise_unary): + """ + Return the gauss error function of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + erf_vector_function = np.vectorize(math.erf) + return erf_vector_function(self.x.val) + + +@register_op +class exp(elementwise_unary): + """ + Return e^x, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.exp(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class exp2(elementwise_unary_with_int): + """ + Return 2^x, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.exp2(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class floor(elementwise_unary): + """ + Return the floor of the input ``x``, element-wise, the same as rounding + towards negative infinity. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.floor(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class inverse(Operation): + """ + Return the reciprocal value of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + epsilon: const T (Optional, default=1e-4) + * This is a small constant that is added to the input, before taking its + inverse, for stability. + * ``y = 1 / (x + epsilon)``. + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + epsilon=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + epsilon=nptype_from_builtin(self.x.dtype)(1e-4), + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.array(np.reciprocal(self.x.val + self.epsilon.val), copy=False) + + +@register_op +class log(Operation): + """ + Return the natural logarithm value of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + epsilon: const T (Optional, default=1e-45) + * This is a small constant that is added to the input, before taking log. + * ``y = log(x + epsilon)``. + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. 
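+        * During constant folding, elements with ``x + epsilon <= 0`` produce
+          ``-inf`` or ``nan`` (``np.log`` semantics).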
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=nptype_from_builtin(self.x.dtype)(1e-45)
+        )
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.log(self.x.val + self.epsilon.val)
+
+
+@register_op
+class logical_not(Operation):
+    """
+    Return the logical NOT of the input ``x``, element-wise (``1`` for true, ``0``
+    for false in the numeric domain). A numeric value ``t`` evaluates to true
+    iff ``t != 0``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], bool> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], bool>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain=types.bool),
+    )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.logical_not(self.x.val)
+
+    def type_inference(self):
+        return self.x.sym_type
+
+
+@register_op
+class round(elementwise_unary):
+    """
+    Round the input ``x`` to the nearest integer, element-wise.
+    ``0.5`` is rounded to ``0``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.round(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class rsqrt(Operation):
+    """
+    Return the reciprocal value of the square root of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    epsilon: const T (Optional, default=1e-12)
+        * This is a small constant that is added to the input, before applying the
+          ``rsqrt`` function, for stability.
+        * ``y = 1 / sqrt(x + epsilon)``.
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=nptype_from_builtin(self.x.dtype)(1e-12),
+        )
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = 1.0 / np.sqrt(self.x.val + self.epsilon.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sign(elementwise_unary_with_int):
+    """
+    Return the sign value of the input ``x``, element-wise.
+
+    All elements in the output will be ``-1``, ``0``, or ``1``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sign(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sin(elementwise_unary):
+    """
+    Return the sine value of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
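+        * Output values lie in ``[-1, 1]``.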
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sin(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sinh(elementwise_unary):
+    """
+    Return the hyperbolic sine value of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sinh(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sqrt(elementwise_unary):
+    """
+    Return the square root value of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sqrt(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class square(elementwise_unary_with_int):
+    """
+    Return ``x^2``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.square(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class tan(elementwise_unary):
+    """
+    Return the tangent value of the input ``x``, element-wise. Both input and output
+    ranges are ``(-inf, inf)``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.tan(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class tanh(elementwise_unary):
+    """
+    Return the hyperbolic tangent value of the input ``x``, element-wise. The input
+    range is ``(-inf, inf)`` and the output range is ``(-1, 1)``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.tanh(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class threshold(Operation):
+    """
+    Set a lower bound ``alpha`` on the values in the input ``x``, element-wise.
+    Any values less than ``alpha`` are set to ``alpha``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    alpha: const T (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        alpha=TensorInputType(const=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.maximum(self.x.val, self.alpha.val)
+
+
+@register_op
+class cast(Operation):
+    """
+    Cast the input ``x`` to the new type ``dtype``.
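+    Casting a floating-point input to an integer ``dtype`` truncates toward
+    zero, following the numpy ``astype`` semantics used for value inference.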
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    dtype: const str (Required)
+        * Can be one of the following types: ``int32``, ``int64``, ``fp16``, ``fp32``,
+          ``fp64``, or ``bool``.
+        * ``int64`` and ``fp64`` are accepted for compatibility but are cast to
+          ``int32`` and ``fp32``, respectively.
+
+    Returns
+    -------
+    tensor<[\*d], dtype>
+        * A tensor of the same shape as ``x``, with type ``dtype``.
+
+    Attributes
+    ----------
+    T: i32, i64, fp16, fp32, fp64, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        dtype=TensorInputType(const=True, type_domain=types.str)
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.fp64, types.int32, types.int64, types.bool),
+    }
+
+    def type_inference(self):
+        type_map = {
+            "int32": types.int32,
+            "int64": types.int32,
+            "fp16": types.fp16,
+            "fp32": types.fp32,
+            "fp64": types.fp32,
+            "bool": types.bool,
+        }
+
+        if self.dtype.val not in type_map.keys():
+            raise NotImplementedError(
+                "Parameter dtype of the cast operation must be one of {}. "
+                "Provided {}".format(type_map.keys(), self.dtype.val)
+            )
+
+        if not types.is_tensor(self.x.sym_type):
+            return type_map[self.dtype.val]
+
+        ret_shape = self.x.shape
+        return types.tensor(type_map[self.dtype.val], ret_shape)
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        return self.get_cast_value(self.x, self.dtype.val)
+
+    @staticmethod
+    def get_cast_value(input_var, dtype_val):
+        type_map = {
+            "int32": np.int32,
+            "int64": np.int32,
+            "fp16": np.float16,
+            "fp32": np.float32,
+            "fp64": np.float32,
+            "bool": bool,
+        }
+
+        if dtype_val not in type_map.keys():
+            raise NotImplementedError(
+                "Parameter dtype of the cast operation must be one of {}. "
+                "Provided {}".format(type_map.keys(), dtype_val)
+            )
+
+        if input_var.val is None:
+            if input_var.sym_val is not None and not is_symbolic(input_var.sym_val) and len(input_var.sym_val.shape) == 1:
+                result = [np.array(val).astype(dtype=type_map[dtype_val]).item() if not is_symbolic(val) else val for val in input_var.sym_val]
+                return np.array(result)
+            return None
+
+        if not types.is_tensor(input_var.sym_type):
+            return input_var.val.astype(dtype=type_map[dtype_val])
+        else:
+            return np.array(input_var.val).astype(dtype=type_map[dtype_val])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py
new file mode 100644
index 00000000..3186ead7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py
@@ -0,0 +1,899 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (DefaultInputs, InputSpec,
+                                            Operation, TensorInputType,
+                                            get_new_symbol, types)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+
+
+@register_op
+class upsample_nearest_neighbor(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input
+    by scale factors using nearest-neighbor interpolation.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    scale_factor_height: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the height dimension (``axis=-2``).
+        * Can be either an integer or a fractional value.
+    scale_factor_width: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the width dimension (``axis=-1``).
+        * Can be either an integer or a fractional value.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = floor(``H1`` * ``scale_factor_height``).
+        * ``W2`` = floor(``W1`` * ``scale_factor_width``).
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    U: fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        scale_factor_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U"
+        ),
+        scale_factor_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U"
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+        "U": (types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            scale_factor_height=1,
+            scale_factor_width=1,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "upsample_nearest_neighbor" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = np.floor(self.scale_factor_width.val * ret_shape[-1]) if not is_symbolic(ret_shape[-1]) else get_new_symbol()
+        ret_shape[-2] = np.floor(self.scale_factor_height.val * ret_shape[-2]) if not is_symbolic(ret_shape[-2]) else get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class resize_nearest_neighbor(Operation):
+    """
+    Resize the spatial (last two) dimensions to the specified target size
+    using nearest neighbor interpolation. Although this op is similar to
+    ``upsample_nearest_neighbor``, ``resize_nearest_neighbor`` works with
+    a target size rather than with scale factors.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1], T> (Required)
+        * Must be at least rank ``3``.
+    target_size_height: const<i32> (Required)
+        * Target spatial size for the height dimension (``axis=-2``).
+    target_size_width: const<i32> (Required)
+        * Target spatial size for the width dimension (``axis=-1``).
+
+    Notes
+    -----
+    See ``resize_bilinear`` for examples.
+
+    See Also
+    --------
+    resize_bilinear
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = ``target_size_height``.
+        * ``W2`` = ``target_size_width``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        target_size_height=TensorInputType(const=True, type_domain=types.int32),
+        target_size_width=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "resize_nearest_neighbor" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = int(self.target_size_width.val)
+        ret_shape[-2] = int(self.target_size_height.val)
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class upsample_bilinear(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input
+    by scale factors using bilinear interpolation.
+    The ``upsample_bilinear`` operation in MIL corresponds to the
+    ``recompute_scale_factor=True`` mode of the PyTorch bilinear interpolation op.
+    That is, the scale factor is recomputed from the output size.
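+    That is, the output size is first computed as ``floor(input_size * scale_factor)``,
+    and the effective scale factor becomes ``output_size / input_size``.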
+    Note that when ``scale_factor_height`` and ``scale_factor_width`` are floating
+    point, this could result in a different effective scale factor due to rounding.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1], T> (Required)
+        * Must be at least rank ``3``.
+    scale_factor_height: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the height dimension (``axis=-2``).
+    scale_factor_width: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the width dimension (``axis=-1``).
+    align_corners: const<bool> (Optional, default=True)
+        * This parameter determines how samples are chosen for bilinear
+          interpolation. For details, see the Notes section.
+
+    Notes
+    -----
+    To understand the ``align_corners`` parameter, consider the 1-D case.
+    You need to sample a grid of pixels whose values are computed using linear
+    interpolation. This parameter controls how the grid is sampled. If the
+    input grid is ``[0, Xin-1]`` (corresponding to an input size of ``Xin``),
+    and if the output size is ``Xout``, then the grid points are sampled in
+    the following manner:
+
+    .. sourcecode:: python
+
+        # If align_corners == True:
+        spacing = (Xin - 1) / (Xout - 1)
+        grid_point[i] = min(Xin - 1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # If align_corners == False:
+        spacing = Xin / Xout
+        grid_point[i] = min(Xin - 1, max(0, i*spacing + 0.5*spacing - 0.5)),
+        ... for i=0,1,...,Xout-1
+
+    For example:
+
+    .. sourcecode:: python
+
+        Xin = 2
+        input_interval = [0,1]
+
+    Grid points:
+
+    .. sourcecode:: python
+
+        [0., 0.1, 0.5, 0.9, 1.] (Xout = 5, align_corners=False)
+        [0., 0.25, 0.5, 0.75, 1.] (Xout = 5, align_corners=True)
+        [0., 0., 0.33, 0.67, 1., 1.] (Xout = 6, align_corners=False)
+        [0., 0.2, 0.4, 0.6, 0.8, 1.] (Xout = 6, align_corners=True)
+
+    Note the following similarities:
+
+    * ``align_corners=False`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=False, half_pixel_centers=True)``.
+
+    * ``align_corners=True`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=True, half_pixel_centers=False)``.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = floor(``H1`` * ``scale_factor_height``).
+        * ``W2`` = floor(``W1`` * ``scale_factor_width``).
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    U: fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        scale_factor_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U",
+        ),
+        scale_factor_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U",
+        ),
+        align_corners=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.bool,
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+        "U": (types.int32, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            scale_factor_height=1,
+            scale_factor_width=1,
+            align_corners=True,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "upsample_bilinear" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = np.floor(self.scale_factor_width.val * ret_shape[-1]) if not is_symbolic(ret_shape[-1]) else get_new_symbol()
+        ret_shape[-2] = np.floor(self.scale_factor_height.val * ret_shape[-2]) if not is_symbolic(ret_shape[-2]) else get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class resize_bilinear(Operation):
+    """
+    Resize the spatial (last two) dimensions to the specified target size
+    using bilinear interpolation.
Although this op is similar to
+    ``upsample_bilinear``, ``resize_bilinear`` works with a target size
+    rather than with scale factors.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    target_size_height: const<i32> (Optional, default=1)
+        * Target spatial size for the height dimension (``axis=-2``).
+    target_size_width: const<i32> (Optional, default=1)
+        * Target spatial size for the width dimension (``axis=-1``).
+    sampling_mode: const<str> (Optional, default="DEFAULT")
+        * This parameter can take ``"STRICT_ALIGN_CORNERS"``, ``"ALIGN_CORNERS"``,
+          ``"DEFAULT"``, ``"OFFSET_CORNERS"`` or ``"UNALIGN_CORNERS"`` as values.
+          For details, see the Notes section.
+
+    Notes
+    -----
+    To understand the ``sampling_mode`` parameter, consider the 1-D case.
+    You need to sample a grid of pixels whose values are computed using
+    linear interpolation. This parameter controls how the grid is sampled.
+    If the input grid is ``[0, Xin-1]`` (corresponding to an input size of
+    ``Xin``), and if the output size is ``Xout``, then the grid points are
+    sampled in the following manner:
+
+    .. sourcecode:: python
+
+        # "STRICT_ALIGN_CORNERS":
+        spacing = (Xin - 1) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # "ALIGN_CORNERS": Same as "STRICT_ALIGN_CORNERS" unless Xout=1,
+        # in which case:
+        grid_point[0] = (Xin-1) / 2, if Xout==1
+
+        # "DEFAULT":
+        spacing = (Xin - Xin/Xout) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # "OFFSET_CORNERS":
+        delta = max(1, Xin - 1) / Xout
+        spacing = ((Xout - 1) * delta) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, 0.5*delta + i*spacing)), for
+        ... i=0,1,...,Xout-1
+
+        # "UNALIGN_CORNERS":
+        spacing = Xin / Xout
+        grid_point[i] = min(Xin - 1, max(0, i*spacing + 0.5*spacing - 0.5)), for i=0,1,...,Xout-1
+
+    For example:
+
+    .. sourcecode:: python
+
+        Xin = 2
+        input_interval = [0,1]
+
+    Grid points:
+
+    .. sourcecode:: python
+
+        [0., 0.1, 0.5, 0.9, 1.] (Xout = 5, "UNALIGN_CORNERS")
+        [0., 0.25, 0.5, 0.75, 1.] (Xout = 5, "STRICT_ALIGN_CORNERS" / "ALIGN_CORNERS")
+        [0., 0.4, 0.8, 1., 1.] (Xout = 5, "DEFAULT")
+        [0.1, 0.3, 0.5, 0.7, 0.9] (Xout = 5, "OFFSET_CORNERS")
+
+        [0., 0., 0.33, 0.67, 1., 1.] (Xout = 6, "UNALIGN_CORNERS")
+        [0., 0.2, 0.4, 0.6, 0.8, 1.] (Xout = 6, "STRICT_ALIGN_CORNERS" / "ALIGN_CORNERS")
+        [0., 0.33, 0.67, 1., 1., 1.] (Xout = 6, "DEFAULT")
+        [0.08, 0.25, 0.42, 0.58, 0.75, 0.92] (Xout = 6, "OFFSET_CORNERS")
+
+    Note the following similarities:
+
+    * ``"DEFAULT"`` is same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=False,
+      half_pixel_centers=False)``.
+    * ``"STRICT_ALIGN_CORNERS"`` is same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=True,
+      half_pixel_centers=False)``.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = ``target_size_height``.
+        * ``W2`` = ``target_size_width``.
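+
+    The following is a minimal usage sketch (assuming the standard MIL builder
+    API; shapes are illustrative):
+
+    .. sourcecode:: python
+
+        from coremltools.converters.mil import Builder as mb
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 16, 16))])
+        def prog(x):
+            # Output shape is (1, 3, 32, 32).
+            return mb.resize_bilinear(
+                x=x,
+                target_size_height=32,
+                target_size_width=32,
+                sampling_mode="ALIGN_CORNERS",
+            )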
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        target_size_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.int32
+        ),
+        target_size_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.int32
+        ),
+        sampling_mode=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.str
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            target_size_height=1,
+            target_size_width=1,
+            sampling_mode="DEFAULT",
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "resize_bilinear" op must have rank at least 3'
+            )
+
+        if self.sampling_mode.val not in {
+            "STRICT_ALIGN_CORNERS",
+            "ALIGN_CORNERS",
+            "UNALIGN_CORNERS",
+            "DEFAULT",
+            "OFFSET_CORNERS",
+        }:
+            raise ValueError(
+                '"resize_bilinear" op: unrecognized sampling mode "{}"'.format(
+                    self.sampling_mode.val
+                )
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = self.target_size_width.val
+        ret_shape[-2] = self.target_size_height.val
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class crop_resize(Operation):
+    """
+    Resize the spatial dimensions (last two dimensions) of the first input
+    according to the bounding boxes specified in the second input, using
+    bilinear interpolation.
+
+    Parameters
+    ----------
+
+    x: tensor<[B, C, H, W],T> (Required)
+        * The input, from which patches (regions of interest) are extracted
+          and resized using bilinear interpolation.
+        * Rank ``4``.
+
+    roi: tensor<[N,1,4,1,1], T> or tensor<[N,1,5,1,1], T> (Required)
+        * Regions of interest, or coordinates of the boxes. The above input
+          represents coordinates of ``N`` boxes.
+        * The convention to express coordinates depends on the value of the
+          input ``box_coordinate_mode``.
+        * Rank ``5``.
+        * If ``tensor<[N,1,4,1,1], T>``: Resized images are computed for all
+          ``B`` input images.
+        * If ``tensor<[N,1,5,1,1], T>``: The first element along ``axis=-3``
+          is the index of the input image to be resized. It must be within
+          range ``[0, B)``.
+
+    target_height: const<i32> (Optional, default=1)
+        * Target height for resizing each patch.
+
+    target_width: const<i32> (Optional, default=1)
+        * Target width for resizing each patch.
+
+    normalized_coordinates: const<bool> (Optional, default=False)
+        * If true, the bounding box coordinates must be in the
+          interval ``[0, 1]``. Scaling is based on the input spatial
+          dimensions: ``(H_in - 1)`` for height and ``(W_in - 1)`` for width.
+        * If false, the bounding box coordinates must be in the interval
+          ``[0, H_in - 1]`` for height dimensions and ``[0, W_in - 1]`` for
+          width dimensions.
+
+    spatial_scale: const<fp32> (Optional, default=1.0)
+        * Additional spatial scale that multiplies the bounding box coordinates.
+          You would use this to implement the RoI Align layer, which typically
+          uses unnormalized RoI coordinates along with a spatial scale that is
+          less than or equal to 1.
+
+    box_coordinate_mode: const<str> (Optional, default="CORNERS_HEIGHT_FIRST")
+        * Specifies the convention for specifying the four bounding box
+          coordinates for an image of size ``(Height, Width)``. The ``(0,0)``
+          coordinate corresponds to the top-left corner of the image.
+
+        * This parameter can take one of four values:
+
+          ``"CORNERS_HEIGHT_FIRST"``: ``[h_start, w_start, h_end, w_end]``
+
+          ``"CORNERS_WIDTH_FIRST"``: ``[w_start, h_start, w_end, h_end]``
+
+          ``"CENTER_SIZE_HEIGHT_FIRST"``: ``[h_center, w_center, box_height, box_width]``
+
+          ``"CENTER_SIZE_WIDTH_FIRST"``: ``[w_center, h_center, box_width, box_height]``
+
+    sampling_mode: const<str> (Optional, default="DEFAULT")
+        * This parameter can take ``"STRICT_ALIGN_CORNERS"``,
+          ``"ALIGN_CORNERS"``, ``"DEFAULT"``, ``"OFFSET_CORNERS"`` or
+          ``"UNALIGN_CORNERS"`` as values.
+        * This same convention is used by the ``resize_bilinear`` op (see
+          that op for details).
+
+    See Also
+    --------
+    resize_bilinear
+
+    Returns
+    -------
+    tensor<[N, B, C, target_height, target_width],T> or tensor<[N, 1, C, target_height, target_width],T>
+        * Tensor with same type as the input.
+        * If ``roi: tensor<[N,1,4,1,1], T>``, the output is
+          ``tensor<[N, B, C, target_height, target_width],T>``.
+          Total crops = ``N*B``; that is, ``N`` crops for each input in the batch.
+        * If ``roi: tensor<[N,1,5,1,1], T>``, the output is
+          ``tensor<[N, 1, C, target_height, target_width],T>``.
+          Total crops = ``N``; that is, 1 crop for given input image index
+          in the batch.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        roi=TensorInputType(type_domain="T"),
+        target_height=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        target_width=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        normalized_coordinates=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        spatial_scale=TensorInputType(const=True, optional=True, type_domain=types.fp32),
+        box_coordinate_mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+        sampling_mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            target_height=1,
+            target_width=1,
+            normalized_coordinates=False,
+            spatial_scale=1.,
+            box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+            sampling_mode="DEFAULT",
+        )
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "crop_resize" op must be of rank 4. Provided {}'.format(
+                    self.x.rank
+                )
+            )
+
+        if self.roi.rank != 5:
+            raise ValueError(
+                'ROI input to the "crop_resize" op must be of rank 5, provided {}'.format(
+                    self.roi.rank
+                )
+            )
+
+        if self.sampling_mode.val not in {
+            "STRICT_ALIGN_CORNERS",
+            "ALIGN_CORNERS",
+            "UNALIGN_CORNERS",
+            "DEFAULT",
+            "OFFSET_CORNERS",
+        }:
+            raise ValueError(
+                '"crop_resize" op: unrecognized sampling mode "{}"'.format(
+                    self.sampling_mode.val
+                )
+            )
+
+        # ret_shape: [N] + [B, C, h_out, w_out]
+        N, B, C = self.roi.shape[0], self.x.shape[0], self.x.shape[1]
+        ret_shape = [N, B, C, self.target_height.val, self.target_width.val]
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class crop(Operation):
+    """
+    Crop the spatial dimensions (last two dimensions) of the input by the
+    specified amounts.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    crop_height: const<2, i32> (Required)
+        * Amount to be cropped from the top and bottom of the height dimension
+          (``axis=-2``).
+    crop_width: const<2, i32> (Required)
+        * Amount to be cropped from the left and right sides of the width
+          dimension (``axis=-1``).
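+        * For example, ``crop_height=[1, 1]`` and ``crop_width=[2, 2]`` remove
+          one row from the top and bottom and two columns from each side.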
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = ``H1 - crop_height[0] - crop_height[1]``.
+        * ``W2`` = ``W1 - crop_width[0] - crop_width[1]``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        crop_height=TensorInputType(const=True, type_domain=types.int32),
+        crop_width=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "crop" op must at least be of rank 3. Provided {}'.format(
+                    self.x.rank
+                )
+            )
+
+        crop_height = self.crop_height.val
+        crop_width = self.crop_width.val
+
+        if len(crop_height.flatten()) != 2:
+            raise ValueError(
+                "crop_height must have 2 elements. Provided {}".format(
+                    len(crop_height.flatten())
+                )
+            )
+
+        if len(crop_width.flatten()) != 2:
+            raise ValueError(
+                "crop_width must have 2 elements. Provided {}".format(
+                    len(crop_width.flatten())
+                )
+            )
+
+        input_shape = list(self.x.shape)
+        ret_shape = (
+            input_shape[:-2]
+            + [input_shape[-2] - crop_height[0] - crop_height[1]]
+            + [input_shape[-1] - crop_width[0] - crop_width[1]]
+        )
+        return types.tensor(self.x.dtype, ret_shape)
+
+@register_op(opset_version=_IOS15_TARGET)
+class affine(Operation):
+    """
+    Apply an affine transform to the input 2D image tensor. The value at the
+    ``(x, y)`` (i.e., ``(w, h)``) coordinate of the output is computed by first computing
+    the coordinates ``x'`` and ``y'`` with the following equation, and then computing the
+    value at the coordinate ``(x', y')`` in the input image using either bilinear or
+    nearest neighbor interpolation. If the ``(x', y')`` point falls outside the input
+    image, then padding information is used to compute the value.
+
+    ::
+
+        x' = a0 * x + a1 * y + a2
+        y' = b0 * x + b1 * y + b2
+
+
+    Parameters
+    ----------
+    x: tensor<[B, C, H1, W1], T>
+        * Must be rank ``4``.
+    transform_matrix: tensor<[D, 6], T>
+        * Must be rank ``2``.
+        * ``D`` can be either ``B`` or 1.
+        * If ``D == B``, there is a separate transform matrix for each batch.
+        * If ``D == 1``, the same matrix is used for all input batches.
+        * For each batch: ``[a0, a1, a2, b0, b1, b2]``.
+    output_height: const<i32>
+        * Target output height.
+    output_width: const<i32>
+        * Target output width.
+    sampling_mode: const<str>
+        * Allowed values: ``"bilinear"``
+    padding_mode: const<str>
+        * Allowed values: ``"constant"``.
+        * Note that the following example is a 1-D case for brevity.
+          The op supports only 2D image input.
+        * If ``padding_mode == "constant"``:
+            * The input image is assumed to be padded with the ``padding_value``.
+            * For example, ``|1, 2, 3| -> |0, 0, 0, 1, 2, 3, 0, 0, 0|``.
+    padding_value: const<T>
+        * Currently non-zero values are not supported.
+        * To be used only when ``padding_mode == "constant"``, ignored in other cases.
+    coordinates_mode: const<str>
+        * Allowed values: ``"normalized_minus_one_to_one"``
+        * If ``coordinates_mode == "normalized_minus_one_to_one"``, the in-image
+          values are ``[-1, 1]``. That is:
+            * ``(-1, -1)``, i.e. ``(w=-1, h=-1)``, corresponds to the top-left pixel.
+            * ``(1, -1)``, i.e. ``(w=1, h=-1)``, corresponds to the top-right pixel.
+            * ``(-1, 1)``, i.e. ``(w=-1, h=1)``, corresponds to the bottom-left pixel.
+            * ``(1, 1)``, i.e. ``(w=1, h=1)``, corresponds to the bottom-right pixel.
+ align_corners: const + * Currently ``align_corners=False`` is not supported. + * To be used only when ``coordinates_mode != unnormalized``, ignored otherwise. + * if ``align_corners == True``, the extrema coordinates correspond + to the center of the first and last corner pixels. + * if ``align_corners == False``, the extrema coordinates correspond + to the edge of the first and last corner pixels. + + Returns + ------- + tensor<[B, C, output_height, output_width], T> + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + transform_matrix=TensorInputType(type_domain="T"), + output_height=TensorInputType(const=True, type_domain=types.int32), + output_width=TensorInputType(const=True, type_domain=types.int32), + sampling_mode=TensorInputType(const=True, type_domain=types.str), + padding_mode=TensorInputType(const=True, type_domain=types.str), + padding_value=TensorInputType(const=True, type_domain="T"), + coordinates_mode=TensorInputType(const=True, type_domain=types.str), + align_corners=TensorInputType(const=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + if self.x.rank != 4: + raise ValueError( + 'input "x" to the "affine" op must be a rank 4 tensor. ' + "Got rank {} tensor of shape {}".format( + self.x.rank, self.x.shape + ) + ) + if self.transform_matrix.rank != 2: + raise ValueError( + 'input "transform_matrix" to the "affine" op must be a rank 2 tensor. ' + "Got rank {} tensor of shape {}".format( + self.transform_matrix.rank, self.transform_matrix.shape + ) + ) + if self.sampling_mode.val.lower() != "bilinear": + raise NotImplementedError( + 'input "sampling_mode" to the "affine" not implemented. ' + 'Got "{}"'.format(self.sampling_mode.val) + ) + if self.coordinates_mode.val.lower() != "normalized_minus_one_to_one": + raise NotImplementedError( + 'input "coordinates_mode" to the "affine" not implemented. ' + 'Got "{}"'.format(self.coordinates_mode.val) + ) + if self.padding_mode.val.lower() != "constant" or self.padding_value.val != 0.0: + raise NotImplementedError( + 'input "padding_mode" to the "affine" not implemented. ' + 'Got "{}" with "padding_value={}"'.format( + self.padding_mode.val, self.padding_value.val + ) + ) + + input_shape = self.x.shape + transform_matrix_shape = self.transform_matrix.shape + if ( + not is_symbolic(transform_matrix_shape[-1]) + and transform_matrix_shape[-1] != 6 + ): + raise ValueError( + 'input "transform_matrix" to the "affine" op last dimension must be 6 ' + "[a0, a1, a2, b0, b1, b2], " + "Got {} for last dimension".format(transform_matrix_shape[-1]) + ) + + ret_shape = list(input_shape) + ret_shape[2] = self.output_height.val + ret_shape[3] = self.output_width.val + return types.tensor(self.x.dtype, tuple(ret_shape)) + + +@register_op(opset_version=_IOS15_TARGET) +class resample(Operation): + """ + Resample the input image tensor ``x`` at the ``coordinates``. + Resampling is required if the coordinates do not correspond to exact + pixels in the input image. The ``sampling_mode`` determines + the algorithm used for resampling and computing the values. + + Parameters + ---------- + x: tensor<[B, C, H1, W1], T> + * Must be rank ``4``. + coordinates: tensor<[B, H2, W2, 2], U> + * Must be rank ``4``. + * Coordinates are provided in the order ``(x, y)`` (i.e. ``(w, h)``). + * The value of each output location ``output[b, c, h, w]`` is calculated + by sampling from the input image ``x[b, c, :, :]``. 
+ * The pixel at the ``(x, y)`` location corresponds to the length-2 + vector: ``coordinates[b, h, w, :]``. + * Coordinate (normalized or unnormalized) should be specified according + to ``coordinates_mode``. + sampling_mode: const + * Allowed values: ``"bilinear"`` , ``"nearest"`` + padding_mode: const + * Allowed values: ``"constant"``, ``"border"``, ``"reflection"``, ``"symmetric"`` + * Note that the following example is 1D case for brevity. + The op supports only 2D image input. + * If ``padding_mode == "constant"``: + * The input image is assumed to be padded with the ``padding_value``. + * For example: ``|1, 2, 3| -> |0, 0, 0, 1, 2, 3, 0, 0, 0|`` + * if ``padding_mode == "border"``: + * The input image is assumed to be padded with the values replicated + from the values at the edge. This is also referred to as the + "clamped" or "replication" mode, since the padded values are + clamped to the border values. + * For example: ``|1, 2, 3| -> |1, 1, 1, 1, 2, 3, 3, 3, 3|`` + * If ``padding_mode == "reflection"``: + * The border values are reflected, *not* including the values at the edge/border. + * For example: ``|1, 2, 3| -> |2, 3, 2, 1, 2, 3, 2, 1, 2|`` + * If ``padding_mode == "symmetric"``: + * Values are reflected, including the border/edge values. + * For example: ``|1, 2, 3| -> |3, 2, 1 , 1, 2, 3, 3, 2, 1|`` + padding_value: const + * To be used only when ``padding_mode == "constant"``, ignored in other cases. + coordinates_mode: const + * Allowed values: ``"unnormalized"``, ``"normalized_minus_one_to_one"``, + ``"normalized_zero_to_one"`` + * If ``coordinates_mode == "unnormalized"``, the coordinates input values + are interpreted to be in range ``[0, W - 1] / [0, H - 1]``, which + corresponds to the in-image point. + * If ``coordinates_mode == "normalized_minus_one_to_one"``, + the in-image values are ``[-1, 1]``. + * If ``coordinates_mode == "normalized_zero_to_one"``, + in-image values are ``[0, 1]``. + * For example, if ``coordinates_mode == "normalized_minus_one_to_one"``, + the in range values are [-1, 1]. That is: + * ``(-1, -1)``, i.e. ``(w=-1, h=-1)``, corresponds to the top-left pixel. + * ``(1, -1)``, i.e. ``(w=1, h=-1)``, corresponds to the top-right pixel. + * ``(-1, 1)``, i.e. ``(w=-1, h=1)``, corresponds to the bottom-left pixel. + * ``(1, 1)``, i.e. ``(w=1, h=1)``, corresponds to the bottom-right pixel. + align_corners: const + * If ``align_corners == True``, the extrema coordinates correspond + to the center of the first and last corner pixels. + * If ``align_corners == False``, the extrema coordinates correspond + to the edge of the first and last corner pixels. + + Returns + ------- + tensor<[B, C, H2, W2], T> + + Attributes + ---------- + T: fp16, fp32 + U: fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + coordinates=TensorInputType(type_domain="U"), + sampling_mode=TensorInputType(const=True, type_domain=types.str), + padding_mode=TensorInputType(const=True, type_domain=types.str), + padding_value=TensorInputType(const=True, type_domain="T"), + coordinates_mode=TensorInputType(const=True, type_domain=types.str), + align_corners=TensorInputType(const=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + "U": (types.int32, types.fp32), + } + + def type_inference(self): + if self.x.rank != 4: + raise ValueError( + 'input "x" to the "resample" op must be a rank 4 tensor. 
' + "Got rank {} tensor of shape {}".format( + self.x.rank, self.x.shape + ) + ) + if self.coordinates.rank != 4: + raise ValueError( + 'input "coordinates" to the "resample" op must be a rank 4 tensor. ' + "Got rank {} tensor of shape {}".format( + self.coordinates.rank, self.coordinates.shape + ) + ) + + input_shape = self.x.shape + coord_shape = self.coordinates.shape + if ( + not is_symbolic(input_shape[0]) + and not is_symbolic(coord_shape[0]) + and input_shape[0] != coord_shape[0] + ): + raise ValueError( + 'input "x" and "coordinates" to the "resample" must agree on ' + "dimension of batch size: {} vs. {}".format( + input_shape[0], coord_shape[0] + ) + ) + if not is_symbolic(coord_shape[-1]) and coord_shape[-1] != 2: + raise ValueError( + 'input "coordinates" to the "resample" op last dimension must be 2. ' + "Got {} for last dimension".format( + coord_shape[-1] + ) + ) + + ret_shape = list(input_shape) + ret_shape[2] = coord_shape[1] # Output height + ret_shape[3] = coord_shape[2] # Output width + return types.tensor(self.x.dtype, tuple(ret_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py new file mode 100644 index 00000000..87479ab8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py @@ -0,0 +1,343 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import ( + DefaultInputs, + InputSpec, + Operation, + TensorInputType, + TupleInputType, + precondition, + types, +) +from coremltools.converters.mil.mil.operation import VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import broadcast_shapes, parse_einsum_equation +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +@register_op +class linear(Operation): + """ + Perform ``x * weight.T + bias`` where ``weight`` and ``bias`` are constant at + compile time. + + Parameters + ---------- + x: tensor<[\*D,D_in], T> (Required) + * ``1 <= rank <= 3``. + * ``0 <= rank(*D) <= 2``. + weight: const tensor<[D_out,D_in], T> (Required) + bias: const tensor<[D_out],T> (Optional) + * Default to ``0``. + + Returns + ------- + tensor<[\*D,D_out], T> + * Same rank as the input ``x``. 
+ + Attributes + ---------- + T: fp16, fp32, i32 + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + weight=TensorInputType(const=True, type_domain="T"), + bias=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + Dout = self.weight.shape[0] + return DefaultInputs( + bias=[0.]*Dout, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + weight_shape = self.weight.shape + assert len(weight_shape) == 2 + if not ( + x_shape[-1] == weight_shape[-1] + or is_symbolic(x_shape[-1]) + or is_symbolic(weight_shape[-1]) + ): + msg = "Op '{}' (linear op): Size of the last dimension of x, which is {}, " \ + "does not match the last dimension of weights, which is {}" + raise ValueError(msg.format(self.name, x_shape[-1], weight_shape[-1])) + if self.bias is not None: + assert len(self.bias.shape) == 1 + if len(self.bias.val) != weight_shape[-2]: + msg = "Op '{}' (linear op): Size of the bias, which is {}, " \ + "does not match the first dimension of weights, which is {}" + raise ValueError(msg.format(self.name, len(self.bias.val), weight_shape[-2])) + shape = list(x_shape) + shape[-1] = weight_shape[0] + return types.tensor(x_type, tuple(shape)) + + @precondition(allow=VALUE) + def value_inference(self): + res = np.matmul(self.x.val, np.transpose(self.weight.val)) + if self.bias is not None: + res += self.bias.val + return res + + +@register_op +class matmul(Operation): + """ + Perform N-D batch matrix multiplication with NumPy-style broadcasting + based on the following rules: + + Rule 1. If both ``x, y`` are 1-D, return the scalar from the dot product. + + Rule 2. If both ``x, y`` are 2-D or higher, perform a broadcast on the batch dimensions + (all dimensions except the last ``2``). + + For example: + + * ``x.shape == (10, 4, 3)`` + * ``y.shape == (5, 10, 3, 2)`` + * ``matmul(x, y).shape == (5, 10, 4, 2)`` + + Conventional matrix multiplication is a special case where both ``x, y`` are + exactly 2-D. For example: + + * ``x.shape == (4, 3)`` + * ``y.shape == (3, 2)`` + * ``matmul(x, y).shape == (4, 2)`` + + If ``x`` is 1-D, and ``y`` is N-D where ``N >= 2``, ``x`` is first promoted to + matrix ``xm`` by prepending a ``1`` to its dimension, and the resulting ``xm`` is + broadcast to ``y`` following Rule 2 above. After this, remove the inserted dimension. + For example: + + * ``x.shape == (4)`` + * ``y.shape == (10, 4, 3)`` + * ``xm.shape == (1, 4)`` + * ``matmul(xm, y).shape == (10, 1, 3)`` + * Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``. + * Note: ``xm`` and ``matmul(xm, y)`` are for illustration only. + + If ``x`` is N-D where ``N >= 2``, and ``y`` is 1-D, ``y`` is first promoted to + matrix ``ym`` by appending a ``1`` to its dimension, and the resulting ``ym`` is + broadcast to ``x`` following Rule 2 above. After this, remove the inserted dimension. + For example: + + * ``x.shape == (10, 3, 4)`` + * ``y.shape == (4,)`` + * ``ym.shape == (4, 1)`` + * ``matmul(x, ym).shape == (10, 3, 1)`` + * Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``. + * Note: ``xm`` and ``matmul(xm, y)`` are for illustration only. + + Parameters + ---------- + x: tensor<[\*,K1], T> (Required) + * ``x`` must be 1-D or higher. + y: tensor<[\*,K2], T> (Required) + * ``y`` must be 1-D or higher. + transpose_x: const bool (Optional) + * Default to ``False``. 
+        * Use ``True`` to transpose the last two dimensions of ``x`` before multiplication.
+          It has no effect when ``x`` is 1-D.
+    transpose_y: const bool (Optional)
+        * Default to ``False``.
+        * Use ``True`` to transpose the last two dimensions of ``y`` before multiplication.
+          It has no effect when ``y`` is 1-D.
+
+    Returns
+    -------
+    tensor<\*, T>
+        * Scalar or tensor output.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        y=TensorInputType(type_domain="T"),
+        transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            transpose_x=False,
+            transpose_y=False,
+        )
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = list(self.x.shape)
+        y_shape = list(self.y.shape)
+        x_rank = len(x_shape)
+
+        if x_rank == 1 and self.transpose_x.val:
+            msg = "Op {} (matmul): x is rank 1, but transpose_x is True, which is not allowed."
+            raise ValueError(msg.format(self.name))
+
+        if self.transpose_x.val:
+            x_shape = list(x_shape)
+            x_shape[-1], x_shape[-2] = x_shape[-2], x_shape[-1]
+            x_shape = tuple(x_shape)
+        if self.transpose_y.val:
+            y_shape = list(y_shape)
+            y_shape[-1], y_shape[-2] = y_shape[-2], y_shape[-1]
+            y_shape = tuple(y_shape)
+        if not (
+            x_shape[-1] == y_shape[-2]
+            or is_symbolic(x_shape[-1])
+            or is_symbolic(y_shape[-2])
+        ):
+            msg = "Op {} (matmul): x {}, y {} are not broadcastable"
+            raise ValueError(msg.format(self.name, self.x.shape, self.y.shape))
+
+        if x_rank == 1:
+            # promote shape of x to rank 2
+            x_shape = list((1,) + tuple(x_shape))
+        ret_shape = list(broadcast_shapes(x_shape[:-2], y_shape[:-2]))
+        ret_shape += [x_shape[-2], y_shape[-1]]
+        if x_rank == 1:
+            # remove the first dimension of the returned shape
+            return types.tensor(x_type, tuple(ret_shape[1:]))
+        else:
+            return types.tensor(x_type, tuple(ret_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        x = self.x.val
+        if self.transpose_x.val:
+            # Swap only the last two axes, per the documented semantics.
+            # (np.transpose with no axes argument would reverse *all* axes
+            # for rank > 2, which is not what transpose_x means here.)
+            x = np.swapaxes(x, -1, -2)
+        y = self.y.val
+        if self.transpose_y.val:
+            y = np.swapaxes(y, -1, -2)
+        return np.matmul(x, y)
+
+
+@register_op
+class einsum(Operation):
+    """
+    Perform tensor multiplication expressed according to the einsum notation.
+    The mode/equation that is currently supported is multiplying matrices that are laid out on
+    dimensions -1 and -3, treating all the other dimensions as batch. Broadcasting is supported along batch dimensions.
+    In particular, the inputs must be of the following shapes:
+
+    * Rank 4 input case:
+        * Input 1: ``[B, C, H, W1]``.
+        * Input 2: ``[B, W1, H, W2]``.
+        * Output: ``[B, C, H, W2]``.
+        * If, for one of the inputs, the dimensions ``"B"`` or ``"H"`` is 1, they are broadcast to match the other input.
+
+    * Rank 3 input case:
+        * Input 1: ``[C, H, W1]``.
+        * Input 2: ``[W1, H, W2]``.
+        * Output: ``[C, H, W2]``.
+        * If, for one of the inputs, the dimension ``"H"`` is 1, it is broadcast to match the other input.
+
+    Parameters
+    ----------
+    values : Tuple(tensor_1, tensor_2)
+        * Where:
+        * ``tensor_1``: ``tensor<[*D, C, H, W1], T>``.
+            * Must be of rank 3 or 4.
+        * ``tensor_2``: ``tensor<[*D, W1, H, W2], T>``.
+            * Must be of rank 3 or 4.
+    equation: const
+        * Supported equations are:
+        * ``"nchw,nwhu->nchu"`` and its equivalent equation strings.
+        * ``"chw,whr->chr"`` and its equivalent equation strings.
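+        * For illustration, ``"nchw,nwhu->nchu"`` with inputs of (hypothetical)
+          shapes ``(2, 3, 4, 5)`` and ``(2, 5, 4, 6)`` produces an output of
+          shape ``(2, 3, 4, 6)``, matching ``np.einsum`` on the same equation.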
+ + Returns + ------- + tensor<[\*D, C, H, W2], T> + * Same ranks as the inputs. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + values=TupleInputType(), + equation=TensorInputType(const=True, type_domain=types.str) + ) + + def type_inference(self): + if len(self.values) != 2: + raise ValueError("einsum op must get \'values\' of length 2") + x = self.values[0] + y = self.values[1] + + # validate the input shapes + x_type = x.dtype + assert x_type == y.dtype, "input types do not match" + x_shape = x.shape + y_shape = y.shape + assert len(x_shape) == len(y_shape), "inputs not of the same rank" + assert x_shape[-1] == y_shape[-3], "input shapes incompatible" + if x_shape[-2] != 1 and y_shape[-2] != 1: + assert x_shape[-2] == y_shape[-2], "input shapes incompatible" + if len(x_shape) == 4: + if x_shape[-4] != 1 and y_shape[-4] != 1: + assert x_shape[-4] == y_shape[-4], "input shapes incompatible" + + # validate the equation + input1_vec, input2_vec, output_vec = parse_einsum_equation(self.equation.val) + + assert \ + (input1_vec == [0, 1, 2, 3] and input2_vec == [0, 3, 2, 4] and output_vec == [0, 1, 2, 4]) or \ + (input1_vec == [0, 1, 2] and input2_vec == [2, 1, 3] and output_vec == [0, 1, 3]), \ + "unsupported einsum equation {}".format(self.equation.val) + + # calculate the output shape + def _get_dim_value(shape1, shape2, dim): + if is_symbolic(shape1[dim]) and is_symbolic(shape2[dim]): + return shape1[dim] + elif is_symbolic(shape1[dim]): + return shape1[dim] + elif is_symbolic(shape2[dim]): + return shape2[dim] + else: + return max(shape1[dim], shape2[dim]) + + out_shape = [1 for i in range(len(x_shape))] + out_shape[-1] = y_shape[-1] + out_shape[-3] = x_shape[-3] + out_shape[-2] = _get_dim_value(x_shape, y_shape, -2) + if len(x_shape) == 4: + out_shape[-4] = _get_dim_value(x_shape, y_shape, -4) + return types.tensor(x_type, tuple(out_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + x = self.values[0] + y = self.values[1] + x_shape = x.val.shape + y_shape = y.val.shape + # broadcast dimensions -2 and -4, if required + if len(x_shape) == 4: + x_shape = (max(x_shape[0], y_shape[0]), x_shape[1], max(x_shape[2], y_shape[2]), x_shape[3]) + y_shape = (max(x_shape[0], y_shape[0]), y_shape[1], max(x_shape[2], y_shape[2]), y_shape[3]) + elif len(x_shape) == 3: + x_shape = (x_shape[0], max(x_shape[1], y_shape[1]), x_shape[2]) + y_shape = (y_shape[0], max(x_shape[1], y_shape[1]), y_shape[2]) + else: + raise ValueError("ranks of the input must be 3 or 4") + res = np.einsum(self.equation.val, + np.broadcast_to(x.val, x_shape), + np.broadcast_to(y.val, y_shape)) + return res diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py new file mode 100644 index 00000000..8f21d5f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py @@ -0,0 +1,381 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as np + +from coremltools.converters.mil.mil import (DefaultInputs, InputSpec, + Operation, TensorInputType, + precondition, types) +from coremltools.converters.mil.mil.operation import VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +@register_op +class batch_norm(Operation): + """ + Normalize input tensor ``x`` by ``mean`` and ``variance``, and optionally apply a + scale ``gamma`` and an offset ``beta``: + + .. math:: + y_i = \\gamma_i \\dfrac{ (x_i - mean_i)}{\\sqrt{variance_i + epsilon}} + beta_i \\;,\\;i=1,....,C + + The ``mean``, ``variance``, ``gamma``, and ``beta`` + must be 1-D tensors whose lengths are equal to the second axis (the "depth" + or "channel" dimension) of ``x``. + + Parameters + ---------- + x: tensor<[n,C,*D], T> (Required) + * ``3 <= rank <= 5``. + * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 3``. + * ``n`` is the batch dimension. + mean: const tensor<[C], T> (Required) + variance: const tensor<[C], T> (Required) + gamma: const tensor<[C], T> (Optional) + * Optional scale applied to normalized tensor. + * Default is all ones. + beta: const tensor<[C], T> (Optional) + * Optional offset applied to normalized tensor. + * Default is all zeros. + epsilon: const T (Optional) + * Default is ``1e-5``. + + Returns + ------- + tensor<[n,C,*D], T> + * Output tensor has the same shape and type as the input ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + mean=TensorInputType(const=True, type_domain="T"), + variance=TensorInputType(const=True, type_domain="T"), + gamma=TensorInputType(const=True, optional=True, type_domain="T"), + beta=TensorInputType(const=True, optional=True, type_domain="T"), + epsilon=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + gamma=None, + beta=None, + epsilon=1e-5, + ) + + def type_inference(self): + x_shape = self.x.shape + return types.tensor(self.x.dtype, tuple(x_shape)) + + +@register_op +class instance_norm(Operation): + """ + Apply instance normalization to the n-dimensional input tensor. + + Parameters + ---------- + x: tensor<[n,C,*D], T> (Required) + * ``3 <= rank(x) <= 4``. + * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 2``. + * ``n`` is the batch dimension. + gamma: const tensor<[C], T> (Optional) + * Optional scale applied to normalized tensor. + * Default to all ones. + beta: const tensor<[C], T> (Optional) + * Optional offset applied to normalized tensor. + * Default to all zeros. + epsilon: const f32 (Optional) + * Default to ``1e-5``. + + Returns + ------- + tensor<[n,C,*D], T> + * Output tensor has the same shape and type as the input ``x``. 
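+        * For illustration (assumed semantics, following the standard
+          instance-norm definition): each slice ``x[n, c, :, :]`` is normalized
+          with its own mean and variance, then scaled by ``gamma[c]`` and
+          shifted by ``beta[c]``.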
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        gamma=TensorInputType(const=True, optional=True, type_domain="T"),
+        beta=TensorInputType(const=True, optional=True, type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            gamma=None,
+            beta=None,
+            epsilon=1e-5,
+        )
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+
+@register_op
+class l2_norm(Operation):
+    """
+    Apply L2 normalization to the n-dimensional input tensor. That is, divide the input
+    tensor by the square root of the sum of squares of all elements of the input.
+
+    .. math::
+       x_i \\leftarrow \\dfrac{x_i}{\\sqrt{\\sum{x_i^2} + \\epsilon}}
+
+
+    Parameters
+    ----------
+    x: tensor<[\*B, \*D], T> (Required)
+        * Input tensor, ``rank(x) >= 3``.
+        * ``*B`` refers to the leading dimensions.
+        * ``*D`` refers to the spatial dimensions to be normalized. Must be rank 3: ``rank(*D) == 3``.
+        * When ``rank(x) == 3``, in which ``rank(*B) == 0 and rank(*D) == 3``, the input is divided by
+          the square root of the sum of squares of all elements.
+        * For ranks greater than 3, in which ``rank(*B) >= 1 and rank(*D) == 3``,
+          the leading dimensions \*B, starting from ``0`` to ``-4`` (inclusive),
+          are all treated as batch. The L2 normalization is done batch-wise.
+    epsilon: const T (Optional)
+        * Small constant to avoid division by ``0``.
+        * Optional, defaults to ``1e-6``.
+
+    Returns
+    -------
+    tensor<[\*B, \*D], T>
+        * Same type and shape as the input tensor ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=1e-6,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            msg = "Input rank of l2_norm must be at least 3. Got {}".format(self.x.rank)
+            raise ValueError(msg)
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        val = self.x.val
+        eps = self.epsilon.val
+        shape = self.x.shape
+        rank = self.x.rank
+        batch_dims = rank - 3
+        if batch_dims == 0:
+            square_sum = np.sum(val**2)
+            output = val/np.power(square_sum + eps, 0.5)
+        else:
+            batch_dim_prod = np.prod(shape[:batch_dims])
+            reshape_val = np.reshape(val, (batch_dim_prod, -1))
+            square_sum = np.sum(reshape_val * reshape_val, axis=1, keepdims=True) + eps
+            output = reshape_val/np.power(square_sum, 0.5)
+            output = np.reshape(output, shape)
+        return output
+
+@register_op
+class layer_norm(Operation):
+    """
+    Apply layer normalization to the n-dimensional input tensor:
+
+    .. math::
+       out = gamma * (input - E[x]) / sqrt(Var[x] + epsilon) + beta
+
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+
+    axes: const<[K], i32> (Optional)
+        * Dimensions to perform layer normalization.
+        * Default is ``None`` (all dimensions).
+
+    gamma: const tensor<\*?, T> (Optional)
+        * If provided, the shape must be ``x.shape[axes]``. For instance, if
+          input ``x`` with shape ``(3,4,5,6)`` and ``axes = [2,3]``, gamma must have
+          shape ``(5,6)``.
+        * Default is all ones.
+
+    beta: const tensor<\*?, T> (Optional)
+        * Same shape as gamma.
+        * Default is all zeros.
+
+    epsilon: const T (Optional)
+        * Small constant to avoid division by ``0``.
+        * Default is ``1e-5``.
+
+
+    Returns
+    -------
+    tensor<\*?, T>:
+        * Tensor with same shape and type as the input tensor ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        gamma=TensorInputType(const=True, optional=True, type_domain="T"),
+        beta=TensorInputType(const=True, optional=True, type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axes=range(self.x.rank),
+            gamma=None,
+            beta=None,
+            epsilon=1e-5,
+        )
+
+    @staticmethod
+    def _is_compatible_shape(shapea, shapeb):
+        if not len(shapea) == len(shapeb):
+            return False
+        for a, b in zip(shapea, shapeb):
+            if any_symbolic([a, b]):
+                continue
+            if a != b:
+                return False
+        return True
+
+    def type_inference(self):
+        rank = self.x.rank
+
+        # check valid axes
+        positive_axes = [axis + rank if axis < 0 else axis for axis in self.axes.val]
+        if not all([axis >= 0 and axis < rank for axis in positive_axes]):
+            raise ValueError("axes must be in the range of [-x.rank, x.rank-1].")
+
+        # check shape of gamma and beta
+        normalized_shape = [self.x.shape[i] for i in range(rank) if i in positive_axes]
+        if self.gamma is not None and not layer_norm._is_compatible_shape(list(self.gamma.shape), normalized_shape):
+            raise ValueError("Expect shape {} for gamma, but get shape {} instead".format(normalized_shape, self.gamma.shape))
+
+        if self.beta is not None and not layer_norm._is_compatible_shape(list(self.beta.shape), normalized_shape):
+            raise ValueError("Expect shape {} for beta, but get shape {} instead".format(normalized_shape, self.beta.shape))
+
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        def np_layer_norm(x, axes, gamma, beta, epsilon=1e-5):
+            rank = len(x.shape)
+            axes = [axis + rank if axis < 0 else axis for axis in axes]
+            normalized_shape = [x.shape[i] if i in axes else 1 for i in range(rank)]
+            gamma = np.ones(shape=normalized_shape) if gamma is None else np.reshape(gamma, normalized_shape)
+            beta = np.zeros(shape=normalized_shape) if beta is None else np.reshape(beta, normalized_shape)
+            num = x - np.mean(x, axis=tuple(axes), keepdims=True)
+            dem = np.sqrt(
+                np.sum(np.square(num), axis=tuple(axes), keepdims=True)
+                / np.prod(normalized_shape)
+                + epsilon
+            )
+            return num / dem * gamma + beta
+
+        # default to all axes (axis indices, not the shape values)
+        _axes = range(self.x.rank) if self.axes is None else self.axes.val
+        _gamma = None if self.gamma is None else self.gamma.val
+        _beta = None if self.beta is None else self.beta.val
+        return np_layer_norm(self.x.val, _axes, _gamma, _beta, self.epsilon.val)
+
+
+@register_op
+class local_response_norm(Operation):
+    """
+    Apply local response normalization to the n-dimensional input tensor:
+
+    .. math::
+       x_i \\leftarrow \\dfrac{x_i}{\\left ( k + \\dfrac{\\alpha}{\\text{size}} \\sum_j x_j^2 \\right )^\\beta}
+
+
+    Parameters
+    ----------
+    x: tensor<[n,C,*D], T> (Required)
+        * Input tensor, ``3 <= rank(x) <= 4``.
+        * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 2``.
+        * ``n`` is the batch dimension.
+    size: const i32 (Required)
+        * Amount of neighboring channels to normalize.
+    alpha: const T (Optional)
+        * Scale factor.
+        * Default is ``1e-4``.
+ beta: const T (Optional) + * An exponent. + * Default is ``0.75``. + k: const T (Optional) + * Additive factor. + * Default is ``1.0``. + + Returns + ------- + tensor<[n,C,*D], T> + * Same type and shape as the input tensor ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + size=TensorInputType(const=True, type_domain=types.int32), + alpha=TensorInputType(const=True, optional=True, type_domain="T"), + beta=TensorInputType(const=True, optional=True, type_domain="T"), + k=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + alpha=1e-4, + beta=0.75, + k=1., + ) + + def type_inference(self): + x_shape = self.x.shape + return types.tensor(self.x.dtype, tuple(x_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py new file mode 100644 index 00000000..b1d25fb2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py @@ -0,0 +1,263 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.block import curr_opset_version +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + spatial_dimensions_out_shape +from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET + + +class Pooling(Operation): + """ + Pooling Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + kernel_sizes=TensorInputType(const=True, type_domain=types.int32), + strides=TensorInputType(const=True, optional=True, type_domain=types.int32), + pad_type=TensorInputType(const=True, type_domain=types.str), + pad=TensorInputType(const=True, optional=True, type_domain=types.int32), + ceil_mode=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + num_spatial_dims = self.x.rank - 2 + return DefaultInputs( + strides=[1] * num_spatial_dims, + pad=[0] * 2 * num_spatial_dims, + ceil_mode=False, + ) + + def type_inference(self): + ksize = self.kernel_sizes.val + x_shape = self.x.shape + D_in_rank = len(x_shape) - 2 + + strides = [1] * D_in_rank if self.strides is None else self.strides.val + pad_type = "valid" if self.pad_type is None else self.pad_type.val.lower() + if pad_type not in ["valid", "same", "custom", "same_lower"]: + raise ValueError("Unrecognized value of pad_type : {}".format(pad_type)) + pad = None if self.pad is None else self.pad.val + D_in = x_shape[2:] # spatial dimensions + + if self.ceil_mode.val: + if D_in_rank > 2: + raise ValueError('pool: ceil_mode only supported for 1D or 2D pool') + if pad_type == "same" and self.ceil_mode.val: + raise ValueError("ceil_mode must be False when pad_type==same") + if pad is not None: + for i in range(D_in_rank): + if pad[2 * i] != pad[2 * i + 1]: + raise ValueError("Padding must be 
symmetric if ceil_mode is True") + + # The same_lower padding is not supported in iOS15 + if curr_opset_version() == _IOS15_TARGET and self.pad_type.val == "same_lower": + msg = "iOS15 version of pooling layers do not support pad_type = `same_lower`" + raise ValueError(msg) + + D_out_shape = spatial_dimensions_out_shape( + pad_type=pad_type, + input_shape=D_in, + kernel_shape=ksize, + strides=strides, + custom_pad=pad, + ceil_mode=self.ceil_mode.val, + ) + ret_shape = list(x_shape[:2]) + D_out_shape + return types.tensor(self.x.dtype, tuple(ret_shape)) + + +@register_op +class avg_pool(Pooling): + """ + Perform average pooling. Supports 1-D, 2-D, and 3-D pool (1, 2, or 3 spatial dimensions). + + Parameters + ---------- + x: tensor<[n,C_in,\*D_in], T> (Required) + * ``3 <= rank <= 5``. + * ``D_in`` are spatial dimensions, ``1 <= len(D_in) <= 3``. + * ``C_in`` is the number of input channels or depth dimensions. + * ``n`` is the batch dimension. + + kernel_sizes: const tensor<[K], T> (Required) + * The size of the window for each spatial dimension ``D_in`` of the + input tensor. + * ``K == len(D_in)`` + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * Stride along each of the spatial dimensions. + * ``S == len(D_in)``. + + pad_type: const str (Required) + Must be one of ``valid``, ``same``, ``custom`` or ``same_lower``. + + * ``valid``: No padding. This is equivalent to custom pad with ``pad[i] = 0, for + all i``. + * ``same`` : This is equivalent to custom pad with ``pad[2*i] + pad[2*i+1] = kernel_size[i]``. + * ``custom``: Specify custom padding in the parameter pad. note that ``same`` + padding is equivalent to custom padding with + ``pad[2*i] + pad[2*i+1] = kernel_size[i]``. + * ``same_lower``: Similar to ``same`` but the padding + will place extra rows/cols on the top/left if the padding amount is odd. + + pad: const<[P],i32> (Optional. Default to all 0s) + * ``pad`` represents the number of elements to pad before and after each + dimension: ``pad[2*i], pad[2*i+1]`` are the pad size before and after spatial + dimension ``i``. + * ``P = 2 * len(D_in)``. + * ``pad`` should be specified if and only if ``pad_type == custom`` + + exclude_padding_from_average: const tensor<[], bool> (Optional, default to False) + * If ``True``, padded values (0s) are excluded from the denominator count + when computing the average over the kernel window. + + ceil_mode: const + * Same as PyTorch's ``ceil`` mode. + * ``ceil`` is used instead of floor in calculating the output size. + * Optional, defaults to ``False``. + * Only applicable when ``pad_type`` is ``valid`` or ``custom``. + * When ``ceil_mode`` is True, padding must be symmetric; that is, if specified, + ``pad[2*i] == pad[2*i+1]`` must hold. + + Returns + ------- + tensor<[n, C_out,\*D_out], T> + * Same rank as ``x``. + * When ``ceil_mode = False``: + * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) / + strides[i]] +1, for i = 0, .., len(D_in) - 1`` is mathematically the same + as (when all parameters involved are integers): + + * ``D_out[i] = ceil [(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_size[i] - 1) / stride[i]], for i = 0, .., len(D_in) - 1``. + * ``*D_out`` is all ones if ``global_pooling`` is ``true``. + + * When ``ceil_mode = True``: + * ``D_out[i] = ceil[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) / strides[i]] +1, for i = 0, .., len(D_in) - 1`` + + * If ``(D_out[i] - 1) * strides[i] >= D_in[i] + pad[2*i] and (pad[2*i] + pad[2*i+1] > 0)`` + then ``D_out[i] = D_out[i] - 1``. 
+ + * The first equation is same as: + + * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i] + strides[i] - 1) / strides[i]] +1, for i = 0, .., len(D_in) - 1`` + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + l2_pool, max_pool + """ + + input_spec = ( + InputSpec( + exclude_padding_from_average=TensorInputType( + const=True, optional=True, type_domain=types.bool + ) + ) + + Pooling.input_spec + ) + + def default_inputs(self): + return super().default_inputs() + DefaultInputs( + exclude_padding_from_average=False, + ) + + +@register_op +class l2_pool(Pooling): + """ + Perform L2 pooling. Supports 1-D and 2-D pool. + + Parameters + ---------- + x: tensor<[n,C_in,*D_in], T> (Required) + * Only support 1d and 2d pooling. + * See ``avg_pool``. + + kernel_sizes: const tensor<[K], T> (Required) + * See ``avg_pool``. + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * See ``avg_pool``. + + pad_type: const str (Required) + * See ``avg_pool``. + + pad: const<[P],i32> (Optional, default to all 0s) + * See ``avg_pool``. + + Returns + ------- + tensor<[n, C_out,*D_out], T> + * See ``avg_pool``. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + avg_pool, max_pool + """ + + def type_inference(self): + if self.x.rank - 2 > 2: + msg = "l2_pool only supports rank 1 or 2. Got rank: {}".format(self.x.rank - 2) + raise ValueError(msg) + return super().type_inference() + + +@register_op +class max_pool(Pooling): + """ + Perform max pooling. Supports 1-D, 2-D, and 3-D pool. + + Parameters + ---------- + x: tensor<[n,C_in,*D_in], T> (Required) + * See ``avg_pool``. + + kernel_sizes: const tensor<[K], T> (Required) + * See ``avg_pool``. + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * See ``avg_pool``. + + pad_type: const str (Required) + * See ``avg_pool``. + + pad: const<[P],i32> (Optional, default to all 0s) + * See ``avg_pool``. + + ceil_mode: const + * see ``avg_pool``. + + Returns + ------- + tensor<[n, C_out,*D_out], T> + * See ``avg_pool``. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + avg_pool, l2_pool + """ + + pass diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py new file mode 100644 index 00000000..f6663cf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py @@ -0,0 +1,294 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import (get_new_symbol, + get_new_variadic_symbol, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +class RandomDistribution(Operation): + """ + Random Op Superclass + """ + input_spec = InputSpec( + shape=TensorInputType(type_domain=types.int32), + ) + out_dtype = types.fp32 + + def type_inference(self): + if any_symbolic(self.shape.shape): + # We can't infer any shape if shape has variable length. 
+            return types.tensor(self.out_dtype, (get_new_variadic_symbol(),))
+
+        # shape has fixed length here.
+        if self.shape.sym_val is None:
+            shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
+            return types.tensor(self.out_dtype, shape)
+
+        return types.tensor(self.out_dtype, tuple(self.shape.sym_val.tolist()))
+
+
+"""
+Random Op Implementation(s)
+"""
+
+
+@register_op
+class random_bernoulli(RandomDistribution):
+    r"""
+    Returns a tensor with the specified shape, with random values from a Bernoulli
+    distribution.
+
+    .. math::
+       f(k) = \begin{cases}1-p  &\text{if } k = 0\\
+              p    &\text{if } k = 1\end{cases}
+
+    for :math:`k` in :math:`\{0, 1\}`.
+
+    Parameters
+    ----------
+    shape: <K, i32> (Required)
+        * Target output tensor shape.
+        * ``K`` is the rank of the output tensor.
+          ``shape[k] > 0`` for ``k = 0,..., K-1``.
+    prob: const (Optional)
+        * The probability of sampling ``1``. Defaults to ``0.5``.
+    seed: const (Optional)
+        * Seed to create a reproducible sequence of values across multiple invokes.
+
+    Returns
+    -------
+    <\*, T>
+        * A tensor of the given target output shape filled with random values.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    random_categorical, random_normal, random_uniform
+    """
+
+    input_spec = (
+        InputSpec(
+            shape=TensorInputType(type_domain=types.int32),
+            prob=TensorInputType(const=True, optional=True, type_domain="T"),
+            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        )
+        + RandomDistribution.input_spec
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                seed=-1,
+                prob=0.5,
+            )
+
+    def type_inference(self):
+        self.out_dtype = self.prob.dtype
+        return super().type_inference()
+
+
+@register_op
+class random_categorical(Operation):
+    """
+    Returns random values from a categorical distribution.
+
+    Parameters
+    ----------
+    x: <\*D_in, T>
+        * N-dimensional tensor, one of ``logits`` (event log-probabilities) or ``probs``
+          (event probabilities). The first ``N - 1`` dimensions specify distributions,
+          and the last dimension represents a vector of probabilities.
+
+    mode: const (Optional)
+        One of ``['logits', 'probs']``. Defaults to ``logits``.
+
+    size: const (Optional)
+        Number of samples to draw. Defaults to ``1``.
+
+    seed: const (Optional)
+        Seed to create a reproducible sequence of values across multiple invokes.
+
+    Returns
+    -------
+    <\*D_in[:-1] + [size], T>
+        * A tensor of the given target output shape filled with random values.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    random_bernoulli, random_normal, random_uniform
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+        size=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="logits",
+            size=1,
+            seed=-1,
+        )
+
+    def type_inference(self):
+        self.out_dtype = self.x.dtype
+        output_shape = self.x.shape[:-1] + (self.size.val,)
+        return types.tensor(self.out_dtype, output_shape)
+
+
+@register_op
+class random_normal(RandomDistribution):
+    r"""
+    Returns a tensor with the specified shape, with random values from a normal
+    distribution.
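+
+    .. math::
+       p(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{(x - \mu)^{2}}{2\sigma^{2}}}
+
+    for a real number :math:`x`, where :math:`\mu` is ``mean`` and
+    :math:`\sigma` is ``stddev``.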
+
+    Parameters
+    ----------
+    shape: <K, i32> (Required)
+        * Target output tensor shape.
+        * ``K`` is the rank of the output tensor.
+          ``shape[k] > 0`` for ``k = 0,..., K-1``.
+    mean: const (Optional)
+        The mean (center) of the normal distribution. Defaults to ``0.0``.
+    stddev: const (Optional)
+        The standard deviation (width) of the normal distribution. Defaults to ``1.0``.
+    seed: const (Optional)
+        Seed to create a reproducible sequence of values across multiple invokes.
+
+    Returns
+    -------
+    <\*, T>
+        * A tensor of the given target output shape filled with random values.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    random_categorical, random_bernoulli, random_uniform
+    """
+
+    input_spec = (
+        InputSpec(
+            shape=TensorInputType(type_domain=types.int32),
+            mean=TensorInputType(const=True, optional=True, type_domain="T"),
+            stddev=TensorInputType(const=True, optional=True, type_domain="T"),
+            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        )
+        + RandomDistribution.input_spec
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                mean=0.,
+                stddev=1.,
+                seed=-1,
+            )
+
+    def type_inference(self):
+        if self.mean.dtype != self.stddev.dtype:
+            raise ValueError("Incompatible primitive types in random_normal operation")
+        self.out_dtype = self.mean.dtype
+        return super().type_inference()
+
+
+@register_op
+class random_uniform(RandomDistribution):
+    r"""
+    Returns a tensor with the specified shape with random values from a uniform
+    distribution. Samples are uniformly distributed over the half-open interval
+    ``[low, high)`` (includes low, but excludes high).
+
+    .. math::
+       p(x) = \frac{1}{high - low}
+
+    For a real number :math:`x`.
+
+    When ``high == low``, values of ``low`` will be returned. If ``high < low``,
+    the results are officially undefined and may eventually raise an error.
+
+    Parameters
+    ----------
+    shape: <K, i32> (Required)
+        * Target output tensor shape.
+        * ``K`` is the rank of the output tensor.
+          ``shape[k] > 0`` for ``k = 0,..., K-1``.
+    low: const (Optional)
+        * Lower boundary of the output interval (inclusive). Defaults to ``0.0``.
+    high: const (Optional)
+        * Upper boundary of the output interval (exclusive). Defaults to ``1.0``.
+    seed: const (Optional)
+        * Seed to create a reproducible sequence of values across multiple invokes.
+
+    Returns
+    -------
+    <\*, T>
+        * A tensor of the given target output shape filled with random values.
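+        * For an illustrative usage sketch (assuming the standard MIL builder
+          API; the values here are for illustration only):
+
+          ::
+
+              from coremltools.converters.mil import Builder as mb
+
+              @mb.program(input_specs=[])
+              def prog():
+                  # draws a (2, 3) fp32 tensor from [0, 1)
+                  return mb.random_uniform(shape=[2, 3], low=0., high=1., seed=7)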
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    random_categorical, random_bernoulli, random_normal
+    """
+
+    input_spec = (
+        InputSpec(
+            shape=TensorInputType(type_domain=types.int32),
+            low=TensorInputType(const=True, optional=True, type_domain="T"),
+            high=TensorInputType(const=True, optional=True, type_domain="T"),
+            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        )
+        + RandomDistribution.input_spec
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                low=0.,
+                high=1.,
+                seed=-1,
+            )
+
+    def type_inference(self):
+        if self.low.dtype != self.high.dtype:
+            raise ValueError("Incompatible primitive types in random_uniform operation")
+        self.out_dtype = self.low.dtype
+        return super().type_inference()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py
new file mode 100644
index 00000000..b6d5ee4e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py
@@ -0,0 +1,519 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    TensorInputType
+)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+
+@register_op
+class gru(Operation):
+    r"""
+    Gated Recurrent Unit (GRU)
+
+    .. math::
+       r_t = \rm{recurrent\_activation}(W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr})
+
+    .. math::
+       z_t = \rm{recurrent\_activation}(W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz})
+
+    .. math::
+       o_t = \rm{activation}(W_{io} x_t + b_{io} + r_t * W_{ho} h_{t-1} + b_{ho})
+
+    .. math::
+       h_t = (1 - z_t) * o_t + z_t * h_{t-1}
+
+    Where:
+
+    * :math:`W_{i[r|o|z]}` are state input weights for reset, output and update gate, respectively.
+    * :math:`b_{i[r|o|z]}` are input biases for reset, output and update gate, respectively.
+    * :math:`W_{h[r|o|z]}` are recurrent/hidden weights on hidden state to reset, output, and update gates, respectively.
+    * :math:`b_{h[r|o|z]}` are recurrent/hidden biases on hidden state to reset, output, and update gates, respectively.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`x_t` is the input at time ``t``.
+    * :math:`h_{t-1}` is the hidden state of the layer at time ``t-1`` or the initial
+      hidden state at time ``0``.
+    * :math:`r_t`, :math:`o_t`, and :math:`z_t` are the reset, new, and update gates, respectively.
+    * :math:`*` is elementwise product.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, H, T> (Required)
+        * ``H`` denotes hidden size.
+
+    weight_ih: const<3*H, I, T> (Required) - Weight matrix
+        * ``weight_ih = [W_{ir} | W_{io} | W_{iz}]`` where ``[a|b]`` denotes column
+          concatenation and ``[a, b]`` denotes row concatenation. ``W_{ir}``,
+          ``W_{io}``, and ``W_{iz}`` have shape ``(H, I)``.
+        * This is used when direction="forward" or "reverse".
+
+    weight_hh: const<3*H, H, T> (Required) - Weight matrix
+        * ``weight_hh = [W_{hr} | W_{ho} | W_{hz}]``: ``W_{hr}``, ``W_{ho}``, and
+          ``W_{hz}`` have shape ``(H, H)``.
+        * This is used when direction="forward" or "reverse".
+
+    bias: const<3*H, T> (Optional) [Default all 0s]
+        * ``bias[0]`` are input-hidden and hidden-hidden bias.
+        * ``3*H`` are biases for ``[b_{ir} | b_{io} | b_{iz}]``.
+        * This is used when direction="forward" or "reverse".
+
+    direction: const (Optional) [Default=forward]
+        * Either ``forward`` or ``reverse``.
+
+    output_sequence: const (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    recurrent_activation: const (Optional) [Default=sigmoid]
+        * Activation applied on update and reset gate.
+
+    activation: const (Optional) [Default=tanh]
+        * Activation applied on output gate.
+
+    Returns
+    -------
+    <s, b, H, T> or <1, b, H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, H, T>``.
+        * Else ``<1, b, H, T>`` (hidden states of the final step).
+    <b, H, T>
+        * Hidden states of the final step.
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),
+        weight_hh=TensorInputType(const=True, type_domain="T"),
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str)
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            recurrent_activation="sigmoid",
+            activation="tanh",
+        )
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                "Invalid input shape. Expecting Rank 3 input, got {}".format(
+                    self.x.rank
+                )
+            )
+
+        sequence_length, batch_size, input_size = self.x.shape
+
+        if self.weight_ih.rank != 2:
+            raise ValueError(
+                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
+                    self.weight_ih.rank
+                )
+            )
+        if self.weight_hh.rank != 2:
+            raise ValueError(
+                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    self.weight_hh.rank
+                )
+            )
+
+        hidden_dim, hidden_size = self.weight_hh.shape
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse"}
+        if direction not in valid_directions:
+            raise ValueError(
+                "Direction {} not supported. Supported directions: {}".format(
+                    direction, valid_directions
+                )
+            )
+
+        dim_factor = 3
+        if hidden_size != (hidden_dim // dim_factor):
+            raise ValueError(
+                "Incorrect weight matrix: hidden dim size mismatch. \
+                Provided weight_ih {}, weight_hh {}. Expecting <3*H, H>".format(
+                    self.weight_ih.shape, self.weight_hh.shape
+                )
+            )
+
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        output_shape = [out_seq_len, batch_size, hidden_size]
+        output_h_shape = [batch_size, hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+        )
+
+
+@register_op
+class lstm(Operation):
+    r"""
+    Long Short-Term Memory (LSTM)
+
+    .. math::
+       i_t = \rm{recurrent\_activation}(W_{ii} x_t + B_{ii} + W_{hi} h_{t-1} + B_{hi})
+
+    .. math::
+       f_t = \rm{recurrent\_activation}(W_{if} x_t + B_{if} + W_{hf} h_{t-1} + B_{hf})
+
+    .. math::
+       z_t = \rm{cell\_activation}(W_{iz} x_t + B_{iz} + W_{hz} h_{t-1} + B_{hz})
+
+    .. math::
+       o_t = \rm{recurrent\_activation}(W_{io} x_t + B_{io} + W_{ho} h_{t-1} + B_{ho})
+
+    .. math::
+       c_t = f_t * c_{t-1} + i_t * z_t
+
+    .. math::
+       h_t = o_t * \rm{activation(c_t)}
+
+    Where:
+
+    * :math:`i_t`, :math:`f_t`, :math:`o_t`, and :math:`z_t` are input, forget, output, and cell gates,
+      respectively, at time ``t``.
+    * :math:`c_t` is cell state at time ``t``.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`W_{ii}`, :math:`W_{if}`, :math:`W_{io}`, and :math:`W_{iz}` are input weights for input,
+      forget, output, and cell gate, respectively.
+    * :math:`B_{ii}`, :math:`B_{if}`, :math:`B_{io}`, and :math:`B_{iz}` are input biases for input,
+      forget, output, and cell gate, respectively.
+    * :math:`W_{hi}`, :math:`W_{hf}`, :math:`W_{ho}`, and :math:`W_{hz}` are recurrent weights for input,
+      forget, output, and cell gate, respectively.
+    * :math:`B_{hi}`, :math:`B_{hf}`, :math:`B_{ho}`, and :math:`B_{hz}` are recurrent biases for input,
+      forget, output, and cell gate, respectively.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, DIRECTIONS*H, T> (Required)
+        * Initial hidden state. ``DIRECTIONS = 1`` for uni-directional.
+          ``DIRECTIONS = 2`` for bi-directional LSTM.
+        * ``H`` denotes hidden size.
+        * ``[b, :H]`` and ``[b, H:]`` represent forward and reverse direction
+          values, respectively.
+
+    initial_c: <b, DIRECTIONS*H, T> (Required)
+        * Initial cell state.
+        * Format is same as ``initial_h``.
+
+    weight_ih: const<4*H, I, T> (Required)
+        * Input-hidden weight matrix
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse" these weights are used.
+
+    weight_hh: const<4*H, H, T> (Required)
+        * Hidden-hidden weight matrix.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse" these weights are used.
+
+    bias: const<4*H, T> (Optional, default all 0s)
+        * bias = input-hidden bias + hidden-hidden bias
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse" this bias is used.
+
+    peephole: const<3*H, T> (Optional, default all 0s)
+        * Weight tensor for peephole.
+        * Order is ``[input_gate, forget_gate, output_gate]``.
+        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse" these weights are used.
+
+    weight_ih_back: const<4*H, I, T> (Optional) - Weight matrix
+        * Input-hidden weight matrix for backward direction for `bidirectional LSTM`.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * Must be provided for `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse" use `weight_ih` instead.
+
+    weight_hh_back: const<4*H, H, T> (Optional) - Hidden-hidden weight matrix
+        * Hidden-hidden weight matrix for backward direction for `bidirectional LSTM`.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+
+    weight_ih_back: const<4*H, I, T> (Optional)
+        * Input-hidden weight matrix for the backward direction of a `bidirectional LSTM`.
+        * Weight tensor should be in the order
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * Must be provided for a `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse", use `weight_ih` instead.
+
+    weight_hh_back: const<4*H, H, T> (Optional)
+        * Hidden-hidden weight matrix for the backward direction of a `bidirectional LSTM`.
+        * Weight tensor should be in the order
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * Must be provided for a `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse", use `weight_hh` instead.
+
+    bias_back: const<4*H, T> (Optional, default all 0s)
+        * bias = input-hidden bias + hidden-hidden bias.
+        * Bias for the backward direction of a `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse", use `bias` instead.
+
+    peephole_back: const<3*H, T> (Optional, default all 0s)
+        * Weight tensor for peephole in the backward direction of a `bidirectional LSTM`.
+        * Order is ``[input_gate, forget_gate, output_gate]``.
+        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse", use `peephole` instead.
+
+    direction: const<str> (Optional) [Default=forward]
+        * One of the following: ``forward``, ``reverse``, or ``bidirectional``.
+        * Must match ``DIRECTIONS`` in the initial states and weight parameters.
+
+    output_sequence: const<bool> (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    recurrent_activation: const<str> (Optional) [Default=sigmoid]
+        * Activation applied on the input, forget, and output gates.
+
+    cell_activation: const<str> (Optional) [Default=tanh]
+        * Activation applied on the cell gate.
+
+    activation: const<str> (Optional) [Default=tanh]
+        * Activation applied on the output gate.
+
+    clip: const<T> (Optional) [Default=None]
+        * Cell gate is clipped to ``[-clip, +clip]``.
+
+    Returns
+    -------
+    <s, b, DIRECTIONS*H, T> or <1, b, DIRECTIONS*H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, DIRECTIONS*H, T>``.
+        * Else ``<1, b, DIRECTIONS*H, T>`` (hidden states of the final step).
+    <b, DIRECTIONS*H, T>
+        * Hidden states of the final step.
+    <b, DIRECTIONS*H, T>
+        * Memory state of the final step.
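+
+    Examples
+    --------
+    A minimal sketch of invoking this op through the MIL builder (illustrative
+    only; assumes the builder alias ``mb.lstm`` from the ``@register_op``
+    registration below, with random placeholder weights in ``ifoz`` layout):
+
+    .. sourcecode:: python
+
+        import numpy as np
+        from coremltools.converters.mil import Builder as mb
+
+        s, b, I, H = 4, 1, 8, 16
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(s, b, I))])
+        def prog(x):
+            # returns hidden states <s, b, H>, final hidden state <b, H>,
+            # and final cell state <b, H>
+            return mb.lstm(
+                x=x,
+                initial_h=np.zeros((b, H), dtype=np.float32),
+                initial_c=np.zeros((b, H), dtype=np.float32),
+                weight_ih=np.random.rand(4 * H, I).astype(np.float32),
+                weight_hh=np.random.rand(4 * H, H).astype(np.float32),
+                output_sequence=True,
+            )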
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        initial_c=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),  # ifoz layout
+        weight_hh=TensorInputType(const=True, type_domain="T"),  # ifoz layout
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        peephole=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
+        weight_ih_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        weight_hh_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        bias_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        peephole_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        cell_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        clip=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            recurrent_activation="sigmoid",
+            cell_activation="tanh",
+            activation="tanh",
+            peephole=None,
+            clip=None)
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                "Invalid input shape. Expecting Rank 3 input, got {}".format(
+                    self.x.rank
+                )
+            )
+        sequence_length, batch_size, input_size = self.x.shape
+
+        def weight_shape_check(wt_ih, wt_hh):
+            if wt_ih.rank != 2 or wt_hh.rank != 2:
+                raise ValueError(
+                    "Expecting Rank 2 input, got weight_ih rank: {}, weight_hh rank: {}".format(
+                        wt_ih.rank, wt_hh.rank
+                    )
+                )
+
+            hidden_size = wt_hh.shape[1]
+            if wt_hh.shape[0] // hidden_size != 4 or wt_ih.shape[0] // hidden_size != 4:
+                raise ValueError(
+                    "Incorrect weight matrix: hidden dim size mismatch. "
+                    "Provided weight_ih {}, weight_hh {}. Expecting <4*H, H>".format(
+                        wt_ih.shape, wt_hh.shape
+                    )
+                )
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse", "bidirectional"}
+        if direction not in valid_directions:
+            raise ValueError(
+                "Direction {} not supported. Supported directions: {}".format(
+                    direction, valid_directions
+                )
+            )
+
+        weight_shape_check(self.weight_ih, self.weight_hh)
+        if direction == "bidirectional":
+            weight_shape_check(self.weight_ih_back, self.weight_hh_back)
+
+        hidden_dim, hidden_size = self.weight_hh.shape
+
+        dim_factor = 8 if direction == "bidirectional" else 4
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        num_directions = dim_factor // 4
+        output_shape = [out_seq_len, batch_size, num_directions * hidden_size]
+        output_h_shape = [batch_size, num_directions * hidden_size]
+        output_c_shape = [batch_size, num_directions * hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+            types.tensor(self.x.dtype, tuple(output_c_shape)),
+        )
+
+
+@register_op
+class rnn(Operation):
+    r"""
+    Recurrent Neural Network (RNN)
+
+    .. math::
+       h_t = \rm{activation}(W_{ih} x_t + b_{ih} + W_{hh} h_{t-1} + b_{hh})
+
+    Where:
+
+    * :math:`W_{ih}` is the input weight.
+    * :math:`W_{hh}` is the hidden/recurrent weight.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`x_t` is the input at time ``t``.
+    * :math:`h_{t-1}` is the hidden state of the layer at time ``t-1`` or the initial
+      hidden state at ``t = 0``.
+    * :math:`b_{ih}` is the input bias.
+    * :math:`b_{hh}` is the hidden/recurrent bias.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, H, T> (Required)
+        * ``H`` denotes hidden size.
+
+    weight_ih: const<H, I, T> (Required) - Input-hidden weight matrix
+
+    weight_hh: const<H, H, T> (Required) - Hidden-hidden weight matrix
+
+    bias: const<H, T> (Optional) [Default all 0s]
+        * Bias for input-hidden and hidden-hidden.
+
+    direction: const<str> (Optional) [Default=forward]
+        * Either ``forward`` or ``reverse``.
+
+    output_sequence: const<bool> (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    activation: const<str> (Optional) [Default=tanh]
+        * Supported activation functions: ``relu``, ``tanh``, ``sigmoid``,
+          ``sigmoid_hard``, ``scaled_tanh``, and ``linear``.
+
+    Returns
+    -------
+    <s, b, H, T> or <1, b, H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, H, T>``.
+        * Else ``<1, b, H, T>`` (hidden states of the final step).
+    <b, H, T>
+        * Hidden states of the final step.
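+
+    Examples
+    --------
+    A minimal sketch of invoking this op through the MIL builder (illustrative
+    only; assumes the builder alias ``mb.rnn`` from the ``@register_op``
+    registration below, with random placeholder weights):
+
+    .. sourcecode:: python
+
+        import numpy as np
+        from coremltools.converters.mil import Builder as mb
+
+        s, b, I, H = 4, 1, 8, 16
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(s, b, I))])
+        def prog(x):
+            # returns hidden states <s, b, H> and the <b, H> final hidden state
+            return mb.rnn(
+                x=x,
+                initial_h=np.zeros((b, H), dtype=np.float32),
+                weight_ih=np.random.rand(H, I).astype(np.float32),
+                weight_hh=np.random.rand(H, H).astype(np.float32),
+                output_sequence=True,
+            )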
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),
+        weight_hh=TensorInputType(const=True, type_domain="T"),
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            activation="tanh")
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                f"Invalid input shape. Expecting Rank 3 input, got {self.x.rank}"
+            )
+
+        sequence_length, batch_size, input_size = self.x.shape
+
+        if self.weight_ih.rank != 2 or self.weight_hh.rank != 2:
+            raise ValueError(
+                f"Invalid weight shape. Expecting Rank 2 input, got weight_ih "
+                f"{self.weight_ih.rank}, weight_hh {self.weight_hh.rank}"
+            )
+
+        hidden_size, _ = self.weight_ih.shape
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse"}
+        if direction not in valid_directions:
+            raise ValueError(
+                f"Direction {direction} not supported. "
+                f"Supported directions: {valid_directions}"
+            )
+
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        output_shape = [out_seq_len, batch_size, hidden_size]
+        output_h_shape = [batch_size, hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py
new file mode 100644
index 00000000..ce934303
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py
@@ -0,0 +1,558 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, precondition, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import VALUE
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+
+class ReductionAxes(Operation):
+    """
+    Reduction op superclass (reduces over multiple axes).
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        keep_dims=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axes=None,
+            keep_dims=False,
+        )
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = self.x.shape
+        axes = self.axes.val if self.axes is not None else None
+        if axes is None:
+            axes = range(self.x.rank)
+        keep_dims = self.keep_dims.val
+
+        reduced_shape = list(x_shape)
+        if keep_dims:
+            for i in axes:
+                reduced_shape[i] = 1
+        else:
+            # sort in reverse order so we can delete shape elements back to front
+            axes = [axis if axis >= 0 else axis + len(reduced_shape) for axis in axes]
+            for i in sorted(axes)[::-1]:
+                reduced_shape.pop(i)
+        if len(reduced_shape) == 0:
+            return x_type  # scalar
+
+        return types.tensor(x_type, tuple(reduced_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        axes = tuple(self.axes.val) if self.axes is not None else None
+        return self.get_operator()(self.x.val, axis=axes, keepdims=self.keep_dims.val)
+
+    def get_operator(self):
+        raise NotImplementedError()
+
+
+class ReductionAxis(Operation):
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        keep_dims=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=-1,
+            keep_dims=False,
+        )
+
+    def _find_reduced_shape(self):
+        x_shape = self.x.shape
+        axis = self.axis.val
+
+        reduced_shape = list(x_shape)
+        axis = axis if axis >= 0 else axis + len(reduced_shape)
+        if self.keep_dims.val:
+            reduced_shape[axis] = 1
+        else:
+            reduced_shape.pop(axis)
+        return reduced_shape
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        reduced_shape = self._find_reduced_shape()
+        return types.tensor(x_type, tuple(reduced_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        tmp = self.get_operator()(self.x.val, axis=self.axis.val)
+        reduced_shape = self._find_reduced_shape()
+        if self.keep_dims.val:
+            tmp = np.reshape(tmp, reduced_shape)
+        return tmp
+
+    def get_operator(self):
+        raise NotImplementedError()
+
+
+class reduce_arg(ReductionAxis):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        axis = self.axis.val
+
+        reduced_shape = list(x_shape)
+        axis = axis if axis >= 0 else axis + len(reduced_shape)
+        if self.keep_dims.val:
+            reduced_shape[axis] = 1
+        else:
+            reduced_shape.pop(axis)
+
+        return types.tensor(types.int32, tuple(reduced_shape))
+
+
+"""
+Reduction op implementations
+"""
+
+@register_op
+class reduce_argmax(reduce_arg):
+    """
+    Computes the indices of the maximum value across dimensions of a tensor.
+    In case of ties, the identity of the return value is not guaranteed.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axis: const<i32> (Optional)
+        * The dimension to reduce. Default is ``-1``.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` by removing the dimension
+          specified in ``axis``. If ``True``, retain the reduced axis with length ``1``.
+
+    Returns
+    -------
+    <\*, int32>
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.math.argmax <https://www.tensorflow.org/api_docs/python/tf/math/argmax>`_.
+    """
+
+    def get_operator(self):
+        return np.argmax
+
+
+@register_op
+class reduce_argmin(reduce_arg):
+    """
+    Computes the indices of the minimum value across dimensions of a tensor.
+    In case of ties, the identity of the return value is not guaranteed.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axis: const<i32> (Optional)
+        * The dimension to reduce. Default is ``-1``.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` by removing the dimension specified
+          in ``axis``, otherwise retain the reduced axis with length ``1``.
+
+    Returns
+    -------
+    <\*, int32>
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.math.argmin <https://www.tensorflow.org/api_docs/python/tf/math/argmin>`_.
+    """
+
+    def get_operator(self):
+        return np.argmin
+
+
+@register_op
+class reduce_l1_norm(ReductionAxes):
+    """
+    Computes the L1 norm of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    See ``reduce_mean``.
+    """
+
+    def get_operator(self):
+        def l1_norm(x, axis=None, keepdims=False):
+            return np.sum(np.abs(x), axis=axis, keepdims=keepdims)
+
+        return l1_norm
+
+
+@register_op
+class reduce_l2_norm(ReductionAxes):
+    """
+    Computes the L2 norm of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def l2_norm(x, axis=None, keepdims=False):
+            return np.sqrt(np.sum(np.square(x), axis=axis, keepdims=keepdims))
+
+        return l2_norm
+
+
+@register_op
+class reduce_log_sum(ReductionAxes):
+    """
+    Computes the natural logarithm of the sum of all the elements across given dimensions
+    of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def log_sum(x, axis=None, keepdims=False):
+            return np.log(np.sum(x, axis=axis, keepdims=keepdims))
+
+        return log_sum
+
+
+@register_op
+class reduce_log_sum_exp(ReductionAxes):
+    """
+    Computes the natural logarithm of the sum of the exponentials of the elements across
+    given dimensions of the input tensor. It is a smooth approximation of the maximum
+    function, and is more numerically stable than ``log(sum(exp(input)))``. It avoids
+    overflows caused by taking the ``exp`` of large inputs and underflows caused by
+    taking the ``log`` of small inputs.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    See `tf.math.reduce_logsumexp <https://www.tensorflow.org/api_docs/python/tf/math/reduce_logsumexp>`_.
+    """
+
+    def get_operator(self):
+        def operator(a, axis=None, keepdims=False):
+            # Subtract the max for numerical stability:
+            # log(sum(exp(a))) = max + log(sum(exp(a - max)))
+            max_values = np.amax(a, axis=axis, keepdims=True)
+            temp = np.exp(a - max_values)
+
+            if not keepdims:
+                max_values = np.squeeze(max_values, axis=axis)
+
+            sum = np.sum(temp, axis=axis, keepdims=keepdims)
+            result = np.log(sum)
+            return result + max_values
+
+        return operator
+
+
+@register_op
+class reduce_max(ReductionAxes):
+    """
+    Computes the maximum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def get_operator(self):
+        return np.max
+
+
+@register_op
+class reduce_mean(ReductionAxes):
+    """
+    Computes the mean of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    For an example, see `tf.math.reduce_mean <https://www.tensorflow.org/api_docs/python/tf/math/reduce_mean>`_.
+    """
+
+    def get_operator(self):
+        return np.mean
+
+
+@register_op
+class reduce_min(ReductionAxes):
+    """
+    Computes the minimum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        return np.min
+
+
+@register_op
+class reduce_prod(ReductionAxes):
+    """
+    Computes the product of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        return np.prod
+
+
+@register_op
+class reduce_sum(ReductionAxes):
+    """
+    Computes the sum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        return np.sum
+
+
+@register_op
+class reduce_sum_square(ReductionAxes):
+    """
+    Computes the sum of squares of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*, T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default=None, reduce on all axes)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain the reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*, T>
+        * Scalar or tensor: the reduced tensor.
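+
+    Examples
+    --------
+    A small worked example (illustrative sketch; it matches the NumPy reference
+    implementation returned by ``get_operator`` below):
+
+    .. sourcecode:: python
+
+        x = [[1, 2], [3, 4]]
+        # reduce_sum_square(x, axes=[1], keep_dims=False):
+        #   row 0: 1**2 + 2**2 = 5
+        #   row 1: 3**2 + 4**2 = 25
+        # -> [5, 25]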
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def sum_square(x, axis=None, keepdims=False):
+            return np.sum(np.square(x), axis=axis, keepdims=keepdims)
+
+        return sum_square
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py
new file mode 100644
index 00000000..6650c443
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE,
+                                                      precondition)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import compute_gather
+from coremltools.converters.mil.mil.types.symbolic import (
+    is_compatible_symbolic_vector)
+
+
+@register_op
+class gather(Operation):
+    """
+    Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,
+    similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
+
+    * If ``indices`` is scalar (0-D):
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
+      .. math::
+         x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
+
+      Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.
+
+    * If ``indices`` is a 1-D tensor:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =
+      .. math::
+         x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]
+
+      The output has rank ``rank(x)``.
+
+    * In general:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
+      .. math::
+         x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
+
+      Where ``M = rank(indices)``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*N, i32> (Required)
+        * Indices values may be negative. More precisely, ``-D[axis] <= v < D[axis]`` for ``v`` in ``indices``.
+    axis: const i32 (Optional. Default=``0``)
+        * Negative axis is supported.
+
+    Returns
+    -------
+    tensor<\*K, T>
+        * Where ``K = D[:axis] + N + D[axis+1:]``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
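+
+    Examples
+    --------
+    A small worked example (illustrative sketch; indexing follows the equations
+    above):
+
+    .. sourcecode:: python
+
+        x = [[1, 2], [3, 4], [5, 6]]        # shape (3, 2)
+        # gather(x, indices=[2, 0], axis=0)
+        # -> [[5, 6], [1, 2]]               # shape (2, 2)
+        # gather(x, indices=[1, 1], axis=1)
+        # -> [[2, 2], [4, 4], [6, 6]]       # shape (3, 2)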
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+        )
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        x = self.x.sym_val
+        indices = self.indices.val
+        if indices is None:
+            # only x is allowed to be symbolic; indices cannot be
+            return None
+        return compute_gather(
+            params=self.x.sym_val,
+            indices=self.indices.val,
+            axis=self.axis.val,
+            batch_dims=0
+        )
+
+    def type_inference(self):
+        out_type = self.x.dtype
+
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        output_rank = self.x.rank - 1 + self.indices.rank
+        if output_rank == 0:
+            # output is a scalar
+            return out_type
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+        out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
+        return types.tensor(out_type, out_shape)
+
+
+@register_op
+class scatter(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices`` along dimension ``axis``
+    by operation ``mode``.
+
+    Example: ``mode == update``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
+      .. math::
+         updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+      .. math::
+         data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Example: ``mode == add``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
+      .. math::
+         updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
+      .. math::
+         data[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+      .. math::
+         data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<[C], i32> (Required)
+        * 1-D tensor.
+    updates: tensor<\*K, T> (Required)
+        * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
+    axis: const i32 (Optional)
+        * Default to ``0``.
+    mode: const string (Optional)
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+        * Default value is ``add``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * With the same type and shape as input ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        data = [[1, 2, 3], [4, 5, 6]]
+        indices = [1, 0]
+        updates = [[5, 6, 7], [8, 9, 10]]
+        axis = 0
+        mode = "add"
+
+        # produces:
+        # [[9, 11, 13], [9, 11, 13]]
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            mode="add",
+        )
+
+    def type_inference(self):
+        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.data.rank
+        expected_updates_shape = (
+            self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
+        )
+
+        err = "Updates shape {} is incorrect. It should be {}.".format(
+            self.updates.shape, expected_updates_shape
+        )
+        assert is_compatible_symbolic_vector(
+            self.updates.shape, tuple(expected_updates_shape)
+        ), err
+
+        return self.data.sym_type
+
+
+@register_op
+class gather_along_axis(Operation):
+    """
+    Take the values along ``axis`` at locations ``indices``.
+
+    .. math::
+       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+    .. math::
+       output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+        * ``rank(indices) == rank(x)``.
+    axis: const i32 (Optional)
+        * Default to ``0``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * Output tensor has the same shape as ``indices``.
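+
+    Examples
+    --------
+    A small worked example (illustrative sketch; the behavior matches
+    ``np.take_along_axis``, which ``value_inference`` below relies on):
+
+    .. sourcecode:: python
+
+        x = [[1, 2], [3, 4]]
+        indices = [[1, 0], [0, 1]]
+        # gather_along_axis(x, indices, axis=0):
+        #   output[i, j] = x[indices[i, j], j]
+        # -> [[3, 2], [1, 4]]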
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        x = self.x.val
+        indices = self.indices.val
+        axis = self.axis.val
+        return np.take_along_axis(x, indices, axis)
+
+    def type_inference(self):
+        if self.x.rank != self.indices.rank:
+            raise ValueError(
+                "Rank mismatch between input and indices. "
+                "Input rank: {}, indices rank: {}".format(
+                    self.x.rank, self.indices.rank
+                )
+            )
+
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+
+        for i in range(self.x.rank):
+            if i != axis:
+                assert self.x.shape[i] == self.indices.shape[i]
+
+        return types.tensor(self.x.dtype, self.indices.shape)
+
+
+@register_op
+class scatter_along_axis(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices`` along the ``axis`` dimension
+    using the ``mode`` operation.
+
+    Example: ``mode == update``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+      .. math::
+         idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+      .. math::
+         output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
+      .. math::
+         updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+      .. math::
+         data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Example: ``mode == add``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+      .. math::
+         idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+      .. math::
+         output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
+      .. math::
+         updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
+      .. math::
+         data[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+      .. math::
+         output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+      .. math::
+         data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+        * ``rank(indices) == rank(data)``.
+    updates: tensor<\*K, T> (Required)
+        * Must be the same shape as ``indices``.
+    axis: const i32 (Optional)
+        * Default to ``0``.
+    mode: const string (Optional)
+        * Default to ``add``.
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * With the same type and shape as input ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            mode="add",
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        # np.put_along_axis implements the "update" mode only; for other
+        # modes the value cannot be inferred here.
+        if self.mode.val != "update":
+            return None
+        data = np.copy(self.data.val)
+        indices = self.indices.val
+        updates = self.updates.val
+        axis = self.axis.val
+        np_output = data
+        np.put_along_axis(np_output, indices, updates, axis=axis)
+        return np_output
+
+    def type_inference(self):
+        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.data.rank
+
+        assert is_compatible_symbolic_vector(
+            self.indices.shape, self.updates.shape
+        )
+        assert self.data.rank == self.indices.rank
+        for i in range(self.data.rank):
+            if i != axis:
+                assert self.data.shape[i] == self.indices.shape[i]
+
+        return self.data.sym_type
+
+
+@register_op
+class gather_nd(Operation):
+    """
+    Gather slices from ``x`` according to ``indices``, similar to
+    `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
+
+    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice
+    of ``x``:
+
+    .. math::
+       output[i_0, ..., i_{K-2}] = x[indices[i_0, ..., i_{K-2}]]
+
+    Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank
+    ``rank(x) - indices.shape[-1]``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+
+    Returns
+    -------
+    tensor<\*V, T>
+        * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
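+
+    Examples
+    --------
+    A small worked example (illustrative sketch): each innermost vector of
+    ``indices`` is a multi-index into ``x``:
+
+    .. sourcecode:: python
+
+        x = [[1, 2], [3, 4]]
+        indices = [[1, 0], [0, 1]]
+        # gather_nd(x, indices): output[k] = x[tuple(indices[k])]
+        # -> [x[1, 0], x[0, 1]] = [3, 2]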
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        assert self.indices.shape[-1] <= self.x.rank
+        out_type = self.x.dtype
+        out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]
+        return types.tensor(out_type, out_shape)
+
+
+@register_op
+class scatter_nd(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices``.
+
+    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a
+    slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``
+    has rank ``rank(data) - indices.shape[-1]``.
+
+    * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
+      the op updates ``output`` as follows:
+
+      .. math::
+         output[indices[i_0, ..., i_{K-2}]] = updates[indices[i_0, ..., i_{K-2}]]
+
+    * Example: ``mode == add``. The update rule is:
+
+      .. math::
+         output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+    updates: tensor<\*K, T> (Required)
+        * Must have shape ``K[:-1] + data.shape[K[-1]:]``.
+    mode: const string (Optional)
+        * Default to ``add``.
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * A tensor with the same shape and type as ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="add",
+        )
+
+    def type_inference(self):
+        assert self.indices.shape[-1] <= self.data.rank
+        expected_updates_shape = (
+            self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]
+        )
+        assert is_compatible_symbolic_vector(
+            self.updates.shape, tuple(expected_updates_shape)
+        )
+        return self.data.sym_type
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py
new file mode 100644
index 00000000..f5cc3d36
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py
@@ -0,0 +1,1320 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import math
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (
+    get_new_symbol,
+    get_new_variadic_symbol,
+    types,
+)
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    ListOrTensorInputType,
+    TensorInputType,
+    TupleInputType,
+)
+from coremltools.converters.mil.mil.operation import (
+    NONE,
+    SYMBOL,
+    VALUE,
+    Operation,
+    precondition,
+)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import MAX_SIZE_CONSTANT_FOLDING
+from coremltools.converters.mil.mil.types.symbolic import (
+    any_symbolic,
+    is_compatible_symbolic_vector,
+    is_symbolic,
+)
+
+
+@register_op
+class band_part(Operation):
+    """
+    Returns a tensor setting everything outside a center band to zeros for the innermost
+    matrix. Special cases:
+
+    - ``band_part(x, 0, -1)`` returns the upper triangular part.
+    - ``band_part(x, -1, 0)`` returns the lower triangular part.
+    - ``band_part(x, 0, 0)`` returns the diagonal.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    lower: const<i32> (Optional)
+        * Number of lower / below sub-diagonals to keep. If negative, keep the entire
+          lower triangle.
+        * Defaults to ``-1`` (keep the entire lower triangle).
+    upper: const<i32> (Optional)
+        * Number of upper / above sub-diagonals to keep. If negative, keep the entire
+          upper triangle.
+        * Defaults to ``-1`` (keep the entire upper triangle).
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
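+
+    Examples
+    --------
+    A small worked example (illustrative sketch) on a ``3 x 3`` matrix:
+
+    .. sourcecode:: python
+
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        # band_part(x, lower=1, upper=0) keeps the main diagonal plus one
+        # sub-diagonal below it and zeros out everything else:
+        # -> [[1, 0, 0],
+        #     [4, 5, 0],
+        #     [0, 8, 9]]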
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        lower=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        upper=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            lower=-1,
+            upper=-1)
+
+    def type_inference(self):
+        return self.x.sym_type
+
+
+@register_op
+class cumsum(Operation):
+    """
+    Returns the cumulative sum of the input along the given axis.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    axis: const<i32> (Optional)
+        * Defaults to ``0``.
+        * Axis for which the cumulative sum is computed.
+    exclusive: const<bool> (Optional)
+        * Defaults to ``False``.
+        * When set to ``False``, an inclusive cumsum is computed; that is, the first
+          element of the output is identical to the first element of the input.
+        * When set to ``True``, an exclusive cumsum is computed, which makes the first
+          element of the output ``0``.
+    reverse: const<bool> (Optional)
+        * Defaults to ``False``.
+        * When set to ``True``, perform cumsum in the reverse order.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        exclusive=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        reverse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            exclusive=False,
+            reverse=False)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        data = np.copy(self.x.val)
+        axis = self.axis.val
+        reverse = self.reverse.val
+        exclusive = self.exclusive.val
+        if reverse:
+            data = np.flip(data, axis=axis)
+        data = np.cumsum(data, axis=axis)
+        if exclusive:
+            # Shift right by one along axis: the first element becomes 0 and
+            # the last inclusive sum is dropped, keeping the shape unchanged.
+            zero_shape = np.copy(data.shape)
+            zero_shape[axis] = 1
+            slc = [slice(None)] * data.ndim
+            slc[axis] = slice(0, -1)
+            data = np.concatenate(
+                (np.zeros(zero_shape, dtype=data.dtype), data[tuple(slc)]), axis=axis
+            )
+        if reverse:
+            data = np.flip(data, axis=axis)
+        return data
+
+    def type_inference(self):
+        # Check the range of axis
+        if self.axis.val < -1 or self.axis.val > self.x.rank - 1:
+            raise ValueError(
+                "axis should be in the range [-1, {}]".format(self.x.rank - 1)
+            )
+
+        return self.x.sym_type
+
+
+@register_op
+class fill(Operation):
+    """
+    Returns a tensor with a given shape filled with a constant value.
+
+    Parameters
+    ----------
+    shape: tensor<[K], i32> (Required)
+        * Target output tensor shape.
+        * ``K`` is the rank of the output tensor. ``shape[k] > 0`` for ``k = 0,..., K-1``.
+    value: const<T> (Optional)
+        * Defaults to ``0.0``.
+        * Constant value to fill in.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Tensor with shape determined by the input shape.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        shape=TensorInputType(type_domain=types.int32),
+        value=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            value=0.)
+
+    def type_inference(self):
+        if any_symbolic(self.shape.shape):
+            # We can't infer any shape if shape has variable length.
+ return types.tensor(self.value.dtype, (get_new_variadic_symbol(),)) + + # shape has fixed length here. + if self.shape.sym_val is None: + ret_shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])]) + return types.tensor(self.value.dtype, ret_shape) + + return types.tensor(self.value.dtype, tuple(self.shape.sym_val.tolist())) + + @precondition(allow=VALUE) + def value_inference(self): + return np.full(shape=self.shape.val, fill_value=self.value.val) + + +@register_op +class non_maximum_suppression(Operation): + """ + Applies non-maximum suppression (NMS) on the input box coordinates according + to their intersection-over-union (IoU). + + NMS selects a subset of bounding boxes in descending order of score, and removes + boxes that have high intersection-over-union (IOU) overlap with previously-selected + boxes. + + + Parameters + ---------- + + boxes: tensor<[n, B, 4], T> (Required) + * Box coordinates on which to perform NMS. The coordinates are expected in + CENTER_SIZE_WIDTH_FIRST format (x, y, width, height) where (x, y) is the center. + scores: tensor<[n, B, K], T> (Required) + * Scores for each one of the boxes. K is the number of classes. + iou_threshold: const (Required) + * The intersection over union (``IoU``) threshold over which boxes are + suppressed. NMS remove all overlapping boxes with ``IoU > iou_threshold``. + score_threshold: const (Required) + * Before IoU suppression is performed, boxes with class scores below this + threshold are rejected. + max_boxes: const (Required) + * Maximum number of boxes to select. If the number of surviving boxes are + less, output is padded up to this number. + per_class_suppression: const (Optional) + * Defaults to ``False``. + * If ``True``, suppression is performed independently within boxes of each class. + + Returns + ------- + tensor<[n, max_boxes, 4], T> + * Coordinates of selected boxes. + tensor<[n, max_boxes, K], T> + * Scores of selected boxes. + tensor<[n, max_boxes], i32> + * Indices of selected boxes. + tensor<[n], i32> + * Number of boxes selected for each batch. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + boxes=TensorInputType(type_domain="T"), + scores=TensorInputType(type_domain="T"), + iou_threshold=TensorInputType(const=True, type_domain="T"), + score_threshold=TensorInputType(const=True, type_domain="T"), + max_boxes=TensorInputType(const=True, type_domain=types.int32), + per_class_suppression=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + per_class_suppression=False) + + def type_inference(self): + boxes_dtype = self.boxes.dtype + scores_dtype = self.scores.dtype + n_batch, _, n_score = self.scores.shape + max_boxes = self.max_boxes.val + + return ( + types.tensor(boxes_dtype, (n_batch, max_boxes, 4)), + types.tensor(scores_dtype, (n_batch, max_boxes, n_score)), + types.tensor(types.int32, (n_batch, max_boxes)), + types.tensor(types.int32, (n_batch,)), + ) + + +@register_op +class non_zero(Operation): + """ + Returns the indices of the elements in the given tensor that are non-zero. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Tensor, values selected at indices where its values is not equal to ``0``. + + Returns + ------- + tensor<[N, R], int32> + * 2-dimensional tensor contains indices of elements that are non-zero. + Each row is the index for a non-zero value. 
+        * ``N`` is the number of non-zero elements, and ``R`` is the rank of the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T")
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        if self.x.val is not None:
+            value = self.value_inference()
+            return types.tensor(types.int32, value.shape)
+        shape = tuple([get_new_symbol(), self.x.rank])
+        return types.tensor(types.int32, shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.transpose(np.nonzero(self.x.val))
+
+
+@register_op
+class one_hot(Operation):
+    """
+    Returns one-hot vectors whose locations represented in ``indices`` take the ``on_value``,
+    while other locations take the ``off_value``.
+
+    Parameters
+    ----------
+    indices: tensor<[D], i32> (Required)
+        * Tensor, with values indicating the locations for each one-hot vector to take
+          the ``on_value``.
+    one_hot_vector_size: i32 (Required)
+        * Indicates the number of returning vectors.
+    axis: const i32 (Optional)
+        * Indicates which dimension to append the new axis.
+        * If the input indices is rank ``D``, the output tensor will have rank ``D+1``.
+        * Defaults to ``-1`` (the last dimension).
+    on_value: const T (Optional)
+        * Values for locations where defined in ``indices``.
+        * Defaults to ``1``.
+    off_value: const T (Optional)
+        * Defaults to ``0``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor that contains one-hot vectors.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        indices=TensorInputType(type_domain=types.int32),
+        one_hot_vector_size=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        on_value=TensorInputType(const=True, optional=True, type_domain="T"),
+        off_value=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=-1,
+            on_value=1,
+            off_value=0,
+        )
+
+    def type_inference(self):
+        on_type = self.on_value.dtype
+        off_type = self.off_value.dtype
+
+        if on_type != off_type:
+            raise TypeError(
+                "Parameters on_value and off_value must have same input types."
+            )
+
+        if self.axis.val < -self.indices.rank - 1 or self.axis.val > self.indices.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        indices_shape = list(self.indices.shape)
+
+        depth_value = self.one_hot_vector_size.val
+        if depth_value is None:
+            depth_value = get_new_symbol()
+        elif depth_value < 0:
+            raise ValueError("Parameter one_hot_vector_size must be non-negative")
+
+        retshape = indices_shape
+
+        if self.axis.val < 0:
+            cut = len(retshape) + self.axis.val + 1
+        else:
+            cut = self.axis.val
+        retshape = retshape[0:cut] + [depth_value] + retshape[cut:]
+
+        return types.tensor(on_type, retshape)
+
+
+@register_op
+class pad(Operation):
+    """
+    Pad a tensor.
+
+    Parameters
+    ----------
+    x: tensor<[\*D_in], T> (Required)
+
+    pad: tensor<[2\*N], i32> (Required)
+        ``N <= D_in``. Last ``N`` dimensions of ``x`` are padded as follows:
+
+        * For each dimension ``i`` of ``x``, if ``i >= D_in - N``:
+            * pad ``pad[2*i]`` elements before ``x[..,i,..]``.
+            * pad ``pad[2*i+1]`` elements after ``x[..,i,..]``.
+        * If mode is "reflect" then ``pad[2*i]`` and ``pad[2*i+1]`` can be at
+          most ``D[i]-1``.
+ * If mode is "replicate" then ``pad[2*i]`` and ``pad[2*i+1]`` can be + at most ``D[i]``. + + mode: const (Optional) + * Defaults to ``constant``. + * Must be one of the following values: + ``constant``, ``reflect``, or ``replicate``. + + constant_val: const (Optional) + * Defaults to ``0``. + * Constant value to pad. Ignored if ``mode != constant``. + + Returns + ------- + tensor<[\*D_out],T> + * Tensor with same type as the input. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + pad=TensorInputType(type_domain=types.int32), + mode=TensorInputType(const=True, optional=True, type_domain=types.str), + constant_val=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + mode="constant", + constant_val=0., + ) + + def type_inference(self): + in_shape = self.x.shape + ret_shape = list(in_shape) + pad = self.pad + if len(pad.shape) != 1: + raise ValueError("Pad should be a 1D tensor!") + if self.mode and not self.mode.val in {'constant', 'reflect', 'replicate'}: + raise ValueError("Pad mode should be one of {'constant', 'reflect', 'replicate'}") + + if pad.val is None: + for i in range(self.pad.shape[0] // 2): + ret_shape[-self.pad.shape[0] // 2 + i] = get_new_symbol() + else: + pad = pad.val + pad = pad.copy() + + if len(pad) % 2 != 0: + raise ValueError("Number of elements in the argument Pad must be divisible by 2.") + + pad = pad.reshape(-1, 2) + + if pad.shape[0] > len(ret_shape): + raise ValueError( + "Number of dimensions specified through pad must less than or equal to rank " + "of input x" + ) + + for i in range(len(pad)): + ret_shape[-len(pad) + i] = ret_shape[-len(pad) + i] + pad[i][0] + pad[i][1] + + return types.tensor(self.x.dtype, tuple(ret_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + # NumPy `edge` mode is equivalent to `replicate` mode of PyTorch and CoreML + mode = "edge" if self.mode.val == "replicate" else self.mode.val + pad_val = self.pad.val + + if pad_val is None: + return None + + if len(self.x.val.shape) > (pad_val.shape[0] // 2): + updated_pad = np.zeros(len(self.x.val.shape) * 2) + updated_pad[-pad_val.shape[0] :] = pad_val + pad_val = updated_pad + pad_val = pad_val.reshape(-1, 2).astype(np.int32) + if mode == "constant": + return np.pad( + self.x.val, pad_val, mode, constant_values=self.constant_val.val + ) + # NumPy does not support non-constant mode and constant_values argument + return np.pad(self.x.val, pad_val, mode) + + +@register_op +class range_1d(Operation): + """ + Returns a numpy-like 1-D range sequence. + + Parameters + ---------- + end: (Required) + * The upper limit of the sequence, exclusive. + start: (Required) + * The start point of the sequence. + step: (Required) + * Number that increments ``start``. + + Returns + ------- + tensor + * A 1-D tensor, where ``M`` is the length of the sequence. 
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        end=TensorInputType(type_domain="T"),
+        start=TensorInputType(type_domain="T"),
+        step=TensorInputType(type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        start = self.start.val
+        end = self.end.val
+        step = self.step.val
+        shape = (end - start) / step
+        # To prevent creating a constant greater than 1MB,
+        # an upper bound on the size of the resulting array is set.
+        if shape > MAX_SIZE_CONSTANT_FOLDING:
+            return None
+        return np.arange(start, end, step)
+
+    def type_inference(self):
+        start = self.start.sym_val
+        end = self.end.sym_val
+        step = self.step.sym_val
+
+        if (
+            (self.start.dtype != self.end.dtype)
+            or (self.start.dtype != self.step.dtype)
+            or (self.end.dtype != self.step.dtype)
+        ):
+            raise TypeError(
+                "All inputs to the range operation must have same input types."
+            )
+
+        if all(sym_val is not None for sym_val in (start, end, step)):
+            shape = (end - start) / step
+            shape = shape if is_symbolic(shape) else int(math.ceil(shape))
+            shape = tuple([shape])
+        else:
+            shape = tuple(
+                [
+                    get_new_symbol(),
+                ]
+            )
+
+        return types.tensor(self.start.dtype, shape)
+
+
+@register_op
+class tile(Operation):
+    """
+    Returns a new tensor by replicating the input ``x`` multiple times.
+    Dimension ``i`` of ``x`` will be replicated ``reps[i]`` times.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    reps: tensor<[rank(x)], i32> (Required)
+        * A 1-D tensor with length ``rank(x)``, which indicates the number of times to
+          replicate the input along each dimension.
+
+    Returns
+    -------
+    tensor<\*?, T>:
+        * An n-D tensor with the same type as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        reps=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = np.array(self.x.shape)
+
+        reps = self.reps.sym_val
+
+        if reps is None:
+            out_shape = tuple([get_new_symbol() for _ in range(self.x.rank)])
+            return types.tensor(x_type, out_shape)
+
+        if len(reps) == 0 or len(reps) != self.x.rank:
+            msg = (
+                "Length of the reps ({}) must be at least 1, and "
+                "equal to the rank of the input x ({})"
+            )
+            raise ValueError(msg.format(len(reps), self.x.rank))
+
+        out_shape = []
+        for i, rep in enumerate(reps):
+            if not is_symbolic(rep):
+                if rep <= 0:
+                    raise ValueError("All entries of reps parameter must be greater than 0")
+
+            if is_symbolic(rep) or is_symbolic(x_shape[i]):
+                out_shape.append(get_new_symbol())
+            else:
+                out_shape.append(rep * x_shape[i])
+
+        out_shape = tuple(out_shape)
+
+        return types.tensor(x_type, out_shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        # Infer only if there are no symbolic values.
+        if self.reps.val is None:
+            return None
+        return np.tile(self.x.val, reps=self.reps.val)
+
+
+@register_op
+class argsort(Operation):
+    """
+    Returns a tensor containing the indices of the sorted values along a given axis
+    of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*?, T> (Required)
+        * Input tensor.
+    axis: const<i32> (Optional)
+        * Defaults to ``-1`` (the last dimension).
+        * Axis to perform the operation.
+    ascending: const<bool> (Optional)
+        * Defaults to ``False``, sort in descending order.
+        * ``True`` to sort in ascending order.
+ + Returns + ------- + tensor<\*?, int32> + * Tensor containing the indices of the sorted values + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ascending=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + axis=-1, + ascending=False, + ) + + def type_inference(self): + return types.tensor(types.int32, self.x.shape) + + @precondition(allow=VALUE) + def value_inference(self): + # The default np argsort mode is ascending, which is opposite to MIL's argsort op. + if self.ascending.val: + return np.argsort(self.x.val, axis=self.axis.val) + return np.argsort(-self.x.val, axis=self.axis.val) + + +@register_op +class topk(Operation): + """ + Returns a tensor containing top or bottom ``k`` values and the corresponding + indices of the input tensor along a given axis. + + Parameters + ---------- + x: <\*?, T> (Required) + * Input tensor. + k: const (Optional) + * Defaults to ``1``. + * Number of values/indices to be computed along each axis. + axis: const (Optional) + * Defaults to ``-1`` (last dimension). + * Axis to perform the operation. + ascending: const (Optional) + * Defaults to ``False``, sort in descending order. + * ``True`` to sort in ascending order. + + Returns + ------- + tensor<\*?, T> + * Values of top/bottom ``k`` elements. + tensor<\*?, int32> + * Indices of the top/bottom ``k`` elements along axis. + + Attributes + ---------- + T: fp16, fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + k=TensorInputType(const=True, optional=True, type_domain=types.int32), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ascending=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + k=1, + axis=-1, + ascending=False, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + k = self.k.val + axis = self.axis.val + + if not is_symbolic(x_shape[axis]) and k > x_shape[axis]: + msg = "K={} is greater than size of the given axis={}" + raise ValueError(msg.format(k, axis)) + + ret_shape = list(x_shape) + ret_shape[axis] = k + return types.tensor(x_type, ret_shape), types.tensor(types.int32, ret_shape) + + @precondition(allow=VALUE) + def value_inference(self): + indices = np.argsort(self.x.val, axis=self.axis.val) + if not self.ascending.val: + indices = np.argsort(-self.x.val, axis=self.axis.val) + slc = [slice(None)] * self.x.rank + slc[self.axis.val] = slice(0, self.k.val) + indices = indices[tuple(slc)] + values = np.take_along_axis(self.x.val, indices, axis=self.axis.val) + return values, indices + + +@register_op +class flatten2d(Operation): + """ + Flattens input tensor into 2d tensor by flattening dimensions before and + after the provided axis. + + Parameters + ---------- + x: tensor<[*d], T> (Required) + * Input tensor. + axis: const (Optional) + * Defaults to ``1``. + * Negative axis is supported. + + Returns + ------- + tensor + * ``d_prior`` is product of dimensions ``x[:axis]`` + * ``d_post`` is product of dimensions ``x[axis:]`` + + Examples + -------- + 1. ``input_shape = (3, ), axis = -1, output_shape = (1, 3)`` + 2. 
``input_shape = (3, ), axis = 1, output_shape = (3, 1)`` + 3. ``input_shape = (4, 3), axis = -1, output_shape = (4, 3)`` + 4. ``input_shape = (2, 3, 2), axis = -1, output_shape = (6, 2)`` + 5. ``input_shape = (5, 5, 2), axis = 1, output_shape = (5, 10)`` + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32) + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axis=1, + ) + + def type_inference(self): + shape = list(self.x.shape) + axis = self.axis.val + dim_pre_axis = np.prod(shape[:axis]) + dim_post_axis = np.prod(shape[axis:]) + new_shape = [dim_pre_axis, dim_post_axis] + return types.tensor(self.x.dtype, tuple(new_shape)) + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + shape = self.x.shape + axis = self.axis.val + + dim_pre_axis = np.prod(shape[:axis]) + dim_post_axis = np.prod(shape[axis:]) + return self.x.val.reshape(dim_pre_axis, dim_post_axis) + + +@register_op +class shape(Operation): + """ + Returns a 1-dimensional tensor with the shape of the input tensor. + + Parameters + ---------- + x: tensor<[*?], T> (Required) + * Input tensor. + + Returns + ------- + tensor + * Shape of the input tensor. + * ``K = x.rank``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec(x=TensorInputType(type_domain="T")) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + input_rank = self.x.rank + return types.tensor(types.int32, tuple([input_rank])) + + def value_inference(self): + if any_symbolic(self.x.shape): + # convert elements in shape to int32 + res = [x if is_symbolic(x) else np.int32(x) for x in self.x.shape] + return np.array(res) + else: + return np.array(self.x.shape).astype(np.int32) + + +@register_op +class concat(Operation): + """ + Concatenates tensors along a dimension. + + Parameters + ---------- + values: Tuple[tensor<[d0, d1, ..., d_axis_i, ..., d_n],T>] (Required) + * The number of dimensions of the input tensors must match, and all + dimensions except ``axis`` must be equal. + * The tensors may be variadic, but the number of tensors must be + determined at compile time (i.e. a tuple). + axis: const (Required) + * The dimension along which to concatenate. Must be in the range + ``[-rank(values[i]), rank(values[i]))`` for all ``i``. + interleave: const (Optional, Default=False) + * If True, concatenate the inputs by interleaving them. + * If True, all the inputs to this op must have the exact same shape. + + Examples + -------- + + .. sourcecode:: python + + in1 = [[1, 2], [3, 4], [5, 6]] # shape (3, 2) + in2 = [[7, 8], [9, 10], [11, 12]] # shape (3, 2) + axis = 0 # output shape is (6, 2) + + if interleave is False: # default + # output[0:3, :] = in1 + # output[3:6, :] = in2 + output = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]] + + if interleave is True: + # output[0::2, :] = in1 + # output[1::2, :] = in2 + output = [[1, 2], [7, 8], [3, 4], [9, 10], [5, 6], [11, 12]] + + Returns + ------- + tensor<[d0, d1,...d_axis_out, ..., d_n],T> + * Where ``d_axis_out = sum(d_axis_i)``. 
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + values=TupleInputType(), + axis=TensorInputType(const=True, type_domain=types.int32), + interleave=TensorInputType(const=True, optional=True, type_domain=types.bool) + ) + + def default_inputs(self): + return DefaultInputs( + interleave=False, + ) + + def type_inference(self): + concat_dim_len = 0 + if len(self.values) == 0: + raise ValueError("Concat {} got 0 values".format(self.name)) + + # Validate values have the same rank + rank = self.values[0].rank + for v in self.values: + if v.rank != rank: + msg = "Input {} has rank {} != other inputs rank {}" + raise ValueError(msg.format(v.name, v.rank, rank)) + + # Check concat axis is within (-rank, rank) + concat_axis = self.axis.val + if concat_axis < 0: + concat_axis += rank + if rank > 0 and (concat_axis < 0 or concat_axis >= rank): + msg = "In {} of op_type {}: axis out of bound for input " + "(rank {})" + raise ValueError(msg.format(self.name, self.op_type, rank)) + + # Validate values share the same data type + dtype = self.values[0].dtype + for v in self.values[1:]: + if v.dtype != dtype: + msg = ( + "Tensors in 'values' of the concat op ({}) should share the " + "same data type. Got {}." + ).format(self.name, [x.dtype for x in self.values]) + raise ValueError(msg) + + # validate that non-axis dimensions match + retshape = list(self.values[0].shape) + for v in self.values[1:]: + for i in range(rank): + if is_symbolic(retshape[i]) or is_symbolic(v.shape[i]): + continue + if i != concat_axis and retshape[i] != v.shape[i]: + msg = 'Dimension mismatch in {} ("{}"): shapes {} vs. {}' + raise ValueError( + msg.format(self.op_type, self.name, retshape, v.shape) + ) + if self.interleave.val and retshape[i] != v.shape[i]: + msg = 'Dimension mismatch in {} ("{}"): shapes {} vs. {}. ' \ + 'All inputs must have same shape when \'interleave\' option is True.' + raise ValueError( + msg.format(self.op_type, self.name, retshape, v.shape) + ) + + # Get length of concat dim + concat_dim_len = 0 + for v in self.values: + if len(v.shape) == 0: + taxis = 1 + else: + taxis = v.shape[concat_axis] + if is_symbolic(taxis): + concat_dim_len = get_new_symbol() + break + concat_dim_len += taxis + + if len(retshape) == 0: + retshape = [concat_dim_len] + else: + retshape[concat_axis] = concat_dim_len + + return types.tensor(dtype, retshape) + + @precondition(allow=VALUE | SYMBOL | NONE) + def value_inference(self): + + values = [] + for v in self.values: + if v.sym_val is not None: + values.append(v.sym_val) + continue + if v.rank == 0: + values.append(get_new_symbol()) + continue + if any_symbolic(v.shape): + values.append(None) + continue + + # we support value inference when number of elements for each tensor is less than 10 + shape = v.shape + num_element = np.prod(shape) + if num_element > 10: + values.append(None) + continue + + symbolic_tensor = [get_new_symbol() for _ in range(num_element)] + symbolic_tensor = np.reshape(np.array(symbolic_tensor), shape) + values.append(symbolic_tensor) + + if any([val is None for val in values]): + return None + + if not isinstance(values[0], np.ndarray) or values[0].shape == (): + return np.stack(values, axis=self.axis.val) + + return np.concatenate(values, axis=self.axis.val) + + +@register_op +class split(Operation): + """ + Split tensors into a tuple + + Parameters + ---------- + x: <\*?,T> (Required) + * The tensor to split. + * The tensors may be variadic, but the number of tensors must be determined + at compile time (i.e. 
a tuple).
+
+    num_splits: const<i32> (Optional)
+        If specified, divide ``x`` into ``num_splits`` tensors along ``axis``.
+        Its behavior depends on ``split_sizes``:
+
+            * If ``split_sizes`` is defined, ``num_splits == S``, and the output
+              sizes may be uneven.
+            * If ``split_sizes`` is not defined, ``value.shape[axis]`` must be
+              divisible by ``num_splits``, and the output sizes are equal.
+
+        At least one of ``num_splits`` or ``split_sizes`` must be provided.
+        If ``split_sizes`` length ``S`` cannot be determined at compile time,
+        ``num_splits`` must be supplied to determine the number of outputs.
+
+    split_sizes: const<S, i32> (Optional)
+        * The sizes to split into. The sum of ``split_sizes`` must equal
+          ``value.shape[axis]``.
+
+    axis: const<i32> (Required)
+        * The dimension along which to split. Must be in the
+          range ``[-rank(x), rank(x))``.
+
+    Returns
+    -------
+    Tuple[tensor<\*?, T>]
+        * Where the length of the tuple is the number of splits (determined
+          from ``num_splits`` or ``split_sizes``).
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        num_splits=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        split_sizes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        axis=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        num_splits, sizes = self._get_num_splits_and_sizes()
+        x_shape = list(self.x.shape)
+        ret_shapes = [x_shape[:] for _ in range(num_splits)]
+        axis = self.axis.val
+        for i, d in enumerate(sizes):
+            ret_shapes[i][axis] = d
+        self.sizes = sizes
+        return tuple([types.tensor(self.x.dtype, s) for s in ret_shapes])
+
+    def _get_num_splits_and_sizes(self):
+        """
+        Return:
+        - num_splits: int
+        - sizes: list of int/symbols. Of length num_splits
+
+        Raise ValueError if num_splits cannot be determined.
+        """
+        if self.num_splits is None and self.split_sizes is None:
+            msg = (
+                "At least one of num_splits and split_sizes "
+                + "must be specified in split op {}"
+            )
+            raise ValueError(msg.format(self.name))
+
+        axis = self.axis.val
+
+        if self.num_splits is not None:
+            num_splits = self.num_splits.val
+            if self.split_sizes is None:
+                # Even split
+                if (
+                    not is_symbolic(self.x.shape[axis])
+                    and self.x.shape[axis] % num_splits != 0
+                ):
+                    msg = "num_split {} does not divide split dim (length = {})"
+                    raise ValueError(msg.format(num_splits, self.x.shape[axis]))
+                size = self.x.shape[axis] / num_splits
+                return num_splits, [size] * num_splits
+
+            # self.split_sizes is not None
+            if self.split_sizes.sym_val is not None:
+                return num_splits, self.split_sizes.sym_val
+
+            # self.split_sizes.sym_val is None.
+            sizes = [get_new_symbol() for _ in range(num_splits)]
+            return num_splits, sizes
+
+        # self.num_splits is None, self.split_sizes is not None
+        if self.split_sizes.sym_val is not None:
+            return len(self.split_sizes.sym_val), self.split_sizes.sym_val
+
+        # self.num_splits is None, self.split_sizes is not None,
+        # and self.split_sizes.sym_val is None
+        if any_symbolic(self.split_sizes.shape):
+            raise ValueError("Unable to determine number of splits")
+
+        # split_sizes is a 1-D tensor, so its length is its first dimension.
+        num_splits = self.split_sizes.shape[0]
+        sizes = [get_new_symbol() for _ in range(num_splits)]
+        return num_splits, sizes
+
+    @precondition(allow=VALUE | SYMBOL | NONE)
+    def value_inference(self):
+        num_splits, sizes = self._get_num_splits_and_sizes()
+        if self.x.sym_val is None or any_symbolic(sizes):
+            raise NotImplementedError()
+
+        if num_splits == 1:
+            # No split_indices possible.
+            return self.x.sym_val
+
+        split_indices = np.cumsum(sizes).astype(np.int32)
+        return tuple(np.split(self.x.sym_val, split_indices[:-1], axis=self.axis.val))
+
+
+@register_op
+class stack(Operation):
+    """
+    Stacks tensors along a new dimension.
+
+    Parameters
+    ----------
+    values: Tuple[tensor<[d0, d1,...d_axis_i, ..., d_n], T>] (Required)
+        * All tensors must have identical shape.
+    axis: const<i32> (Required)
+        * The dimension at which the new axis is inserted. Must be in the range
+          ``[-rank(values[i]) - 1, rank(values[i]) + 1)`` for all ``i``.
+
+    Returns
+    -------
+    tensor<[d0, d1,...N,..., d_n], T>
+        * Where ``N`` is the number of input tensors, inserted as a new
+          dimension at ``axis``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        values=TupleInputType(),
+        axis=TensorInputType(const=True, type_domain=types.int32)
+    )
+
+    def type_inference(self):
+
+        num_tensors = len(self.values)
+        if num_tensors == 0:
+            raise ValueError("Cannot stack 0 tensors")
+
+        # get the first value without symbolic shape
+        t_shape = None
+        for value in self.values:
+            if not any_symbolic(value.shape):
+                t_shape = value.shape
+                break
+        t_shape = self.values[0].shape if t_shape is None else t_shape
+
+        # compare all shapes
+        for t in self.values:
+            if not is_compatible_symbolic_vector(t.shape, t_shape):
+                msg = "Component tensor {} has shape {}, others have {}"
+                raise ValueError(msg.format(t.name, t.shape, t_shape))
+
+        # Validate values share the same data type
+        dtype = self.values[0].dtype
+        for v in self.values[1:]:
+            if v.dtype != dtype:
+                msg = (
+                    "Tensors in 'values' of the stack op ({}) should share the "
+                    "same data type. Got {}."
+                ).format(self.name, [x.dtype for x in self.values])
+                raise ValueError(msg)
+
+        axis = self.axis.val
+        if axis < 0:
+            axis += (self.values[0].rank + 1)
+        ret_shape = list(t_shape)
+        ret_shape.insert(axis, num_tensors)
+        return types.tensor(self.values[0].dtype, ret_shape)
+
+    @precondition(allow=VALUE | SYMBOL | NONE)
+    def value_inference(self):
+
+        is_all_rank_zero = all([v.rank == 0 for v in self.values])
+        values = [
+            v.sym_val if v.sym_val is not None else get_new_symbol()
+            for v in self.values
+        ]
+
+        if any([is_symbolic(v) for v in values]) and not is_all_rank_zero:
+            return None
+
+        return np.stack(values, self.axis.val)
+
+
+# identity is used for renaming and is rarely necessary. See
+# `loop_invariant_elimination` pass for a rare use case.
+@register_op
+class identity(Operation):
+    """
+    Returns a tensor with the same shape and contents as the input.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=ListOrTensorInputType() + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + return self.x.sym_val diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py new file mode 100644 index 00000000..fe2480f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py @@ -0,0 +1,1069 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import (Operation, get_new_symbol, + get_new_variadic_symbol, + precondition, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import SYMBOL, VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + solve_slice_by_index_shape +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + any_variadic, + is_symbolic, + isscalar) + + +@register_op +class depth_to_space(Operation): + """ + Rearrange elements in a tensor from depth (channel) into spatial dimensions. + + Parameters + ---------- + x: tensor<[n, C, H, W], T> (Required) + * Input tensor of rank ``4``. + block_size: const i32 (Required) + * The size of the spatial block. Must be greater than ``1`` and divisible by + channel dimension ``C``. + + Returns + ------- + tensor<[n, C / block_size^2, H x block_size, W x block_size], T> + * Where ``b`` is the block size. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + block_size=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + x_type = self.x.dtype + n, c, h, w = self.x.shape + bs = self.block_size.val + ret_shape = (n, c // (bs * bs), h * bs, w * bs) + return types.tensor(x_type, ret_shape) + + +@register_op +class expand_dims(Operation): + """ + Insert a single-dimension in a 1-D or higher tensor at each axis in axes. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Scalar or tensor. + axes: const tensor<[K], i32> Required + * ``K`` is the number of dimensions expanded. + * Insert single dimension at dimension index at each axes. + * Negative value to index from the end. ``-d-1 <= axis <= d`` + where ``d`` is the rank of ``x``. + + Returns + ------- + tensor<\*(rank(x)+K), T> + * Same type as the input ``x`` with rank ``rank(x)+K``. 
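+
+    Examples
+    --------
+    Illustrative values only (mirroring the ``np.reshape``-based
+    ``value_inference`` below):
+
+    .. sourcecode:: python
+
+        x = [[1, 2], [3, 4]]  # shape (2, 2)
+        axes = [0, -1]
+
+        # output shape is (1, 2, 2, 1)
+        output = [[[[1], [2]], [[3], [4]]]]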
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + x_rank = self.x.rank + x_type = self.x.dtype + x_shape = list(self.x.shape) + axes = self.axes.val + out_rank = x_rank + len(axes) + + for axis in axes: + if axis <= -out_rank - 1 or axis >= out_rank: + msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}' + raise IndexError( + msg.format(axis, self.op_type, self.name, self.x.shape) + ) + + ret_shape = x_shape + axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes]) + for axis in axes: + ret_shape.insert(axis, 1) + + return types.tensor(x_type, tuple(ret_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + axes = self.axes.val + out_rank = self.x.rank + len(axes) + + for axis in axes: + if axis <= -out_rank - 1 or axis >= out_rank: + msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}' + raise IndexError( + msg.format(axis, self.op_type, self.name, self.x.shape) + ) + + axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes]) + ret_shape = list(self.x.shape) + for axis in axes: + ret_shape.insert(axis, 1) + return np.reshape(self.x.val, ret_shape) + + +def reshape_with_symbol(v, shape): + """ + Perform basic reshape if v is symbolic (not array of symbols). + """ + if is_symbolic(v): + return np.array(v).reshape(shape) + shape = [int(s) for s in shape] + return v.reshape(shape) + + +@register_op +class reshape(Operation): + """ + Return a tensor that has the same values as ``x`` with shape ``shape``. + ``shape`` must have the same volume (number of elements) as ``x``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + * A n-D tensor or a scalar. + * If ``x`` is fixed rank (and possibly contains symbolic dimension), + shape may contain elements that are not positive integers (see below). + * If ``x`` is variadic rank, shape can only contain positive integers. + + shape: tensor<[K], i32> (Required) + + A 1-D tensor, with elements from the following: + + * Positive integers. + * Symbols: All but one symbol in shape must be present in ``x.shape``. + The new symbol that is not present in ``x.shape`` represent a dimension + such that the total size remains constant. Symbol is illegal + if ``x`` is variadic rank. + * ``-1``: ``-1`` introduces a new symbol (see Symbols). Therefore, ``-1`` is + allowed if all symbols in the shape appear in ``x.shape``. ``-1`` is illegal + if ``x`` is variadic rank. + * ``0``: If ``K == rank(x)`` then ``0`` means inheriting from the corresponding + dimension in ``x.shape``. ``0`` is illegal if ``x`` is variadic rank. + + Returns + ------- + tensor<\*?, T> + * Tensor with shape determined by the input shape. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + shape=TensorInputType(type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + if any_symbolic(self.shape.shape): + # We can't infer any shape if shape has variable length. + return types.tensor(self.x.dtype, (get_new_variadic_symbol(),)) + + # shape has fixed length here. 
+ if self.shape.sym_val is None: + shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])]) + return types.tensor(self.x.dtype, shape) + t, _ = self._get_type_val() + return t + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + _, val = self._get_type_val() + return val + + def _get_type_val(self): + x_type = self.x.dtype + x_shape = self.x.shape + x_vol = np.prod(x_shape) + # shape is const, and thus sym_val is not None + sym_shape = self.shape.sym_val + sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape] + try: + ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape) + except: + ret_shape = sym_shape + ret_val = None + if self.x.val is not None and all(isscalar(a) and not is_symbolic(a) for a in ret_shape): + ret_val = reshape_with_symbol(self.x.val, ret_shape) + return types.tensor(x_type, tuple(ret_shape)), ret_val + + @staticmethod + def enforce_volumetric_constraint(left_volume, inshape): + left_symbols = set() + if is_symbolic(left_volume): + left_symbols = left_volume.free_symbols + # Generally, we want to solve for right in terms of left. But this + # is kinda annoying actually. + shape = list(inshape) + + # Handling when reshape is given 0 instead of actual input + # input tensor shape: [4, 3, 2], reshape:[0, -1], output tensor shape: [4, 6] + if shape.count(-1) > 1: + raise ValueError( + "Reshape op supports only one dimension to be -1. Given {}".format( + shape.count(-1) + ) + ) + + infer_dim_index = shape.index(-1) if -1 in shape else None + right_volume = 1 + for i in shape: + if i != -1: + right_volume = right_volume * i + + if infer_dim_index: + shape[infer_dim_index] = left_volume // right_volume + + if not is_symbolic(right_volume): + return shape + + constraints = [left_volume - right_volume] + solve_for = [s for s in shape if is_symbolic(s)] + + for rightsym in solve_for: + sol = sm.solve(constraints, [rightsym], dict=True) + if not isinstance(sol, list): + sol = [sol] + # look for an acceptable solution + for s in sol: + if 0 in s.values(): + continue + for i in range(len(shape)): + if shape[i] in s: + v = s[shape[i]] + if len(v.free_symbols - left_symbols) > 0: + continue + try: + shape[i] = int(v) + except: + shape[i] = v + return shape + + +@register_op +class reverse(Operation): + """ + Reverse the order of the input tensor ``x`` along specified ``axes`` (dimensions). + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input tensor. + + axes: const (Optional) + * Dimension(s) to reverse. Each axis must be in the range ``[-rank(x), rank(x))``. + * Defaults to None (reverse on all dimensions). + + Returns + ------- + tensor<\*?, T> + * Same type and shape as the input tensor. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + See `tf.reverse `_ + and `TORCH `_. 
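+
+    Examples
+    --------
+    Illustrative values only (``value_inference`` applies ``np.flip`` per axis):
+
+    .. sourcecode:: python
+
+        x = [[1, 2, 3], [4, 5, 6]]  # shape (2, 3)
+
+        # axes = [0]
+        output = [[4, 5, 6], [1, 2, 3]]
+
+        # axes = [1]
+        output = [[3, 2, 1], [6, 5, 4]]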
+ """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axes=None, + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + res = self.x.val + axes = self.axes.val if self.axes is not None else range(self.x.rank) + for axis in axes: + res = np.flip(res, axis=axis) + return res + + +@register_op +class reverse_sequence(Operation): + """ + Reverse variable length slices for specified axes / dimensions of the input + tensor. This op first slices input tensor along the ``batch_axis`` dimension, then + partially reverses the elements along the ``seq_axis`` for the first ``lengths[i]`` + elements. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input tensor. + lengths: tensor (Required) + * 1-dimensional tensor of length ``x.shape[batch_axis]`` specifying the length + of the sequence to reverse. + * Values must be in range ``[0, x.shape[seq_axis]]``. + seq_axis: const (Optional) + * The dimension to reverse. + * Defaults to ``0``. + batch_axis: const (Optional) + * Dimension for slicing. + * Defaults to ``0``. + + Returns + ------- + tensor<\*?, T> + * Same type and shape as the input tensor. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + `tf.reverse_sequence `_ + + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + lengths=TensorInputType(type_domain=types.int32), + seq_axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + batch_axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + seq_axis=0, + batch_axis=0) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + raise NotImplementedError("TODO") + + +@register_op +class slice_by_index(Operation): + """ + Method for numpy style indexing and slicing. + With a tensor ``x``, this method achieves the following: + + ``result = x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...]`` + + Note: This method does not support pure indexing. You would need to do a + squeeze if indexing is intended. + + Parameters + ---------- + x: tensor<*?, T> (Required) + * Input tensor + begin: tensor<[rank(x)], i32> (Required) + * Starting index for the dimension of slicing. + end: tensor<[rank(x)], i32> (Required) + * Ending index for the dimension of slicing. + stride: tensor<[rank(x)], i32> (Optional) + * Default is all ``1``. + * Stride for the dimension of slicing. + begin_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``begin_mask[i]==True``, ignores ``begin[i]``, and set ``begin[i]`` to ``0``. + end_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``end_mask[i]==True``, ignores ``end[i]``, and set ``end[i]`` to ``x.shape[i]``. + squeeze_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``squeeze_mask[i]==true``, ignores ``end[i]``, and do the pure index at ``begin[i]``. + + Returns + ------- + tensor<\*?, T> + - Scalar or tensor. 
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + begin=TensorInputType(type_domain=types.int32), + end=TensorInputType(type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + begin_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + end_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + squeeze_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + stride=None, + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + + def type_inference(self): + + # get tensor and set default value + begin = self.begin.val + end = self.end.val + x_rank = self.x.rank + stride = self.stride.val if self.stride is not None else [1] * x_rank + begin_mask = ( + self.begin_mask.val if self.begin_mask is not None else [False] * x_rank + ) + end_mask = self.end_mask.val if self.end_mask is not None else [False] * x_rank + squeeze_mask = ( + self.squeeze_mask.val if self.squeeze_mask is not None else [False] * x_rank + ) + + # solve shape + x_shape = self.x.shape + ret_shape = solve_slice_by_index_shape(x_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask) + + if len(ret_shape) == 0: + # Scalar case. + return self.x.dtype + else: + return types.tensor(self.x.dtype, tuple(ret_shape)) + + def value_inference(self): + if self.x.sym_val is None or self.begin.val is None or self.end.val is None: + return None + begin = [int(i) for i in list(self.begin.val[:])] + end = [int(i) for i in list(self.end.val[:])] + stride = [1] * self.x.rank if self.stride is None else self.stride.val + begin_mask = ( + [False] * self.x.rank if self.begin_mask is None else self.begin_mask.val + ) + end_mask = [False] * self.x.rank if self.end_mask is None else self.end_mask.val + squeeze_mask = ( + [False] * self.x.rank + if self.squeeze_mask is None + else self.squeeze_mask.val + ) + + slices = [] + for idx, mask in enumerate(begin_mask): + if mask: + begin[idx] = None + for idx, mask in enumerate(end_mask): + if mask: + end[idx] = None + squeeze_axes = [] + for idx, mask in enumerate(squeeze_mask): + if mask: + end[idx] = None + stride[ + idx + ] = 2147483647 # We slice out only 1 element by setting stride to INF + squeeze_axes.append(idx) + for idx in range(self.x.rank): + slices.append(slice(begin[idx], end[idx], stride[idx])) + + slices = tuple(slices) + res = self.x.sym_val[slices] + + # remove squeezed axes + if len(squeeze_axes) > 0: + if len(squeeze_axes) == len(res.shape): + if len(res) == 0: + logger.warning("%s seems to be a 0 sized tensor", self.name) + return np.array([]) + res = np.squeeze(res).tolist() + if is_symbolic(res): + return res + elif self.x.dtype == types.int32 or self.x.dtype == types.int64: + res = np.int32(res) + elif self.x.dtype == types.float or self.x.dtype == types.double: + res = np.float32(res) + else: + raise ValueError( + "Unable to convert type {}".format(self.x.sym_val.dtype) + ) + else: + res = np.squeeze(res, axis=tuple(squeeze_axes)) + return res + + +@register_op +class slice_by_size(Operation): + """ + Slice input tensor starting from the given ``begin`` index and by + the amount specified by the ``size`` input, for each dimension. + + Parameters + ---------- + x: tensor<*?, T> (Required) + * Input tensor. 
+    begin: tensor<[rank(x)], i32> (Required)
+        * The begin index for the slice.
+    size: tensor<[rank(x)], i32> (Required)
+        * The size that is to be sliced. If ``size[i]`` is ``-1``, all the
+          remaining elements from ``begin[i]`` are sliced along that dimension.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same rank as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        begin=TensorInputType(type_domain=types.int32),
+        size=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        if self.begin.rank != 1:
+            raise ValueError(
+                "begin should be 1-D tensor, got {}-D tensor instead".format(
+                    self.begin.rank
+                )
+            )
+        if self.size.rank != 1:
+            raise ValueError(
+                "size should be 1-D tensor, got {}-D tensor instead".format(
+                    self.size.rank
+                )
+            )
+        if self.x.rank != self.begin.shape[0]:
+            raise ValueError(
+                "Length of begin ({}) doesn't equal the input rank ({}).".format(
+                    self.begin.shape[0], self.x.rank
+                )
+            )
+        if self.x.rank != self.size.shape[0]:
+            raise ValueError(
+                "Length of size ({}) doesn't equal the input rank ({}).".format(
+                    self.size.shape[0], self.x.rank
+                )
+            )
+
+        x_shape = self.x.shape
+        ret_shape = []
+        if self.size.sym_val is None:
+            ret_shape = [get_new_symbol() for _ in range(self.x.rank)]
+            return types.tensor(self.x.dtype, tuple(ret_shape))
+
+        for idx, s in enumerate(self.size.sym_val):
+            if is_symbolic(s):
+                ret_shape.append(s)
+            elif s != -1:
+                ret_shape.append(s)
+            elif self.begin.sym_val is not None:
+                ret_shape.append(x_shape[idx] - self.begin.sym_val[idx])
+            else:
+                ret_shape.append(get_new_symbol())
+
+        return types.tensor(self.x.dtype, tuple(ret_shape))
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        if any_symbolic(self.begin.sym_val):
+            return None
+        if any_symbolic(self.size.sym_val):
+            return None
+        if self.x.val is None:
+            return None
+        slices = []
+        for i in range(self.x.rank):
+            begin_val = self.begin.val[i]
+            if begin_val < 0:
+                if is_symbolic(self.x.shape[i]):
+                    return None
+                begin_val += self.x.shape[i]
+            if self.size.val[i] > 0:
+                slices.append(slice(begin_val, begin_val + self.size.val[i]))
+            else:
+                slices.append(slice(begin_val, None, None))
+        return self.x.val[tuple(slices)]
+
+
+@register_op
+class space_to_depth(Operation):
+    """
+    Rearrange elements in a tensor from spatial into depth (channel) dimension.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor of rank ``4``.
+    block_size: const<i32> (Required)
+        * The size of the spatial block. Must be greater than ``1``; the spatial
+          dimensions ``H`` and ``W`` must each be divisible by it.
+
+    Returns
+    -------
+    tensor<[n, C x block_size^2, H / block_size, W / block_size], T>
+        * Where ``block_size`` is the size of the spatial block.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        block_size=TensorInputType(const=True, type_domain=types.int32)
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        n, c, h, w = self.x.shape
+        bs = self.block_size.val
+        ret_shape = (n, c * (bs * bs), h // bs, w // bs)
+        return types.tensor(x_type, ret_shape)
+
+@register_op
+class space_to_batch(Operation):
+    """
+    Rearrange elements in a tensor from spatial into batch dimension.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor must have rank 4.
+ * The first and the second dimension are batch, channel, respectively + * The remaining dimensions (H, W) are treated as "spatial dimensions" + block_shape: const tensor<[2], i32> (Required) + * The length of the block_shape must be `2` + * It defines the shapes of the block in which the spatial dimensions are divided + paddings: const tensor<[2, 2], i32> (Required) + * It must have shape `(2, 2)` + * It defines the padding for each spatial dimensions + + Returns + ------- + tensor<[new_n, C, new_H, new_W], T> + * new_n = n * block_shape[0] * block_shape[1] + * new_H = (H + paddings[0][0] + padding[0][1])/block_shape[0] + * new_W = (W + paddings[1][0] + padding[1][1])/block_shape[1] + * The output has the same rank as the input + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + block_shape=TensorInputType(const=True, type_domain=types.int32), + paddings=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + x_shape = self.x.shape + block_shape = self.block_shape.val + paddings = self.paddings.val + + if self.x.rank != 4: + msg = "Input to space_to_batch op must be rank 4. Instead got an input with rank {}".format(self.x.rank) + raise ValueError(msg) + + if paddings.shape != (block_shape.shape[0], 2): + msg = "block_shape and paddings must have shape [2], [2, 2] accordingly in the space_to_batch op. "\ + "Got {}, {}.".format(block_shape.shape, paddings.shape) + raise ValueError(msg) + + m = block_shape.shape[0] + if m != 2: + msg = "space_to_batch op only supports spatial dimensions = 2. Got {}".format(m) + raise ValueError(msg) + + b = x_shape[0] + c = x_shape[1] + spatial_shape = x_shape[2:2+m] + + if self.x.rank != m + 2: + raise ValueError("The input rank of space_to_batch op must exactly be " \ + "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank)) + + padded_spatial_shape = [x + paddings[i][0] + paddings[i][1] for i, x in enumerate(spatial_shape)] + new_b = b * np.prod(block_shape) + new_spatial_shape = [padded_spatial_shape[i]/block_shape[i] for i in range(m)] + ret_shape = [new_b, c] + new_spatial_shape + x_type = self.x.dtype + + return types.tensor(x_type, ret_shape) + +@register_op +class batch_to_space(Operation): + """ + Rearrange elements in a tensor from batch into spatial dimension. + + Parameters + ---------- + x: tensor<[n, C, H, W], T> (Required) + * Input tensor must have rank 4. 
+        * The first and second dimensions are batch and channel, respectively.
+        * The remaining dimensions (H, W) are treated as "spatial dimensions".
+    block_shape: const tensor<[2], i32> (Required)
+        * The length of the block_shape must be `2`.
+        * It defines the factors by which the spatial dimensions are multiplied.
+    crops: const tensor<[2, 2], i32> (Required)
+        * It must have shape `(2, 2)`.
+        * It defines the amount to crop from each spatial dimension.
+
+    Returns
+    -------
+    tensor<[new_n, C, new_H, new_W], T>
+        * new_n = n / (block_shape[0] * block_shape[1])
+        * new_H = (H * block_shape[0]) - crops[0][0] - crops[0][1]
+        * new_W = (W * block_shape[1]) - crops[1][0] - crops[1][1]
+        * The output has the same rank as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        block_shape=TensorInputType(const=True, type_domain=types.int32),
+        crops=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        block_shape = self.block_shape.val
+        crops = self.crops.val
+
+        if self.x.rank != 4:
+            msg = "Input to batch_to_space op must be rank 4. Instead got an input with rank {}".format(self.x.rank)
+            raise ValueError(msg)
+
+        if crops.shape != (block_shape.shape[0], 2):
+            msg = "block_shape and crops must have shape [2], [2, 2] accordingly in the batch_to_space op. "\
+                  "Got {}, {}.".format(block_shape.shape, crops.shape)
+            raise ValueError(msg)
+
+        m = block_shape.shape[0]
+        if m != 2:
+            msg = "batch_to_space op only supports spatial dimensions = 2. Got {}".format(m)
+            raise ValueError(msg)
+
+        b = x_shape[0]
+        c = x_shape[1]
+        spatial_shape = x_shape[2:2+m]
+
+        if self.x.rank != m + 2:
+            raise ValueError("The input rank of batch_to_space op must exactly be " \
+                             "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank))
+
+        if not is_symbolic(b) and b % np.prod(block_shape) != 0:
+            msg = ("Batch size must be perfectly divided by the product of block_shape. Got batch size {}, and block_shape {}."
+                   ).format(b, block_shape)
+            raise ValueError(msg)
+
+        new_b = b / np.prod(block_shape)
+        new_spatial_shape = [spatial_shape[i] * block_shape[i] for i in range(m)]
+        cropped_spatial_shape = [x - crops[i][0] - crops[i][1] for i, x in enumerate(new_spatial_shape)]
+        ret_shape = [new_b, c] + cropped_spatial_shape
+        x_type = self.x.dtype
+
+        return types.tensor(x_type, ret_shape)
+
+@register_op
+class squeeze(Operation):
+    """
+    Remove dimensions of size ``1`` from a 1-D or higher tensor.
+
+    Parameters
+    ----------
+    x: tensor<\*?,T> (Required)
+        * Must be at least 1-D.
+    axes: const<K, i32> (Optional)
+        * Axes to squeeze out.
+        * Defaults to removing all dimensions of size ``1``.
+
+    Returns
+    -------
+    tensor<\*(rank(x)-K),T>
+        * Tensor with the same type as the input ``x`` and rank ``rank(x)-K``.
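+
+    Examples
+    --------
+    Illustrative values only (mirroring ``np.squeeze``):
+
+    .. sourcecode:: python
+
+        x = [[[1], [2], [3]]]  # shape (1, 3, 1)
+
+        # axes = None (default): all size-1 dimensions are removed
+        output = [1, 2, 3]  # shape (3,)
+
+        # axes = [0]: only dimension 0 is removed
+        output = [[1], [2], [3]]  # shape (3, 1)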
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axes=None, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + squeezed_shape = list(x_shape) + if self.axes is None: + # Squeeze all single-dim, assuming symbolic dims != 1 + squeezed_shape = [s for s in squeezed_shape if s != 1] + else: + axes = self.axes.val + axes = [axis if axis >= 0 else axis + self.x.rank for axis in axes] + for i in sorted(axes)[::-1]: # descending order + if len(squeezed_shape) <= i: + raise ValueError( + "Cannot squeeze dim {} for shape {}".format(i, squeezed_shape) + ) + squeezed_shape.pop(i) + + return types.tensor(x_type, tuple(squeezed_shape)) if len(squeezed_shape) != 0 else x_type + + @precondition(allow=VALUE) + def value_inference(self): + if self.x.val is None: + return None + if self.axes is None: + val = np.squeeze(self.x.val) + else: + val = np.squeeze(self.x.val, axis=tuple(self.axes.val)) + return val if val.shape != () else self.x.val[0] + +@register_op +class transpose(Operation): + """ + Permute tensor ``x`` dimensions according to ``perm``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Must be at least 1-D. ``x`` may have a symbolic shape. + perm: const<[rank(x)], i32> (Required) + * Permutation order. -rank(x) <= perm[I] < rank(x) for all perm entries. + + Returns + ------- + tensor<\*?,T> + * Tensor with same rank and type as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + `torch.Tensor.permute `_ + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + perm=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + x_type = self.x.dtype + perm = self.perm.val + x_shape = np.array(self.x.shape) + if len(perm) != self.x.rank: + msg = "perm should have the same length as rank(x): {} != {}" + raise ValueError(msg.format(len(perm), self.x.rank)) + if self.x.rank == 0: + return self.x.sym_type # scalar cannot be transposed + if any_variadic(self.x.shape): + ret_shape = get_new_variadic_symbol() + else: + ret_shape = x_shape[perm] + return types.tensor(x_type, tuple(ret_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + return np.transpose(self.x.val, axes=self.perm.val) + + +@register_op +class pixel_shuffle(Operation): + """ + Rearrange elements in a tensor from depth (channel) into spatial dimensions. + Equivalent to PyTorch's ``PixelShuffle``. + + Parameters + ---------- + x: tensor<[n, C x f^2, H, W], T> (Required) + * Input tensor of rank ``4``. + upscale_factor: const + * Factor to increase spatial resolution by. + + Returns + ------- + tensor<[n, C, H x f, W x f], T> + * Where ``f`` is the upscale factor. 
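+
+    Examples
+    --------
+    A shape-only illustration (values follow PyTorch's ``PixelShuffle``):
+
+    .. sourcecode:: python
+
+        # x.shape = (1, 4, 2, 3), upscale_factor f = 2
+        # output.shape = (1, 4 // (2 * 2), 2 * 2, 3 * 2) = (1, 1, 4, 6)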
+ + Attributes + ---------- + T: fp16, fp32 + + References + ---------- + `torch.nn.PixelShuffle `_ + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + upscale_factor=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + x_type = self.x.dtype + n, c, h, w = self.x.shape + f = self.upscale_factor.val + ret_shape = (n, c // (f * f), h * f, w * f) + return types.tensor(x_type, ret_shape) + + +@register_op +class sliding_windows(Operation): + """ + Return a tensor containing all windows of ``size``, separated by stride along the + given ``axis``. + + Parameters + ---------- + x: tensor<[\*d0, d_axis, *dn], T> + * Input tensor. + + axis: const + * Axis to perform the operation. + + size: const + * Number of elements in the sliding window. + + stride: const Optional + * Default to ``1``. + * The stride of the input elements in the sliding window. + + Returns + ------- + tensor<[\*d0, d_axis - size // stride + 1, size, \*dn], T> + * The output will be a tensor of rank ``N+1`` where ``N`` is the input tensor + rank. + + Attributes + ---------- + T: fp16, fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, type_domain=types.int32), + size=TensorInputType(const=True, type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs(stride=1) + + def type_inference(self): + x_shape = self.x.shape + axis = self.axis.val + size = self.size.val + stride = self.stride.val + ret_shape = list(x_shape) + ret_shape[axis] = (x_shape[axis] - size) // stride + 1 + pos_axis = axis if axis >= 0 else axis + self.x.rank + ret_shape.insert(pos_axis + 1, size) + return types.tensor(self.x.dtype, tuple(ret_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py new file mode 100644 index 00000000..e83fcb3b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target + +_IOS16_TARGET = target.iOS16 + +from .constexpr_ops import (constexpr_affine_dequantize, constexpr_cast, + constexpr_lut_to_dense, constexpr_sparse_to_dense) +from .image_resizing import crop_resize, resample, upsample_bilinear +from .scatter_gather import gather, gather_nd +from .tensor_operation import fill_like, topk +from .tensor_transformation import pixel_unshuffle, reshape_like diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py new file mode 100644 index 00000000..5306bbf3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py @@ -0,0 +1,383 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
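+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause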
+import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_affine_dequantize(Operation): + """ + A compile-time operation that returns a constant output value upon dequantizing its constant inputs. + + This operation is used to represent constant 8-bit quantized data with affine/linear quantization. + The quantized data is stored in the parameter ``quantized_data``. + The other parameters -- ``scale``, ``zero_point``, and ``axis`` -- describe how + unquantized values can be extracted from it, using the equation for affine/linear quantization: + :: + unquantized_data = scale * (quantized_data - zero_point) + + Although all of the parameters of this op are constants, this op is not constant folded + to a single const op at the time of model serialization. The unquantized output will + be decompressed later, based on the implementation detail (either at model load time or runtime). + + Parameters + ---------- + quantized_data: const tensor (Required) + + zero_point: const tensor (Required) + * ``zero_point`` can be either a scalar or a vector. + * ``zero_point`` follows similar broadcasting rules and size constraints as ``scale``. + + scale: const tensor (Required) + * ``scale`` can be either a scalar or a vector. If ``scale`` is a vector, + for implementation it is broadcast to the following shape: + * The rank of ``scale`` becomes the same as the rank of ``quantized_data``. + * The constraint: ``size(scale-vector) == quantized_data.shape[axis]``. + * For ``i == axis``, ``scale.shape[i] == quantized_data.shape[i]``. + * For ``i != axis``, ``scale.shape == 1``. + For example, assume ``quantized_data.shape = (2, 3, 4, 5)`` and ``axis = 1``. + If ``scale`` is a vector, then ``scale.size`` needs to be equal to + ``quantized_data.shape[axis] i.e = 3``, which would be broadcast to ``(1, 3, 1, 1)``. 
+ + axis: const tensor (Required) + + Returns + ------- + const tensor + + Attributes + ---------- + SrcT: uint8, int8 + DstT: fp16, fp32 + """ + + input_spec = InputSpec( + quantized_data=TensorInputType(const=True, type_domain="SrcT"), + zero_point=TensorInputType(const=True, type_domain="ZeroPointT"), + scale=TensorInputType(const=True, type_domain="DstT"), + axis=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "DstT": (types.fp16, types.fp32), + "SrcT": (types.uint8, types.int8), + "ZeroPointT": (types.uint8, types.int8), + } + + def type_inference(self): + def assert_is_scalar_or_vector(param, name): + if param.rank not in (0, 1): + raise ValueError( + "Parameter {} needs to be either a scalar or vector".format(name) + ) + + def assert_vector_size_same_as_axial_dimension(param, axis_dim_size, name): + if param.rank == 1 and param.shape[0] != axis_dim_size: + raise ValueError( + "Parameter {}, if vector, needs to have same size as the dimension size along the parameter quantized_data".format( + name + ) + ) + + if self.zero_point.dtype != self.quantized_data.dtype: + raise ValueError( + "Parameters quantized_data and zero_point needs to be of the same dtype" + ) + + rank = self.quantized_data.rank + if self.axis.val < -rank or self.axis.val >= rank: + raise ValueError( + "Parameter axis needs to be in the range -quantized_data.rank <= axis < quantized_data.rank" + ) + + assert_is_scalar_or_vector(self.scale, "scale") + assert_is_scalar_or_vector(self.zero_point, "zero_point") + + assert_vector_size_same_as_axial_dimension( + self.scale, self.quantized_data.shape[self.axis.val], "scale" + ) + assert_vector_size_same_as_axial_dimension( + self.zero_point, self.quantized_data.shape[self.axis.val], "zero_point" + ) + + dtype = self.scale.dtype + shape = self.quantized_data.shape + return types.tensor(dtype, shape) + + def value_inference(self): + return self.decompress( + self.quantized_data.val, + self.zero_point.val, + self.scale.val, + self.axis.val + ) + + @staticmethod + def decompress(quantized_data, zero_point, scale, axis): + + axis = axis if axis >= 0 else axis + len(quantized_data.shape) + + def rank_promoted_to_same_as_quantized_data(param): + if len(param.shape) == 0: + return np.reshape(param, np.ones(len(quantized_data.shape), np.int32)) + else: + axes = [i for i in range(len(quantized_data.shape)) if i != axis] + return np.expand_dims(param, axis=tuple(axes)) + + sc = rank_promoted_to_same_as_quantized_data(scale) + zp = rank_promoted_to_same_as_quantized_data(zero_point) + val = sc * (quantized_data.astype(np.float32) - zp.astype(np.float32)) + return val.astype(scale.dtype) + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_cast(Operation): + """ + A compile-time operation that returns a constant output value upon casting its constant input. 
+ :: + Expression: output = constexpr_cast(source_val, output_dtype="fp32") + + Parameters + ---------- + source_val: const tensor (Required) + + output_dtype: const tensor (Required) + + Returns + ------- + const tensor + + Attributes + ---------- + SrcT: fp16 + DstT: fp32 + """ + + input_spec = InputSpec( + source_val=TensorInputType(const=True, type_domain=types.fp16), + output_dtype=TensorInputType(const=True, type_domain=types.str), + ) + + def type_inference(self): + + dtype = types.string_to_builtin(self.output_dtype.val) + if dtype != types.fp32: + raise NotImplementedError("Only output_dtype = fp32 is supported") + + shape = self.source_val.shape + return types.tensor(dtype, shape) + + def value_inference(self): + return np.float32(self.source_val.val) + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_lut_to_dense(Operation): + """ + A compile-time operation that returns a constant output value upon decompressing + a look-up table (LUT) to a dense tensor. + + This operation is used to store constant weights in a LUT format (also known as + `palettized` weights). A LUT is a mapping from index to values. + Weights are quantized and stored as indices (or keys) into the LUT. + Before computation, these keys are mapped to corresponding values in the LUT. + + Parameters + ---------- + indices: const tensor (Required) + + lut: const tensor (Required) + + shape: const tensor (Required) + + Notes + ----- + + * Any data is packed and read in a row-major order. + * ``NUM_PALETTES`` can be one of ``{2, 4, 16, 64 or 256}``. + * ``n_bits = log2(NUM_PALETTES)`` can thus be one of ``{1, 2, 4, 6, 8}``. + * Indices are packed in bytes of size ``M``, where ``M = ceil(n_bits * product(shape) / 8)``. + + The bit fields are packed one byte at a time, starting with the least significant bit (LSB) and + moving upward to the most significant bit (MSB). It follows, naturally, that if an index is split + across two bytes, the LSBs of that index is filled over the MSBs of current byte, and the remaining + bits of the same index are filled in the LSBs of the next byte. 
+ + For example: + :: + if n_bits = 2, shape = (5,) => M = 2 bytes + + MSB LSB + | | + indices = | 01 10 11 00 | xx xx xx 11 | <== packed elements + | i3 | i2 | i1 | i0 | -- | -- | -- | i4 | <== tagged element ids + | byte 0 | byte 1 | <== tagged bytes + + Returns + ------- + const tensor + + Attributes + ---------- + T: uint8, int8, fp16, fp32 + """ + + input_spec = InputSpec( + indices=TensorInputType(const=True, type_domain=types.uint8), + lut=TensorInputType(const=True, type_domain="T"), + shape=TensorInputType(const=True, type_domain=types.uint32), + ) + + type_domains = { + "T": (types.int8, types.uint8, types.fp16, types.fp32) + } + + def type_inference(self): + def assert_is_vector(param, name): + if param.rank != 1: + raise ValueError("Parameter {} needs to have rank == 1".format(name)) + + assert_is_vector(self.indices, "indices") + assert_is_vector(self.lut, "lut") + + if self.lut.shape[0] not in (2, 4, 16, 64, 256): + raise ValueError( + "Parameter lut should be a vector of size from one of {2, 4, 16, 64, 256}" + ) + + nbits = int(np.log2(self.lut.shape[0])) + output_size = np.prod(self.shape.val) + if self.indices.shape[0] != np.ceil(nbits * (output_size / 8.0)): + raise AssertionError( + "Constraint violated, M = ceil(n_bits * product(shape) / 8) where M = indices.size" + ) + + dtype = self.lut.dtype + shape = self.shape.val + return types.tensor(dtype, shape) + + def value_inference(self): + return self.decompress( + self.lut.val, + self.indices.val, + self.shape.val, + ) + + @staticmethod + def decompress(lut, indices, shape): + bitarray = np.unpackbits(indices, bitorder="little") + nbits = np.log2(lut.size).astype(np.int32) + + pad_required = bitarray.size % nbits != 0 + if pad_required: + bitarray = np.concatenate([bitarray, np.zeros(nbits - bitarray.size % nbits)]).astype(bitarray.dtype) + + assert bitarray.size % nbits == 0 + + size = np.prod(shape) + bitarray = bitarray.reshape(-1, nbits)[:size, :] + + indices = np.packbits(bitarray, bitorder="little", axis=-1).reshape(-1) + flatten_val = lut[indices] + return flatten_val.reshape(shape) + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_sparse_to_dense(Operation): + """ + A compile-time operation that returns a constant output value upon de-sparsification of its constant inputs. + + This operation represents unstructured sparsity and uses bit mask binary representation. + If a bit is set, then the corresponding element in the output tensor is non-zero and the + value is read from the ``nonzero_data`` attribute. Likewise, if the bit is not set, + then the corresponding element in the output tensor is zero. + + Parameters + ---------- + nonzero_data: const tensor (Required) + + mask: const tensor (Required) + + shape: const tensor (Required) + + Notes + ----- + * Any data is packed and read in a row-major order. + * ``mask`` contains ``M`` bytes, where ``M = ceil( product(shape) / 8)``. That is, each bit + field corresponds to one element in the output tensor. + * ``D ==`` the total number of set bits in ``mask``. + + The bit fields are packed one byte at a time, starting with the least significant bit and + moving up to the most significant bit. 
+ + For example: + :: + shape = (5,) => M = 1 bytes + + MSB LSB + | | + mask = |x x x 0 1 1 0 0 | <== packed elements + |--|--|--|i4|i3|i2|i1|i0| <== tagged element ids + | byte 0 | <== tagged bytes + + Returns + ------- + const tensor + + Attributes + ---------- + T: uint8, int8, fp16, fp32 + """ + + input_spec = InputSpec( + nonzero_data=TensorInputType(const=True, type_domain="T"), + mask=TensorInputType(const=True, type_domain=types.uint8), + shape=TensorInputType(const=True, type_domain=types.uint32), + ) + + type_domains = { + "T": (types.int8, types.uint8, types.fp16, types.fp32) + } + + def type_inference(self): + def assert_is_vector(param, name): + if param.rank != 1: + raise ValueError("Parameter {} needs to have rank == 1".format(name)) + + assert_is_vector(self.nonzero_data, "nonzero_data") + assert_is_vector(self.mask, "mask") + + if sum(bin(x).count("1") for x in self.mask.val) != self.nonzero_data.shape[0]: + raise AssertionError( + "Number of set bits in mask needs to be equal to number of elements in parameter nonzero_data" + ) + + output_size = np.prod(self.shape.val) + if self.mask.shape[0] != np.ceil(output_size / 8.0): + raise AssertionError( + "Constraint Violated: M = ceil( product(shape) / 8) where M = mask.size" + ) + + bitarray = np.unpackbits(self.mask.val, bitorder="little") + if any(bitarray[i] != 0 for i in range(output_size, len(bitarray))): + raise AssertionError("Padded bits in mask should be unset or equals to zero") + + dtype = self.nonzero_data.dtype + shape = self.shape.val + return types.tensor(dtype, shape) + + def value_inference(self): + return self.decompress(self.nonzero_data.val, self.mask.val, self.shape.val) + + @staticmethod + def decompress(nonzero_data, mask, shape): + flattend_val = np.zeros(shape, dtype=nonzero_data.dtype).flatten() + flattend_val[ + np.where(np.unpackbits(mask, bitorder="little") != 0) + ] = nonzero_data + return flattend_val.reshape(shape) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py new file mode 100644 index 00000000..da1f5dfb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py @@ -0,0 +1,86 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \ + crop_resize as _crop_resize_iOS15 +from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \ + resample as _resample_iOS15 +from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \ + upsample_bilinear as _upsample_bilinear_iOS15 +from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET + + +@register_op(opset_version=_IOS16_TARGET) +class resample(_resample_iOS15): + """ + The iOS 16 version of ``resample`` supports float 16 coordinates. + + For the complete documentation, see the + `iOS 15 version <#module-coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing>`_. 
+ """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + coordinates=TensorInputType(type_domain="U"), + sampling_mode=TensorInputType(const=True, type_domain=types.str), + padding_mode=TensorInputType(const=True, type_domain=types.str), + padding_value=TensorInputType(const=True, type_domain="T"), + coordinates_mode=TensorInputType(const=True, type_domain=types.str), + align_corners=TensorInputType(const=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + "U": (types.int32, types.fp16, types.fp32), + } + + def type_inference(self): + return super().type_inference() + +@register_op(opset_version=_IOS16_TARGET) +class upsample_bilinear(_upsample_bilinear_iOS15): + """ + iOS16 version of upsample_bilinear supports half_pixel_centers + + Additional Parameters + ---------- + half_pixel_centers: const (Optional) + * Default to !align_corners if not provided + """ + + input_spec = _upsample_bilinear_iOS15.input_spec + InputSpec( + half_pixel_centers=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + def default_inputs(self): + return super().default_inputs() + DefaultInputs(half_pixel_centers=not self.align_corners.val) + +@register_op(opset_version=_IOS16_TARGET) +class crop_resize(_crop_resize_iOS15): + """ + iOS16 version of crop_resize, which supports ``pad_value`` + + Additional Parameters + ---------- + pad_value : const (Optional, default=1.0) + * If the box indexes go beyond the input boundary, the input image is padded with pad_value. + * Defaults to 0. + * It is the same as extrapolation_value in tf.image.crop_and_resize. + + Attributes + ---------- + T: fp16, fp32 + """ + input_spec = _crop_resize_iOS15.input_spec + InputSpec( + pad_value=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + def default_inputs(self): + return super().default_inputs() + DefaultInputs(pad_value=1.0) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py new file mode 100644 index 00000000..82653b0f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py @@ -0,0 +1,170 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import compute_gather +from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET + + +@register_op(opset_version=_IOS16_TARGET) +class gather(Operation): + """ + An iOS16 version of gather + + The new gather op supports `batch_dims` + similar to `tf.gather `_. + + Parameters + ---------- + x: tensor<\*D, U> (Required) + indices: tensor<\*N, I> (Required) + * Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``. + axis: const i32 (Optional. Default=``0``) + * Negative axis is supported. + batch_dims: const i32 (Optional. 
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="U"),
+        indices=TensorInputType(type_domain="I"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        batch_dims=TensorInputType(const=True, optional=True, type_domain=types.int32)
+    )
+
+    type_domains = {
+        "U": (types.fp16, types.fp32, types.int32),
+        "I": (types.int32, types.uint16, types.int16),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            batch_dims=0,
+        )
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        x = self.x.sym_val
+        indices = self.indices.val
+        if indices is None:
+            # only x is allowed to be symbolic; indices cannot be
+            return None
+        return compute_gather(
+            params=self.x.sym_val,
+            indices=self.indices.val,
+            axis=self.axis.val,
+            batch_dims=self.batch_dims.val
+        )
+
+    def type_inference(self):
+        # validate parameters
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+        if self.batch_dims.val >= self.x.rank:
+            raise ValueError(
+                "batch_dims {} must be less than x.rank {} for node {}".format(
+                    self.batch_dims.val, self.x.rank, self.name
+                )
+            )
+        if self.batch_dims.val > self.indices.rank:
+            raise ValueError(
+                "batch_dims {} must be less than or equal to indices.rank {} for node {}".format(
+                    self.batch_dims.val, self.indices.rank, self.name
+                )
+            )
+
+        output_rank = self.x.rank - 1 + self.indices.rank - self.batch_dims.val
+        if output_rank == 0:
+            # output scalar
+            return self.x.dtype
+
+        # compute output shape
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+        batch_dims = self.batch_dims.val
+        out_shape = self.x.shape[:axis] + self.indices.shape[batch_dims:] + self.x.shape[axis + 1 :]
+
+        return types.tensor(self.x.dtype, out_shape)
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class gather_nd(Operation):
+    """
+    An iOS 16 version of ``gather_nd``. The new op supports ``batch_dims``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+    batch_dims: const i32 (Optional. Default=``0``)
+        * The number of batch dimensions.
+
+    Returns
+    -------
+    tensor<\*V, T>
+        * ``V = K[:-1] + D[batch_dims + K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
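+
+    The following worked example is illustrative (it follows the ``tf.gather_nd``
+    convention referenced above); the values shown are assumptions:
+
+    .. sourcecode:: python
+
+        # x.shape = (2, 2, 2), indices.shape = (2, 1, 1), batch_dims = 1
+        x = [[[1, 2], [3, 4]],
+             [[5, 6], [7, 8]]]
+        indices = [[[1]], [[0]]]
+        # for each batch b, gather_nd is applied to x[b] with indices[b]:
+        # result = [[[3, 4]], [[5, 6]]], with shape
+        # V = K[:-1] + D[batch_dims + K[-1]:] = (2, 1) + (2,) = (2, 1, 2)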
+ """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="U"), + indices=TensorInputType(type_domain="I"), + batch_dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "U": (types.fp16, types.fp32, types.int32), + "I": (types.int32, types.uint16, types.int16), + } + + def default_inputs(self): + return DefaultInputs( + batch_dims=0, + ) + + def type_inference(self): + batch_dims = self.batch_dims.val + indices_depth = self.indices.shape[-1] + if indices_depth > self.x.rank - batch_dims: + msg = "For node {}, indices.shape[-1] ({}) + batch_dims ({}) must be smaller or equal to the input rank {}".format( + self.name, indices_depth, batch_dims, self.x.rank + ) + raise ValueError(msg) + out_type = self.x.dtype + out_shape = self.indices.shape[:-1] + self.x.shape[batch_dims+indices_depth:] + return types.tensor(out_type, out_shape) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py new file mode 100644 index 00000000..e29a3ff6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py @@ -0,0 +1,115 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clausefrom + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (VALUE, Operation, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs.iOS15.tensor_operation import \ + topk as _topk_iOS15 +from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET + + +@register_op(opset_version=_IOS16_TARGET) +class fill_like(Operation): + """ + Returns a tensor with the same size as the input tensor filled with a constant value. + + Parameters + ---------- + ref_tensor: tensor<\*?, T> (Required) + * Input tensor. + value: const (Optional) + * Default is ``0.0``. + * Constant value to fill in. + + Returns + ------- + tensor<\*?, T> + * Tensor with shape determined by the input tensor. + + Attributes + ---------- + T: fp16, fp32, int32, bool + U: fp16, fp32, int32, bool + """ + + input_spec = InputSpec( + ref_tensor=TensorInputType(type_domain="T"), + value=TensorInputType(const=True, optional=True, type_domain="U"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + "U": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + value=0. + ) + + def type_inference(self): + return types.tensor(self.value.dtype, self.ref_tensor.shape) + + @precondition(allow=VALUE) + def value_inference(self): + return np.full(shape=self.ref_tensor.shape, fill_value=self.value.val) + +@register_op(opset_version=_IOS16_TARGET) +class topk(_topk_iOS15): + """ + A version of ``topk`` for iOS 16+. This section documents the differences. The following are additional parameters for the iOS 16+ version. For the + rest of the documentation, see `the iOS 15 version of topk <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_operation.topk>`_. 
+    """
+
+    input_spec = _topk_iOS15.input_spec + InputSpec(
+        sort=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        return_indices=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    def default_inputs(self):
+        return super().default_inputs() + DefaultInputs(sort=True, return_indices=True)
+
+    def type_inference(self):
+        value_type, indices_type = super().type_inference()
+        if not self.return_indices.val:
+            return value_type
+        return value_type, indices_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        values, indices = super().value_inference()
+        if not self.return_indices.val:
+            return values
+        return values, indices
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py
new file mode 100644
index 00000000..473b7c68
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.input_type import (InputSpec,
+                                                       TensorInputType,
+                                                       TupleInputType)
+from coremltools.converters.mil.mil.operation import Operation
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class reshape_like(Operation):
+    """
+    Reshape a tensor to an output shape specified by some or all dimensions of a tuple of reference tensors ``ref_tensors``.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * The input tensor to be reshaped.
+
+    ref_tensors: Tuple[tensor<\*?, R>] (Required)
+        * A tuple of tensors that define the output shape.
+
+    begins: Tuple[const] (Required)
+        * A tuple of integers specifying the begin index into the shape vector of the corresponding ``ref_tensor``.
+
+    ends: Tuple[const] (Required)
+        * A tuple of integers specifying the end index into the shape vector of the corresponding ``ref_tensor``.
+
+    end_masks: Tuple[const] (Required)
+        * If ``True``, select all axes from the begin index until the end of the corresponding ``ref_tensor``, as in
+          ``ref_tensors[i].shape[begins[i]:]``.
+
+    Notes
+    -----
+    The output shape is computed as follows:
+
+    .. sourcecode:: python
+
+        output_shape = []
+        num_of_refs = len(begins)
+        for i in range(num_of_refs):
+            if end_masks[i]:
+                output_shape.append(ref_tensors[i].shape[begins[i]:])
+            else:
+                output_shape.append(ref_tensors[i].shape[begins[i]:ends[i]])
+        output_shape = np.concatenate(output_shape, axis=0)
+
+    The following is an example:
+
+    .. sourcecode:: python
+
+        ref_tensors=[tensor[2, 3, 4], tensor[1, 5, 6]]
+        begins=[0, 1]
+        ends=[2, 0]
+        end_masks=[False, True]
+
+    The output shape would be ``(2, 3, 5, 6)``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type as input tensor ``x``.
+        * Output shape is computed by ``ref_tensors``, ``begins``, ``ends``, and ``end_masks``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    R: fp16, fp32, i32, bool
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        ref_tensors=TupleInputType(),
+        begins=TupleInputType(),
+        ends=TupleInputType(),
+        end_masks=TupleInputType(),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def _check_is_const_tuple_with_scalar(self, param, expected_type, param_name):
+        """
+        This utility function checks that ``param`` is a tuple of scalars with the expected data type.
+        """
+        for x in param:
+            if x.dtype != expected_type or x.shape != ():
+                msg = "In op reshape_like {}, {} must be a tuple of scalars of type {}. Got a {} tensor with shape {}.".format(
+                    self.name,
+                    param_name,
+                    expected_type.__type_info__(),
+                    x.dtype.__type_info__(),
+                    x.shape,
+                )
+                raise ValueError(msg)
+
+    def type_inference(self):
+        # Validate the inputs
+        ref_number = len(self.ref_tensors)
+        if len(self.begins) != ref_number or len(self.ends) != ref_number or len(self.end_masks) != ref_number:
+            msg = (
+                "Op reshape_like {}'s ref_tensors, begins, ends and end_masks must have exactly the same length. "
+                "Got {}, {}, {} and {}."
+            ).format(self.name, ref_number, len(self.begins), len(self.ends), len(self.end_masks))
+            raise ValueError(msg)
+
+        self._check_is_const_tuple_with_scalar(self.begins, types.int32, "begins")
+        self._check_is_const_tuple_with_scalar(self.ends, types.int32, "ends")
+        self._check_is_const_tuple_with_scalar(self.end_masks, types.bool, "end_masks")
+
+        # Compute the output shape
+        out_shape = ()
+        for ref_tensor, begin, end, end_mask in zip(self.ref_tensors, self.begins, self.ends, self.end_masks):
+            shape = ref_tensor.shape
+            begin, end, end_mask = begin.val, end.val, end_mask.val
+            ref_shape = shape[begin:end] if not end_mask else shape[begin:]
+            out_shape += tuple(ref_shape)
+
+        # The output shape must be known at compile time
+        if any_symbolic(out_shape):
+            msg = "Output shape of a reshape_like op {} must not be symbolic. Got {}".format(self.name, out_shape)
+            raise ValueError(msg)
+
+        # The output shape must be consistent with the input shape
+        if not any_symbolic(self.x.shape):
+            if np.prod(self.x.shape) != np.prod(out_shape):
+                msg = "At reshape_like op {}, input shape {} is not consistent with the output shape {}.".format(
+                    self.name,
+                    self.x.shape,
+                    out_shape
+                )
+                raise ValueError(msg)
+
+        return types.tensor(self.x.dtype, out_shape)
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class pixel_unshuffle(Operation):
+    """
+    Rearrange elements in a tensor from spatial dimensions into depth (channel).
+    It is basically the inverse operation of `pixel_shuffle <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_transformation.pixel_shuffle>`_.
+    Equivalent to PyTorch's ``PixelUnshuffle``.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor of rank ``4``.
+
+    downscale_factor: const
+        * Factor by which to decrease the spatial resolution.
+
+    Returns
+    -------
+    tensor<[n, C * f^2, H / f, W / f], T>
+        * Where ``f`` is the downscale factor.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    References
+    ----------
+    `torch.nn.PixelUnshuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html>`_
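+
+    A shape-level sketch (illustrative; the variable names are hypothetical):
+
+    .. sourcecode:: python
+
+        # with downscale_factor f = 2, an input of shape (1, 1, 4, 4)
+        # produces an output of shape (1, 4, 2, 2)
+        y = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2))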
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        downscale_factor=TensorInputType(const=True, type_domain=types.uint32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        n, c, h, w = self.x.shape
+        f = self.downscale_factor.val
+        ret_shape = (n, c * f * f, h / f, w / f)
+        return types.tensor(x_type, ret_shape)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py
new file mode 100644
index 00000000..fc699b63
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def _get_version_of_op(op_variants, opset_version):
+    """
+    A utility function that retrieves an op class, given a dictionary of op variants and a target version.
+    """
+    assert isinstance(op_variants, dict)
+    opset_versions = list(op_variants.keys())
+    opset_versions.sort()
+    if opset_version is None:
+        op_cls = op_variants[opset_versions[0]]
+    elif opset_version > opset_versions[-1]:
+        # TODO(rdar://103267345): Remove when no longer required.
+        # MIL opsets inherit ops from previous ones by default.
+        op_cls = op_variants[opset_versions[-1]]
+    else:
+        if opset_version not in op_variants:
+            op_type = list(op_variants.values())[0].__name__
+            msg = (
+                "No available version for {} in the {!s} opset. Please update the "
+                "minimum_deployment_target to at least {!s}"
+            ).format(op_type, opset_version, opset_versions[0])
+            raise ValueError(msg)
+        op_cls = op_variants[opset_version]
+    return op_cls
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py
new file mode 100644
index 00000000..d24f5f4e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py
@@ -0,0 +1,190 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from collections import defaultdict
+
+from coremltools import _logger as logger
+from coremltools.converters.mil._deployment_compatibility import \
+    AvailableTarget as target
+from coremltools.converters.mil.mil.block import curr_opset_version
+
+from ..builder import Builder
+from .helper import _get_version_of_op
+
+
+class SSAOpRegistry:
+
+    """
+    There are three kinds of operations that we can register:
+
+    (1) core_ops: dict[str, dict[Operation]]
+        - These are the core ops in PyMIL, which have a direct mapping to the backend in neural_network or mlprogram
+        - A registered op is considered a core op if no namespace is provided
+        - ``core_ops[op_type]`` is a dict that tracks the different opset versions of an op.
+          For instance:
+
+          - ``core_ops[op_1] = {
+                ct.target.iOS13: op_1_iOS13,
+                ct.target.iOS14: op_1_iOS13,
+                ct.target.iOS15: op_1_iOS13,
+                ct.target.iOS16: op_1_iOS13,
+            }``
+
+            Only one version of op type ``op_1`` is registered. It is defined in iOS13, which both the
+            neural_network and mlprogram backends support.
+
+          - ``core_ops[op_2] = {
+                ct.target.iOS13: op_2_iOS13,
+                ct.target.iOS14: op_2_iOS13,
+                ct.target.iOS15: op_2_iOS13,
+                ct.target.iOS16: op_2_iOS16,
+            }``
+
+            Two versions of op type ``op_2`` are registered: one for iOS13 and one for iOS16.
+            The builder picks up the correct version of the op according to ``curr_opset_version()``, which returns
+            the opset version of the current function:
+
+            -- If ``curr_opset_version()`` is ``None`` (the version of the function is not set), ``mb.op_2`` calls the oldest version of the op by default, which is ``op_2_iOS13``
+            -- Otherwise, the builder picks up ``core_ops[op_2][curr_opset_version()]``
+
+        - At the highest level, users can choose the desired version by specifying the ``minimum_deployment_target`` argument in ``coremltools.convert``
+        - The default ``opset_version`` for the core ops is iOS13, which the neural_network backend supports
+
+    (2) dialect_ops: dict[str, Operation]
+        - These are ops created for a specific frontend framework, for instance: ``tf_lstm_block, torch_upsample_nearest_neighbor``
+        - A graph pass must be customized by the developer to translate a dialect op into core ops
+
+    (3) custom_ops: dict[str, Operation]
+        - These are custom ops, which require an additional ``bindings`` field to be specified in the operator
+    """
+    SUPPORTED_OPSET_VERSIONS = (
+        target.iOS13,
+        target.iOS14,
+        target.iOS15,
+        target.iOS16
+    )
+    core_ops = defaultdict(dict)
+    dialect_ops = {}
+    custom_ops = {}
+
+    @staticmethod
+    def _get_core_op_cls(op_type=None):
+        """
+        A utility function that retrieves an op class using the current opset version (``curr_opset_version``).
+        """
+        if op_type not in SSAOpRegistry.core_ops:
+            raise ValueError("op {} not registered.".format(op_type))
+        candidate_ops = SSAOpRegistry.core_ops[op_type]
+        return _get_version_of_op(candidate_ops, curr_opset_version())
+
+    @staticmethod
+    def register_op(_cls=None, is_custom_op=False, namespace=None, opset_version=target.iOS13, allow_override=False):
+        """
+        Registration routine for MIL Program operators.
+
+        Parameters
+        ----------
+        is_custom_op: boolean
+            - If ``True``, maps the current operator to ``custom_op``.
+              ``custom_op`` requires an additional ``bindings`` field, which should be specified in the operator
+            - Default ``False``
+
+        namespace: str
+            - If provided, the op is registered as a dialect op
+            - Otherwise, it is considered a core op
+
+        opset_version: int
+            - Specifies the minimum spec version that supports this op
+            - Defaults to ``ct.target.iOS13``, which is for the neural_network backend
+
+        allow_override: boolean
+            - If ``True``, an operation is allowed to override a previously registered operation with the same name
+            - Default ``False``
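+
+        A minimal usage sketch (illustrative; ``my_iOS16_op`` is a hypothetical
+        operation class, while ``tf_make_list`` mirrors how the TensorFlow
+        dialect registers its ops):
+
+        .. sourcecode:: python
+
+            # a core op available starting from the iOS16 opset
+            @register_op(opset_version=target.iOS16)
+            class my_iOS16_op(Operation):
+                ...
+
+            # a dialect op; its type name must be prefixed with its namespace
+            @register_op(namespace="tf")
+            class tf_make_list(Operation):
+                ...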
+        """
+        def class_wrapper(op_cls):
+            op_type = op_cls.__name__
+            op_cls.__name__ = op_type
+
+            # debug message
+            op_msg = "op"
+            is_dialect_op = (namespace is not None)
+            if is_custom_op:
+                op_msg = "Custom op"
+            elif is_dialect_op:
+                op_msg = "Dialect op"
+            logger.debug("Registering {} {}".format(op_msg, op_type))
+
+            # pick the right dict for registration
+            if is_custom_op:
+                op_reg = SSAOpRegistry.custom_ops
+            elif is_dialect_op:
+                op_reg = SSAOpRegistry.dialect_ops
+                # Check that op_type is prefixed with namespace
+                if op_type[: len(namespace)] != namespace:
+                    msg = (
+                        "Dialect op type {} registered under {} namespace must be "
+                        + "prefixed with {}"
+                    )
+                    raise ValueError(msg.format(op_type, namespace, namespace))
+            else:
+                op_reg = SSAOpRegistry.core_ops
+
+            # verify that the op has not been registered before if allow_override = False
+            msg = "SSA {} {} already registered.".format(op_msg, op_type)
+            if is_custom_op or is_dialect_op:
+                if op_type in op_reg and not allow_override:
+                    raise ValueError(msg)
+            else:
+                if opset_version in op_reg[op_type] and not allow_override:
+                    if opset_version - 1 not in op_reg[op_type] or (op_reg[op_type][opset_version - 1] != op_reg[op_type][opset_version]):
+                        raise ValueError(msg)
+
+            # add the op to op_reg
+            if is_custom_op or is_dialect_op:
+                op_reg[op_type] = op_cls
+            else:
+                # The older version of the op must be registered first, or it will override the
+                # newer version. For example, assuming an op has two versions: IOS13 and IOS15. If
+                # the IOS15 is registered first, the op_reg[op_type] will have that op class for
+                # IOS15/16/..., and when IOS13 is registered, it will override all op classes for
+                # IOS13/14/15/16/... where IOS15 op class will get lost. So we error out early
+                # instead of continuing to register when this happens.
+                if opset_version in op_reg[op_type]:
+                    old_op_cls = op_reg[op_type][opset_version]
+                    for i in range(opset_version, SSAOpRegistry.SUPPORTED_OPSET_VERSIONS[-1] + 1):
+                        if op_reg[op_type][i] != old_op_cls:
+                            raise ValueError(
+                                f"Older version of op {op_type} must be registered "
+                                f"before a newer version."
+                            )
+            idx = SSAOpRegistry.SUPPORTED_OPSET_VERSIONS.index(opset_version)
+            for i in range(idx, len(SSAOpRegistry.SUPPORTED_OPSET_VERSIONS)):
+                op_reg[op_type][SSAOpRegistry.SUPPORTED_OPSET_VERSIONS[i]] = op_cls
+
+            # add the version information to the op cls
+            op_cls._op_variants = op_reg[op_type]
+
+            @classmethod
+            def add_op(cls, **kwargs):
+                """
+                A utility function that helps the builder pick up the correct op class when calling ``mb.op``.
+
+                There are two cases:
+
+                (1) custom op / dialect op:
+                    If the op is a custom op or a dialect op, we can directly pick up the op class through
+                    ``SSAOpRegistry.custom_ops[op_type]`` or ``SSAOpRegistry.dialect_ops[op_type]``
+
+                (2) core op:
+                    For a core op, the builder picks up the correct version according to ``curr_opset_version()``
+                """
+                op_cls_to_add = None
+                is_core_op = (op_reg == SSAOpRegistry.core_ops)
+                if is_core_op:
+                    op_cls_to_add = SSAOpRegistry._get_core_op_cls(op_type)
+                else:
+                    op_cls_to_add = op_reg[op_type]
+
+                return cls._add_op(op_cls_to_add, **kwargs)
+
+            setattr(Builder, op_type, add_op)
+            return op_cls
+
+        if _cls is None:
+            return class_wrapper
+
+        return class_wrapper(_cls)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py
new file mode 100644
index 00000000..2daf5ad8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py
@@ -0,0 +1,1080 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest +import scipy + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestClampedReLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.clamped_relu(x=x, alpha=2.0, beta=1.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[-2, 1, -6], [1, -10, 1]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.clamped_relu(x=x_val, alpha=2.0, beta=1.0) + + x = np.minimum(np.maximum(x_val, 0), 1.0) + y = np.minimum(np.minimum(x_val, 0) * 2.0, 1.0) + np.testing.assert_allclose(x + y, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha, beta", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0], + [4.0, 5.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.clamped_relu(x=x, alpha=alpha, beta=beta)] + + x = np.minimum(np.maximum(x_val, 0), 1.0) + y = np.minimum(np.minimum(x_val, 0) * 2.0, 1.0) + + expected_outputs = [x + y] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestELU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.elu(x=x, alpha=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[-1.2642411, 2.0, -1.9004259], [4.0, -1.9865241, 6.0]], dtype=np.float32 + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.elu(x=x_val, alpha=2.0) + + b = np.copy(x_val) + b[b < 0] = 2.0 * (np.exp(b[b < 0]) - 1) + + np.testing.assert_allclose(b, v.val, atol=1e-04, rtol=1e-05) + + +class TestGeLU: + @pytest.mark.parametrize( + 
"compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.gelu(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [ + [-1.58691406e-01, 1.95410156e00, -4.04968858e-03], + [3.99987316e00, -1.49011612e-06, 6.00000000e00], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + + mode = "TANH_APPROXIMATION" + v = mb.gelu(x=x_val, mode=mode) + a = np.sqrt(2 / np.pi) * (x_val + 0.044715 * np.power(x_val, 3)) + out = 0.5 * x_val * (1 + np.tanh(a)) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + mode = "SIGMOID_APPROXIMATION" + v = mb.gelu(x=x_val, mode=mode) + out = x_val * (1 / (1 + np.exp(-(1.702 * x_val)))) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + v = mb.gelu(x=x_val) + out = 0.5 * x_val * (1 + scipy.special.erf(x_val / np.sqrt(2))) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, mode", + itertools.product( + compute_units, + backends, + [2, 6], + ["EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, mode): + shape = np.array([dim, dim]) + x_val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.gelu(x=x, mode=mode)] + + if mode == "TANH_APPROXIMATION": + a = np.sqrt(2 / np.pi) * (x_val + 0.044715 * np.power(x_val, 3)) + out = 0.5 * x_val * (1 + np.tanh(a)) + elif mode == "SIGMOID_APPROXIMATION": + out = x_val * (1 / (1 + np.exp(-(1.702 * x_val)))) + else: + out = 0.5 * x_val * (1 + scipy.special.erf(x_val / np.sqrt(2))) + + expected_outputs = [out] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + +class TestLeakyReLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.leaky_relu(x=x, alpha=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[-2, 2, -6], [4, -10, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.leaky_relu(x=x_val, alpha=2.0) + + b = np.copy(x_val) + b[b < 0] *= 2.0 + np.testing.assert_allclose(b, v.val, atol=1e-04, rtol=1e-05) + + +class TestLinearActivation: + @pytest.mark.parametrize( 
+ "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.linear_activation(x=x, alpha=2.0, beta=3.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[1, 7, -3], [11, -7, 15]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.linear_activation(x=x_val, alpha=2.0, beta=3.0) + np.testing.assert_allclose(x_val * 2.0 + 3.0, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim", + itertools.product(compute_units, backends, [2, 4, 8]), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim): + shape = np.array([dim, dim]) + x_val = np.random.rand(*shape) + alpha = np.random.uniform() + beta = np.random.uniform() + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return [mb.linear_activation(x=x, alpha=alpha, beta=beta)] + + expected_outputs = [x_val * alpha + beta] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPReLU: + @pytest.mark.parametrize( + "rank, alpha_values, compute_unit, backend", + itertools.product( + [3, 4, 5], + [[1.0, 2.0, 3.0], [4.0, 4.0, 4.0]], + compute_units, + backends, + ) + ) + def test_builder_to_backend_smoke(self, rank, alpha_values, compute_unit, backend): + if (backend[0] == "mlprogram" and backend[1] == "fp16"): + pytest.xfail( + "rdar://92175249 ([MIL] TestActivation::test_prelu[backend=(mlprogram, fp16)] CI failure)" + ) + + alpha = np.array(alpha_values, dtype=np.float32) + + if rank == 3 or rank == 5: + are_alpha_values_same = np.where(np.abs(alpha - alpha[0]) > 1e-5)[0].size == 0 + if not are_alpha_values_same: + pytest.xfail("rdar://91442339") + + t = np.array([[[[-1, 3]], [[-1, 2]], [[4, -5]]]], dtype=np.float32) + expected_outputs = np.array( + [[[[-1 * alpha[0], 3]], [[-1 * alpha[1], 2]], [[4, -5 * alpha[2]]]]], dtype=np.float32 + ) + + shape = None + if rank == 3: + shape = (1, 3, 2) + elif rank == 4: + shape = (1, 3, 1, 2) + elif rank == 5: + shape = (1, 3, 1, 1, 2) + else: + raise ValueError("rank not supported") + + t = np.reshape(t, shape) + expected_outputs = np.reshape(expected_outputs, shape) + expected_output_types = tuple([s for s in shape]) + (types.fp32,) + + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.prelu(x=x, alpha=alpha) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + alpha = np.array([1, 2, 3], dtype=np.float32) + v = mb.prelu(x=x_val, alpha=alpha) + + alpha_br = alpha + + for i in range(1, len(x_val.shape)): + alpha_br = 
np.expand_dims(alpha_br, i) + + x_pos = np.maximum(x_val, 0) + b = np.minimum(x_val, 0) + + np.testing.assert_allclose(x_pos + b * alpha_br, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval1(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.prelu(x=x_val, alpha=np.array([1, 2], dtype=np.float32)) + + @ssa_fn + def test_builder_eval2(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"alpha .* rank 1"): + mb.prelu(x=x_val, alpha=np.array([[1, 2, 3]], dtype=np.float32)) + + @ssa_fn + def test_builder_eval3(self): + with pytest.raises(ValueError, match=r"x .* rank 3"): + mb.prelu( + x=np.array([1], dtype=np.float32), + alpha=np.array([[1, 2, 3]], dtype=np.float32), + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, chan", + itertools.product( + compute_units, + backends, + [1, 2, 4, 8], + [2, 3, 4] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, chan): + shape = np.array([1, chan, dim, dim]) + x_val = np.random.rand(*shape) + alpha_val = np.random.rand(chan).astype(np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.prelu(x=x, alpha=alpha_val)] + + alpha_br = np.copy(alpha_val) + for i in range(1, len(x_val.shape) - 1): + alpha_br = np.expand_dims(alpha_br, i) + x_pos = np.maximum(x_val, 0) + b = np.minimum(x_val, 0) + + expected_outputs = [x_pos + b * alpha_br] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReLU: + @pytest.mark.parametrize( + "compute_unit, backend, data_type", + itertools.product(compute_units, backends, [np.float32, np.float16]), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, data_type): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=data_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.relu(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[0, 2, 0], [4, 0, 6]], dtype=data_type) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.relu(x=x_val) + np.testing.assert_allclose(np.maximum(x_val, 0), v.val, atol=1e-04, rtol=1e-05) + + +class TestReLU6: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 7, -3], [4, -5, 8]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.relu6(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[0, 6, 0], [4, 0, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 7, -3], [4, -5, 
8]], dtype=np.float32) + v = mb.relu6(x=x_val) + np.testing.assert_allclose( + np.minimum(np.maximum(x_val, 0), 6), v.val, atol=1e-04, rtol=1e-05 + ) + + +class TestScaledTanh: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.scaled_tanh(x=x, alpha=2.0, beta=1.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[-1.5231884, 1.9280552, -1.9901096], [1.9986587, -1.9998184, 1.9999754]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.scaled_tanh(x=x_val, alpha=2.0, beta=1.0) + np.testing.assert_allclose(2.0 * np.tanh(x_val * 1.0), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha, beta", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0], + [4.0, 5.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.scaled_tanh(x=x, alpha=alpha, beta=beta)] + + expected_outputs = [alpha * np.tanh(x_val * beta)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSigmoid: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.sigmoid(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [ + [0.2689414213699951, 0.8807970779778823, 0.04742587], + [0.98201376, 0.00669285, 0.9975274], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sigmoid(x=x_val) + np.testing.assert_allclose(1 / (1 + np.exp(-x_val)), v.val, atol=1e-04, rtol=1e-05) + + +class TestSigmoidHard: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.sigmoid_hard(x=x, alpha=1.0, beta=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float32 + ) + + run_compare_builder( + build, + input_placeholders, + 
input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + alpha = 1.0 + beta = 2.0 + v = mb.sigmoid_hard(x=x_val, alpha=alpha, beta=beta) + np.testing.assert_allclose( + np.minimum(np.maximum((alpha * x_val) + beta, 0), 1), + v.val, + atol=1e-04, + rtol=1e-05, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha, beta", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0], + [4.0, 5.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.sigmoid_hard(x=x, alpha=alpha, beta=beta)] + + expected_outputs = [np.minimum(np.maximum((alpha * x_val) + beta, 0), 1)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSiLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([-1.1, 2.2, -3.3, 4.4], dtype=np.float32).reshape((1, 2, 1, 2)) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + } + input_value_dict = {"x": x_val} + expected_output_type = x_val.shape + (types.fp32,) + + def build(x): + return mb.silu(x=x) + + expected_output = np.array( + [-0.2747, 1.9805, -0.1174, 4.3466], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSoftplus: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softplus(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[0.31326166, 2.126928, 0.04858733], [4.01815, 0.00671535, 6.0024757]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.softplus(x=x_val) + np.testing.assert_allclose( + np.log(1 + np.exp(-np.abs(x_val))) + np.maximum(x_val, 0), v.val, atol=1e-04, rtol=1e-05 + ) + + +# No torch test because there is no direct torch translation to this layer +class TestSoftplusParametric: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softplus_parametric( + x=x, + alpha=np.array([1, 
2, 3], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + expected_output_types = (1, 3, 1, 3, types.fp32) + expected_outputs = np.array( + [[ + [[1.8142700e-02, 1.2000000e01, 2.4000000e01]], + [[1.3427734e-02, 2.0000000e01, 7.1525574e-07]], + [[7.2000000e01, 0.0000000e00, 1.0800000e02]], + ]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + v = mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + alpha_br = np.array([1, 2, 3], dtype=np.float32) + beta_br = np.array([4, 5, 6], dtype=np.float32) + for i in range(1, len(x_val.shape)): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + out = alpha_br * np.log(np.exp(x_val * beta_br) + 1) + + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval2(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval3(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"alpha .* rank 1"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([[1, 2, 3]], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval4(self): + with pytest.raises(ValueError, match=r"x .* rank 3"): + mb.softplus_parametric( + x=np.array([1], dtype=np.float32), + alpha=np.array([[1, 2, 3]], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval5(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval6(self): + x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"beta .* rank 1"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([[4, 5, 6]], dtype=np.float32), + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, chan", + itertools.product( + compute_units, + backends, + [1, 2, 4, 8], + [1, 2, 3] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, chan): + shape = np.array([1, chan, dim, dim]) + x_val = np.random.rand(*shape) + alpha_val = np.random.rand(chan).astype(np.float32) + beta_val = np.random.rand(chan).astype(np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.softplus_parametric(x=x, alpha=alpha_val, beta=beta_val)] + + alpha_br = np.copy(alpha_val) + beta_br = np.copy(beta_val) + for i in range(1, len(x_val.shape) - 1): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + expected_outputs = [alpha_br * np.log(np.exp(x_val * beta_br) + 1)] + expected_output_types = 
[o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSoftmax: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_buidler_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softmax(x=x, axis=0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [ + [6.69285092e-03, 9.99088949e-01, 1.23394576e-04], + [9.93307149e-01, 9.11051194e-04, 9.99876605e-01], + ], + dtype=np.float32, + ) + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.softmax(x=x_val, axis=0) + np.testing.assert_allclose( + scipy.special.softmax(x_val, axis=0), v.val, atol=1e-04, rtol=1e-05 + ) + + @pytest.mark.parametrize( + "input_size", [(1), (2), (1, 2), (2, 2), (2, 3, 4), (2, 3, 4, 10)] + ) + def test_value_inference(self, input_size): + rs = np.random.RandomState(1234) + x = rs.random(input_size) + + for axis in range(-x.ndim, x.ndim - 1): + @mb.program(input_specs=[]) + def prog(): + return mb.softmax(x=x, axis=axis) + + op = list(prog.functions.values())[0].operations[2] + assert op.op_type == "softmax" + np.testing.assert_allclose( + op.value_inference(), + scipy.special.softmax(x, axis=axis), + atol=1e-04, + rtol=1e-05, + ) + + +class TestSoftsign: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softsign(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[-0.5, 0.66666667, -0.75], [0.8, -0.83333333, 0.85714286]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.softsign(x=x_val) + np.testing.assert_allclose(x_val / (1 + np.abs(x_val)), v.val, atol=1e-04, rtol=1e-05) + + +class TestThresholdedReLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.thresholded_relu(x=x, alpha=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[0, 2, 0], [4, 0, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[0, 2, 0], [4, 0, 6]], dtype=np.float32) + v = 
mb.thresholded_relu(x=x_val, alpha=2.0) + y = x_val + y[y < 2.0] = 0 + np.testing.assert_allclose(y, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.thresholded_relu(x=x, alpha=alpha)] + + y = x_val + y[y < alpha] = 0 + expected_outputs = [y] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py new file mode 100644 index 00000000..b484e357 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py @@ -0,0 +1,62 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types + +from .testing_utils import run_compare_builder + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestConst: + @pytest.mark.parametrize( + "compute_unit, backend, dtype", itertools.product( + compute_units, + backends, + [ + np.int32, + np.int64, + np.float16, + np.float32, + np.float64, + ] + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, dtype): + if backend[0] == "mlprogram" and dtype in [np.uint8, np.int8, np.uint32]: + pytest.skip("Data type not supported") + + t = np.random.randint(0, 5, (4, 2)).astype(np.float32) + constant = np.random.randint(0, 5, (4, 2)).astype(dtype) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + y = mb.const(val=constant) + y = mb.cast(x=y, dtype='fp32') + return mb.add(x=x, y=y) + + expected_output_types = (4, 2, types.fp32) + expected_outputs = t + constant.astype(np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py new file mode 100644 index 00000000..4e8aa47b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py @@ -0,0 +1,646 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.defs.iOS16 import constexpr_ops +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + run_compare_builder +from coremltools.converters.mil.testing_utils import (get_op_types_in_program, + ssa_fn) + +backends = [("mlprogram", "fp32"), ("mlprogram", "fp16")] +compute_units = testing_reqs.compute_units + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprAffineDequantize: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(1, 1, 2, 2).astype(np.float32) + decompressed_constant = ( + np.array([1, 2, 3, 4]).reshape(1, 1, 2, 2).astype(np.float32) + ) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + quantized_data = np.array([3, 5, 5, 6]).reshape(1, 1, 2, 2).astype(np.uint8) + scale = np.array([1, 2]).astype(np.float32) + zero_point = np.array([2, 4]).astype(np.uint8) + axis = 3 + y = mb.constexpr_affine_dequantize( + quantized_data=quantized_data, + zero_point=zero_point, + scale=scale, + axis=axis, + ) + return mb.add(x=x, y=y) + + expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_affine_dequantize" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + # scalar zero-point & scalar scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.uint8), + zero_point=np.uint8(1), + scale=np.float32(2), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [0, 2, 4]]), v.val) + + # vector zero-point & scalar scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int8), + zero_point=np.array([1, 2]).astype(np.int8), + scale=np.float32(2), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [-2, 0, 2]]), v.val) + + # scalar zero-point & vector scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.uint8), + zero_point=np.uint8(1), + scale=np.array([2, 4]).astype(np.float32), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [0, 4, 8]]), v.val) + + # vector zero-point & vector scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int8), + zero_point=np.array([1, 2]).astype(np.int8), + scale=np.array([2, 4]).astype(np.float32), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [-4, 0, 4]]), v.val) + + @staticmethod + def affine_dequant_config_generator(): + 
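+        # A sketch of the semantics these configs exercise: the op materializes +        #     dequantized = scale * (quantized_data - zero_point) +        # where zero_point and scale are each either a scalar or a per-channel vector +        # broadcast along `axis` (cf. the hand-computed cases in test_builder_eval above); +        # the generator randomizes dtype, rank, axis, and the scalar-vs-vector choices.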
np.random.seed(1984) + + for quant_dtype in [np.int8, np.uint8]: + low = 0 if quant_dtype == np.uint8 else -128 + high = 255 if quant_dtype == np.uint8 else 127 + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + quantized_data = np.random.randint( + low=low, high=high, size=shape, dtype=quant_dtype + ) + axis = np.random.choice(range(-rank, rank)) + scalar_zp = np.random.choice([True, False]) + scalar_sc = np.random.choice([True, False]) + zero_point = ( + np.random.randint( + low=low, + high=high, + size=quantized_data.shape[axis], + dtype=quant_dtype, + ) + if not scalar_zp + else np.random.choice(range(low, high)).astype(quant_dtype) + ) + scale = ( + np.random.rand(quantized_data.shape[axis]).astype(np.float32) + if not scalar_sc + else np.float32(np.random.rand()) + ) # fp16 is already covered under backends parameterization + + params = { + "quantized_data": quantized_data, + "zp": zero_point, + "sc": scale, + "axis": axis, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + affine_dequant_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + quantized_data, zero_point, scale, axis = ( + config["quantized_data"], + config["zp"], + config["sc"], + config["axis"], + ) + + def build(x): + y = mb.constexpr_affine_dequantize( + quantized_data=quantized_data, + zero_point=zero_point, + scale=scale, + axis=axis, + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *quantized_data.shape, + types.numpy_type_to_builtin_type(scale.dtype), + ) + + t = np.random.rand(*quantized_data.shape).astype(scale.dtype) + decompressed_constant = constexpr_ops.constexpr_affine_dequantize.decompress( + quantized_data, zero_point, scale, axis + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=quantized_data.shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_affine_dequantize" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprCast: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + source_val = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float16) + y = mb.constexpr_cast(source_val=source_val, output_dtype="fp32") + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not 
removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_cast" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_cast(source_val=np.float16([1, 2]), output_dtype="fp32") + np.testing.assert_allclose(np.float32([1, 2]), v.val) + + @staticmethod + def cast_config_generator(): + np.random.seed(1984) + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + source_val = np.random.rand(*shape).astype(np.float16) + params = { + "source_val": source_val, + "output_dtype": "fp32", + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + cast_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + source_val, output_dtype = ( + config["source_val"], + config["output_dtype"], + ) + + def build(x): + y = mb.constexpr_cast( + source_val=source_val, + output_dtype=output_dtype, + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *source_val.shape, + types.string_to_builtin(output_dtype), + ) + + output_np_type = types.nptype_from_builtin( + types.string_to_builtin(output_dtype) + ) + t = np.random.rand(*source_val.shape).astype(output_np_type) + decompressed_constant = source_val.astype(output_np_type) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=source_val.shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_cast" in get_op_types_in_program(prog) + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprLutToDense: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + lut_data = np.array( + [ + -19.0, + 4.0, + 0.0, + -1.0, + 1.0, + 3.0, + 5.0, + -8.0, + 19, + 13, + 42, + 4.5, + 5.4, + 2.0, + -6, + -7, + ] + ).astype(np.float32) + indices = np.array([212, 21]).astype(np.uint8) + shape = np.array([4, 1]).astype(np.uint32) + y = mb.constexpr_lut_to_dense(lut=lut_data, indices=indices, shape=shape) + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_lut_to_dense" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_lut_to_dense( + lut=np.array([1.0, 2.0, 3.0, 4.0]), + indices=np.array([10, 4]).astype(np.uint8), + shape=np.array( + [ + 5, + ] 
+ ).astype(np.uint32), + ) + np.testing.assert_allclose( + np.float32([3, 3, 1, 1, 1]).astype(np.float32), v.val + ) + + @staticmethod + def lut_config_generator(): + np.random.seed(1999) + for lut_dtype in [np.float32]: # [np.uint8, np.int8]: + # float16 already covered under backends parameterization + # Not possible to write 8-bit tests since no other op consumes uint8/int8 tensors + for nbits in [1, 2, 4, 6, 8]: + lut_size = 2**nbits + if lut_dtype == np.uint8: + lut = np.random.randint(low=255, size=lut_size, dtype=np.uint8) + elif lut_dtype == np.int8: + lut = np.random.randint( + low=-128, high=127, size=lut_size, dtype=np.int8 + ) + else: + lut = np.random.rand(lut_size).astype(lut_dtype) + for output_rank in range(1, 6): + output_shape = np.random.randint(low=2, high=5, size=output_rank) + + indices = np.random.randint( + low=0, high=2**nbits, size=output_shape, dtype=np.uint8 + ) + indices_bitarray = np.unpackbits( + indices, bitorder="little" + ).reshape(-1, 8) + packed_indices = np.packbits( + indices_bitarray[:, :nbits], bitorder="little" + ) + + assert packed_indices.size == np.ceil( + nbits * np.prod(output_shape) / 8 + ).astype(np.int32) + params = { + "indices": packed_indices, + "shape": output_shape, + "lut": lut, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + lut_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + indices, lut, shape = ( + config["indices"], + config["lut"], + config["shape"], + ) + + def build(x): + y = mb.constexpr_lut_to_dense( + indices=indices, + lut=lut, + shape=shape.astype(np.uint32), + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *shape, + types.numpy_type_to_builtin_type(lut.dtype), + ) + + t = np.random.rand(*shape).astype(lut.dtype) + decompressed_constant = constexpr_ops.constexpr_lut_to_dense.decompress( + lut, indices, shape + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_lut_to_dense" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprSparseToDense: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 0, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + nonzero_data = np.array([1, 2, 4]).astype(np.float32) + mask = np.array([11]).astype(np.uint8) + shape = np.array([4, 1]).astype(np.uint32) + y = mb.constexpr_sparse_to_dense( + nonzero_data=nonzero_data, mask=mask, shape=shape + ) + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + 
input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_sparse_to_dense" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_sparse_to_dense( + nonzero_data=np.array([1.0, 2.0, 4.0]), + mask=np.array([11]).astype(np.uint8), + shape=np.array( + [ + 4, + ] + ).astype(np.uint32), + ) + np.testing.assert_allclose(np.float32([1.0, 2.0, 0.0, 4.0]), v.val) + + @staticmethod + def sparse_config_generator(): + np.random.seed(1999) + + for nonzero_data_dtype in [np.float32]: # [np.uint8, np.int8]: + # float16 already covered under backends parameterization + # Not possible to write 8-bit tests since no other op consumes uint8/int8 tensors + for output_rank in range(1, 6): + output_shape = np.random.randint(low=2, high=5, size=output_rank) + output_size = np.prod(output_shape) + nBytes = np.ceil(output_size / 8).astype(np.int32) + + mask = np.random.randint(low=255, size=nBytes, dtype=np.uint8) + bitarray = np.unpackbits(mask, bitorder="little") + while any(bitarray[i] != 0 for i in range(output_size, len(bitarray))): + mask = np.random.randint(low=255, size=nBytes, dtype=np.uint8) + bitarray = np.unpackbits(mask, bitorder="little") + + nonzero_size = np.sum( + np.where(np.unpackbits(mask, bitorder="little") != 0, 1, 0) + ) + + if nonzero_data_dtype == np.uint8: + nonzero_data = np.random.randint( + low=255, size=nonzero_size, dtype=np.uint8 + ) + elif nonzero_data_dtype == np.int8: + nonzero_data = np.random.randint( + low=-128, high=127, size=nonzero_size, dtype=np.int8 + ) + else: + nonzero_data = np.random.rand(nonzero_size).astype( + nonzero_data_dtype + ) + + params = { + "nonzero_data": nonzero_data, + "shape": output_shape, + "mask": mask, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + sparse_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + nonzero_data, mask, shape = ( + config["nonzero_data"], + config["mask"], + config["shape"], + ) + + def build(x): + y = mb.constexpr_sparse_to_dense( + nonzero_data=nonzero_data, + mask=mask, + shape=shape.astype(np.uint32), + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *shape, + types.numpy_type_to_builtin_type(nonzero_data.dtype), + ) + + t = np.random.rand(*shape).astype(nonzero_data.dtype) + decompressed_constant = constexpr_ops.constexpr_sparse_to_dense.decompress( + nonzero_data, mask, shape + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_sparse_to_dense" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py new file mode 100644 index 
00000000..f541f1ca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py @@ -0,0 +1,419 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestSelect: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + cond_val = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.float32) + a_val = np.array([[3, 1, 1], [1, 4, 1], [5, 6, 1]], dtype=np.float32) + b_val = np.array([[3, 2, 2], [2, 4, 2], [5, 6, 2]], dtype=np.float32) + input_placeholders = { + "cond": mb.placeholder(shape=cond_val.shape), + "a": mb.placeholder(shape=a_val.shape), + "b": mb.placeholder(shape=b_val.shape), + } + input_values = {"cond": cond_val, "a": a_val, "b": b_val} + + def build(cond, a, b): + if not types.is_bool(cond.dtype): + cond = mb.cast(x=cond, dtype="bool") + return [mb.select(cond=cond, a=a, b=b)] + + expected_output_types = [(3, 3, types.fp32)] + expected_outputs = [ + np.array( + [[3.0, 2.0, 2.0], [2.0, 4.0, 2.0], [5.0, 6.0, 2.0]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke_broadcast(self, compute_unit, backend): + cond_val = np.array([[1], [0], [2]], dtype=np.float32) + a_val = np.array([[3, 1, 1], [1, 4, 1], [5, 6, 1]], dtype=np.float32) + b_val = np.array([[3, 2, 2], [2, 4, 2], [5, 6, 2]], dtype=np.float32) + input_placeholders = { + "cond": mb.placeholder(shape=cond_val.shape), + "a": mb.placeholder(shape=a_val.shape), + "b": mb.placeholder(shape=b_val.shape), + } + input_values = {"cond": cond_val, "a": a_val, "b": b_val} + + def build(cond, a, b): + if not types.is_bool(cond.dtype): + cond = mb.cast(x=cond, dtype="bool") + return [mb.select(cond=cond, a=a, b=b)] + + expected_output_types = [(3, 3, types.fp32)] + expected_outputs = [ + np.array( + [[3.0, 1.0, 1.0], [2.0, 4.0, 2.0], [5.0, 6.0, 1.0]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + cond = np.random.randint(low=0, high=2, size=(6, 1, 7)).astype(bool) + a = random_gen(shape=(6, 1, 7), rand_min=-1962.0, rand_max=0.0) + b = random_gen(shape=(6, 1, 7), rand_min=0.0, rand_max=1964.0) + res = mb.select(cond=cond, a=a, b=b) + np.testing.assert_allclose(np.where(cond, a, b), res.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_broadcast(self): + cond = np.array([[True], [False], [True]]) + a = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + b = np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32) + res = 
mb.select(cond=cond, a=a, b=b) + np.testing.assert_allclose(np.array([[1, 2], [9, 10], [5, 6]], dtype=np.float32), res.val, atol=1e-04, rtol=1e-05) + + +class TestCond: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + input_placeholders = { + "a": mb.placeholder(shape=(1,), dtype=types.bool), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + def true_fn(): + return mb.add(x=b, y=1.), mb.mul(x=b, y=2.) + + def false_fn(): + return mb.add(x=b, y=-1.), mb.mul(x=b, y=-2.) + + pred = mb.squeeze(x=a) + return mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn) + + input_values = { + "a": np.array([0], dtype=np.float32), + "b": np.array([2], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([1], dtype=np.float32), + np.array([-4], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestWhileLoop: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + def body(a, b): + return mb.add(x=a, y=np.float32(1)), b + + def cond(a, b): + return mb.less(x=a, y=b) + + input_placeholders = { + "a": mb.placeholder(shape=(1,)), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + input_values = { + "a": np.array([1], dtype=np.float32), + "b": np.array([2], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([2], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_power(self, compute_unit, backend): + + input_placeholders = { + "a": mb.placeholder(shape=(1,)), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + # Compute a^b + def body(res, bx): + return mb.mul(x=res, y=a), mb.add(x=bx, y=np.float32(1)) + + def cond(res, bx): + return mb.less(x=bx, y=b) + + res, ignored = mb.while_loop(_cond=cond, _body=body, + loop_vars=([1.], [0.])) + return res + + input_values = { + "a": np.array([2], dtype=np.float32), + "b": np.array([4], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + ] + + expected_outputs = [ + np.array([16], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_nested(self, compute_unit, backend): + if backend[0] == 'neuralnetwork': + pytest.xfail("rdar://96862073 (test_control_folw::TestWhileLoop::test_builder_to_backend_nested failing on nnv1)") + + input_placeholders = { + "x": mb.placeholder(shape=(1,)), + "y": mb.placeholder(shape=(1,)), + } + + def build(x, y): + # i, j = x, y + # while i < j: + # while 2*i < i+2: + # i += 1 + # i += 2 + # return i, j + + # Create 
const outside of while loop for testing purpose + two = mb.const(val=[2.], name='const_two') + one = mb.const(val=[1.], name='const_one') + + def cond2(i): + return mb.less(x=mb.mul(x=two, y=i), y=mb.add(x=i, y=two)) + + def body2(i): + return mb.add(x=i, y=one) + + def cond1(i, j): + return mb.less(x=i, y=j) + + def body1(i, j): + new_i = mb.while_loop(_cond=cond2, _body=body2, + loop_vars=(i,)) + return mb.add(x=new_i, y=two), j + + return mb.while_loop(_cond=cond1, _body=body1, + loop_vars=(x, y)) + + input_values = { + "x": np.array([0], dtype=np.float32), + "y": np.array([10], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([10], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestList: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + elem_shape = (2,) + input_placeholders = { + "a": mb.placeholder(shape=elem_shape), + "b": mb.placeholder(shape=elem_shape), + } + + def build(a, b): + ls = mb.make_list(init_length=2, elem_shape=elem_shape) + # list is initially all 0 + init_t = mb.list_read(ls=ls, index=0) + ls = mb.list_write(ls=ls, index=0, value=a) + # this write is out of bound + ls = mb.list_write(ls=ls, index=4, value=b) + ls = mb.list_scatter( + ls=ls, + indices=[2, 1], + value=np.array([[-1, -2], [-4, -5]], dtype=np.float32), + ) + return ( + init_t, + mb.list_read(ls=ls, index=0), + mb.list_gather(ls=ls, indices=[4, 2, 3]), + ) + + input_values = { + "a": np.array([1, 3], dtype=np.float32), + "b": np.array([2, 4], dtype=np.float32), + } + + expected_output_types = [ + (2, types.fp32), + (2, types.fp32), + (3, 2, types.fp32), + ] + + expected_outputs = [ + np.array([0, 0], dtype=np.float32), + np.array([1, 3], dtype=np.float32), + np.array([[2, 4], [-1, -2], [0, 0]], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_while(self, compute_unit, backend): + # The while_loop appends [1, 2]*i to `ls` for each iteration + # i = 0, ... num_iters-1. 
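+        # For example, with num_iters=3 and update=[1, 2] the loop writes +        # [0, 0], [1, 2], [2, 4] at indices 0, 1, 2, which is exactly the +        # gathered result asserted in expected_outputs below.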
+ def body(i, num_iters, ls, update): + y = mb.cast(x=i, dtype="fp32") + new_elem = mb.mul(x=update, y=y) + return ( + mb.add(x=i, y=1), + num_iters, + mb.list_write(ls=ls, index=i, value=new_elem), + update, + ) + + def cond(i, num_iters, ls, update): + i = mb.cast(x=i, dtype="fp32") + return mb.less(x=i, y=num_iters) + + elem_shape = (2,) + input_placeholders = { + "num_iters": mb.placeholder(shape=(1,)), + "update": mb.placeholder(shape=elem_shape), + } + + def build(num_iters, update): + i = 0 + ls = mb.make_list(init_length=1, elem_shape=elem_shape) + _, _, final_tensor_list, _ = mb.while_loop( + _cond=cond, _body=body, loop_vars=(i, num_iters, ls, update) + ) + list_len = mb.list_length(ls=final_tensor_list) + indices = mb.range_1d(start=0, end=list_len, step=1) + return mb.list_gather(ls=final_tensor_list, indices=indices) + + input_values = { + "num_iters": np.array([3], dtype=np.float32), + "update": np.array([1, 2], dtype=np.float32), + } + + expected_output_types = [ + # Type inference does not unroll loop + (UNK_SYM, 2, types.fp32), + ] + + expected_outputs = [ + np.array([[0, 0], [1, 2], [2, 4]], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py new file mode 100644 index 00000000..d1404382 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py @@ -0,0 +1,940 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.models.utils import _macos_version + +from .testing_utils import run_compare_builder + + +class TestConvTranspose: + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d", "conv3d"], + [{ + "padding": (1, 2, 3), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "test_symbolic": False, + "test_output_shape": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "test_symbolic": True, + "test_output_shape": False, + }, + { + "padding": (1, 2, 3), + "DHWKdKhKw": (7, 7, 7, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "test_symbolic": True, + "test_output_shape": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (7, 7, 7, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "test_symbolic": False, + "test_output_shape": False, + }, + ], + ), + ) + def 
test_builder_to_backend_stress( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + dilation = config["dilation"] + has_bias = config["has_bias"] + groups = config["groups"] + test_symbolic = config["test_symbolic"] + test_output_shape = config["test_output_shape"] + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isDeconv1d = conv_dim == "conv1d" + isDeconv2d = conv_dim == "conv2d" + + if isDeconv1d: + strides = [stride[0]] + dilations = [dilation[0]] + kernels = [Kh] + m = nn.ConvTranspose1d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + + elif isDeconv2d: + strides = [stride[0], stride[1]] + dilations = [dilation[0], dilation[1]] + kernels = [Kh, Kw] + m = nn.ConvTranspose2d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=(padding[0], padding[1]), + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + else: + strides = [stride[0], stride[1], stride[2]] + dilations = [dilation[0], dilation[1], dilation[2]] + kernels = [Kd, Kh, Kw] + m = nn.ConvTranspose3d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding, + ) + input_shape = [N, C_in, D, H, W] + paddings = [ + padding[0], + padding[0], + padding[1], + padding[1], + padding[2], + padding[2], + ] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if test_symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": input} + + def build(x): + arguments = { + "x": x, + "weight": weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "dilations": dilations, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + if test_output_shape: + arguments["output_shape"] = output.shape + return mb.conv_transpose(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConv: + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + "compute_unit, backend, padding_mode, conv_dim", + itertools.product( + compute_units, + backends, + ["same_lower", "same", "valid"], + ["conv1d", "conv2d", "conv3d"], + ), + ) + def test_padding_mode_stress(self, compute_unit, backend, padding_mode, conv_dim): + import torch + def rotation_tensor(tensor): + assert tensor.shape[0] == tensor.shape[1] == 1 + tensor = tensor[0][0] + rank = len(tensor.shape) + new_tensor = np.copy(np.flip(tensor, axis=tuple(range(rank)))) + return np.expand_dims(new_tensor, axis=(0, 1)) + + if conv_dim == 
"conv3d" and padding_mode == "same_lower": + if backend[0] == "neuralnetwork": + pytest.skip("same_lower mode not supported for conv3d in neuralnetwork backend") + + if padding_mode == "same_lower" and backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + if _macos_version() < (13, 0) and minimum_deployment_target == ct.target.iOS16: + pytest.skip("iOS16 target not available on macOS 13") + + batch, in_channels, out_channels = 1, 1, 1 + input_shape = (batch, in_channels, 4, 5, 6) # batch, channel, height, width + kernel_size = (2, 4, 3) + torch_padding_mode = padding_mode if padding_mode != "same_lower" else "same" + + # Get the right shape for each conv_dim + if conv_dim == "conv1d": + input_shape = input_shape[:3] + kernel_size = kernel_size[:1] + elif conv_dim == "conv2d": + input_shape = input_shape[:4] + kernel_size = kernel_size[:2] + + # Get the ground truth answer from torch + if conv_dim == "conv1d": + m = torch.nn.Conv1d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + elif conv_dim == "conv2d": + m = torch.nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + elif conv_dim == "conv3d": + m = torch.nn.Conv3d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + + # Original weight / inputs for the torch model + weight = torch.clone(m.state_dict()["weight"]) + input = torch.randn(*input_shape, dtype=torch.float32) + + # Coreml weights / inputs values + coreml_weight = weight.detach().numpy() + coreml_input = input.detach().numpy() + + if padding_mode == "same_lower": + # For the same_lower padding mode, we get the ground truth output by doing the following steps + # (1) Rotate the input value + # (2) Rotate the kernel value + # (3) Rotate the torch out + rotated_input = torch.tensor(rotation_tensor(input.detach().numpy()), dtype=torch.float32) + rotated_weight = torch.tensor(rotation_tensor(weight.detach().numpy()), dtype=torch.float32) + m.load_state_dict({'weight': rotated_weight}, strict=False) + output = m(rotated_input).detach().numpy() + output = rotation_tensor(output) + else: + output = m(input).detach().numpy() + + output_shape = list(output.shape) + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": coreml_input} + + def build(x): + arguments = { + "x": x, + "weight": coreml_weight, + "pad_type": padding_mode, + } + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d", "conv3d"], + [{ + "padding": (1, 1, 1), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "symbolic": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 
2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "symbolic": True, + }, + { + "padding": (1, 1, 1), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "symbolic": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "symbolic": False, + }, + ], + ), + ) + def test_builder_to_backend_stress( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + dilation = config["dilation"] + has_bias = config["has_bias"] + groups = config["groups"] + symbolic = config["symbolic"] + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isConv1d = conv_dim == "conv1d" + isConv2d = conv_dim == "conv2d" + + if isConv1d: + strides = [stride[0]] + dilations = [dilation[0]] + kernels = [Kh] + m = nn.Conv1d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + elif isConv2d: + strides = [stride[0], stride[1]] + dilations = [dilation[0], dilation[1]] + kernels = [Kh, Kw] + m = nn.Conv2d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=(padding[0], padding[1]), + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + else: + strides = [stride[0], stride[1], stride[2]] + dilations = [dilation[0], dilation[1], dilation[2]] + kernels = [Kd, Kh, Kw] + m = nn.Conv3d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding, + ) + input_shape = [N, C_in, D, H, W] + paddings = [ + padding[0], + padding[0], + padding[1], + padding[1], + padding[2], + padding[2], + ] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + # PyTorch and CoreML weight format is same + # PyTorch weight format: C_out, C_in, H, W + # MIL weight format: C_out, C_in, H, W + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": input} + + def build(x): + arguments = { + "x": x, + "weight": weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "dilations": dilations, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d"], + 
[ + { + "padding": (1, 1, 1), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "symbolic": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "symbolic": True, + }, + { + "padding": (1, 1, 1), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "symbolic": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "symbolic": False, + }, + ], + ), + ) + def test_builder_to_backend_stress_weights_input( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + has_bias = config["has_bias"] + groups = config["groups"] + symbolic = config["symbolic"] + + if backend[0] == "neuralnetwork" and groups > 1: + pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend") + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398343 (test_builder_to_backend_stress_weights_input is failing on mlprogram + GPU)") + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isConv1d = conv_dim == "conv1d" + isConv2d = conv_dim == "conv2d" + + if isConv1d: + strides = [stride[0]] + kernels = [Kh] + m = nn.Conv1d( + C_in, + C_out, + kernels, + stride=strides, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + elif isConv2d: + strides = [stride[0], stride[1]] + kernels = [Kh, Kw] + m = nn.Conv2d( + C_in, + C_out, + kernels, + stride=strides, + groups=groups, + padding=(padding[0], padding[1]), + bias=has_bias, + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + # PyTorch and CoreML weight format is same + # PyTorch weight format: C_out, C_in, H, W + # MIL weight format: C_out, C_in, H, W + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape), "input_weight":mb.placeholder(shape=weight.shape)} + input_values = {"x": input, "input_weight": weight} + + def build(x, input_weight): + arguments = { + "x": x, + "weight": input_weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_conv_bias_fusion(self, 
compute_unit, backend): + """ + Test the conv/bias fusion pass when the bias is a const input. + + Input graph: + Const + | + V + input -----> convolution -----> add/sub ---> out + + Output graph: + input -----> convolution -----> out + """ + weight = np.array([2.5], dtype=np.float32).reshape([1, 1, 1, 1]) + + def build(x): + x = mb.conv(x=x, weight=weight) + bias = mb.const(val=[10.]) + return mb.add(x=x, y=bias) + + input = np.array([1, 2, 3, 4], dtype=np.float32).reshape((1, 1, 2, 2)) + output = np.array([12.5, 15.0, 17.5, 20.0], dtype=np.float32).reshape((1, 1, 2, 2)) + expected_output_types = output.shape + (types.fp32,) + expected_outputs = [output] + input_placeholders = {"x": mb.placeholder(shape=input.shape)} + input_values = {"x": input} + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestInvalidConvConfig: + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_weight(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + groups = np.random.randint(low=1, high=C_in + 1) + while C_in % groups != 0: + groups = np.random.randint(low=1, high=C_in + 1) + + weight = np.random.rand(C_out, C_in // groups + + np.random.randint(low=1, high=8), *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, groups=groups) + + with pytest.raises( + ValueError, + match=r"C_in / groups = [0-9]+/[0-9]+ != weight\[1\] \([0-9]+\)" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_bias(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + wrong_bias_size = C_out + np.random.randint(low=1, high=8) + bias = np.random.rand(wrong_bias_size) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, bias=bias) + + with pytest.raises( + ValueError, + match=r"# of bias values [0-9]+ not equal to # output channels [0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_kernel(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=16, high=32, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight) + + with pytest.raises( + ValueError, + match=r"spatial 
dimension [0-9]+ has invalid output size -?[0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_dilation(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=2, high=4, size=conv_dim)) + dilations = tuple(np.random.randint(low=16, high=32, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, dilations=dilations) + + with pytest.raises( + ValueError, + match=r"spatial dimension [0-9]+ has invalid output size -?[0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_groups(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + groups = np.random.randint(low=1, high=C_in) + while C_in % groups == 0: + groups = np.random.randint(low=1, high=C_in) + + weight = np.random.rand(C_out, C_in // groups, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, groups=groups) + + with pytest.raises( + ValueError, + match=r"# of input channels [0-9]+ not divisible by groups [0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_rank(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + wrong_K = tuple(np.random.randint(low=1, high=4, size=conv_dim - 1)) + + weight = np.random.rand(C_out, C_in, *wrong_K) * 2.0 - 1.0 + strides = tuple(np.random.randint(low=1, high=4, size=conv_dim + 1)) + dilations = tuple(np.random.randint(low=1, high=4, size=conv_dim + 2)) + pad = tuple(np.random.randint(low=1, high=4, size=2 * conv_dim + 3)) + + def build(x): + return mb.conv(x=x, weight=weight, strides=strides, dilations=dilations, pad_type="custom", pad=pad) + + with pytest.raises( + ValueError, + match=r"input_shape \(length [0-9]+\), " + r"kernel_shape \(length [0-9]+\), " + r"strides \(length [0-9]+\), " + r"dilations \(length [0-9]+\), " + r"and custom_pad \(length [0-9]+\) divided by two " + r"must all be the same length", + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py new file mode 100644 index 00000000..fa2790ce --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py @@ -0,0 +1,592 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + + +class TestElementwiseBinary: + # All in this test share the same backends + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + [ + "add", + "floor_div", + "maximum", + "minimum", + "mod", + "mul", + "pow", + "real_div", + "sub", + ], + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, mode): + if mode == "add": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 4, 0], [8, 0, 12]], dtype=np.float32) + + build = lambda x, y: mb.add(x=x, y=y) + elif mode == "floor_div": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 2], [2, 3, 3]], dtype=np.float32) + + build = lambda x, y: mb.floor_div(x=x, y=y) + elif mode == "maximum": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + build = lambda x, y: mb.maximum(x=x, y=y) + elif mode == "minimum": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + + build = lambda x, y: mb.minimum(x=x, y=y) + elif mode == "mod": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[10, 8, 4], [12, 5, 12]], dtype=np.float32) + + build = lambda x, y: mb.mod(x=x, y=y) + elif mode == "mul": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 4, -9], [16, -25, 36]], dtype=np.float32) + + build = lambda x, y: mb.mul(x=x, y=y) + elif mode == "pow": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1, 4, 0.037], [256, 0.00032, 46656]], dtype=np.float32 + ) + + build = lambda x, y: mb.pow(x=x, y=y) + elif mode == "real_div": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array( + [[0.90909091, 1.66666667, 2.30769231], [2.85714286, 3.33333333, 3.75]], + dtype=np.float32, + ) + + build = lambda x, y: mb.real_div(x=x, y=y) + elif mode == "sub": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, 
-3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[2, 0, 6], [0, 10, 0]], dtype=np.float32) + + build = lambda x, y: mb.sub(x=x, y=y) + + expected_output_types = (2, 3, types.fp32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_output_dim_for_same_symbolic_dim_inputs(self): + symbolic_input_shape = (get_new_symbol(), 4, 5) + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=symbolic_input_shape), + mb.TensorSpec(shape=symbolic_input_shape), + ] + ) + def prog(x, y): + return mb.add(x=x, y=y) + + add_op = prog.find_ops(op_type="add")[0] + output_shape = add_op.outputs[0].shape + if output_shape != symbolic_input_shape: + raise AssertionError( + "Invalid Output shape {}. Should instead be {}".format( + output_shape, symbolic_input_shape + ) + ) + + @ssa_fn + def test_builder_add(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 4, 0], [8, 0, 12]], dtype=np.float32) + v = mb.add(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_floor_div(self): + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 2], [2, 3, 3]], dtype=np.float32) + v = mb.floor_div(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_maximum(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.maximum(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_minimum(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.minimum(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_mod(self): + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[10, 8, 4], [12, 5, 12]], dtype=np.float32) + v = mb.mod(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_mul(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 4, -9], [16, -25, 36]], dtype=np.float32) + v = mb.mul(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_pow(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1, 4, 0.037], [256, 0.00032, 46656]], dtype=np.float32 + ) + v = mb.pow(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_real_div(self): + x = np.array([[10, 20, 30], [40, 50, 60]], 
+ + @ssa_fn + def test_builder_real_div(self): + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array( + [[0.90909091, 1.66666667, 2.30769231], [2.85714286, 3.33333333, 3.75]], + dtype=np.float32, + ) + v = mb.real_div(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_real_div_both_ints(self): + x = np.array([5], dtype=np.int32) + y = np.array([2], dtype=np.int32) + expected_outputs = np.array([2], dtype=np.int32) + v = mb.real_div(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + assert isinstance(v.val[0], (float, np.int32)) + # make sure the dtype is int + assert types.is_int(v.dtype) + # make sure the symbolic type matches the value type + assert v._sym_type.get_primitive() == v._sym_val.get_primitive() + + @ssa_fn + def test_builder_sub(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[2, 0, 6], [0, 10, 0]], dtype=np.float32) + v = mb.sub(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_real_div_int_builder_to_backend(self, compute_unit, backend): + """ + For the neuralnetwork backend, real_div produces float output even for int inputs, + while the mlprogram backend produces int output. (An illustrative NumPy sketch of + this difference appears after the TestEqual class below.) + """ + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + + if backend[0] == "neuralnetwork": + dtype = np.float32 + else: + dtype = np.int32 + expected_outputs = np.array(x / y, dtype=dtype) + + build = lambda x, y: mb.real_div(x=x, y=y) + + expected_output_types = (2, 3, types.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape, dtype=types.int32), + "y": mb.placeholder(shape=y.shape, dtype=types.int32), + } + input_values = {"x": x, "y": y} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.equal(x=x, y=y), mb.equal(x=-3., y=y) + + expected_output_types = [ + (2, 3, types.bool), + (2, 3, types.bool), + ] + expected_outputs = [ + np.array([[0, 1, 0], [1, 0, 1]], dtype=bool), + np.array([[0, 0, 1], [0, 0, 0]], dtype=bool), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool) + v = mb.equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
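+ + +# Illustrative sketch, assuming plain NumPy (no backend involved): the dtype +# difference documented in test_real_div_int_builder_to_backend above. The +# neuralnetwork backend behaves like true float division, while mlprogram +# truncates the quotient back to the integer input type. +def _real_div_backend_reference(x, y, backend_name): + float_quotient = x.astype(np.float32) / y.astype(np.float32) + if backend_name == "neuralnetwork": + return float_quotient + return float_quotient.astype(np.int32)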
+ + +class TestGreater: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.greater(x=x, y=y), mb.greater(x=x, y=3.5) + + expected_output_types = [ + (2, 3, types.bool), + (2, 3, types.bool), + ] + expected_outputs = [ + np.array([[1, 0, 1], [0, 1, 0]], dtype=bool), + np.array([[0, 0, 0], [1, 1, 1]], dtype=bool), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool) + v = mb.greater(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestGreaterEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.greater_equal(x=x, y=y), mb.greater_equal(x=x, y=3.5) + + expected_output_types = [ + (2, 3, types.bool), + (2, 3, types.bool), + ] + expected_outputs = [ + np.array([[1, 1, 1], [1, 1, 1]], dtype=bool), + np.array([[0, 0, 0], [1, 1, 1]], dtype=bool), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 1, 1], [1, 1, 1]], dtype=bool) + v = mb.greater_equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestLess: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.less(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke2(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + input_values = 
{"x": x} + + def build(x): + # y is const + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + return mb.less(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_broadcast(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + input_values = {"x": x} + + def build(x): + # y is const + return mb.less(x=x, y=3.5) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[1, 1, 1], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + v = mb.less(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestLessEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.less_equal(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool) + v = mb.less_equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestNotEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.not_equal(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + 
expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool) + v = mb.not_equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py new file mode 100644 index 00000000..f1b0640a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py @@ -0,0 +1,688 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest +import scipy.special + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import (Function, get_new_symbol, + types) +from coremltools.converters.mil.mil.types.symbolic import \ + is_compatible_symbolic_vector +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestElementwiseUnary: + # All ops in this test share the same backends + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + [ + "abs", + "acos", + "asin", + "atan", + "atanh", + "cast", + "ceil", + "clip", + "cos", + "cosh", + "erf", + "exp", + "exp2", + "floor", + "inverse", + "log", + "round", + "rsqrt", + "sign", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "threshold", + ], + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, mode): + if mode == "abs": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + build = lambda x: mb.abs(x=x) + elif mode == "acos": + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + expected_outputs = np.array( + [ + [3.14159265, 2.0943951, 1.57079633], + [1.15927948, 1.04719755, 0.64350111], + ], + dtype=np.float32, + ) + + build = lambda x: mb.acos(x=x) + elif mode == "asin": + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + expected_outputs = np.array( + [[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]], + dtype=np.float32, + ) + + build = lambda x: mb.asin(x=x) + elif mode == "atan": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.78539816, 1.10714872, -1.24904577], + [1.32581766, -1.37340077, 1.40564765], + ], + dtype=np.float32, + ) + build = lambda x: mb.atan(x=x) + elif mode == "atanh": + val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + expected_outputs = np.array( + [[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]], + dtype=np.float32, + ) + + build = lambda x: mb.atanh(x=x) + elif mode == "cast": + val = np.array([[-1.2, 2, -3.6], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32) + build = lambda x: mb.cast(x=x, dtype="int32") + elif mode == "ceil": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32) + + build = lambda x: 
mb.ceil(x=x) + elif mode == "clip": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32) + + build = lambda x: mb.clip(x=x, alpha=0.0, beta=5.0) + elif mode == "cos": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [0.54030231, -0.41614684, -0.9899925], + [-0.65364362, 0.28366219, 0.96017029], + ], + dtype=np.float32, + ) + + build = lambda x: mb.cos(x=x) + elif mode == "cosh": + val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32) + expected_outputs = np.array( + [ + [1.54308063, 3.76219569, 10.067662], + [1.54308063, 3.76219569, 10.067662], + ], + dtype=np.float32, + ) + + build = lambda x: mb.cosh(x=x) + elif mode == "erf": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.8427007929497148, 0.9953222650189527, -0.9999779095030014], + [0.9999999845827421, -0.9999999999984626, 1.0], + ], + dtype=np.float32, + ) + + build = lambda x: mb.erf(x=x) + elif mode == "exp": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [0.36787944, 7.3890561, 0.04978707], + [54.5981500, 0.0067379, 403.428793], + ], + dtype=np.float32, + ) + + build = lambda x: mb.exp(x=x) + elif mode == "exp2": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32 + ) + + build = lambda x: mb.exp2(x=x) + elif mode == "floor": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32) + + build = lambda x: mb.floor(x=x) + elif mode == "inverse": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32 + ) + build = lambda x: mb.inverse(x=x) + elif mode == "log": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]], + dtype=np.float32, + ) + + build = lambda x: mb.log(x=x) + elif mode == "round": + val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32) + + build = lambda x: mb.round(x=x) + elif mode == "rsqrt": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]], + dtype=np.float32, + ) + + build = lambda x: mb.rsqrt(x=x) + elif mode == "sign": + val = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32) + + build = lambda x: mb.sign(x=x) + elif mode == "sin": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.84147098, 0.90929743, -0.14112001], + [-0.7568025, 0.95892427, -0.2794155], + ], + dtype=np.float32, + ) + + build = lambda x: mb.sin(x=x) + elif mode == "sinh": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]], + dtype=np.float32, + ) + + build = lambda x: mb.sinh(x=x) + elif mode == "sqrt": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]], + 
dtype=np.float32, + ) + + build = lambda x: mb.sqrt(x=x) + elif mode == "square": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 4.0, 9.0], [16.0, 25.0, 36.]], + dtype=np.float32, + ) + + build = lambda x: mb.square(x=x) + elif mode == "tan": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32 + ) + + build = lambda x: mb.tan(x=x) + elif mode == "tanh": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.7615942, 0.9640276, -0.9950548], + [0.9993293, -0.9999092, 0.9999877], + ], + dtype=np.float32, + ) + + build = lambda x: mb.tanh(x=x) + elif mode == "threshold": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32 + ) + + build = lambda x: mb.threshold(x=x, alpha=1.0) + + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + expected_output_types = ( + (2, 3, types.int32) if mode == "cast" else (2, 3, types.fp32) + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_abs_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.abs(x=val) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_acos_eval(self): + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.acos(x=val) + expected_outputs = np.array( + [[3.14159265, 2.0943951, 1.57079633], [1.15927948, 1.04719755, 0.64350111]], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_asin_eval(self): + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.asin(x=val) + expected_outputs = np.array( + [[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_atan_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.atan(x=val) + expected_outputs = np.array( + [ + [-0.78539816, 1.10714872, -1.24904577], + [1.32581766, -1.37340077, 1.40564765], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_atanh_eval(self): + val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.atanh(x=val) + expected_outputs = np.array( + [[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cast_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32) + + v = mb.cast(x=val, dtype="int32") + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_ceil_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.ceil(x=val) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], 
dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_clip_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.clip(x=val, alpha=0.0, beta=5.0) + expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cos_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.cos(x=val) + expected_outputs = np.array( + [ + [0.54030231, -0.41614684, -0.9899925], + [-0.65364362, 0.28366219, 0.96017029], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cosh_eval(self): + val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32) + v = mb.cosh(x=val) + expected_outputs = np.array( + [[1.54308063, 3.76219569, 10.067662], [1.54308063, 3.76219569, 10.067662]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_erf_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.erf(x=x_val) + np.testing.assert_allclose(scipy.special.erf(x_val), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_exp_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.exp(x=val) + expected_outputs = np.array( + [[0.36787944, 7.3890561, 0.04978707], [54.5981500, 0.0067379, 403.428793]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_exp2_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.exp2(x=val) + expected_outputs = np.array( + [[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32 + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_floor_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.floor(x=val) + expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_inverse_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.inverse(x=val) + expected_outputs = np.array( + [[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32 + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_log_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.log(x=val) + expected_outputs = np.array( + [[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_round_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32) + v = mb.round(x=val) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_rsqrt_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.rsqrt(x=val) + expected_outputs = np.array( + [[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, 
rtol=1e-05) + + @ssa_fn + def test_builder_sign_eval(self): + val = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32) + v = mb.sign(x=val) + expected_outputs = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sin_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sin(x=val) + expected_outputs = np.array( + [ + [-0.84147098, 0.90929743, -0.14112001], + [-0.7568025, 0.95892427, -0.2794155], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sinh_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sinh(x=val) + expected_outputs = np.array( + [[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sqrt_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.sqrt(x=val) + expected_outputs = np.array( + [[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_tan_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.tan(x=val) + expected_outputs = np.array( + [[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32 + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_tanh_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.tanh(x=x_val) + np.testing.assert_allclose(np.tanh(x_val), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_threshold_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.threshold(x=val, alpha=1.0) + expected_outputs = np.array([[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_cast_with_symbolic_value(self): + input_shape = [get_new_symbol(), 1] + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + } + + def build(x): + shape = mb.shape(x=x) + return mb.cast(x=shape, dtype="int32") + + with Function(input_placeholders) as ssa_func: + output_vars = build(**ssa_func.inputs) + assert is_compatible_symbolic_vector(output_vars.sym_val, [get_new_symbol(), 1]) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_inverse( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, -2, 3], [4, -5, 6]], dtype=np.float32) + numpy_pred = 1 / (x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.inverse(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_rsqrt( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, 2, 3], [4, 5, 
6]], dtype=np.float32) + numpy_pred = 1.0 / np.sqrt(x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.rsqrt(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_log( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + numpy_pred = np.log(x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.log(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, src_dst", + itertools.product( + compute_units, + backends, + [("fp16", "fp32"), ("fp32", "fp16")], + ), + ) + def test_builder_to_backend_stress_cast( + self, compute_unit, backend, src_dst + ): + src_dtype, dst_dtype = src_dst + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + numpy_pred = x.astype(dtype=np.float16) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + x = mb.cast(x=x, dtype=src_dtype) + x = mb.square(x=x) + x = mb.cast(x=x, dtype=dst_dtype) + x = mb.sqrt(x=x) + x = mb.cast(x=x, dtype="fp32") + return x + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + def test_erf_value_inference(self): + INPUT_SIZE=(2, 3, 4) + rs = np.random.RandomState(1234) + x = rs.random(INPUT_SIZE) + + @mb.program(input_specs=[]) + def prog(): + return mb.erf(x=x) + + ops = list(prog.functions.values())[0].operations + assert len(ops) == 2 + assert ops[0].op_type == 'const' + erf_op = ops[1] + assert erf_op.op_type == 'erf' + np.testing.assert_allclose(erf_op.value_inference(), scipy.special.erf(x), atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py new file mode 100644 index 00000000..ab0e542e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py @@ -0,0 +1,934 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import functools +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen +from coremltools.models.utils import _macos_version + +from .testing_utils import run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestAffine: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_val = np.array([11.0, 22.0, 33.0, 44.0], dtype=np.float32).reshape( + [1, 1, 2, 2] + ) + transform_matrix_val = np.array( + [-1.0, -2.0, -3.7, -1.0, 3.5, 1.2], dtype=np.float32 + ).reshape([1, 6]) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "transform_matrix": mb.placeholder(shape=transform_matrix_val.shape), + } + input_value_dict = {"x": x_val, "transform_matrix": transform_matrix_val} + + def build(x, transform_matrix): + return [ + mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=3, + output_width=3, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ), + mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=2, + output_width=5, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ), + ] + + expected_output_types = [ + (1, 1, 3, 3, types.fp32), + (1, 1, 2, 5, types.fp32), + ] + expected_outputs = [ + np.array( + [10.752501, 2.5025, 0.0, 1.9799997, 0.0, 0.0, 0.0, 0.0, 0.0], + dtype=np.float32, + ).reshape([1, 1, 3, 3]), + np.array( + [10.752501, 5.94, 2.5025, 0.44000006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + dtype=np.float32, + ).reshape([1, 1, 2, 5]), + ] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResample: + @pytest.mark.parametrize( + "compute_unit, backend, minimum_deployment_target", + itertools.product( + compute_units, + backends, + [ct.target.iOS15, ct.target.iOS16], + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, minimum_deployment_target): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if minimum_deployment_target == ct.target.iOS16 and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + x_ = np.array([11.0, 22.0, 33.0, 44.0], dtype=np.float32).reshape([1, 1, 2, 2]) + coordinates_ = np.array( + [-1.0, -2.0, -3.7, -1.0, 0.0, 0.0, 3.5, 1.2], dtype=np.float32 + ).reshape([1, 2, 2, 2]) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_.shape), + "coordinates": mb.placeholder(shape=coordinates_.shape), + } + input_value_dict = {"x": x_, "coordinates": coordinates_} + expected_output_type = (1, 1, 2, 2, types.fp32) + + def build_0(x, coordinates): + return mb.resample( + x=x, + 
coordinates=coordinates, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=6.17, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ) + + expected_output_0 = np.array( + [8.585, 6.17, 27.5, 6.17], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_1(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="nearest", + padding_mode="border", + padding_value=-1.0, + coordinates_mode="unnormalized", + align_corners=False, + ) + + expected_output_1 = np.array( + [11.0, 11.0, 11.0, 44.0], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_2(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="bilinear", + padding_mode="reflection", + padding_value=-1.0, + coordinates_mode="normalized_zero_to_one", + align_corners=True, + ) + + expected_output_2 = np.array( + [22.0, 36.3, 11.0, 34.1], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_3(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="nearest", + padding_mode="symmetric", + padding_value=-1.0, + coordinates_mode="normalized_zero_to_one", + align_corners=False, + ) + + expected_output_3 = np.array( + [22.0, 33.0, 11.0, 33.0], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + for build, expected_output in zip( + [build_0, build_1, build_2, build_3], + [ + expected_output_0, + expected_output_1, + expected_output_2, + expected_output_3, + ], + ): + mlmodel = run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + prog = mlmodel._mil_program + number_of_cast = len(prog["main"].find_ops(op_type="cast")) + # For the new iOS16 resample op, the coordinates input is additionally + # cast to fp16, hence one extra cast op compared with iOS15. + if minimum_deployment_target == ct.target.iOS15: + assert number_of_cast == 2 + elif minimum_deployment_target == ct.target.iOS16: + assert number_of_cast == 3 + else: + raise ValueError("Unrecognized target {}".format(minimum_deployment_target)) + + +class TestResizeNearestNeighbor: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.37, 6.17], dtype=np.float32).reshape([1, 1, 2, 1]) + input_placeholder_dict = {"x": mb.placeholder(shape=x_val.shape)} + input_value_dict = {"x": x_val} + + def build_model(x): + return [ + mb.resize_nearest_neighbor( + x=x, target_size_height=2, target_size_width=1, + ), + mb.resize_nearest_neighbor( + x=x, target_size_height=2, target_size_width=3, + ), + ] + + expected_output_types = [ + (1, 1, 2, 1, types.fp32), + (1, 1, 2, 3, types.fp32), + ] + expected_outputs = [ + x_val, + np.array([0.37, 0.37, 0.37, 6.17, 6.17, 6.17], dtype=np.float32).reshape( + [1, 1, 2, 3] + ), + ] + + run_compare_builder( + build_model, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestUpsampleNearestNeighborFractionalScales: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + 
pytest.xfail("rdar://97398448 (TestUpsampleNearestNeighborFractionalScales failing on GPU)") + + x_val = np.array([1.5, -2.5, 3.5], dtype=np.float32).reshape([1, 1, 1, 3]) + input_placeholder_dict = {"x": mb.placeholder(shape=x_val.shape)} + input_value_dict = {"x": x_val} + + def build(x): + return [ + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=1.0, scale_factor_width=1.0, + ), + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=3.17, scale_factor_width=0.67 + ), + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=2.0, scale_factor_width=1.12, + ), + ] + + expected_output_types = [ + (1, 1, 1, 3, types.fp32), + (1, 1, 3, 2, types.fp32), + (1, 1, 2, 3, types.fp32), + ] + expected_outputs = [ + x_val, + np.array([1.5, -2.5, 1.5, -2.5, 1.5, -2.5], dtype=np.float32).reshape( + [1, 1, 3, 2] + ), + np.array([1.5, -2.5, 3.5, 1.5, -2.5, 3.5], dtype=np.float32).reshape( + [1, 1, 2, 3] + ), + ] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResizeBilinear: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "mlprogram": + pytest.xfail("Seg fault: rdar://78343191 ((MIL GPU) Core ML Tools Unit Test failures [failure to load or Seg fault])") + + if backend[0] == "neuralnetwork" and compute_unit == ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://85318710 (Coremltools Smoke test on ResizeBilinear failing on NNv1 backend.)") + + x = np.array([0, 1], dtype=np.float32).reshape(1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_mode_0(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="STRICT_ALIGN_CORNERS", + ) + + expected_output_type = (1, 1, 5, types.fp32) + expected_output = np.array([0, 0.25, 0.5, 0.75, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_0, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_mode_2(x): + return mb.resize_bilinear( + x=x, target_size_height=1, target_size_width=5, sampling_mode="DEFAULT" + ) + + expected_output = np.array([0, 0.4, 0.8, 1, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_2, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_mode_3(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="OFFSET_CORNERS", + ) + + expected_output = np.array([0.1, 0.3, 0.5, 0.7, 0.9], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_3, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + if backend[0] != "neuralnetwork": + def build_mode_4(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="UNALIGN_CORNERS", + ) + + expected_output = np.array([0.0, 0.1, 0.5, 0.9, 1.0], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_4, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + 
backend=backend, + ) + + +class TestUpsampleBilinear: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([0, 1], dtype=np.float32).reshape(1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_upsample_integer(x): + return mb.upsample_bilinear( + x=x, scale_factor_height=1, scale_factor_width=3 + ) + + expected_output_type = (1, 1, 6, types.fp32) + expected_output = np.array( + [0, 0.2, 0.4, 0.6, 0.8, 1], dtype=np.float32 + ).reshape(1, 1, 6) + + run_compare_builder( + build_upsample_integer, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_upsample_fractional(x): + return mb.upsample_bilinear( + x=x, scale_factor_height=1.0, scale_factor_width=2.6, align_corners=False + ) + + expected_output_type = (1, 1, 5, types.fp32) + expected_output = np.array([0, 0.1, 0.5, 0.9, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_upsample_fractional, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + ) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend, align_corners, half_pixel_centers): + if backend[0] == "neuralnetwork" or ct.utils._macos_version() < (13, 0): + pytest.skip("The new half_pixel_centers argument is only available in iOS16") + + if align_corners and half_pixel_centers: + pytest.skip("Invalid configuration of align_corners and half_pixel_centers") + + x = np.array([1, 2], dtype=np.float32).reshape(1, 1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_upsample_bilinear(x): + return mb.upsample_bilinear( + x=x, + scale_factor_height=2, + scale_factor_width=3, + align_corners=align_corners, + half_pixel_centers=half_pixel_centers, + ) + + expected_output_type = (1, 1, 2, 6, types.fp32) + + if align_corners and not half_pixel_centers: + expected_output = [1., 1.2, 1.4, 1.6, 1.8, 2., 1., 1.2, 1.4, 1.6, 1.8, 2.] + elif not align_corners and half_pixel_centers: + expected_output = [1., 1., 1.33334, 1.66667, 2., 2., 1., 1., 1.33334, 1.66667, 2., 2.] + elif not align_corners and not half_pixel_centers: + expected_output = [1., 1.33334, 1.66667, 2., 2., 2., 1., 1.33334, 1.66667, 2., 2., 2.] + else: + raise ValueError("align_corners and half_pixel_centers cannot both be True") + + expected_output = [np.array(expected_output, dtype=np.float32).reshape(1, 1, 2, 6)] + + run_compare_builder( + build_upsample_bilinear, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + )
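+ + # A minimal coordinate-mapping sketch for the expected values above; this is + # an editorial reference, not the backend implementation. align_corners maps + # output index i to i * (in - 1) / (out - 1); half_pixel_centers samples at + # (i + 0.5) * in / out - 0.5; otherwise the mapping is simply i * in / out + # (source coordinates are then clamped to [0, in - 1] before interpolation). + @staticmethod + def _source_coordinate(i, in_size, out_size, align_corners, half_pixel_centers): + if align_corners: + return i * (in_size - 1) / (out_size - 1) + if half_pixel_centers: + return (i + 0.5) * in_size / out_size - 0.5 + return i * in_size / out_size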
+ + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, scale_factor, align_corners, recompute_scale_factor", + itertools.product( + compute_units, + backends, + [(2, 5, 10, 22)], + [(3, 4), (2.5, 2.0), (0.5, 0.75)], + [True, False], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, input_shape, scale_factor, align_corners, recompute_scale_factor + ): + scale_factor_height, scale_factor_width = scale_factor + _, _, height, width = input_shape + height = height * scale_factor_height + width = width * scale_factor_width + is_h_float = height - np.floor(height) > 0.001 + is_w_float = width - np.floor(width) > 0.001 + + # Currently, MIL does not support recompute_scale_factor=False + align_corners=False + # with a fractional output size + if not recompute_scale_factor and not align_corners and (is_h_float or is_w_float): + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + def _get_torch_upsample_prediction(x, scale_factor=(2, 2), align_corners=False, recompute_scale_factor=True): + x = torch.from_numpy(x) + out = torch.nn.functional.interpolate( + x, + scale_factor=scale_factor, + mode="bilinear", + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + return out.numpy() + + x = random_gen(input_shape, rand_min=-100, rand_max=100) + torch_pred = _get_torch_upsample_prediction( + x, + scale_factor=scale_factor, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_upsample(x): + return mb.upsample_bilinear( + x=x, + scale_factor_height=scale_factor[0], + scale_factor_width=scale_factor[1], + align_corners=align_corners, + ) + + expected_output_type = torch_pred.shape + (types.fp32,) + run_compare_builder( + build_upsample, + input_placeholder_dict, + input_value_dict, + expected_output_type, + torch_pred, + compute_unit=compute_unit, + backend=backend, + rtol=0.5, + ) + + +class TestUpsampleNearestNeighbor: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([1.5, 2.5, 3.5], dtype=np.float32).reshape([1, 1, 1, 3]) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.upsample_nearest_neighbor( + x=x, scale_factor_height=1, scale_factor_width=2 + ) + + expected_output_type = (1, 1, 1, 6, types.fp32) + expected_output = np.array( + [1.5, 1.5, 2.5, 2.5, 3.5, 3.5], dtype=np.float32 + ).reshape([1, 1, 1, 6]) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCrop: + @pytest.mark.parametrize( + "compute_unit, backend, is_symbolic", + itertools.product(compute_units, backends, [True, False]), + ) + def test_builder_to_backend_smoke(self, compute_unit, 
backend, is_symbolic): + x = np.array( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], + dtype=np.float32, + ).reshape(1, 1, 4, 4) + + input_shape = list(x.shape) + placeholder_input_shape = input_shape + if is_symbolic: + # set batch and channel dimension symbolic + placeholder_input_shape[0] = get_new_symbol() + placeholder_input_shape[1] = get_new_symbol() + + input_placeholder_dict = {"x": mb.placeholder(shape=placeholder_input_shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.crop(x=x, crop_height=[0, 1], crop_width=[1, 1]) + + expected_output_type = ( + placeholder_input_shape[0], + placeholder_input_shape[1], + 3, + 2, + types.fp32, + ) + expected_output = np.array([2, 3, 6, 7, 10, 11], dtype=np.float32).reshape(1, 1, 3, 2) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, C, H, W", + itertools.product( + compute_units, + backends, + [x for x in range(2, 4)], + [x for x in range(5, 8)], + [x for x in range(8, 10)], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, C, H, W): + input_shape = (1, C, H, W) + x = np.random.random(input_shape) + + crop_h = [np.random.randint(H)] + crop_h.append(np.random.randint(H - crop_h[0])) + crop_w = [np.random.randint(W)] + crop_w.append(np.random.randint(W - crop_w[0])) + + input_placeholder_dict = {"x": mb.placeholder(shape=input_shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.crop(x=x, crop_height=crop_h, crop_width=crop_w) + + expected_output_type = ( + 1, + C, + H - crop_h[0] - crop_h[1], + W - crop_w[0] - crop_w[1], + types.fp32, + ) + expected_output = x[:, :, crop_h[0] : H - crop_h[1], crop_w[0] : W - crop_w[1]] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCropResize: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_builder_to_backend_smoke_pad_value(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("pad_value only supported on iOS16 or above") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("pad_value not supported in macOS12 or older.") + + x = np.array( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], + dtype=np.float32, + ).reshape(1, 1, 4, 4) + + roi = np.array([ + [0, 0.1, 0.3, 1.3, 1], + [0, 0.5, 1.8, 1., 0.3], + [0, 0.0, 0.4, 0.6, 0.7], + ], dtype=np.float32).reshape(3, 1, 5, 1, 1) + + def build(x): + return mb.crop_resize( + x=x, + roi=roi, + target_width=2, + target_height=2, + normalized_coordinates=True, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + pad_value=10.0, + ) + + expected_output_type = [ + (3, 1, 1, 2, 2, types.fp32), + ] + expected_output = [ + np.array([ 3.1, 5.2, 10, 10, 10, 7.899, 10, 13.9, 2.2, 3.1, 9.4, 10.3], dtype=np.float32).reshape(3, 1, 1, 2, 2), + ] + + input_placeholder_dict = {"x": mb.placeholder(shape=(1, 1, 4, 4))} + input_value_dict = {"x": x} + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + )
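+ + # Editorial sketch of how a CORNERS_HEIGHT_FIRST roi row is read in the tests + # above (an assumption made explicit here, not an API of coremltools): each + # row is [batch_index, y1, x1, y2, x2], and with normalized_coordinates=True + # plus ALIGN_CORNERS sampling the corners scale by (dimension - 1). + @staticmethod + def _decode_roi_row(roi_row, height, width): + _, y1, x1, y2, x2 = roi_row + scale_h, scale_w = height - 1, width - 1 + return y1 * scale_h, x1 * scale_w, y2 * scale_h, x2 * scale_w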
+ + @pytest.mark.parametrize( + "compute_unit, backend, is_symbolic", + itertools.product(compute_units, backends, [True, False]), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, is_symbolic): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398582 (TestCropResize failing on mlprogram + GPU)") + x = np.array( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], + dtype=np.float32, + ).reshape(1, 1, 4, 4) + + input_shape = list(x.shape) + placeholder_input_shape = input_shape + if is_symbolic: + # set batch and channel dimension symbolic + placeholder_input_shape[0] = get_new_symbol() + placeholder_input_shape[1] = get_new_symbol() + + input_placeholder_dict = {"x": mb.placeholder(shape=placeholder_input_shape)} + input_value_dict = {"x": x} + N = 1 + roi = np.array([[1, 1, 2, 2]], dtype=np.float32).reshape(1, 1, 4, 1, 1) + roi_normalized = np.array( + [[0, 0.0, 0.0, 1.0 / 3, 1.0 / 3]], dtype=np.float32 + ).reshape(1, 1, 5, 1, 1) + roi_invert = np.array([[2, 2, 1, 1]], dtype=np.float32).reshape(1, 1, 4, 1, 1) + + def build(x, mode=0): + if mode == 0: + return mb.crop_resize( + x=x, + roi=roi, + target_width=2, + target_height=2, + normalized_coordinates=False, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + + elif mode == 1: + return mb.crop_resize( + x=x, + roi=roi, + target_width=4, + target_height=4, + normalized_coordinates=False, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + + elif mode == 2: + return mb.crop_resize( + x=x, + roi=roi, + target_width=1, + target_height=1, + normalized_coordinates=False, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + + elif mode == 3: + return mb.crop_resize( + x=x, + roi=roi_normalized, + target_width=2, + target_height=2, + normalized_coordinates=True, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + + elif mode == 4: + return mb.crop_resize( + x=x, + roi=roi_invert, + target_width=2, + target_height=2, + normalized_coordinates=False, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + + elif mode == 5: + return mb.crop_resize( + x=x, + roi=roi_invert, + target_width=2, + target_height=2, + normalized_coordinates=True, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="UNALIGN_CORNERS", + ) + + expected_output_type = [ + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 2, + 2, + types.fp32, + ), + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 4, + 4, + types.fp32, + ), + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 1, + 1, + types.fp32, + ), + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 2, + 2, + types.fp32, + ), + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 2, + 2, + types.fp32, + ), + ( + N, + placeholder_input_shape[0], + placeholder_input_shape[1], + 2, + 2, + types.fp32, + ), + ] + expected_output = [ + np.array([6, 7, 10, 11], dtype=np.float32).reshape(1, 1, 1, 2, 2), + np.array( + [ + [6, 6.333333, 6.66666, 7], + [7.333333, 7.666666, 8, 8.333333], + [8.666666, 9, 9.3333333, 9.666666], + [10, 10.333333, 10.666666, 11], + ], + dtype=np.float32, + ).reshape(1, 1, 1, 4, 4), + np.array([8.5], dtype=np.float32).reshape(1, 1, 1, 1, 1), + np.array([1, 2, 5, 6], dtype=np.float32).reshape(1, 1, 1, 2, 2), + np.array([11, 10, 7, 6], dtype=np.float32).reshape(1, 1, 1, 2, 2), + np.array([3.5, 5.5, 11.5, 13.5], dtype=np.float32).reshape(1, 
1, 1, 2, 2), + ] + + for mode in range(6): + # nn-proto does not support UNALIGN_CORNERS + if not (backend[0] == 'neuralnetwork' and mode == 5): + run_compare_builder( + functools.partial(build, mode=mode), + input_placeholder_dict, + input_value_dict, + expected_output_type[mode], + expected_output[mode], + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py new file mode 100644 index 00000000..31763287 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py @@ -0,0 +1,333 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +from .testing_utils import run_compare_builder + + +class TestLinear: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[-4.7182, 11.94], [-3.3939, 9.2166]], dtype=np.float32) + weight_val = np.array([[1.2313, -0.095], [-1.4075, -0.8816]], dtype=np.float32) + bias_val = np.array([1.0, 2.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.linear(x=x, weight=weight_val, bias=bias_val)] + + expected_output_types = [(2, 2, types.fp32)] + expected_outputs = [ + np.array( + [[-5.9438195, -1.8854373], [-4.054486, -1.3484411]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(2, 2), rand_min=-37, rand_max=64) + weight_val = random_gen(shape=(2, 2), rand_min=-91, rand_max=84) + bias_val = random_gen(shape=(2,), rand_min=0.0, rand_max=9.0) + v = mb.linear(x=x_val, weight=weight_val, bias=bias_val) + np.testing.assert_allclose(np.matmul(x_val, weight_val.T) + bias_val, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, [2, 3, 5]), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398733 (TestLinear failing on mlprogram + GPU)") + + if backend[0] == "neuralnetwork" and compute_unit != ct.ComputeUnit.CPU_ONLY and platform.machine() == "arm64" and rank == 5: + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)") + + x_shape = np.random.randint(low=1, high=3, size=(rank,)) + x_val = np.random.rand(*x_shape) + out_channels = 3 + w_shape = np.array([out_channels, x_shape[-1]]) + weight_val = np.random.rand(*w_shape).astype(np.float32) + bias_val = np.random.rand(out_channels).astype(np.float32) + 
input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return [mb.linear(x=x, weight=weight_val, bias=bias_val)] + + expected_outputs = [np.matmul(x_val, np.transpose(weight_val)) + bias_val] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMatMul: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[-4.0, 13.0], [-3.0, 9.0]], dtype=np.float32) + y_val = np.array([[1.0, -7.0], [-1.0, -8.0]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_values = {"x": x_val, "y": y_val} + + def build(x, y): + return [ + mb.matmul(x=x_val, y=y), + mb.matmul(x=x, y=y_val), + mb.matmul(x=x, y=y), + mb.matmul(x=x, y=y, transpose_x=True, transpose_y=True), + mb.matmul(x=x_val, y=y, transpose_x=True, transpose_y=True), + mb.matmul(x=x, y=y_val, transpose_x=True, transpose_y=True), + mb.matmul(x=x, y=y_val, transpose_x=True, transpose_y=False), + mb.matmul(x=x, y=y_val, transpose_x=False, transpose_y=True), + ] + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + ] + expected_outputs = [ + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[-1.0, 52.0], [4.0, -163.0]], dtype=np.float32), + np.array([[-95.0, -100.0], [-66.0, -69.0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(2, 2, 4), rand_min=-37, rand_max=64) + y_val = random_gen(shape=(2, 4, 2), rand_min=-91, rand_max=84) + v = mb.matmul(x=x_val, y=y_val) + np.testing.assert_allclose(np.matmul(x_val, y_val), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + ((3, 2, 3, 4), (3, 2, 4, 5)), + ((1, 1, 1, 3, 4), (1, 3, 2, 4, 5)), + ((1, 3, 1, 2, 3), (1, 4, 3, 2)), + ((1, 3, 4), (3, 2, 4, 6)), + ((7, 4), (3, 9, 5, 4, 3)), + ], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, shapes): + shape_x, shape_y = shapes + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(*shape_y) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_values = {"x": x_val, "y": y_val} + + def build(x, y): + return [mb.matmul(x=x, y=y, transpose_x=False, transpose_y=False)] + + expected_outputs = [np.matmul(x_val, y_val)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + 
expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_x", + itertools.product( + compute_units, + backends, + [ + (5,), + (2, 5), + (2, 2, 5), + (4, 3, 2, 5), + (5, 4, 2, 3, 5), + ], + ), + ) + def test_builder_y_rank_2_const(self, compute_unit, backend, shape_x): + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(5, 10) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return [mb.matmul(x=x, y=y_val, transpose_x=False, transpose_y=False)] + + expected_outputs = [np.matmul(x_val, y_val)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEinsum: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + equation = "abcd,adce->abce" + + x_val = np.arange(12).astype(np.float32).reshape((2, 1, 3, 2)) + y_val = np.arange(48).astype(np.float32).reshape((2, 2, 3, 4)) + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_value_dict = {"x": x_val, "y": y_val} + out_shape = list(x_val.shape) + out_shape[-1] = y_val.shape[-1] + expected_output_type = tuple(out_shape) + (types.fp32,) + + def build(x, y): + return mb.einsum(values=(x, y), equation=equation) + + expected_output = np.einsum(equation, x_val, y_val) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, rank, broadcast, backend", + itertools.product( + compute_units, + [3, 4], + [True, False], + backends, + ) + ) + def test_builder_to_backend_stress(self, compute_unit, rank, broadcast, backend): + equation = "abcd,adce->abce" if rank == 4 else "vnm,mno->vno" + shape_x = np.random.randint(low=2, high=16, size=rank).astype(np.int32) + shape_y = np.random.randint(low=2, high=12, size=rank).astype(np.int32) + shape_y[-3] = shape_x[-1] + shape_y[-2] = 1 if broadcast else shape_x[-2] + if rank == 4: + shape_x[-4] = 1 if broadcast else shape_y[-4] + + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(*shape_y) + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + + input_value_dict = {"x": x_val, "y": y_val} + out_shape = [shape_y[-4], shape_x[-3], shape_x[-2], shape_y[-1]] if rank == 4 else \ + [shape_x[-3], shape_x[-2], shape_y[-1]] + expected_output_type = tuple(out_shape) + (types.fp32,) + + def build(x, y): + return mb.einsum(values=(x, y), equation=equation) + + if rank == 3: + expected_output = np.einsum(equation, + np.broadcast_to(x_val, [shape_x[-3], shape_x[-2], shape_x[-1]]), + np.broadcast_to(y_val, [shape_y[-3], shape_x[-2], shape_y[-1]])) + else: + expected_output = np.einsum(equation, + np.broadcast_to(x_val, [shape_y[-4], shape_x[-3], shape_x[-2], shape_x[-1]]), + np.broadcast_to(y_val, [shape_y[-4], shape_y[-3], shape_x[-2], shape_y[-1]])) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + 
def test_builder_eval(self): + x_val = np.arange(6).astype(np.float32).reshape((1, 3, 2)) + y_val = np.arange(24).astype(np.float32).reshape((2, 3, 4)) + equation = "bcd,dce->bce" + v = mb.einsum(values=(x_val, y_val), equation=equation) + np.testing.assert_allclose(np.einsum(equation, x_val, y_val), v.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py new file mode 100644 index 00000000..abff161b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py @@ -0,0 +1,751 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import (_HAS_TF_2, _HAS_TORCH, MSG_TF2_NOT_FOUND, + MSG_TORCH_NOT_FOUND) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen + +from .testing_utils import UNK_SYM, run_compare_builder + +if _HAS_TORCH: + import torch + +if _HAS_TF_2: + import tensorflow as tf + + +class TestNormalizationBatchNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, -16.0]], + [[13.0, -15.0], [13.0, 9.0]], + [[-9.0, -4.0], [-6.0, 3.0]], + ] + ], + dtype=np.float32, + ) + mean_val = np.array([9.0, 6.0, 3.0], dtype=np.float32) + variance_val = np.array([6.0, 1.0, 7.0], dtype=np.float32) + gamma_val = np.array([1.0, 1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 3.0, 0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.batch_norm(x=x, mean=mean_val, variance=variance_val), + mb.batch_norm( + x=x, + mean=mean_val, + variance=variance_val, + gamma=gamma_val, + beta=beta_val, + epsilon=1e-4, + ), + ] + + expected_output_types = [ + (1, 3, 2, 2, types.fp32), + (1, 3, 2, 2, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [ + [[-10.206199, 1.6329918], [0.8164959, -10.206199]], + [[6.999965, -20.999895], [6.999965, 2.9999852]], + [[-4.53557, -2.6457493], [-3.4016776, 0.0]], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [[-9.206122, 2.6329796], [1.8164899, -9.206122]], + [[9.99965, -17.998951], [9.99965, 5.9998503]], + [[-4.535541, -2.6457324], [-3.4016557, 0.0]], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNormalizationInstanceNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, 16.0]], + [[13.0, 15.0], [13.0, 9.0]], + [[-9.0, 4.0], [-6.0, 3.0]], + ], + + [ + [[-5.0, 1.0], [12.0, 3.0]], + 
[[0.0, 9.0], [2.0, -8.0]], + [[2.0, 5.0], [10.0, 0.0]], + + ] + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, epsilon=1e-2) + + expected_output_types = [(2, 3, 2, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [[-1.71524656, 0.54576027], [0.38982874, 0.77965748]], + [[0.22917463, 1.14587319], [0.22917463, -1.60422242]], + [[-1.2470212, 1.06887531], [-0.71258354, 0.89072943]], + ], + + [ + [[-1.27070526, -0.28693344], [1.51664821, 0.04099049]], + [[-0.12380638, 1.36187018], [0.20634397, -1.44440776]], + [[-0.59714057, 0.19904686], [1.5260259, -1.12793219]], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_with_gamma_and_beta(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, 16.0]], + [[13.0, 15.0], [13.0, 9.0]], + [[-9.0, 4.0], [-6.0, 3.0]], + ], + + [ + [[-5.0, 1.0], [12.0, 3.0]], + [[0.0, 9.0], [2.0, -8.0]], + [[2.0, 5.0], [10.0, 0.0]], + + ] + ], + dtype=np.float32, + ) + gamma_val = np.array([-9.0, 3.2, 1.3], dtype=np.float32) + beta_val = np.array([-0.8, 3.4, 1.2], dtype=np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, gamma=gamma_val, beta=beta_val, epsilon=1e-2) + + expected_output_types = [(2, 3, 2, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [[14.63721807, -5.71184211], [-4.30845865, -7.8169173]], + [[4.1333588, 7.06679399], [4.1333588, -1.73351158]], + [[-0.42112757, 2.58953791], [0.27364139, 2.35794826]], + ], + + [ + [[10.6363473, 1.782401], [-14.44983388, -1.16891443]], + [[3.00381959, 7.75798456], [4.06030069, -1.22210484]], + [[0.42371726, 1.45876091], [3.18383368, -0.26631185]], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "rank, compute_unit, backend, epsilon", + itertools.product( + [3, 4], + compute_units, + backends, + [1e-3, 1e-5, 1e-10] + ), + ) + def test_builder_to_backend_stress(self, rank, compute_unit, backend, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, epsilon=epsilon) + + layer = torch.nn.InstanceNorm2d if rank == 4 else torch.nn.InstanceNorm1d + torch_op = layer(num_features=shape[1], eps=epsilon) + expected_outputs = [torch_op(torch.as_tensor(x_val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + also_compare_shapes=True + ) + + +class TestNormalizationL2Norm: + + @staticmethod + def _compute_l2_norm(val, eps): + shape = val.shape + rank = len(shape) + batch_dims = 
rank - 3 + if batch_dims == 0: + square_sum = np.sum(val**2) + output = val/np.power(square_sum + eps, 0.5) + else: + batch_dim_prod = np.prod(shape[:batch_dims]) + reshape_val = np.reshape(val, (batch_dim_prod, -1)) + square_sum = np.sum(reshape_val * reshape_val, axis=1, keepdims=True) + eps + output = reshape_val/np.power(square_sum, 0.5) + output = np.reshape(output, shape) + return output + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.l2_norm(x=x, epsilon=1e-10)] + + expected_output_types = [(1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.08304548, -0.58131838], + [0.41522741, -0.4982729], + [-0.24913645, -0.41522741], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, epsilon", + itertools.product( + compute_units, + backends, + [3, 4, 5], + [1e-4, 5.7] + ) + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-1.0, rand_max=1.0) + input_placeholders = {"x": mb.placeholder(shape=shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.l2_norm(x=x, epsilon=epsilon)] + + output = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon) + expected_output_types = [list(output.shape) + [types.fp32]] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize("rank, epsilon", + itertools.product( + [3, 4, 5], + [1e-4, 11.2], + ), + ) + def test_builder_eval_stress(self, rank, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-1, rand_max=1) + with Function({}): + res = mb.l2_norm(x=x_val, epsilon=epsilon) + ref = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon) + np.testing.assert_allclose(ref, res.val, atol=1e-6, rtol=1e-5) + + +class TestNormalizationLayerNorm: + + @staticmethod + def _keras_layer_norm(x, axes, epsilon): + layer = tf.keras.layers.LayerNormalization(axis=axes, epsilon=epsilon) + data = tf.constant(x, dtype=tf.float32) + output = layer(data) + return output.numpy() + + @staticmethod + def _np_layer_norm(x, axes, gamma=None, beta=None, epsilon=1e-5): + rank = len(x.shape) + axes = [axis + rank if axis < 0 else axis for axis in axes] + normalized_shape = [x.shape[i] if i in axes else 1 for i in range(rank)] + gamma = np.ones(shape=normalized_shape) if gamma is None else np.reshape(gamma, normalized_shape) + beta = np.zeros(shape=normalized_shape) if beta is None else np.reshape(beta, normalized_shape) + num = x - np.mean(x, axis=tuple(axes), keepdims=True) + dem = np.sqrt( + np.sum(np.square(num), axis=tuple(axes), keepdims=True) + / np.prod(normalized_shape) + + epsilon + ) + return num / dem * gamma + beta + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def 
test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + + def build(x): + return [ + # V2->V1 lowering (op_mappings.py): if branch + mb.layer_norm(x=x, axes=[2], epsilon=1e-4), + # V2->V1 lowering (op_mappings.py): else branch + mb.layer_norm(x=x, axes=[-2, -1], epsilon=1e-4), + # V2->V1 lowering (op_mappings.py): if branch with scale + mb.layer_norm(x=x, axes=[2], epsilon=1e-4, gamma=gamma_val, beta=beta_val), + ] + + expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32), (1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.9999969, -0.9999969 ], + [0.99999833, -0.99999833], + [0.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [0.82687193, -1.06312108], + [1.77186835, -0.82687193], + [-0.11812456, -0.59062278], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [1.9999969, -0.9999969 ], + [1.99999833, -0.99999833], + [1.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_rank_2(self, compute_unit, backend): + x_val = np.array([[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]], dtype=np.float32) + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + # V2->V1 lowering (op_mappings.py): if branch + mb.layer_norm(x=x, axes=[1], epsilon=1e-4), + mb.layer_norm(x=x, axes=[1], epsilon=1e-4, gamma=gamma_val, beta=beta_val) + ] + + expected_output_types = [(3, 2, types.fp32), (3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ 0.9999969, -0.9999969 ], + [ 0.99999833, -0.99999833], + [ 0.99995005, -0.99995005], + ], + dtype=np.float32, + ), + np.array( + [ + [ 1.9999969, -0.9999969 ], + [ 1.99999833, -0.99999833], + [ 1.99995005, -0.99995005], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_with_dynamic_shape(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + shape = (get_new_symbol(), get_new_symbol(), 2) + input_placeholders = {"x": mb.placeholder(shape=shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.layer_norm(x=x, axes=[2], epsilon=1e-4), + ] + + expected_output_types = [(UNK_SYM, UNK_SYM, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [ 0.9999969, -0.9999969 ], + [ 0.99999833, -0.99999833], + [ 0.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend, rank_and_axes, epsilon, provides_gamma_beta", + itertools.product( + compute_units, + backends, + [ + [3, [0, 2]], + [3, [-2]], + [4, [0, 1, 3]], + [5, [0, 4]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01], + [True, False] + ), + ) + def test_builder_to_backend_stress_numpy(self, compute_unit, backend, rank_and_axes, epsilon, provides_gamma_beta): + + if backend == ("mlprogram", "fp16") and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://80662357 ([GPU failures] LayerNorm FP16 tests failing on GPU with numerical errors)") + + if backend[0] == "neuralnetwork" and compute_unit != ct.ComputeUnit.CPU_ONLY and platform.machine() == "arm64": + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)") + + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + gamma, beta = None, None + + if provides_gamma_beta: + positive_axes = [axis+rank if axis < 0 else axis for axis in axes] + normalized_shape = [shape[i] for i in range(rank) if i in positive_axes] + gamma = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + beta = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + + def build(x): + return [ + mb.layer_norm(x=x, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta) + ] + + output = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta) + expected_output_types = [tuple(output.shape) + (types.fp32,)] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes, epsilon", + itertools.product( + compute_units, + backends, + [ + [3, [0, 2]], + [3, [-2]], + [4, [0, 1, 3]], + [5, [0, 4]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01] + ), + ) + def test_builder_to_backend_stress_keras(self, compute_unit, backend, rank_and_axes, epsilon): + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.layer_norm(x=x, axes=axes, epsilon=epsilon) + ] + + output = TestNormalizationLayerNorm._keras_layer_norm(x=x_val, axes=axes, epsilon=epsilon) + expected_output_types = [tuple(output.shape) + (types.fp32,)] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize("rank_and_axes, epsilon", + itertools.product( + [ + [3, [0, 2]], + [3, [-2, -1]], + [4, [0, 1, 2, 3]], + [5, [0, 2, -1]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01], + ), + ) + def test_builder_eval_stress(self, rank_and_axes, epsilon): + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + positive_axes = [axis+rank if axis < 0 else axis for axis in axes] + normalized_shape = [shape[i] for i in range(rank) if i in 
positive_axes] + gamma_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + beta_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + with Function({}): + res = mb.layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val) + ref = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val) + np.testing.assert_allclose(ref, res.val, atol=1e-04, rtol=1e-05) + + +class TestNormalizationLocalResponseNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.local_response_norm(x=x, size=2), + mb.local_response_norm(x=x, size=3, alpha=0.0001, beta=0.75, k=1.0), + ] + + expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.99996257, -6.98716545], + [4.99531746, -5.99191284], + [-2.99898791, -4.99531746], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [0.99997497, -6.99143696], + [4.99687672, -5.99460602], + [-2.99932504, -4.99687672], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rank, size, alpha, beta, k", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [2, 3, 5], + [0.0001, 0.01], + [0.75, 1.0], + [1.0, 2.0], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, size, alpha, beta, k + ): + shape = np.random.randint(low=2, high=5, size=rank) + x_val = random_gen(shape=shape) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.local_response_norm(x=x, size=size, alpha=alpha, beta=beta, k=k) + + torch_lrn = torch.nn.LocalResponseNorm(size=size, alpha=alpha, beta=beta, k=k) + expected_outputs = [torch_lrn(torch.as_tensor(x_val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py new file mode 100644 index 00000000..a42f3fb3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py @@ -0,0 +1,494 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units + +from .testing_utils import run_compare_builder + + +class TestAvgPool: + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + [(1, 1, 2, 2, 2), (2, 2, 2)], + ] + ), + ) + def test_avgpool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "neuralnetwork" and rank == 3: + pytest.skip( + "pad_type `same_lower` not supported for 3d pooling in neuralnetwork backend" + ) + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [0.5, 1.5] + elif rank == 2: + expected_output_val = [0.25, 0.75, 1, 2.5] + else: + expected_output_val = [0.125, 0.375, 0.5, 1.25, 0.75, 1.75, 2, 4.5] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.avg_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + itertools.product( + compute_units, + backends, + [1, 2, 3] + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = [ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[1.5, 4.0, 6.5]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [ + [ + [[-10.80291205, -6.42076184], [-7.07910997, 9.1913279]], + [[-3.18181497, 0.9132147], [11.9785544, 7.92449539]], + ] + ], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[-8.611837]], [[-1.1343001]]]], dtype=np.float32), + np.array( + [[[[-3.7778642], [1.056109]], [[4.4086123], [9.951525]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + x_val = np.array( + [ + [ + [ + [[-1, -5, -1], [-3, -3, 8], [2, 6, 2]], + [[-4, 7, -4], [4, 6, 7], [4, 4, 8]], + [[5, -3, 5], [0, -5, 8], [1, 
7, 2]], + ] + ], + [ + [ + [[7, -3, -5], [5, 4, 7], [-2, -4, -3]], + [[-4, 3, -1], [6, -4, 4], [3, 6, 2]], + [[-1, 4, -4], [-2, -1, -2], [3, 2, 8]], + ] + ], + ], + dtype=np.float32, + ) + expected_output_types = [ + (2, 1, 2, 2, 1, types.fp32), + (2, 1, 2, 3, 1, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [[[[-0.8333334], [2.0]], [[1.6666667], [2.1666667]]]], + [[[[2.5], [1.1666667]], [[-1.0], [1.3333334]]]], + ], + dtype=np.float32, + ), + np.array( + [ + [ + [ + [[-0.8333334], [2.0], [3.3333335]], + [[1.6666667], [2.1666667], [3.3333335]], + ] + ], + [ + [ + [[2.5], [1.1666667], [-3.0]], + [[-1.0], [1.3333334], [4.3333335]], + ] + ], + ], + dtype=np.float32, + ), + ] + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.avg_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.avg_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + exclude_padding_from_average=True, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMaxPool: + + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + [(1, 1, 2, 2, 2), (2, 2, 2)], + ] + ), + ) + def test_maxpool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "neuralnetwork" and rank == 3: + pytest.skip( + "pad_type `same_lower` not supported for 3d pooling in neuralnetwork backend" + ) + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [1, 2] + elif rank == 2: + expected_output_val = [1, 2, 3, 4] + else: + expected_output_val = [1, 2, 3, 4, 5, 6, 7, 8] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.max_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + itertools.product( + compute_units, + backends, + [1, 2, 3] + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = 
[ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[2.0, 5.0, 7.0]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [ + [ + [[-10.80291205, -6.42076184], [-7.07910997, 9.1913279]], + [[-3.18181497, 0.9132147], [11.9785544, 7.92449539]], + ] + ], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[-6.42076184]], [[0.9132147]]]], dtype=np.float32), + np.array( + [[[[9.191328], [9.191328]], [[11.978555], [11.978555]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + x_val = np.array( + [ + [ + [ + [[-1, -5, -1], [-3, -3, 8], [2, 6, 2]], + [[-4, 7, -4], [4, 6, 7], [4, 4, 8]], + [[5, -3, 5], [0, -5, 8], [1, 7, 2]], + ] + ], + [ + [ + [[7, -3, -5], [5, 4, 7], [-2, -4, -3]], + [[-4, 3, -1], [6, -4, 4], [3, 6, 2]], + [[-1, 4, -4], [-2, -1, -2], [3, 2, 8]], + ] + ], + ], + dtype=np.float32, + ) + expected_output_types = [ + (2, 1, 2, 2, 1, types.fp32), + (2, 1, 2, 3, 1, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [[[[8.0], [8.0]], [[8.0], [8.0]]]], + [[[[7.0], [7.0]], [[4.0], [8.0]]]], + ], + dtype=np.float32, + ), + np.array( + [ + [[[[8.0], [8.0], [6.0]], [[8.0], [8.0], [7.0]]]], + [[[[7.0], [7.0], [-2.0]], [[4.0], [8.0], [8.0]]]], + ], + dtype=np.float32, + ), + ] + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.max_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.max_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestL2Pool: + + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + ] + ), + ) + def test_l2pool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [1, 2.236068] + else: + expected_output_val = [1, 2.236068, 3.162278, 5.477226] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.l2_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + 
itertools.product(compute_units, backends, [1, 2]), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = [ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[2.236068, 7.071068, 9.219544]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [[[[-10.0, -6.0], [-7.0, 9.0]], [[-3.0, 0.0], [11.0, 7.0]]]], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[11.66190338]], [[3.0]]]], dtype=np.float32), + np.array( + [[[[16.309507], [11.401754]], [[13.379088], [13.038404]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + pass # Enum PoolingType3D has no value defined for name L2 + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.l2_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.l2_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py new file mode 100644 index 00000000..abfb9dd3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py @@ -0,0 +1,443 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import get_core_ml_prediction +from coremltools.models.utils import _macos_version + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestRandomBernoulli: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_bernoulli(shape=np.array([2, 1, 3], np.int32), prob=1.0), + mb.random_bernoulli(shape=np.array([3, 1, 2], np.int32), prob=0.0), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.ones(shape=(2, 1, 3)), np.float32), + np.array(np.zeros(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, prob, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [1.0, 0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, prob, dynamic + ): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.add(x=x, y=x), mb.random_bernoulli(shape=shape, prob=prob)] + + def build_dyn(x, dyn_shape): + return [mb.add(x=x, y=x), mb.random_bernoulli(shape=dyn_shape, prob=prob)] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.binomial(1, prob, shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRandomCategorical: + def softmax(self, data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([1], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + 
mb.random_categorical(x=x, seed=1), + mb.random_categorical(x=x, seed=1, size=4), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), dtype=np.float32), + np.array(np.zeros(shape=(4,)), dtype=np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(_macos_version() < (12, 0), reason="Can only get predictions for ml program on macOS 12+") + @pytest.mark.parametrize( + "compute_unit, backend, n_sample, n_class", + itertools.product( + compute_units, + backends, + [50000], + [2, 10, 20] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, n_sample, n_class): + output_name = "random_categorical" + logits = np.random.rand(2, n_class) + probs = [self.softmax(logits[0]), self.softmax(logits[1])] + + # Test logits input + input_placeholders = {"x": mb.placeholder(shape=(2, n_class))} + input_values = {"x": logits} + + def build(x): + return [ + mb.random_categorical( + x=x, size=n_sample, mode="logits", name=output_name + ) + ] + + prediction = get_core_ml_prediction( + build, input_placeholders, input_values, backend=backend, compute_unit=compute_unit, + ) + + ref0 = np.random.multinomial(n_sample, probs[0]) + ref1 = np.random.multinomial(n_sample, probs[1]) + + pred0 = prediction[output_name].reshape(2, n_sample)[0] + pred1 = prediction[output_name].reshape(2, n_sample)[1] + + # convert to bincount and validate probabilities + pred0 = np.bincount(np.array(pred0).astype(np.int32), minlength=n_class) + pred1 = np.bincount(np.array(pred1).astype(np.int32), minlength=n_class) + + assert np.allclose(np.true_divide(pred0, n_sample), probs[0], atol=1e-2) + assert np.allclose( + np.true_divide(pred0, n_sample), + np.true_divide(ref0, n_sample), + atol=1e-2, + ) + + assert np.allclose(np.true_divide(pred1, n_sample), probs[1], atol=1e-2) + assert np.allclose( + np.true_divide(pred1, n_sample), + np.true_divide(ref1, n_sample), + atol=1e-2, + ) + + # Test probs input + input_placeholders = {"x": mb.placeholder(shape=(2, n_class))} + input_values = {"x": np.array(probs)} + + def build(x): + return [ + mb.random_categorical( + x=x, size=n_sample, mode="probs", name=output_name + ) + ] + + prediction = get_core_ml_prediction( + build, input_placeholders, input_values, backend=backend, compute_unit=compute_unit + ) + + pred0 = prediction[output_name].reshape(2, n_sample)[0] + pred1 = prediction[output_name].reshape(2, n_sample)[1] + + # convert to bincount and validate probabilities + pred0 = np.bincount(np.array(pred0).astype(np.int32), minlength=n_class) + pred1 = np.bincount(np.array(pred1).astype(np.int32), minlength=n_class) + + assert np.allclose(np.true_divide(pred0, n_sample), probs[0], atol=1e-2) + assert np.allclose( + np.true_divide(pred0, n_sample), + np.true_divide(ref0, n_sample), + atol=1e-2, + ) + + assert np.allclose(np.true_divide(pred1, n_sample), probs[1], atol=1e-2) + assert np.allclose( + np.true_divide(pred1, n_sample), + np.true_divide(ref1, n_sample), + atol=1e-2, + ) + + +class TestRandomNormal: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def 
build(x): + return [ + mb.add(x=x, y=x), + mb.random_normal( + shape=np.array([2, 1, 3], np.int32), mean=1.0, stddev=0.0 + ), + mb.random_normal( + shape=np.array([3, 1, 2], np.int32), mean=0.0, stddev=0.0 + ), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.ones(shape=(2, 1, 3)), np.float32), + np.array(np.zeros(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, mean, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [1.0, 0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, mean, dynamic + ): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_normal(shape=shape, mean=mean, stddev=0.0), + ] + + def build_dyn(x, dyn_shape): + return [ + mb.add(x=x, y=x), + mb.random_normal(shape=dyn_shape, mean=mean, stddev=0.0), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.normal(loc=mean, scale=0.0, size=shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRandomUniform: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_uniform( + shape=np.array([2, 1, 3], np.int32), low=0.0, high=0.0 + ), + mb.random_uniform( + shape=np.array([3, 1, 2], np.int32), low=1.0, high=1.0 + ), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.zeros(shape=(2, 1, 3)), np.float32), + np.array(np.ones(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, low, high, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [0.0], + [0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, low, high, dynamic + ): + shape = np.random.randint(low=1, high=4, 
size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_uniform(shape=shape, low=low, high=high), + ] + + def build_dyn(x, dyn_shape): + return [ + mb.add(x=x, y=x), + mb.random_uniform(shape=dyn_shape, low=low, high=high), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.uniform(low=low, high=high, size=shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py new file mode 100644 index 00000000..43c44ead --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py @@ -0,0 +1,790 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units + +from .testing_utils import run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestGRU: + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "activation_functions", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 3], + [1], # [MIL] GRU with batch size 1 produces incorrect + # output(always 0) for second batch onwards + [1, 2], + [1, 2], + [True, False], + [True, False], + ["forward", "reverse"], + [ + ["TANH", "SIGMOID"], + ["SIGMOID", "TANH"], + ], + [True, False], + ), + ) + def test_builder_to_backend_smoke( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + activation_functions, + symbolic, + ): + torch.manual_seed(5) + + R_z = 2 * np.random.rand(hidden_size, hidden_size) - 1 + R_r = 2 * np.random.rand(hidden_size, hidden_size) - 1 + R_o = 2 * np.random.rand(hidden_size, hidden_size) - 1 + W_z = 2 * np.random.rand(hidden_size, input_size) - 1 + W_r = 2 * np.random.rand(hidden_size, input_size) - 1 + W_o = 2 * np.random.rand(hidden_size, input_size) - 1 + b_z = 2 * np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + b_r = 2 * np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + b_o = 2 
* np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + + def apply_act(x, option): + if option == 'TANH': + return np.tanh(x) + elif option == 'SIGMOID': + return 1. / (1 + np.exp(-x)) + else: + raise ValueError("activation invalid") + + def get_numpy_prediction_gru(X, H, return_seq, direction, + inner_activation_str='SIGMOID', + activation_str='TANH', + ): + """ + shape of X : (B, Seq, input_size) + + shape of H : (B, hidden_size) + + shape of return = (B, 1, hidden_size) if return_seq=False else (B, Seq, hidden_size) + """ + assert X.shape == (batch_size, seq_len, input_size) + assert H.shape == (batch_size, hidden_size) + out = [] + for i in range(batch_size): + numpy_input = X[i] + hidden_state = H[i] + out.append( + get_numpy_prediction_gru_single_batch( + numpy_input, + hidden_state, + return_seq, + direction, + inner_activation_str=inner_activation_str, + activation_str=activation_str, + ) + ) + output = np.stack(out, axis=0) + output = np.transpose(output, (1, 0, 2)) + return output, output[-1, :, :] + + def get_numpy_prediction_gru_single_batch(X, h, return_seq, direction, + inner_activation_str='SIGMOID', + activation_str='TANH'): + np_out = np.zeros((seq_len, hidden_size)) + batch_x = X if direction == "forward" else X[::-1, :] + for k in range(seq_len): + x = batch_x[k, :] + z = apply_act(np.dot(W_z, x) + np.dot(R_z, h) + b_z, inner_activation_str) + r = apply_act(np.dot(W_r, x) + np.dot(R_r, h) + b_r, inner_activation_str) + c = h * r + o = apply_act(np.dot(W_o, x) + np.dot(R_o, c) + b_o, activation_str) + h = (1 - z) * o + z * h + np_out[k, :] = h + + if return_seq: + np_out_final = np_out + else: + np_out_final = np_out[-1:, :] + + return np_out_final + + x = np.random.rand(batch_size, seq_len, input_size) + h = np.random.rand(batch_size, hidden_size) + + activation, inner_activation = activation_functions + output, state = get_numpy_prediction_gru( + x, h, output_sequence, direction, inner_activation, activation + ) + expected_outputs = [output, state] + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + hh_wt = np.concatenate([R_r, R_o, R_z], axis=0) + ih_wt = np.concatenate([W_r, W_o, W_z], axis=0) + b = np.concatenate([b_r, b_o, b_z], axis=0) + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + } + + coreml_x = np.transpose(x, (1, 0, 2)) + input_values = {"x": coreml_x, "initial_h": h} + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + + def build(x, initial_h): + arguments = { + "x": x, + "initial_h": initial_h, + "weight_ih": ih_wt, + "weight_hh": hh_wt, + "direction": direction, + "output_sequence": output_sequence, + "activation": activation, + "recurrent_activation": inner_activation, + } + # If bias is provided, add in arguments + if has_bias: + arguments["bias"] = b + return mb.gru(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLSTM: + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_dims", + "output_dim", + "activation", + "inner_activation", + "outer_activation", + "return_seq", + "has_bias", + "forget_bias", + "has_peephole", + "coupled_input_forget", + "clip", + ] + ), + 
itertools.product( + compute_units, + backends, + [[8, 32, 32]], + [1, 4], + ["SIGMOID"], + ["TANH"], + ["TANH", "SIGMOID"], + [False, True], + [False, True], + [False, True], + [True, False], + [False], # We have not exposed this option yet! + [50.0, 0.2, 0.01], + ), + ) + def test_numpy_numerical( + self, + compute_unit, + backend, + input_dims, + output_dim, + activation, + inner_activation, + outer_activation, + return_seq, + has_bias, + forget_bias, + has_peephole, + coupled_input_forget, + clip, + ): + def _apply_act(x, option): + if option == "TANH": + return np.tanh(x) + elif option == "RELU": + return np.maximum(0, x) + elif option == "SIGMOID": + return 1.0 / (1 + np.exp(-x)) + elif option == "SIGMOID_HARD": + return np.minimum(np.maximum(0.2 * x + 0.5, 0), 1) + elif option == "LINEAR": + return x + else: + raise ValueError("activation invalid") + + def _clip(x, threshold=500.0): + return np.maximum(np.minimum(x, threshold), -threshold) + + def _get_numpy_prediction_lstm(Weights, X): + # X : (batch, seq_len, channel) + batch, _, _ = X.shape + out = [] + for i in range(batch): + out.append( + _get_numpy_prediction_lstm_single_batch( + Weights, np.expand_dims(X[i, :, :], axis=0) + ) + ) + return np.stack(out, axis=0) + + def _get_numpy_prediction_lstm_single_batch(Weights, X): + + batch_size, seq_len, input_size = X.shape + X = X[0, :, :] + hidden_size = output_dim + + b = Weights["b"] + Wx_i, Wx_f, Wx_o, Wx_g = np.split(Weights["W_x"], 4) + Wh_i, Wh_f, Wh_o, Wh_g = np.split(Weights["W_h"], 4) + b_i, b_f, b_o, b_g = np.split(b, 4) + p_i, p_f, p_o = np.split(Weights["p"], 3) + + act1 = activation + act2 = inner_activation + act3 = outer_activation + + h = np.zeros((hidden_size)) + c = np.zeros((hidden_size)) + np_out = np.zeros((seq_len, hidden_size)) + for k in range(seq_len): + x = X[k, :] + i = _apply_act(np.dot(Wx_i, x) + np.dot(Wh_i, h) + b_i + c * p_i, act1) + f = _apply_act(np.dot(Wx_f, x) + np.dot(Wh_f, h) + b_f + c * p_f, act1) + g = _apply_act(np.dot(Wx_g, x) + np.dot(Wh_g, h) + b_g, act2) + if coupled_input_forget: + c = c * (1 - i) + i * g + else: + c = c * f + i * g + c = _clip(c, clip) + o = _apply_act(np.dot(Wx_o, x) + np.dot(Wh_o, h) + b_o + c * p_o, act1) + h = o * _apply_act(c, act3) + np_out[k, :] = h + + if return_seq: + np_out_final = np_out + else: + np_out_final = np_out[-1:, :] + return np_out_final + + batch = input_dims[0] + seq_len = input_dims[1] + input_size = input_dims[2] + hidden_size = output_dim + + # define random weights + W_x = np.random.rand(4 * hidden_size, input_size) + W_h = np.random.rand(4 * hidden_size, hidden_size) + + if has_bias: + b = np.random.rand(4 * hidden_size) - 0.5 + if forget_bias: + b = b + 1 + else: + b = np.zeros((4 * hidden_size)) + + if has_peephole: + p = np.random.rand(3 * hidden_size) - 0.5 + else: + p = np.zeros((3 * hidden_size)) + + Weights = {} + Weights["W_x"] = W_x + Weights["W_h"] = W_h + Weights["b"] = b + Weights["p"] = p + + input_data = np.random.rand(batch, seq_len, input_size) + numpy_preds = _get_numpy_prediction_lstm(Weights, input_data) + numpy_preds = np.transpose(numpy_preds, [1, 0, 2]) + + coreml_input_data = np.transpose(input_data, [1, 0, 2]) + input_placeholders = {"x": mb.placeholder(shape=coreml_input_data.shape)} + input_values = {"x": coreml_input_data} + + def build(x): + h_all, ht, ct = mb.lstm( + x=x, + initial_h=np.zeros((batch, hidden_size)).astype(np.float32), + initial_c=np.zeros((batch, hidden_size)).astype(np.float32), + weight_ih=W_x, + weight_hh=W_h, + peephole=p, + 
direction="forward", + bias=b, + output_sequence=return_seq, + recurrent_activation=activation, + cell_activation=inner_activation, + activation=outer_activation, + clip=clip, + ) + return h_all + + expected_output_types = ( + seq_len if return_seq else 1, + batch, + hidden_size, + types.fp32, + ) + expected_outputs = numpy_preds + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 8], + [1, 32], + [1, 64], + [1, 16], + [True, False], + [True, False], + ["forward", "reverse"], + [True, False], + ), + ) + def test_builder_to_backend_smoke_unilstm( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + symbolic, + ): + + torch.manual_seed(50) + rnn = torch.nn.LSTM(input_size, hidden_size, 1, bias=has_bias) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + + # Make weight compatible to CoreML format + def ifzo_to_ifoz(x): + i, f, z, o = np.split(x, 4) + return np.concatenate([i, f, o, z], axis=0) + + w_x = ifzo_to_ifoz(ih_wt) + w_h = ifzo_to_ifoz(hh_wt) + + b = None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + ih_b = ifzo_to_ifoz(ih_b) + hh_b = ifzo_to_ifoz(hh_b) + b = ih_b + hh_b + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(1, batch_size, hidden_size) + c0 = torch.randn(1, batch_size, hidden_size) + + n_t = t + if direction == "reverse": + n_t = torch.flip(n_t, [0]) + + output, (hn, cn) = rnn(n_t, (h0, c0)) + if not output_sequence: + output = output[-1].unsqueeze(0) + + output = output.detach().numpy() + hn = hn.detach().numpy().squeeze(0) + cn = cn.detach().numpy().squeeze(0) + + t = np.reshape(t.detach().numpy(), [seq_len, batch_size, input_size]) + h = np.reshape(h0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + c = np.reshape(c0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + c_shape = [batch_size, hidden_size] + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + expected_outputs = [output, hn, cn] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + "initial_c": mb.placeholder(shape=c_shape), + } + input_values = {"x": t, "initial_h": h, "initial_c": c} + + def build(x, initial_h, initial_c): + arguments = { + "x": x, + "initial_h": initial_h, + "initial_c": initial_c, + "weight_ih": w_x, + "weight_hh": w_h, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias is provided, add in arguments + if b is not None: + arguments["bias"] = b + return mb.lstm(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, 
+ expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 8], + [1, 32], + [1, 64], + [2, 16], + [True, False], + [True, False], + [True, False], + ), + ) + def test_builder_to_backend_smoke_bidirlstm( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + symbolic, + ): + def _pytorch_hidden_to_coreml(x): + x = x.detach().numpy() + # Split along the direction axis + f, b = np.split(x, 2, axis=0) + # Concatenate along the hidden-size axis + x = np.concatenate([f, b], axis=2) + x = np.squeeze(x, axis=0) + return x + + direction = "bidirectional" + torch.manual_seed(20) + rnn = torch.nn.LSTM( + input_size, hidden_size, 1, bidirectional=True, bias=has_bias + ) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + ih_wt_r = state_dict["weight_ih_l0_reverse"].detach().numpy() + hh_wt_r = state_dict["weight_hh_l0_reverse"].detach().numpy() + + def ifzo_to_ifoz(x): + i, f, z, o = np.split(x, 4) + return np.concatenate([i, f, o, z], axis=0) + + wx = ifzo_to_ifoz(ih_wt) + wh = ifzo_to_ifoz(hh_wt) + r_wx = ifzo_to_ifoz(ih_wt_r) + r_wh = ifzo_to_ifoz(hh_wt_r) + + b, r_b = None, None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + r_ih_b = state_dict["bias_ih_l0_reverse"].detach().numpy() + r_hh_b = state_dict["bias_hh_l0_reverse"].detach().numpy() + # Convert forward bias into [4*H] + b = ih_b + hh_b + b = ifzo_to_ifoz(b) + # Convert reverse bias into [4*H] + r_b = r_ih_b + r_hh_b + r_b = ifzo_to_ifoz(r_b) + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(2, batch_size, hidden_size) + c0 = torch.randn(2, batch_size, hidden_size) + + output, (hn, cn) = rnn(t, (h0, c0)) + if not output_sequence: + output_f = output[-1].unsqueeze(0)[:, :, :hidden_size] + output_r = output[0].unsqueeze(0)[:, :, hidden_size:] + output = torch.cat([output_f, output_r], dim=2) + + output = output.detach().numpy() + hn = _pytorch_hidden_to_coreml(hn) + cn = _pytorch_hidden_to_coreml(cn) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, 2 * hidden_size] + c_shape = [batch_size, 2 * hidden_size] + + expected_output_types = [ + ( + seq_len if output_sequence else 1, + batch_size, + 2 * hidden_size, + types.fp32, + ), + (batch_size, 2 * hidden_size, types.fp32), + (batch_size, 2 * hidden_size, types.fp32), + ] + expected_outputs = [output, hn, cn] + + t = t.detach().numpy() + h = _pytorch_hidden_to_coreml(h0) + c = _pytorch_hidden_to_coreml(c0) + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + "initial_c": mb.placeholder(shape=c_shape), + } + input_values = {"x": t, "initial_h": h, "initial_c": c} + + def build(x, initial_h, initial_c): + arguments = { + "x": x, + "initial_h": initial_h, + "initial_c": initial_c, + "weight_ih": wx, + "weight_hh": wh, + "weight_ih_back": r_wx, + "weight_hh_back": r_wh, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias
is provided, add in arguments + if b is not None: + arguments["bias"] = b + arguments["bias_back"] = r_b + return mb.lstm(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRNN: + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [2, 8], + [1, 32], + [1, 64], + [1, 16], + [True, False], + [True, False], + ["forward", "reverse"], + [True, False], + ), + ) + def test_builder_to_backend_smoke( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + symbolic, + ): + torch.manual_seed(50) + rnn = torch.nn.RNN(input_size, hidden_size, 1, bias=has_bias) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + + b = None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + b = ih_b + hh_b + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(1, batch_size, hidden_size) + + n_t = t + if direction == "reverse": + n_t = torch.flip(n_t, [0]) + + output, hn = rnn(n_t, h0) + if not output_sequence: + output = output[-1].unsqueeze(0) + + output = output.detach().numpy() + hn = hn.detach().numpy().squeeze(0) + + t = np.reshape(t.detach().numpy(), [seq_len, batch_size, input_size]) + h = np.reshape(h0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + expected_outputs = [output, hn] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + } + input_values = {"x": t, "initial_h": h} + + def build(x, initial_h): + arguments = { + "x": x, + "initial_h": initial_h, + "weight_ih": ih_wt, + "weight_hh": hh_wt, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias is provided, add in arguments + if b is not None: + arguments["bias"] = b + return mb.rnn(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py new file mode 100644 index 00000000..2a10db8a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py @@ -0,0 +1,356 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest +import scipy + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + run_compare_builder +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestReduction: + # All ops in this test share the same backends + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + [ + "argmax", + "argmin", + "l1_norm", + "l2_norm", + "log_sum", + "log_sum_exp", + "max", + "mean", + "min", + "prod", + "sum", + "sum_square", + ], + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, mode): + val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + if mode in {"argmax", "argmin"}: + expected_output_types = (2, types.int32) + else: + expected_output_types = (2, types.fp32) + + if mode == "argmax": + build = lambda x: mb.reduce_argmax(x=x, axis=1, keep_dims=False) + expected_outputs = np.array([2, 2], dtype=np.int32) + elif mode == "argmin": + build = lambda x: mb.reduce_argmin(x=x, axis=1, keep_dims=False) + expected_outputs = np.array([0, 0], dtype=np.int32) + elif mode == "l1_norm": + build = lambda x: mb.reduce_l1_norm(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 15.0], dtype=np.float32) + elif mode == "l2_norm": + build = lambda x: mb.reduce_l2_norm(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.74165738, 8.77496438], dtype=np.float32) + elif mode == "log_sum": + build = lambda x: mb.reduce_log_sum(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([1.7917595, 2.70805025], dtype=np.float32) + elif mode == "log_sum_exp": + build = lambda x: mb.reduce_log_sum_exp(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.40760589, 6.40760612], dtype=np.float32) + elif mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.0, 6.0], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([2.0, 5.0], dtype=np.float32) + elif mode == "min": + build = lambda x: mb.reduce_min(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([1.0, 4.0], dtype=np.float32) + elif mode == "prod": + build = lambda x: mb.reduce_prod(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 120.0], dtype=np.float32) + elif mode == "sum": + build = lambda x: mb.reduce_sum(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 15.0], dtype=np.float32) + elif mode == "sum_square": + build = lambda x: mb.reduce_sum_square(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([14.0, 77.0], dtype=np.float32) + else: + raise NotImplementedError() + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, 
+ backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_2d(self, compute_unit, backend, mode): + # test lowering of a spatial reduction to the global_pool path + val = np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[2, -1], keep_dims=True) + expected_outputs = np.array([[[[6.0]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[3, -2], keep_dims=True) + expected_outputs = np.array([[[[3.5]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_none(self, compute_unit, backend, mode): + # test lowering of a spatial reduction to the global_pool path for axes=None + val = np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=None, keep_dims=True) + expected_outputs = np.array([[[[6.0]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=None, keep_dims=True) + expected_outputs = np.array([[[[3.5]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_3d(self, compute_unit, backend, mode): + # test lowering of a spatial reduction to the global_pool path + val = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[2, -1, 3], keep_dims=True) + expected_outputs = np.array([[[[[6.0]]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[-3, 3, 4], keep_dims=True) + expected_outputs = np.array([[[[[3.5]]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + ["axis", "keep_dims"], + itertools.product( + [1, -3], + [True, False] + ) + ) + def test_builder_eval(self, axis, keep_dims): + x_val = random_gen(shape=(1, 3, 4, 4), rand_min=-100.0, rand_max=100.0) + + @ssa_fn + def test_reduce_argmax(): + res = mb.reduce_argmax(x=x_val, axis=axis, keep_dims=keep_dims).val + ref = np.argmax(x_val, axis=axis) + if keep_dims: + ref = np.expand_dims(ref, axis=axis) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_argmin(): + res = mb.reduce_argmin(x=x_val, axis=axis, keep_dims=keep_dims).val + ref = np.argmin(x_val, axis=axis) + if keep_dims: + ref =
np.expand_dims(ref, axis=axis) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_l1_norm(): + res = mb.reduce_l1_norm(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(np.abs(x_val), axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_l2_norm(): + res = mb.reduce_l2_norm(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sqrt(np.sum(np.square(x_val), axis=axis, keepdims=keep_dims)) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_log_sum(): + x_val = random_gen(shape=(1, 3, 4, 4), rand_min=0.0, rand_max=100.0) + res = mb.reduce_log_sum(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.log(np.sum(x_val, axis=axis, keepdims=keep_dims)) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_log_sum_exp(): + res = mb.reduce_log_sum_exp(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = scipy.special.logsumexp(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_max(): + res = mb.reduce_max(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.max(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_mean(): + res = mb.reduce_mean(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.mean(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_min(): + res = mb.reduce_min(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.min(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_prod(): + res = mb.reduce_prod(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.prod(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_sum(): + res = mb.reduce_sum(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_sum_square(): + res = mb.reduce_sum_square(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(np.square(x_val), axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + test_reduce_argmax() + test_reduce_argmin() + test_reduce_l1_norm() + test_reduce_l2_norm() + test_reduce_log_sum() + test_reduce_log_sum_exp() + test_reduce_max() + test_reduce_mean() + test_reduce_min() + test_reduce_prod() + test_reduce_sum() + test_reduce_sum_square() + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} + input_values = {"x": val} + + def build(x): + return [ + mb.reduce_argmax(x=x, axis=1, keep_dims=True), + mb.reduce_argmin(x=x, axis=0, keep_dims=True), + ] + + expected_output_types = [(s0, 1, types.int32), (1, 3, types.int32)] + expected_outputs = [ + np.array([[2], [2]], dtype=np.int32), + np.array([[0, 0, 0]], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + 
expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "input_size", [(1), (2), (1,2), (2,2), (2,3,4), (2,3,4,10)] + ) + def test_reduce_log_sum_exp_value_inference(self, input_size): + rs = np.random.RandomState(1234) + x = rs.random(input_size) + + for axis in range(-x.ndim, x.ndim - 1): + @mb.program(input_specs=[]) + def prog(): + return mb.reduce_log_sum_exp(x=x, axes=(axis,)) + + op = list(prog.functions.values())[0].operations[3] + assert op.op_type == 'reduce_log_sum_exp' + np.testing.assert_allclose( + op.value_inference(), + scipy.special.logsumexp(x, axis=axis), + atol=1e-04, + rtol=1e-05 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py new file mode 100644 index 00000000..6829e9a8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py @@ -0,0 +1,750 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + +if _HAS_TF_2: + import tensorflow as tf + + +class TestScatter: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return (mb.scatter(data=data, indices=indices, updates=updates),) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[9, 11, 13], [9, 11, 13]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rankData_rankIndices, accumulate_mode", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (2, 1), + (3, 2), + (2, 3), + (2, 2), + (1, 1), + (3, 3), + (3, 3), + (3, 3), + (1, 3), + (3, 1), + (3, 1), + ], + ["update", "add", "sub", "mul", "div", "max", "min"], + ), + ) + def test_builder_to_backend_programmatic( + self, compute_unit, backend, rankData_rankIndices, accumulate_mode + ): + data_rank, indices_rank = rankData_rankIndices + data_shape = np.random.randint(low=2, high=5, size=data_rank) + indices_shape = np.random.randint(low=2, high=5, size=indices_rank) + updates_shape = list(indices_shape) + 
list(data_shape[1:]) + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices = np.random.randint(0, data_shape[0], size=indices_shape).astype( + np.int32 + ) + + def build(data, indices, updates): + return mb.scatter( + data=data, indices=indices, updates=updates, mode=accumulate_mode + ) + + tf_output = tf.Variable(data) + if accumulate_mode == "update": + tf.compat.v1.scatter_update(tf_output, indices, updates) + if accumulate_mode == "add": + tf.compat.v1.scatter_add(tf_output, indices, updates) + if accumulate_mode == "sub": + tf.compat.v1.scatter_sub(tf_output, indices, updates) + if accumulate_mode == "mul": + tf.compat.v1.scatter_mul(tf_output, indices, updates) + if accumulate_mode == "div": + tf.compat.v1.scatter_div(tf_output, indices, updates) + if accumulate_mode == "max": + tf.compat.v1.scatter_max(tf_output, indices, updates) + if accumulate_mode == "min": + tf.compat.v1.scatter_min(tf_output, indices, updates) + expected_output = tf_output.numpy() + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestScatterAlongAxis: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return mb.scatter_along_axis( + data=data, indices=indices, updates=updates, axis=0, mode="update" + ) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[1, 6, 10], [8, 9, 7]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + v = mb.scatter_along_axis( + data=x, indices=indices, updates=updates, axis=0, mode="update" + ) + np.testing.assert_allclose(np.array([[1, 6, 10], [8, 9, 7]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_axis", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + ), + ) + def test_builder_to_backend_programmatic(self, compute_unit, backend, rank_axis): + rank, axis = rank_axis + data_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(data_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + updates_shape 
= indices_shape + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices = np.random.randint( + -data_shape[axis], data_shape[axis], size=indices_shape + ).astype(np.int32) + + def build(data, indices, updates): + return mb.scatter_along_axis( + data=data, indices=indices, updates=updates, axis=axis, mode="update" + ) + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + + np_output = np.copy(data) + np.put_along_axis(np_output, indices, updates, axis=axis) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + np_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestScatterNd: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0], [0, 2]], dtype=np.int32) + updates = np.array([5, 10], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return (mb.scatter_nd(data=data, indices=indices, updates=updates),) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[1, 2, 13], [9, 5, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rankData_rankIndices, accumulate_mode", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (2, 2), + (3, 2), + (2, 3), + (1, 4), + (5, 2), + (2, 5), + (4, 3), + (3, 4), + (2, 4), + (4, 2), + (1, 5), + ], + ["update", "add", "sub"], + ), + ) + def test_builder_to_backend_programmatic( + self, compute_unit, backend, rankData_rankIndices, accumulate_mode + ): + data_rank, indices_rank = rankData_rankIndices + data_shape = np.random.randint(low=2, high=5, size=data_rank) + indices_shape = np.random.randint(low=2, high=5, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=data_rank + 1) + updates_shape = list(indices_shape[:-1]) + list(data_shape[indices_shape[-1] :]) + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, data_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1).astype(np.int32) + + def build(data, indices, updates): + return mb.scatter_nd( + data=data, indices=indices, updates=updates, mode=accumulate_mode + ) + + tf_output = tf.Variable(data) + if accumulate_mode == "update": + tf.compat.v1.scatter_nd_update(tf_output, indices, updates) + if accumulate_mode == "add": + tf.compat.v1.scatter_nd_add(tf_output, indices, updates) + if accumulate_mode == "sub": + 
tf.compat.v1.scatter_nd_sub(tf_output, indices, updates) + expected_output = tf_output.numpy() + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGather: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather(x=x, indices=indices, axis=0), + mb.gather(x=x, indices=indices, axis=1), + mb.gather(x=x, indices=indices, axis=-2), + mb.gather(x=x, indices=indices, axis=-1), + mb.gather(x=x, indices=indices), + # mb.gather(x=x, indices=1), #shape of scalar indices is incorrect. + # mb.gather(x=x, indices=1, axis=1), #Scalar index passes on axis=0 but fails on axis=1, + # Need to handle rank 0 correctly, rdar://73160449 + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 2, types.fp32), + (2, 3, types.fp32), + (2, 2, types.fp32), + (2, 3, types.fp32), + # (3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[2, 1], [5, 4]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[2, 1], [5, 4]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + # np.array([4, 5, 6], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if ct.utils._macos_version() < (13, 0): + pytest.skip("batch_dims not supported in macOS12 or older.") + + x = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather(x=x, indices=indices, axis=1, batch_dims=0), + mb.gather(x=x, indices=indices, axis=1, batch_dims=1), + mb.gather(x=x, indices=indices, axis=2, batch_dims=0), + mb.gather(x=x, indices=indices, axis=2, batch_dims=1), + mb.gather(x=x, indices=indices, axis=2, batch_dims=2), + ] + + expected_output_types = [ + (2, 2, 2, 2, 3, types.fp32), + (2, 2, 2, 3, types.fp32), + (2, 2, 2, 2, 2, types.fp32), + (2, 2, 2, 2, types.fp32), + (2, 2, 2, types.fp32), + ] + + expected_outputs = [ + np.array([[[[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 2, 3], + [ 4, 5, 6]]], + [[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 
2, 3], + [ 1, 2, 3]]]], + [[[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [10, 11, 12]]], + [[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [ 7, 8, 9]]]]], dtype=np.float32 + ), + np.array([[[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 2, 3], + [ 4, 5, 6]]], + [[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [ 7, 8, 9]]]], dtype=np.float32 + ), + np.array([[[[[ 2, 1], + [ 1, 2]], + [[ 2, 1], + [ 1, 1]]], + [[[ 5, 4], + [ 4, 5]], + [[ 5, 4], + [ 4, 4]]]], + [[[[ 8, 7], + [ 7, 8]], + [[ 8, 7], + [ 7, 7]]], + [[[11, 10], + [10, 11]], + [[11, 10], + [10, 10]]]]], dtype=np.float32 + ), + np.array([[[[ 2, 1], + [ 1, 2]], + [[ 5, 4], + [ 4, 5]]], + [[[ 8, 7], + [ 7, 7]], + [[11, 10], + [10, 10]]]], dtype=np.float32 + ), + np.array([[[ 2, 1], + [ 4, 5]], + [[ 8, 7], + [10, 10]]], dtype=np.float32 + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + + def test_builder_eval_iOS16(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, ), dtype=types.fp32)], opset_version=ct.target.iOS16) + def prog(x): + params = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + res = mb.gather(x=params, indices=indices, axis=2, batch_dims=2) + return res + + main_func = prog.functions["main"] + gather_ops = main_func.find_ops(op_type="gather")[0] + + np.testing.assert_allclose( + np.array([[[ 2, 1], [ 4, 5]], [[ 8, 7], [10, 10]]], dtype=np.float32), + gather_ops.outputs[0].val, + atol=1e-04, + rtol=1e-05 + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_embedding_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + input_placeholders = { + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"indices": indices} + + def build(indices): + return [ + mb.gather(x=x, indices=indices, axis=0), + mb.gather(x=x, indices=indices, axis=-2), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + v = mb.gather(x=x, indices=indices, axis=-1) + np.testing.assert_allclose(np.array([[2, 1], [5, 4]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + +class TestGatherAlongAxis: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather_along_axis(x=x, indices=indices, axis=0), + mb.gather_along_axis(x=x, indices=indices, 
axis=1), + mb.gather_along_axis(x=x, indices=indices, axis=-2), + mb.gather_along_axis(x=x, indices=indices, axis=-1), + mb.gather_along_axis(x=x, indices=indices), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + np.array([[2, 1, 2], [5, 5, 4]], dtype=np.float32), + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + np.array([[2, 1, 2], [5, 5, 4]], dtype=np.float32), + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [0, 0, 1]], dtype=np.int32) + v = mb.gather_along_axis(x=x, indices=indices, axis=0) + np.testing.assert_allclose(np.array([[4, 2, 6], [1, 2, 6]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_axis", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + ), + ) + def test_builder_to_backend_programmatic(self, compute_unit, backend, rank_axis): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398875 (TestGatherAlongAxis failing on mlprgram + GPU)") + rank, axis = rank_axis + x_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(x_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + + x = np.random.rand(*x_shape).astype(np.float32) + indices = np.random.randint( + -x_shape[axis], x_shape[axis], size=indices_shape + ).astype(np.int32) + + def build(x, indices): + return mb.gather_along_axis(x=x, indices=indices, axis=axis) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + expected_output_types = tuple(indices_shape[:]) + (types.fp32,) + expected_output = np.take_along_axis(x, indices, axis=axis) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGatherNd: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0], [0, 2]], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return (mb.gather_nd(x=x, indices=indices),) + + expected_output_types = (2, types.fp32) + expected_outputs = np.array([4, 3], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + frontend_only=False, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not 
supported") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("batch_dims not supported in macOS12 or older.") + + x = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather_nd(x=x, indices=indices, batch_dims=0), + mb.gather_nd(x=x, indices=indices, batch_dims=1), + ] + + expected_output_types = [ + (2, 2, 3, types.fp32), + (2, 2, types.fp32) + ] + + expected_outputs = [ + np.array([[[7, 8, 9], + [4, 5, 6]], + [[7, 8, 9], + [1, 2, 3]]], dtype=np.float32 + ), + np.array([[ 4, 2], + [10, 7]], dtype=np.float32 + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py new file mode 100644 index 00000000..a9fa669e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py @@ -0,0 +1,394 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestSliceByIndex: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array(list(range(24))).reshape((2, 3, 4)).astype(np.float32) + begin_val = np.array([1, 1, 1], dtype=np.int32) + end_val = np.array([2, 3, 3], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "begin": mb.placeholder(shape=begin_val.shape, dtype=types.int32), + "end": mb.placeholder(shape=end_val.shape, dtype=types.int32), + } + input_values = {"x": x_val, "begin": begin_val, "end": end_val} + + def build(x, begin, end): + begin_c = mb.const(val=begin_val) + end_c = mb.const(val=end_val) + return [ + mb.slice_by_index(x=x, begin=begin, end=end), + mb.slice_by_index(x=x, begin=begin_c, end=end_c) + ] + + expected_output_types = [(UNK_SYM, UNK_SYM, UNK_SYM, types.fp32)] * 2 + expected_outputs = [np.array([[[17, 18], [21, 22]]], dtype=np.float32)] * 2 + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_type_inference(self): + s0 = get_new_symbol() + s1 = get_new_symbol() + s2 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(10, s0, s1, s2)), + } + + def build(x): + return [ + mb.slice_by_index( + x=x, begin=[2, 5, 6, 12], end=[6, 9, 20, -9], 
stride=[2, 1, 2, 1] + ), + mb.slice_by_index( + x=x, + begin=[-2, -5, -3, 9], + end=[-6, -9, -6, -7], + stride=[-2, -1, -2, 1], + ), + mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[-6, -9, 3, -2], + stride=[-2, -3, 1, 2], + begin_mask=[True, True, True, True], + end_mask=[False, False, False, False], + ), + mb.slice_by_index( + x=x, + begin=[-2, 5, -1, -7], + end=[0, 0, 0, 0], + stride=[-2, -3, 1, -2], + begin_mask=[False, False, False, False], + end_mask=[True, True, True, True], + ), + mb.slice_by_index( + x=x, begin=[4, -1, 0, -5], end=[4, -1, 0, -5], stride=[1, -1, 2, -2] + ), + ] + + expected_output_types = [ + (2, 4, 7, UNK_SYM, types.fp32), + (2, 4, 2, UNK_SYM, types.fp32), + (3, 3, 3, UNK_SYM, types.fp32), + (5, 2, 1, UNK_SYM, types.fp32), + (0, 0, 0, 0, types.fp32), + ] + + run_compare_builder( + build, + input_placeholders, + expected_output_types=expected_output_types, + frontend_only=True, + ) + + + @pytest.mark.xfail(reason="rdar://99664032") + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_single_element_edge_case(self, compute_unit, backend): + x_val = np.array(list(range(6))).reshape((1, 3, 2)).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return mb.slice_by_index( + x=x, + begin=[-1, 0, 0], + end=[-2, 0, 0], + stride=[-1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True] + ) + + expected_output_types = [(1, 3, 2, types.fp32)] + expected_outputs = [np.array([[[0, 1], [2, 3], [4, 5]]], dtype=np.float32)] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval_scalar_output_corner_cases(self): + x1 = np.array([2.]) + x2 = np.array([[[[1.],[3.]]]]) + v = [ + mb.slice_by_index( + x=x1, begin=[0,], end=[0], squeeze_mask=[True], + ), + mb.slice_by_index( + x=x2, begin=[0, 0, 0, 0], end=[0, 0, 0, 0], squeeze_mask=[True, True, True, True], + ), + ] + assert v[0].val.shape == () + assert v[0].val == 2 + assert v[1].val.shape == () + assert v[1].val == 1 + + @ssa_fn + def test_builder_eval(self): + x_val = np.array(list(range(24))).reshape((2, 3, 4)) + v = [ + mb.slice_by_index( + x=x_val, begin=[1, 1, 1], end=[2, 2, 2] + ), # x_val[1:2, 1:2, 1:2] + mb.slice_by_index( + x=x_val, begin=[1, 1, 1], end=[2, 3, 4], stride=[1, 1, 2] + ), # x_val[1:2, 1:3, 1:4:2] + mb.slice_by_index( + x=x_val, begin=[-3, -3, -3], end=[-1, -1, -1] + ), # x_val[-3:-1, -3:-1, -3:-1] + mb.slice_by_index( + x=x_val, begin=[0, 0, -3], end=[-1, -2, -2] + ), # x_val[0:-1, 0:-2, -3:-2] + mb.slice_by_index( + x=x_val, begin=[-1, -1, -1], end=[0, 1, -3], stride=[-2, -1, -3] + ), # x_val[-1:0:-2, -1:1:-1, -1:-3:-3] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[True, False, True], + ), # x_val[:2, 1:3, :4:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[True, False, True], + end_mask=[True, True, False], + ), # x_val[:, 1:, :4:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[False, False, True], + end_mask=[True, False, False], + squeeze_mask=[False, True, False], + ), # x_val[1::1, 1, :3:2] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, True], + 
end_mask=[True, True, True], + ), # x_val[:, :, :] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 2, 0], + stride=[1, 1, 1], + squeeze_mask=[False, False, True], + ), # x_val[1:2, 1:2, 1] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[2, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + ), # x_val[1:2, ...] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[...] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 1], + end=[2, 0, 2], + stride=[1, 1, 1], + begin_mask=[False, True, False], + end_mask=[False, True, False], + ), # x_val[1:2, ..., 1:2] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 1], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, False], + end_mask=[True, True, False], + squeeze_mask=[False, False, True], + ), # x_val[..., 1] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, False, True], + end_mask=[False, False, True], + squeeze_mask=[True, True, False], + ), # x_val[0, 0, :] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[2, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + ), # x_val[1:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 0], + end=[2, 2, 0], + stride=[1, 1, 1], + begin_mask=[False, False, True], + end_mask=[False, False, True], + ), # x_val[1:2, 1:2] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + squeeze_mask=[True, False, False], + ), # x_val[1] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[:] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, -1], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[..., ::-1] + ] + ans = [ + x_val[1:2, 1:2, 1:2], + x_val[1:2, 1:3, 1:4:2], + x_val[-3:-1, -3:-1, -3:-1], + x_val[0:-1, 0:-2, -3:-2], + x_val[-1:0:-2, -1:1:-1, -1:-3:-3], + x_val[:2, 1:3, :4:2], + x_val[:, 1:, :4:2], + x_val[1::1, 1, :3:2], + x_val[:, :, :], + x_val[1:2, 1:2, 1], + x_val[1:2, ...], + x_val[...], + x_val[1:2, ..., 1:2], + x_val[..., 1], + x_val[0, 0, :], + x_val[1:2], + x_val[1:2, 1:2], + x_val[1], + x_val[:], + x_val[..., ::-1], + ] + for idx in range(len(v)): + assert ans[idx].shape == v[idx].shape + np.testing.assert_allclose(ans[idx], v[idx].val, atol=1e-04, rtol=1e-05) + + + @staticmethod + def test_slice_by_index(): + INPUT_SHAPE = (1, 2, 8, 16) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + x = mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[1, 2, 8, 12], + stride=[1, 1, 2, 2], + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + return x + + x = np.random.rand(*INPUT_SHAPE) + + # slice by index is x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] 
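+ # For example, with INPUT_SHAPE = (1, 2, 8, 16) the ranges 0:1:1, 0:2:1, 0:8:2, 0:12:2 + # keep 1, 2, 4, and 6 indices along the four dimensions, so y_numpy has shape (1, 2, 4, 6).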
+ y_numpy = x[0:1:1, 0:2:1, 0:8:2, 0:12:2] + + model = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + y_neuralnetwork = list(model.predict({'x': x}).values())[0] + np.testing.assert_allclose(y_numpy, y_neuralnetwork) + + model = ct.convert(prog, source="milinternal", convert_to="mlprogram") + y_mlprogram = list(model.predict({'x': x}).values())[0] + # rdar://102217935 needs to be fixed before mlprogram will pass + # np.testing.assert_allclose(y_numpy, y_mlprogram) + + @staticmethod + def test_slice_by_index_slice_squeeze_separate(): + INPUT_SHAPE = (1, 2, 8, 16) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + x = mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[1, 2, 8, 12], + stride=[1, 1, 1, 2], + begin_mask=None, + end_mask=None, + squeeze_mask=[True, False, False, False], + ) + return x + + x = np.random.rand(*INPUT_SHAPE) + + # slice by index is x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] + # and squeeze dim 0 + y_numpy = x[0:1:1, 0:2:1, 0:8:1, 0:12:2] + y_numpy = np.squeeze(y_numpy, axis=0) + + model = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + y_neuralnetwork = list(model.predict({'x': x}).values())[0] + + assert y_numpy.shape == y_neuralnetwork.shape + np.testing.assert_allclose(y_numpy, y_neuralnetwork) + + model = ct.convert(prog, source="milinternal", convert_to="mlprogram") + y_mlprogram = list(model.predict({'x': x}).values())[0] + # TODO: rdar://103365766 MLProgram does not apply squeeze_mask. + # np.testing.assert_allclose(y_numpy, y_mlprogram) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py new file mode 100644 index 00000000..dc1d8672 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py @@ -0,0 +1,1645 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_utils import (get_op_types_in_program, + random_gen, ssa_fn) +from coremltools.models.utils import _macos_version + +from .testing_utils import UNK_SYM, UNK_VARIADIC, run_compare_builder + +if _HAS_TF_2: + import tensorflow as tf + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestBandPart: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [5.0, 6.0, 3.0, 8.0], + [7.0, 2.0, 7.0, 2.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.band_part(x=x), + mb.band_part(x=x, lower=0, upper=-1), + mb.band_part(x=x, lower=-1, upper=0), + mb.band_part(x=x, lower=0, upper=0), + ] + + expected_output_types = [ + (4, 4, types.fp32), + (4, 4, types.fp32), + (4, 4, types.fp32), + (4, 4, types.fp32), + ] + + expected_outputs = [ + np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [5.0, 6.0, 3.0, 8.0], + [7.0, 2.0, 7.0, 2.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [0.0, 6.0, 3.0, 8.0], + [0.0, 0.0, 7.0, 2.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 0.0, 0.0, 0.0], + [5.0, 6.0, 0.0, 0.0], + [7.0, 2.0, 7.0, 0.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 0.0, 0.0, 0.0], + [0.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 7.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCumSum: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.cumsum(x=x, axis=0, reverse=True, exclusive=False) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[5, 7, 9], [4, 5, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + v = mb.cumsum(x=x_val) + np.testing.assert_allclose(np.cumsum(x_val, axis=0), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_invalid_arg(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=0, invalid_arg=3) + + @ssa_fn + def test_invalid_axis1(self): + x_val = 
random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=-2) + + @ssa_fn + def test_invalid_axis2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=len(x_val.shape)) + + @ssa_fn + def test_invalid_axis3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis="") + + @ssa_fn + def test_invalid_reverse1(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse="") + + @ssa_fn + def test_invalid_reverse2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse=0) + + @ssa_fn + def test_invalid_reverse3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse=1) + + @ssa_fn + def test_invalid_exclusive1(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive="") + + @ssa_fn + def test_invalid_exclusive2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive=0) + + @ssa_fn + def test_invalid_exclusive3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive=1) + + @ssa_fn + def test_invalid_input1(self): + x_val = 1 + with pytest.raises(ValueError): + mb.cumsum(x=x_val) + + @ssa_fn + def test_invalid_input2(self): + x_val = ["1"] + with pytest.raises(ValueError): + mb.cumsum(x=x_val) + + +class TestFillLike: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.xfail("nn backend not supported") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("fill_like not supported in macOS12 or older.") + + shape = (2, 1, 3) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape, dtype=types.int32)} + + input_values = {"x": x_val} + + def build(x): + return mb.fill_like(ref_tensor=x, value=1.0) + + expected_output_types = [(2, 1, 3, types.fp32)] + expected_outputs = [np.full(shape=shape, fill_value=1.0)] + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestFill: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + shape = (2, 1, 3) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + input_values = {"x": x_val} + + def build(x): + return mb.add(x=x, y=mb.fill(shape=shape, value=1.0)) + + expected_output_types = [(2, 1, 3, types.fp32)] + expected_outputs = [np.full(shape=shape, fill_value=1.0)] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def 
test_builder_eval(self): + shape = np.random.randint(low=1, high=3, size=5).astype(np.int32) + res = mb.fill(shape=shape, value=1991.0).val + np.testing.assert_allclose(np.full(shape, fill_value=1991.0), res, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, value", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-1917.0, 0.0, 2048.0], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank, value): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.add(x=x, y=mb.fill(shape=shape, value=value)) + + expected_outputs = [np.full(shape=shape, fill_value=value)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s_len = get_new_symbol() + input_placeholders = { + "shape": mb.placeholder(shape=(s_len,), dtype=types.int32), + } + + def build(shape): + return [mb.fill(shape=shape)] + + expected_output_types = [(UNK_VARIADIC, types.fp32)] + expected_outputs = [np.zeros(shape=(2, 1, 3), dtype=np.float32)] + input_values = {"shape": np.array([2, 1, 3], dtype=np.float32)} + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +@pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) +class TestNonMaximumSuppression: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + boxes_val = np.array( + [ + [ + [0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 1.0], + [2.0, 2.0, 2.0, 2.0], + [3.0, 3.0, 3.0, 3.0], + ] + ], + dtype=np.float32, + ) + scores_val = np.array([[[-3.5], [9.4], [2.3], [0.7]]], dtype=np.float32) + input_placeholders = { + "boxes": mb.placeholder(shape=(1, 4, 4)), + "scores": mb.placeholder(shape=(1, 4, 1)), + } + input_values = {"boxes": boxes_val, "scores": scores_val} + + expected_output_types = [ + (1, 2, 4, types.fp32), + (1, 2, 1, types.fp32), + (1, 2, types.int32), + (1, types.int32), + ] + expected_outputs = [ + np.array([[[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]], dtype=np.float32), + np.array([[[9.4], [2.3]]], dtype=np.float32), + np.array([[1, 2]], dtype=np.int32), + np.array([2], dtype=np.int32), + ] + + def build(boxes, scores): + return mb.non_maximum_suppression( + boxes=boxes, + scores=scores, + iou_threshold=0.2, + score_threshold=0.4, + max_boxes=2, + per_class_suppression=True, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @staticmethod + def _compute_iou_matrix(boxes): + # input is (N, 4), in order [center_w, center_h, width, height] + boxes = boxes.astype(np.float32) + center_w, center_h, width, height = np.split(boxes, 4, axis=1) + top = center_h + 0.5 * height + bottom = center_h - 0.5 * height + left = center_w - 0.5 * width + right 
= center_w + 0.5 * width + area = width * height + + h_b = np.minimum(top, np.transpose(top)) + w_b = np.minimum(right, np.transpose(right)) + h_a = np.maximum(bottom, np.transpose(bottom)) + w_a = np.maximum(left, np.transpose(left)) + + intersection_area = np.maximum(0, h_b - h_a) * np.maximum(0, w_b - w_a) + union_area = area + np.transpose(area) - intersection_area + return intersection_area / union_area + + @staticmethod + def _ref_non_maximum_suppression( + boxes, scores, iou_threshold, score_threshold, max_boxes, per_class_suppression + ): + """ + Reference implementation of Core ML's NMS op using TensorFlow. + boxes of shape (n_batch, n_box, 4), [center_w, center_h, width, height] + scores of shape (n_batch, n_box, n_score) + output shapes [ + (n_batch, max_boxes, 4), + (n_batch, max_boxes, n_score), + (n_batch, max_boxes), + (n_batch,) + ] + """ + n_batch, n_box, n_score = scores.shape + + iou_threshold = iou_threshold.astype(np.float32) + score_threshold = score_threshold.astype(np.float32) + + # convert box ids to TF style + center_w, center_h, width, height = np.split( + boxes, 4, axis=-1 + ) # (n_batch,n_box,1) + y1 = center_h - 0.5 * height + y2 = center_h + 0.5 * height + x1 = center_w - 0.5 * width + x2 = center_w + 0.5 * width + boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (n_batch,n_box,4) + + out1 = np.zeros((n_batch, max_boxes, 4)) + out2 = np.zeros((n_batch, max_boxes, n_score)) + out3 = -1 * np.ones((n_batch, max_boxes)) + out4 = np.zeros((n_batch,)) + + for b in range(n_batch): + box_coord_matrix = boxes_tf[b, :, :] # (n_box,4) + score_vector = np.max(scores[b, :, :], axis=-1) # (n_box,) + if not per_class_suppression: + # this is the simple case as TF directly supports it + ids_g = tf.image.non_max_suppression( + box_coord_matrix, + score_vector, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + ids = ids_g.numpy() + else: + # this is slightly complicated as TF does not directly support it + class_ids = np.argmax(scores[b, :, :], axis=-1) # (n_box,) + sorted_score_ids = np.argsort(-score_vector) + box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0) + score_vector2 = np.take(score_vector, sorted_score_ids) + class_ids = np.take(class_ids, sorted_score_ids) + classes_seen = dict() + ids_intermediate = np.array([], dtype=np.int32) + for n in range(n_box): + if class_ids[n] in classes_seen: + continue + c = class_ids[n] + classes_seen[c] = True + current_class_ids = np.where(class_ids == c)[0] + if len(current_class_ids) > 0: + feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0) + feed_in2 = np.take(score_vector2, current_class_ids) + cur_ids_g = tf.image.non_max_suppression( + feed_in1, + feed_in2, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + cur_ids = cur_ids_g.numpy() + + from_sort_ids = np.take(current_class_ids, cur_ids) + ids_intermediate = np.append(ids_intermediate, from_sort_ids) + ids_intermediate.sort() + ids = np.take(sorted_score_ids, ids_intermediate) + + xx = len(ids) + if xx == 0: + ids = np.array([np.argmax(score_vector)]) + xx = 1 + if xx > max_boxes: + ids = ids[:max_boxes] + xx = len(ids) + out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0) + out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0) + out3[b, :xx] = ids + out4[b] = xx + + return out1, out2, out3, out4 + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "iou_threshold_percentile", + 
"score_threshold_percentile", + "n_boxes", + "n_batch", + "n_score", + "per_class_suppression", + ] + ), + itertools.product( + compute_units, + backends, + [0, 30, 80, 100], + [0, 40, 100], + [(10, 7), (30, 37), (100, 64)], + [1], + [1, 4, 7], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, + compute_unit, + backend, + iou_threshold_percentile, + score_threshold_percentile, + n_boxes, + n_batch, + n_score, + per_class_suppression, + ): + if backend[0] == "mlprogram" and iou_threshold_percentile == 0: + pytest.xfail("rdar://78080118") + + if backend[0] == "neuralnetwork" and n_boxes == (10, 7) and platform.machine() == "x86_64": + pytest.xfail("rdar://78080118 (Investigate failing tests for NMS in coremltools)") + + if backend == ("mlprogram", "fp16"): + pytest.xfail("CPU: rdar://80662705 and GPU: rdar://80661262") + + n_boxes_in, n_boxes_out = n_boxes + boxes_val = random_gen((n_batch, n_boxes_in, 4), 0, 100) + scores_val = random_gen((n_batch, n_boxes_in, n_score), -100, 100) + + iou_matrix = self._compute_iou_matrix(boxes_val[0, :, :]) + iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape( + iou_matrix.shape[0], -1 + ) + + if score_threshold_percentile == 0: + score_threshold = np.min(scores_val) - 1 + elif score_threshold_percentile == 100: + score_threshold = np.max(scores_val) + 1 + else: + score_threshold = ( + np.percentile(scores_val, score_threshold_percentile) + 0.01 + ) + + if iou_threshold_percentile == 0: + iou_threshold = np.maximum(np.min(iou_matrix) - 0.01, 0.0) + else: + iou_threshold = np.percentile(iou_matrix, iou_threshold_percentile) + 0.01 + iou_threshold = np.maximum(iou_threshold, 1e-8) + + ( + tf_boxes, + tf_scores, + tf_indices, + tf_num_boxes, + ) = self._ref_non_maximum_suppression( + boxes_val, + scores_val, + iou_threshold, + score_threshold, + n_boxes_out, + per_class_suppression, + ) + expected_outputs = [tf_boxes, tf_scores, tf_indices, tf_num_boxes] + expected_output_types = [ + tf_boxes.shape[:] + (types.fp32,), + tf_scores.shape[:] + (types.fp32,), + tf_indices.shape[:] + (types.int32,), + tf_num_boxes.shape[:] + (types.int32,), + ] + + input_placeholders = { + "boxes": mb.placeholder(shape=(n_batch, n_boxes_in, 4)), + "scores": mb.placeholder(shape=(n_batch, n_boxes_in, n_score)), + } + input_values = {"boxes": boxes_val, "scores": scores_val} + + def build(boxes, scores): + return mb.non_maximum_suppression( + boxes=boxes, + scores=scores, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + max_boxes=n_boxes_out, + per_class_suppression=per_class_suppression, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNonZero: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.non_zero(x=x)] + + expected_output_types = [(UNK_SYM, 2, types.int32)] + expected_outputs = [np.array(np.transpose(np.nonzero(x_val)))] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.random.randint(low=-1, 
high=2, size=(6, 1, 7)) + res = mb.non_zero(x=x_val) + np.testing.assert_allclose(np.transpose(np.nonzero(x_val)), res.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_shape_inference_for_deterministic_input(self): + # If the input is compile time known, the builder should be able to infer the shape from value + x_val = np.array([[0, 2], [1, 1]]) + res = mb.non_zero(x=x_val) + assert res.shape == (3, 2) + +class TestOneHot: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([1, 0], dtype=np.int32) + depth = 4 + + input_placeholders = { + "x": mb.placeholder(shape=x.shape, dtype=types.int32), + "y": mb.placeholder(shape=(1,), dtype=types.int32), + } + + input_values = {"x": x, "y": depth} + + def build(x, y): + return [ + mb.one_hot(indices=x, one_hot_vector_size=4), + mb.one_hot(indices=x, one_hot_vector_size=4, axis=0), + mb.one_hot( + indices=x, one_hot_vector_size=4, on_value=1.0, off_value=0.1 + ), + mb.one_hot( + indices=x, one_hot_vector_size=mb.squeeze(x=y), on_value=1, off_value=9 + ), + ] + + expected_output_types = [ + (2, 4, types.int32), + (4, 2, types.int32), + (2, 4, types.fp32), + (2, UNK_SYM, types.int32), + ] + + expected_outputs = [ + np.array([[0, 1, 0, 0], [1, 0, 0, 0]], dtype=np.float32), + np.array([[0, 1], [1, 0], [0, 0], [0, 0]], dtype=np.float32), + np.array([[0.1, 1, 0.1, 0.1], [1, 0.1, 0.1, 0.1]], dtype=np.float32), + np.array([[9, 1, 9, 9], [1, 9, 9, 9]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPad: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + def test_constant_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="constant", constant_val=0.0) + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 5.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_constant_mode_constant_val(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="constant", constant_val=0.5) + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 1.0, 2.0, 3.0, 0.5, 0.5], + [0.5, 0.5, 4.0, 5.0, 6.0, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_reflect_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], 
dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="reflect") + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_replicate_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="replicate") + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_constant_general(): + t = np.arange(12, dtype=np.float32).reshape([2, 2, 3]) + pad = np.array([[1, 1], [2, 2], [1, 1]], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad( + x=x, pad=pad.reshape(-1), mode="constant", constant_val=0.0 + ) + + expected_output_types = (4, 6, 5, types.fp32) + expected_outputs = np.pad(t, pad, mode="constant") + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # Test different modes + test_constant_mode() + test_constant_mode_constant_val() + test_reflect_mode() + test_replicate_mode() + test_constant_general() + + @ssa_fn + def test_builder_eval(self): + def test_constant_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, + pad=np.array([1, 1, 2, 2], dtype=np.int32), + mode="constant", + constant_val=0.0, + ) + expected_outputs = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 5.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_reflect_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, pad=np.array([1, 1, 2, 2], dtype=np.int32), mode="reflect" + ) + expected_outputs = np.array( + [ + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + ], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_replicate_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, pad=np.array([1, 1, 2, 2], dtype=np.int32), mode="replicate" + ) + expected_outputs = np.array( + [ + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + ], + dtype=np.float32, + ) + 
np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+        def test_constant_general():
+            x_val = np.arange(12, dtype=np.float32).reshape([2, 2, 3])
+            pad = np.array([[1, 1], [2, 2], [1, 1]], dtype=np.int32)
+            v = mb.pad(x=x_val, pad=pad.reshape(-1), mode="constant", constant_val=0.0)
+            expected_outputs = np.pad(x_val, pad, mode="constant")
+            np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+        # Test different modes
+        test_constant_mode()
+        test_reflect_mode()
+        test_replicate_mode()
+        test_constant_general()
+
+
+class TestRange1d:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = 15.0
+        y = 5.0
+        z = 2.0
+        # Model inputs must have rank at least 1
+        input_placeholders = {
+            "x": mb.placeholder(shape=(1,)),
+            "y": mb.placeholder(shape=(1,)),
+            "z": mb.placeholder(shape=(1,)),
+        }
+        input_values = {"x": x, "y": y, "z": z}
+
+        def build(x, y, z):
+            return [
+                mb.range_1d(start=mb.squeeze(x=y), end=15.0, step=2.0),
+                mb.range_1d(start=mb.squeeze(x=y), end=15.0, step=mb.squeeze(x=z)),
+                mb.range_1d(start=mb.squeeze(x=y), end=mb.squeeze(x=x), step=2.0),
+                mb.range_1d(start=mb.squeeze(x=y), end=mb.squeeze(x=x), step=mb.squeeze(x=z)),
+                mb.range_1d(start=5.0, end=15.0, step=mb.squeeze(x=z)),
+                mb.range_1d(start=5.0, end=mb.squeeze(x=x), step=2.0),
+                mb.range_1d(start=5.0, end=mb.squeeze(x=x), step=mb.squeeze(x=z)),
+            ]
+
+        expected_output_types = [
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+        ]
+
+        expected_outputs = [
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_large_array(self, compute_unit, backend):
+        input_placeholders = {
+            "x": mb.placeholder(shape=(1,)),  # dummy input
+        }
+        input_values = {"x": 0.5}
+
+        def build(x):
+            return [mb.range_1d(start=0.0, end=2000000.0, step=1.0)]
+
+        expected_output_types = [
+            (2000000, types.fp32)
+        ]
+
+        expected_outputs = [
+            np.arange(0.0, 2000000.0, 1.0),
+        ]
+
+        mlmodel = run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+        # verify that the range_1d op is not const folded
+        prog = mlmodel._mil_program
+        ops = get_op_types_in_program(prog)
+        assert ops == ["range_1d", "identity"]
+
+    @ssa_fn
+    def test_builder_eval(self):
+        v = mb.range_1d(start=5, end=15, step=2)
+        np.testing.assert_allclose(np.arange(5, 15, 2), v.val, atol=1e-04, rtol=1e-05)
+
+
+class TestTile:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+        input_placeholders = {"x":
mb.placeholder(shape=x.shape)} + + input_values = {"x": x} + + def build(x): + return [ + mb.tile(x=x, reps=(1, 1)), + mb.tile(x=x, reps=(2, 1)), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (4, 3, types.fp32), + ] + + expected_outputs = [ + x, + np.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.tile(x=x, reps=(1, 2)) + np.testing.assert_allclose(np.tile(x, reps=(1, 2)), v.val, atol=1e-04, rtol=1e-05) + + +class TestDynamicTile: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + rep1 = np.array([1, 1]).astype(np.int32) + rep2 = np.array([2, 1]).astype(np.int32) + rep3 = np.array([2, 3]).astype(np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "reps1": mb.placeholder(shape=rep1.shape, dtype=types.int32), + "reps2": mb.placeholder(shape=rep2.shape, dtype=types.int32), + "reps3": mb.placeholder(shape=rep3.shape, dtype=types.int32), + } + + input_values = {"x": x, "reps1": rep1, "reps2": rep2, "reps3": rep3} + + def build(x, reps1, reps2, reps3): + return [ + mb.tile(x=x, reps=reps1), + mb.tile(x=x, reps=reps2), + mb.tile(x=x, reps=reps3), + ] + + expected_output_types = [ + (UNK_SYM, UNK_SYM, types.fp32), + (UNK_SYM, UNK_SYM, types.fp32), + (UNK_SYM, UNK_SYM, types.fp32), + ] + + expected_outputs = [ + x, + np.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array( + [ + [1, 2, 3, 1, 2, 3, 1, 2, 3], + [4, 5, 6, 4, 5, 6, 4, 5, 6], + [1, 2, 3, 1, 2, 3, 1, 2, 3], + [4, 5, 6, 4, 5, 6, 4, 5, 6], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTopK: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=1) + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.int32), + ] + expected_outputs = [ + np.array([[2.0, -1.0], [6.0, 4.0]], dtype=np.float32), + np.array([[1, 0], [2, 0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, return_indices, sort", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + ) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend, return_indices, sort): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + if not return_indices: + pytest.xfail( + "rdar://92880117 (Topk with return_indices = False error out at the MIL->EIR stage)" + ) + + val = 
np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=1, return_indices=return_indices, sort=sort) + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.int32), + ] + expected_outputs = [ + np.array([[2.0, -1.0], [6.0, 4.0]], dtype=np.float32), + np.array([[1, 0], [2, 0]], dtype=np.float32), + ] + + if not return_indices: + expected_output_types = expected_output_types[:1] + expected_outputs = expected_outputs[:1] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + @ssa_fn + def test_builder_eval(self): + def np_topk(x, k, axis, ascending=False): + indices = np.argsort(x, axis=axis) + if not ascending: + indices = np.argsort(-x, axis=axis) + slc = [slice(None)] * len(x.shape) + slc[axis] = slice(0, k) + indices = indices[tuple(slc)] + values = np.take_along_axis(x, indices, axis=axis) + return values, indices + + val = np.array([[-1.0, 7.0, -3.0], [4.0, -5.0, 8.0]], dtype=np.float32) + res_values, res_indices = mb.topk(x=val, k=1, axis=0) + ref_values, ref_indices = np_topk(x=val, k=1, axis=0) + np.testing.assert_allclose(ref_values, res_values.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(ref_indices, res_indices.val, atol=1e-04, rtol=1e-05) + res_values, res_indices = mb.topk(x=val, k=2, axis=-1, ascending=True) + ref_values, ref_indices = np_topk(x=val, k=2, axis=-1, ascending=True) + np.testing.assert_allclose(ref_values, res_values.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(ref_indices, res_indices.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + val = np.array([[1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=-1, ascending=True) + + expected_output_types = [ + (s0, 2, types.fp32), + (s0, 2, types.int32), + ] + expected_outputs = [ + np.array([[-3.0, 1.0], [-5.0, 4.0]], dtype=np.float32), + np.array([[2, 0], [1, 0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestFlatten2d: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array( + [[[1, 2, 3], [4, 5, 6]], [[-1, -2, -3], [-4, -5, -6]]], dtype=np.float32 + ) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [mb.flatten2d(x=x)] + + expected_output_types = [ + (2, 6, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3, 4, 5, 6], [-1, -2, -3, -4, -5, -6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, rank, axis, backend", + itertools.product( + compute_units, + range(1, 6), + range(-5, 6), + backends, + 
), + ) + def test_builder_to_backend_stress(self, compute_unit, rank, axis, backend): + if axis < -rank or axis >= rank + 1: + return + + shape = np.random.randint(low=2, high=6, size=rank) + t = np.random.random(shape) + + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [mb.flatten2d(x=x, axis=axis)] + + np_axis = axis + rank if axis < 0 else axis + pl, pr = 1, 1 + for i in range(0, np_axis): + pl *= shape[i] + for i in range(np_axis, len(shape)): + pr *= shape[i] + + new_shape = [pl, pr] + ref = t.reshape(new_shape) + + expected_outputs = [ref] + expected_output_types = [ + tuple(list(ref.shape) + [types.fp32]), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.flatten2d(x=t) + expected_f = np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(s0, 4, 5, 6)), + } + + def build(x): + return [mb.flatten2d(x=x)] + + input = np.random.rand(10, 4, 5, 6) + output = input.reshape(10, -1) + + expected_output_types = (s0, 120, types.fp32) + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestShape: + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape, dtype=mb_type)} + input_values = {"x": t} + + def build(x): + return mb.shape(x=x) + + expected_output_types = (2, types.int32) + expected_outputs = [ + np.array([2, 3], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.shape(x=t) + expected_f = np.array([1, 2, 3], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(s0, 4, 5, 6), dtype=mb_type), + } + + def build(x): + return [mb.shape(x=x)] + + input = np.random.rand(10, 4, 5, 6) + input = input.astype(np_type) + output = np.array([10, 4, 5, 6], dtype=np.int32) + + 
expected_output_types = (4, types.int32) + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIdentity: + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape, dtype=mb_type)} + input_values = {"x": t} + + def build(x): + return mb.identity(x=x) + + expected_output_types = [(2, 3, mb_type)] + expected_outputs = [ + np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.identity(x=t) + expected_f = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + input_placeholders = { + "x": mb.placeholder(shape=(10, 4, 5, 6)), + } + + def build(x): + return [mb.identity(x=x)] + + input = np.random.rand(10, 4, 5, 6) + output = input + + expected_output_types = [(10, 4, 5, 6, types.fp32)] + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestArgSort: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.argsort(x=x), mb.argsort(x=x, axis=0, ascending=True)] + + expected_output_types = [ + (2, 3, types.int32), + (2, 3, types.int32), + ] + expected_outputs = [ + np.array([[1, 0, 2], [2, 0, 1]], dtype=np.int32), + np.array([[0, 1, 0], [1, 0, 1]], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(1, 3, 2, 2), rand_min=-100, rand_max=100) + res = mb.argsort(x=x_val, axis=-3) + # The default np argsort mode is ascending, which is opposite to MIL's argsort op. 
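+        # Negating the input before calling np.argsort therefore yields the
+        # indices of a descending sort, matching mb.argsort's default
+        # (ascending=False).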
+ np.testing.assert_allclose(np.argsort(-x_val, axis=-3), res.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py new file mode 100644 index 00000000..11106529 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py @@ -0,0 +1,1347 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.mil.types import nptype_from_builtin +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import UNK_SYM, UNK_VARIADIC, run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestDepthToSpace: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.depth_to_space(x=x, block_size=2)] + + expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToBatch: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (2, 1, 2, 4, fp32) + val = np.array([[[[ 1, 2, 3, 4], + [ 5, 6, 7, 8]]], + [[[ 9, 10, 11, 12], + [13, 14, 15, 16]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.space_to_batch(x=x, block_shape=[2, 2], paddings=[[0, 0], [2, 0]])] + + expected_output_types = (8, 1, 1, 3, types.fp32) + expected_outputs = np.array([[[[ 0, 1, 3]]], + [[[ 0, 9, 11]]], + [[[ 0, 2, 4]]], + [[[ 0, 10, 12]]], + [[[ 0, 5, 7]]], + [[[ 0, 13, 15]]], + [[[ 0, 6, 8]]], + [[[ 0, 14, 16]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestBatchToSpace: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (8, 1, 1, 3, fp32) + val = np.array([[[[ 0, 1, 3]]], + [[[ 0, 9, 11]]], + [[[ 0, 2, 4]]], + [[[ 0, 10, 12]]], + [[[ 0, 5, 7]]], + [[[ 0, 13, 15]]], + [[[ 0, 6, 8]]], + [[[ 0, 14, 16]]]], 
dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.batch_to_space(x=x, block_shape=[2, 2], crops=[[0, 0], [2, 0]])] + + expected_output_types = (2, 1, 2, 4, types.fp32) + expected_outputs = np.array([[[[ 1, 2, 3, 4], + [ 5, 6, 7, 8]]], + [[[ 9, 10, 11, 12], + [13, 14, 15, 16]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestExpandDims: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [ + mb.expand_dims(x=x, axes=[0]), + mb.expand_dims(x=x, axes=[1]), + mb.expand_dims(x=x, axes=[2]), + mb.expand_dims(x=x, axes=[-1]), + mb.expand_dims(x=x, axes=[0, 1]), + mb.expand_dims(x=x, axes=[-2, -1]), + ] + + expected_output_types = [ + (1, 2, 3, types.fp32), + (2, 1, 3, types.fp32), + (2, 3, 1, types.fp32), + (2, 3, 1, types.fp32), + (1, 1, 2, 3, types.fp32), + (2, 3, 1, 1, types.fp32), + ] + expected_outputs = [ + np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[[1, 2, 3], [4, 5, 6]]]], dtype=np.float32), + np.array([[[[1]], [[2]], [[3]]], [[[4]], [[5]], [[6]]]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + } + + def build(x): + return [ + mb.expand_dims(x=x, axes=[-1]), + mb.expand_dims(x=x, axes=[1]), + ] + + expected_output_types = [ + (2, s0, 1, types.fp32), + (2, 1, s0, types.fp32), + ] + expected_outputs = [ + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + ] + + input_values = { + "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + } + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.random.rand(1, 6) + v1 = mb.expand_dims(x=x_val, axes=[2]) + np.testing.assert_allclose(np.expand_dims(x_val, 2), v1.val, atol=1e-04, rtol=1e-05) + + v2 = mb.expand_dims(x=x_val, axes=[-1]) + np.testing.assert_allclose(np.expand_dims(x_val, -1), v2.val, atol=1e-04, rtol=1e-05) + + v3 = mb.expand_dims(x=x_val, axes=[-1, -2]) + ref = np.expand_dims(np.expand_dims(x_val, -1), -1) + np.testing.assert_allclose(ref, v3.val, atol=1e-04, rtol=1e-05) + + v4 = mb.expand_dims(x=x_val, axes=[0, -1, -2]) + np.testing.assert_allclose(np.reshape(x_val, (1, 1, 6, 1, 1)), v4.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for 
rank in range(1, 5)
+                for axis in range(-rank - 1, rank + 1)
+            ],
+        ),
+    )
+    def test_builder_to_backend_programmatic_one_axis(
+        self, compute_unit, backend, rank_and_axis
+    ):
+        rank, axis = rank_and_axis
+        x_shape = np.random.randint(low=2, high=6, size=rank)
+        input_placeholders = {"x": mb.placeholder(shape=x_shape)}
+        input_values = {"x": np.random.sample(x_shape).astype(np.float32)}
+
+        def build(x):
+            return mb.expand_dims(x=x, axes=[axis])
+
+        adjusted_axis = axis if axis >= 0 else rank + axis + 1
+        x_shape = list(x_shape)
+        out_shape = x_shape[:adjusted_axis] + [1] + x_shape[adjusted_axis:]
+        expected_output_types = tuple(out_shape[:]) + (types.fp32,)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            np.expand_dims(input_values["x"], axis),
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank_and_axes",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                (3, [0, 1]),
+                (3, [1, 0]),
+                (3, [-2, -1]),
+                (3, [-1, -2]),
+                (2, [-3, -1]),
+                (2, [-3, 1, -1]),
+                (2, [-2, 0]),
+                (1, [-1, -2, -3, -4]),
+                (1, [0, -1]),
+                (1, [0, 1, -2, -1]),
+            ],
+        ),
+    )
+    def test_builder_to_backend_programmatic_multiple_axes(
+        self, compute_unit, backend, rank_and_axes
+    ):
+        rank, axes = rank_and_axes
+        x_shape = np.random.randint(low=1, high=6, size=rank)
+        input_placeholders = {"x": mb.placeholder(shape=x_shape)}
+        input_values = {"x": np.random.sample(x_shape).astype(np.float32)}
+
+        def build(x):
+            return mb.expand_dims(x=x, axes=axes)
+
+        out_shape = list(x_shape)
+        out_rank = rank + len(axes)
+        pos_axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
+        for axis in pos_axes:
+            out_shape.insert(axis, 1)
+
+        expected_outputs = np.reshape(input_values["x"], out_shape)
+        expected_output_types = tuple(out_shape) + (types.fp32,)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+class TestReshapeLike:
+    @pytest.mark.parametrize(
+        "compute_unit, backend, InputShape_RefShapes_Begins_Ends_EndMasks, InputType_RefType",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                [(4, 3), ((2, 2, 3), (1, 3)), (0, 1), (2, 2), (False, False)],
+                [(32,), ((1, 2, 2, 2), (3, 2, 2)), (1, 1), (0, 0), (True, True)],
+                [(72, 1), ((1, 2, 3, 4, 1), (3,)), (1, 0), (0, 1), (True, False)],
+            ],
+            [(types.bool, types.fp32), (types.fp32, types.bool)],
+        )
+    )
+    def test_builder_to_backend_smoke(
+        self,
+        compute_unit,
+        backend,
+        InputShape_RefShapes_Begins_Ends_EndMasks,
+        InputType_RefType,
+    ):
+        if backend[0] == "neuralnetwork":
+            pytest.skip("reshape_like not supported in neuralnetwork backend.")
+
+        if ct.utils._macos_version() < (13, 0):
+            pytest.skip("reshape_like not supported in macOS12 or older.")
+
+        input_shape, ref_shapes, begins, ends, end_masks = InputShape_RefShapes_Begins_Ends_EndMasks
+        ref_shape_1, ref_shape_2 = ref_shapes
+        input_type, ref_type = InputType_RefType
+
+        t = np.random.rand(*input_shape).astype(np.float32)
+        ref_tensor_1 = np.random.rand(*ref_shape_1).astype(np.float32)
+        ref_tensor_2 = np.random.rand(*ref_shape_2).astype(np.float32)
+
+        input_placeholders = {
+            "x": mb.placeholder(shape=t.shape),
+            "ref_tensor_1": mb.placeholder(shape=ref_shape_1),
+            "ref_tensor_2": mb.placeholder(shape=ref_shape_2),
+        }
+        input_values = {
+            "x": t,
+            "ref_tensor_1": ref_tensor_1,
+            "ref_tensor_2": ref_tensor_2,
+        }
+
+        def build(x, ref_tensor_1, ref_tensor_2):
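+            # Cast the input and/or the reference tensors to bool when the
+            # parametrized (input_type, ref_type) pair asks for it; reshape_like
+            # then takes its output shape from slices of the reference tensors
+            # selected by begins / ends / end_masks.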
+ if input_type == types.bool: + x = mb.cast(x=x, dtype="bool") + + if ref_type == types.bool: + ref_tensor_1 = mb.cast(x=ref_tensor_1, dtype="bool") + ref_tensor_2 = mb.cast(x=ref_tensor_2, dtype="bool") + + ref_tensors = (ref_tensor_1, ref_tensor_2) + return mb.reshape_like(x=x, ref_tensors=ref_tensors, begins=begins, ends=ends, end_masks=end_masks) + + output_shape = () + for ref_shape, begin, end, end_mask in zip((ref_shape_1, ref_shape_2), begins, ends, end_masks): + if end_mask: + output_shape += tuple(ref_shape[begin:]) + else: + output_shape += tuple(ref_shape[begin:end]) + + expected_output_types = [ + output_shape + (input_type,), + ] + expected_outputs = [ + np.reshape(t, output_shape).astype(nptype_from_builtin(input_type)), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 + ) + + +class TestReshape: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [ + mb.reshape(x=x, shape=[3, 2]), + mb.reshape(x=x, shape=[2, -1]), + mb.reshape(x=x, shape=[2, 1, 1, 3]), + ] + + expected_output_types = [ + (3, 2, types.fp32), + (2, 3, types.fp32), + (2, 1, 1, 3, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32), + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array([[[[1.0, 2.0, 3.0]]], [[[4.0, 5.0, 6.0]]]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + r = mb.reshape(x=t, shape=[3, 2]) + expected_r = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_r, r.val, atol=1e-04, rtol=1e-05) + r2 = mb.reshape(x=t, shape=[2, -1]) + expected_r2 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_r2, r2.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + s_len = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + "shape": mb.placeholder(shape=(3,), dtype=types.int32), + "shape2": mb.placeholder(shape=(s_len,), dtype=types.int32), + } + + def build(x, shape, shape2): + return [ + mb.reshape(x=x, shape=[2, -1]), + mb.reshape(x=x, shape=[1, -1]), + mb.reshape(x=x, shape=[2, 1, 1, -1]), + mb.reshape(x=x, shape=shape), + mb.reshape(x=x, shape=shape2), + ] + + expected_output_types = [ + (2, s0, types.fp32), + (1, 2 * s0, types.fp32), + (2, 1, 1, s0, types.fp32), + (UNK_SYM, UNK_SYM, UNK_SYM, types.fp32), + (UNK_VARIADIC, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32), + np.array([[[[1.0, 2.0, 3.0]]], [[[4.0, 5.0, 6.0]]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + ] + + input_values = { + "x": 
np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + "shape": np.array([2, 1, 3], dtype=np.float32), + "shape2": np.array([2, 1, 3], dtype=np.float32), + } + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReverse: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.reverse(x=x), mb.reverse(x=x, axes=[0])] + + expected_output_types = [(2, 3, types.fp32), (2, 3, types.fp32)] + expected_outputs = [ + np.array([[6.0, -5.0, 4.0], [-3.0, 2.0, -1.0]], dtype=np.float32), + np.array([[4.0, -5.0, 6.0], [-1.0, 2.0, -3.0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + val = np.array([[-1.0, 7.0, -3.0], [4.0, -5.0, 8.0]], dtype=np.float32) + res = mb.reverse(x=val, axes=[0]) + np.testing.assert_allclose(np.flip(val, axis=0), res.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} + input_values = {"x": val} + + def build(x): + return [ + mb.reverse(x=x, axes=[1]), + mb.reverse(x=x, axes=[0]), + ] + + expected_output_types = [ + (s0, 3, types.fp32), + (s0, 3, types.fp32), + ] + expected_outputs = [ + np.array([[3.0, 2.0, 1.0], [6.0, 5.0, 4.0]], dtype=np.float32), + np.array([[4.0, 5.0, 6.0], [1.0, 2.0, 3.0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReverseSequence: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [1, 2, 3, 4, 5, 0, 0, 0], + [1, 2, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 4, 0, 0, 0, 0], + [1, 2, 3, 4, 5, 6, 7, 8], + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.reverse_sequence( + x=x, lengths=[7, 2, 3, 5], seq_axis=1, batch_axis=0 + ), + ] + + expected_output_types = [ + (4, 8, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [0, 0, 5, 4, 3, 2, 1, 0], + [2, 1, 0, 0, 0, 0, 0, 0], + [3, 2, 1, 4, 0, 0, 0, 0], + [5, 4, 3, 2, 1, 6, 7, 8], + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + x_val = np.array( + [ + [1, 2, 3, 4, 5, 0, 0, 0], + [1, 2, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 4, 0, 0, 0, 0], + [1, 2, 3, 
4, 5, 6, 7, 8], + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=(4, s0))} + input_values = {"x": x_val} + + def build(x): + return [ + mb.reverse_sequence( + x=x, lengths=[7, 2, 3, 5], seq_axis=1, batch_axis=0 + ), + ] + + expected_output_types = [ + (4, s0, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [0, 0, 5, 4, 3, 2, 1, 0], + [2, 1, 0, 0, 0, 0, 0, 0], + [3, 2, 1, 4, 0, 0, 0, 0], + [5, 4, 3, 2, 1, 6, 7, 8], + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceBySize: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array(list(range(24))).reshape((2, 3, 4)).astype(np.float32) + begin_val = np.array([1, 1, 1], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "begin": mb.placeholder(shape=begin_val.shape, dtype=types.int32), + } + input_values = {"x": x_val, "begin": begin_val} + + def build_non_single(x, begin): + return [ + mb.slice_by_size(x=x, begin=begin, size=[1, 2, 3]), + ] + + def build_single(x, begin): + return [ + mb.slice_by_size(x=x, begin=begin, size=[-1, 2, -1]), + ] + + expected_output_types = [(1, 2, 3, types.fp32)] + expected_outputs = [np.array([[[17, 18, 19], [21, 22, 23]]], dtype=np.float32)] + run_compare_builder( + build_non_single, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + expected_output_types = [(UNK_SYM, 2, UNK_SYM, types.fp32)] + run_compare_builder( + build_single, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array(list(range(24))).reshape(2, 3, 4) + v_1 = mb.slice_by_size(x=x, begin=(0, 1, 0), size=(-1, -1, -1)) + v_2 = mb.slice_by_size(x=x, begin=(0, 1, 0), size=(-1, -1, 3)) + v_3 = mb.slice_by_size(x=x, begin=(0, -2, 0), size=(-1, -1, 3)) + np.testing.assert_allclose(x[:, 1:, :], v_1.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(x[:, 1:, :3], v_2.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(x[:, -2:, :3], v_3.val, atol=1e-04, rtol=1e-05) + + +class TestSpaceToDepth: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 1, 2, 2, fp32) + val = np.array([[[[7.0, 9.0], [4.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.space_to_depth(x=x, block_size=2)] + + expected_output_types = (1, 4, 1, 1, types.fp32) + expected_outputs = np.array( + [[[[7.0]], [[9.0]], [[4.0]], [[6.0]]]], dtype=np.float32 + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSqueeze: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[[[1], [2], [3]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + + input_values = 
{"x": x} + + def build(x): + return [ + mb.squeeze(x=x, axes=(-1,)), + mb.squeeze(x=x, axes=(-3, 0)), + mb.squeeze(x=x, axes=(0, 1, 3)), + mb.squeeze(x=x), + ] + + expected_output_types = [ + (1, 1, 3, types.fp32), + (3, 1, types.fp32), + (3, types.fp32), + (3, types.fp32), + ] + + expected_outputs = [ + np.array([[[1, 2, 3]]], dtype=np.float32), + np.array([[1], [2], [3]], dtype=np.float32), + np.array([1, 2, 3], dtype=np.float32), + np.array([1, 2, 3], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[[[1], [2], [3]], [[4], [5], [6]]]], dtype=np.float32) + v = mb.squeeze(x=x, axes=(-4, 3)) + np.testing.assert_allclose(np.squeeze(x, axis=(-4, 3)), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_rank_0(self): + x = np.array([1], dtype=np.float32) + v = mb.squeeze(x=x) + assert v.shape == () + assert type(v.val) == np.float32 + assert np.isclose(np.squeeze(x), v.val) + + +class TestTranspose: + @pytest.mark.parametrize( + "compute_unit, backend, is_symbolic", + itertools.product(compute_units, backends, [True, False],), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, is_symbolic): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + input_shape = x.shape + if is_symbolic: + input_shape = [get_new_symbol(), get_new_symbol()] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + + input_values = {"x": x} + + def build(x): + return [ + mb.transpose(x=x, perm=(0, 1)), + mb.transpose(x=x, perm=(1, 0)), + mb.transpose(x=x, perm=(-1, 0)), + mb.transpose(x=x, perm=(-2, -1)), + ] + + d0 = input_shape[0] + d1 = input_shape[1] + expected_output_types = [ + (d0, d1, types.fp32), + (d1, d0, types.fp32), + (d1, d0, types.fp32), + (d0, d1, types.fp32), + ] + + expected_outputs = [x, x.T, x.T, x] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.transpose(x=x, perm=(1, 0)) + np.testing.assert_allclose(x.T, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + } + + def build(x): + return [ + mb.transpose(x=x, perm=[1, 0]), + ] + + expected_output_types = [ + (s0, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float32), + ] + + input_values = { + "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + } + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPixelShuffle: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_shuffle(x=x, upscale_factor=2)] + + 
expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, shape, upscale_factor", + itertools.product( + compute_units, + backends, + [(1, 16, 1, 1), (2, 16, 3, 3), (1, 32, 1, 1)], + [2, 4], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, shape, upscale_factor + ): + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_shuffle(x=x, upscale_factor=upscale_factor)] + + torch_pixel_shuffle = torch.nn.PixelShuffle(upscale_factor) + expected_outputs = [torch_pixel_shuffle(torch.Tensor(val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason="New functionality in macOS13/iOS16") +class TestPixelUnshuffle: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + val = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2))] + + expected_output_types = (1, 4, 1, 1, types.fp32) + expected_outputs = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, shape, downscale_factor", + itertools.product( + compute_units, + backends, + [(1, 2, 4, 4), (2, 1, 8, 4)], + [2, 4], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, shape, downscale_factor, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(downscale_factor))] + + torch_pixel_unshuffle = torch.nn.PixelUnshuffle(downscale_factor) + expected_outputs = [torch_pixel_unshuffle(torch.Tensor(val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestSlidingWindows: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is 
(1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.sliding_windows(x=x, axis=1, size=2)] + + expected_output_types = (1, 3, 2, 1, 1, types.fp32) + expected_outputs = np.array( + [[[[[9.0]], [[5.0]]], [[[5.0]], [[1.0]]], [[[1.0]], [[3.0]]]]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, size, stride", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + [1, 2], + [1, 2], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank_and_axis, size, stride + ): + def np_sliding_windows(a, np_axis, np_size, np_stride): + n = (a.shape[np_axis] - np_size) // np_stride + 1 + x_shape = list(a.shape) + x_shape[np_axis] = n + if np_axis < 0: + np_axis += len(x_shape) + x_shape.insert(np_axis + 1, np_size) + strides = list(a.strides) + eff_stride = strides[np_axis] * np_stride + strides.insert(np_axis, eff_stride) + return np.lib.stride_tricks.as_strided(a, x_shape, strides) + + rank, axis = rank_and_axis + shape = np.random.randint(low=2, high=5, size=rank) + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.sliding_windows(x=x, axis=axis, size=size, stride=stride)] + + expected_outputs = [ + np_sliding_windows(val, np_axis=axis, np_size=size, np_stride=stride) + ] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConcat: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t1 = np.array([[1, 2], [4, 5]], dtype=np.float32) + t2 = np.array([[7, 8]], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + + def build(x, y): + return (mb.concat(values=(x, y), axis=0),) + + expected_output_types = [ + (3, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2], [4, 5], [7, 8]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, n_inputs, negative_index", + itertools.product( + compute_units, + backends, + [1, 2, 3, 4, 5], + [2, 3], + [False, True], + ) + ) + def test_builder_to_backend_stress_interleave(self, compute_unit, backend, + rank, n_inputs, negative_index): + + def np_concat_interleave(arrays, axis): + step = len(arrays) + in_shape = arrays[0].shape + out_shape = list(in_shape) + if axis < 0: + axis += len(in_shape) + out_shape[axis] = step * in_shape[axis] + concat_tensor = np.empty(tuple(out_shape), dtype=np.float32) + for i in range(step): + if rank == 5: + if axis == 4: + concat_tensor[:, :, :, :, i::step] = arrays[i] + if axis == 3: + concat_tensor[:, :, :, i::step, :] = arrays[i] + if 
axis == 2: + concat_tensor[:, :, i::step, :, :] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :, :, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :, :, :] = arrays[i] + if rank == 4: + if axis == 3: + concat_tensor[:, :, :, i::step] = arrays[i] + if axis == 2: + concat_tensor[:, :, i::step, :] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :, :] = arrays[i] + if rank == 3: + if axis == 2: + concat_tensor[:, :, i::step] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :] = arrays[i] + if rank == 2: + if axis == 1: + concat_tensor[:, i::step] = arrays[i] + if axis == 0: + concat_tensor[i::step, :] = arrays[i] + if rank == 1: + concat_tensor[i::step] = arrays[i] + return concat_tensor + + input_shape = [4, 2, 3, 6, 5] + for axis in range(rank): + if negative_index: + axis = axis - rank + shape = tuple(input_shape[:rank]) + t1 = np.random.normal(size=shape).astype(np.float32) + t2 = np.random.normal(size=shape).astype(np.float32) + all_input_arrs = [t1, t2] + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + if n_inputs == 3: + t3 = np.random.normal(size=shape).astype(np.float32) + input_placeholders["z"] = mb.placeholder(shape=t3.shape) + input_values["z"] = t3 + all_input_arrs.append(t3) + + def build_2_inputs(x, y): + return (mb.concat(values=(x, y), axis=axis, interleave=True),) + + def build_3_inputs(x, y, z): + return (mb.concat(values=(x, y, z), axis=axis, interleave=True),) + + np_out = np_concat_interleave(all_input_arrs, axis) + expected_output_types = [np_out.shape + (types.fp32,)] + expected_outputs = [np_out] + + run_compare_builder( + build_3_inputs if n_inputs == 3 else build_2_inputs, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + values = [ + np.random.rand(1, 1, 6, 2), + np.random.rand(1, 1, 3, 2), + ] + v = mb.concat(values=values, axis=2) + np.testing.assert_allclose(np.concatenate(values, 2), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_failure(self): + values = [ + np.random.rand(1, 1, 6, 2), + np.random.rand(1, 1, 3, 1), + ] + with pytest.raises(ValueError): + mb.concat(values=values, axis=2) + + +class TestSplit: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.split(x=x, num_splits=2, axis=1) + mb.split( + x=x, split_sizes=[1, 2], axis=0 + ) + + expected_output_types = [ + (3, 1, types.fp32), + (3, 1, types.fp32), + (1, 2, types.fp32), + (2, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1], [3], [5]], dtype=np.float32), + np.array([[2], [4], [6]], dtype=np.float32), + np.array([[1, 2]], dtype=np.float32), + np.array([[3, 4], [5, 6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + vs = mb.split(x=t, num_splits=3, 
axis=0) + es = np.split(t, [1, 2, 3], axis=0) + for v, e in zip(vs, es): + np.testing.assert_allclose(e, v.val, atol=1e-04, rtol=1e-05) + + +class TestStack: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t1 = np.array([1, 2, 3], dtype=np.float32) + t2 = np.array([7, 8, 9], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + + def build(x, y): + return [mb.stack(values=(x, y), axis=0), mb.stack(values=(x, y), axis=1), mb.stack(values=(x, y), axis=-1)] + + expected_output_types = [ + (2, 3, types.fp32), + (3, 2, types.fp32), + (3, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3], [7, 8, 9]], dtype=np.float32), + np.array([[1, 7], [2, 8], [3, 9]], dtype=np.float32), + np.array([[1, 7], [2, 8], [3, 9]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + values = [ + np.random.rand(1, 1, 3, 2).astype(np.float32), + np.random.rand(1, 1, 3, 2).astype(np.float32), + ] + v = mb.stack(values=values, axis=2) + np.testing.assert_allclose(np.stack(values, 2), v.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py new file mode 100644 index 00000000..82e22c74 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py @@ -0,0 +1,262 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil.ops.defs._utils import ( + aggregated_pad, effective_kernel, spatial_dimensions_out_shape) + + +class TestDilation: + def test_kernel_and_dilations_not_same_size(self): + np.testing.assert_raises_regex( + ValueError, + "kernel_shape.*dilations.*length", + effective_kernel, + kernel_shape=(1, 2, 3), + dilations=(1, 2), + ) + + def test_effective_kernel_dilation_1(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(1, 1, 1)) + + expected = [1, 2, 3] + np.testing.assert_equal(actual, expected) + + def test_effective_kernel_dilation_2(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(2, 2, 2)) + + expected = [1, 3, 5] + np.testing.assert_equal(actual, expected) + + def test_effective_kernel_dilation_3(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(3, 3, 3)) + + expected = [1, 4, 7] + np.testing.assert_equal(actual, expected) + + +class TestAggregatePadding: + def test_invalid_pad_type(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid padding pad_type", + aggregated_pad, + pad_type="bananas", + kernel_shape=(1, 2, 3), + ) + + def test_dilations_rank_different_from_input_rank(self): + np.testing.assert_raises_regex( + ValueError, + "dilations must have same length as kernel_shape", + aggregated_pad, + pad_type="valid", # doesn't matter + kernel_shape=(1, 2, 3), + dilations=(4, 5), + ) + + def test_custom_pad(self): + actual = aggregated_pad( + pad_type="custom", kernel_shape=(1, 2, 3), custom_pad=(7, 8, 9, 10, 11, 12) + ) + + expected = [7 + 8, 9 + 10, 11 + 12] + np.testing.assert_equal(actual, expected) + + def test_custom_pad_none(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid custom_pad", + aggregated_pad, + pad_type="custom", + kernel_shape=(1, 2, 3), # doesn't matter + custom_pad=None, + ) + + def test_custom_pad_invalid(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid custom_pad", + aggregated_pad, + pad_type="custom", + kernel_shape=(1, 2, 3), # doesn't matter + custom_pad=(7, 8, 9, 10), # too few elements + ) + + def test_valid_pad(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2, 3),) + + expected = [0, 0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_4d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2, 3, 4),) + + expected = [0, 0, 0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_2d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2),) + + expected = [0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_1d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=[4]) + + expected = [0] + np.testing.assert_equal(actual, expected) + + def test_same_padding_no_dilation(self): + actual = aggregated_pad( + pad_type="same", + input_shape=(5, 6, 7), + kernel_shape=(2, 2, 2), + strides=(1, 2, 2), + ) + + expected = [1, 0, 1] + np.testing.assert_equal(actual, expected) + + def test_same_padding_dilation_with_dilation(self): + actual = aggregated_pad( + pad_type="same", + input_shape=(19, 20, 21), + kernel_shape=(2, 2, 2), + strides=(1, 2, 2), + dilations=(5, 6, 7), + ) + + expected = [5, 5, 7] + np.testing.assert_equal(actual, expected) + + def test_same_padding_stride_same_as_input(self): + actual = aggregated_pad( + 
pad_type="same", input_shape=(5, 5), kernel_shape=(3, 3), strides=(5, 5), + ) + + expected = [0, 0] + np.testing.assert_equal(actual, expected) + + def test_same_padding_stride_larger_than_kernel_but_less_than_input(self): + actual = aggregated_pad( + pad_type="same", input_shape=(5, 5), kernel_shape=(3, 3), strides=(4, 4), + ) + + expected = [2, 2] + np.testing.assert_equal(actual, expected) + + def test_same_padding_none_input_shape(self): + np.testing.assert_raises_regex( + ValueError, + "input_shape.*None", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + strides=(1, 2, 3), + ) + + def test_same_padding_input_shape_wrong_size(self): + np.testing.assert_raises_regex( + ValueError, + "input_shape.*same length", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2), + strides=(1, 2, 3), + ) + + def test_same_padding_none_strides(self): + np.testing.assert_raises_regex( + ValueError, + "strides.*None", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2, 3), + ) + + def test_same_padding_strides_wrong_size(self): + np.testing.assert_raises_regex( + ValueError, + "strides.*same length", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2, 3), + strides=(1, 2), + ) + + +class TestOutputShape: + def test_custom_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="custom", + input_shape=(3, 3, 3), + kernel_shape=(2, 2, 2), + strides=(2, 2, 2), + custom_pad=(2, 0, 1, 2, 2, 3), + ) + + expected = [2, 3, 4] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", input_shape=(7, 7), kernel_shape=(3, 3), strides=(1, 1) + ) + + expected = [5, 5] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape_dilation_2(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", + input_shape=(7, 7), + kernel_shape=(3, 3), + strides=(1, 1), + dilations=(2, 2), + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape_with_stride_2(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", input_shape=(7, 7), kernel_shape=(3, 3), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="same", input_shape=(6, 6), kernel_shape=(2, 2), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape_stride_2_input_not_multiple_of_kernel(self): + actual = spatial_dimensions_out_shape( + pad_type="same", input_shape=(5, 5), kernel_shape=(2, 2), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape_dilation_2(self): + actual = spatial_dimensions_out_shape( + pad_type="same", + input_shape=(5, 5), + kernel_shape=(2, 2), + strides=(1, 1), + dilations=(2, 2), + ) + + expected = [5, 5] + np.testing.assert_equal(actual, expected) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py new file mode 100644 index 00000000..c6528b40 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py @@ -0,0 +1,159 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import coremltools as ct +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Function, Program +from coremltools.converters.mil.mil.types.symbolic import is_symbolic +from coremltools.converters.mil.testing_utils import (compare_backend, + ct_convert) + + +UNK_VARIADIC = "*s_unk" +UNK_SYM = "s_unk" + + +def run_compare_builder( + build, + input_placeholders, + input_values=None, + expected_output_types=None, + expected_outputs=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + backend=("neuralnetwork", "fp32"), + atol=1e-04, + rtol=1e-05, + inputs=None, + also_compare_shapes=False, + converter=ct.convert, + minimum_deployment_target=None, +): + """ + Inputs: + - build: python function taking input of Vars and returning Var or + list[Var]. Each input argument in build must match a key in + input_values / input_placeholders. + + - input_placeholders: str -> placeholder. It may not be an empty + dict as MLModel doesn't support function with + no input. + + - input_values: str -> np.array or PIL.Image. Keys must match those in + input_placeholders. + + - expected_output_types: list[(shape, builtin_type)] or (shape, + builtin_type). None skips type inference validation. + + - compute_unit: Enum[ct.ComputeUnit]. Compute unit for the coreml model + + - expected_outputs: list[np.array] or np.array. Required iff + frontend_only == False + + - frontend_only: True to test up to proto generation. + + - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType]) + + - converter: function + Reference to convert function to be used. + Default: ct.convert + + - minimum_deployment_target : coremltools.target enumeration (optional) + A member of the ``coremltools.target`` enum. + + Returns: + The converted mlmodel + """ + if not isinstance(expected_output_types, list): + expected_output_types = [expected_output_types] + + if expected_outputs is not None and not isinstance(expected_outputs, list): + expected_outputs = [expected_outputs] + + prog = Program() + with Function(input_placeholders, opset_version=minimum_deployment_target) as ssa_func: + output_vars = build(**ssa_func.inputs) + if isinstance(output_vars, tuple): + output_vars = list(output_vars) + elif not isinstance(output_vars, list): + output_vars = [output_vars] + ssa_func.set_outputs(output_vars) + prog.add_function("main", ssa_func) + + # get output names for output_vars + output_names = [x.name for x in output_vars] + + # Validate type inference + msg = ( + "Provided expected outputs types {} should match number of output" + + " variables {}" + ) + assert_msg = msg.format(len(expected_output_types), len(output_vars)) + assert len(output_vars) == len(expected_output_types), assert_msg + + for out_var, s in zip(output_vars, expected_output_types): + if out_var.dtype != s[-1]: + raise ValueError( + "Output {} type: expect {}, got {}. Program:\n{}".format( + out_var.name, s[-1].__type_info__(), + out_var.dtype.__type_info__(), prog + ) + ) + if UNK_VARIADIC in s[:-1]: + msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}" + logger.debug(msg.format(out_var.shape, s[:-1])) + continue + expected_shape = s[:-1] + msg = "Output {} shape: expect {}, got {}. Program:\n{}".format( + out_var.name, expected_shape, out_var.shape, prog + ) + # No more variadic here. 
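+        # Rank must match exactly at this point; symbolic dims marked UNK_SYM
+        # are normalized to 0 on both sides below before the final comparison.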
+ if len(out_var.shape) != len(expected_shape): + raise ValueError(msg) + # replace UNK_SYM in out_var.shape. + output_shape = [ + 0 if es == UNK_SYM else os for os, es in zip(out_var.shape, expected_shape) + ] + expected_shape = [0 if es == UNK_SYM else es for es in expected_shape] + # convert float etc to int. + output_shape = [i if is_symbolic(i) else int(i) for i in output_shape] + expected_shape = [i if is_symbolic(i) else int(i) for i in expected_shape] + if output_shape != expected_shape: + raise ValueError(msg) + + mlmodel = ct_convert(prog, + converter=converter, + source="milinternal", + convert_to=backend, + inputs=inputs, + compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target + ) + + if frontend_only: + return mlmodel + + if expected_outputs: + assert len(output_vars) == len(expected_outputs), ( + "Provided expected_outputs {}" + " should match number of output" + " variables {}".format(len(expected_outputs), len(output_vars)) + ) + + expected_outputs = { + name: val for name, val in zip(output_names, expected_outputs) + } + + compare_backend( + mlmodel=mlmodel, + input_key_values=input_values, + expected_outputs=expected_outputs, + atol=atol, + rtol=rtol, + also_compare_shapes=also_compare_shapes, + dtype=backend[1] + ) + + return mlmodel diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py new file mode 100644 index 00000000..ec624d2d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py @@ -0,0 +1,43 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +# Import all frontend/backend passes to make sure they got registered. +from coremltools.converters.mil.backend.mil.passes import ( + adjust_io_to_supported_types, + fuse_activation_silu, + insert_image_preprocessing_op, + sanitize_name_strings, +) +from coremltools.converters.mil.backend.nn.passes import ( + alert_return_type_cast, + commingle_loop_vars, + conv1d_decomposition, + handle_return_inputs_as_outputs, + handle_return_unused_inputs, + handle_unused_inputs, + mlmodel_passes, +) +from coremltools.converters.mil.frontend.tensorflow2.ssa_passes import remove_vacuous_cond +from coremltools.converters.mil.frontend.tensorflow.ssa_passes import ( + backfill_make_list_elem_type, + expand_tf_lstm, + tf_lstm_to_core_lstm, +) +from coremltools.converters.mil.frontend.torch.ssa_passes import ( + torch_tensor_assign_to_core, + torch_upsample_to_core_upsample, +) +from coremltools.converters.mil.mil.passes.defs import ( + cleanup, + lower_complex_dialect_ops, + optimize_activation, + optimize_conv, + optimize_elementwise_binary, + optimize_linear, + optimize_normalization, + optimize_repeat_ops, + optimize_tensor_operation, + preprocess, +) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py new file mode 100644 index 00000000..25c7d28c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py new file mode 100644 index 00000000..5c534eb7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .const_elimination import const_elimination +from .dead_code_elimination import dead_code_elimination +from .dedup_op_and_var_names import dedup_op_and_var_names +from .fuse_reduce_mean import fuse_reduce_mean +from .loop_invariant_elimination import loop_invariant_elimination +from .noop_elimination import noop_elimination +from .remove_redundant_ops import remove_redundant_ops +from .remove_symbolic_reshape import remove_symbolic_reshape +from .topological_reorder import topological_reorder diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py new file mode 100644 index 00000000..41db68e7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py @@ -0,0 +1,103 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class const_elimination(AbstractGraphPass): + """ + Replace non-``const`` ops that have ``const`` Var. Outputs are replaced with the ``const`` op. Example: + + .. code-block:: + + Given: + %2, %3 = non_const_op(...) # %2 is const, %3 isn't const + %4 = other_op(%2, %3) + + Result: + _, %3 = non_const_op(...) # _ is the ignored output + %2_const = const() # %2_const name is for illustration only + %4 = other_op(%2_const, %3) + + Support options: + - skip_const_by_size: Skip folding consts that have larger number of elements than a threshold. + """ + + _skip_const_by_size = None + + @property + def skip_const_by_size(self): + return self._skip_const_by_size + + @skip_const_by_size.setter + def skip_const_by_size(self, threshold: str): + try: + # Convert to float instead of int to support more flexible input such as `1e6`. + threshold = float(threshold) + except Exception as e: + raise ValueError( + f"Expected to get float threshold, but got `{threshold}` which cannot " + f"be converted to float. 
{e}" + ) + self._skip_const_by_size = float(threshold) + + def apply(self, prog: Program): + for f in prog.functions.values(): + self._const_elimination_block(f) + + @block_context_manager + def _const_elimination_block(self, block): + # shallow copy hides changes on f.operations during the loop + for op in list(block.operations): + if op.op_type == "const": + continue + + for b in op.blocks: + self._const_elimination_block(b) + + all_outputs_are_replaced = True + for output in op.outputs: + if output.can_be_folded_to_const(): + if ( + self._skip_const_by_size is not None + and len(output.shape) > 0 + and output.val.size > self._skip_const_by_size + ): + logger.warning( + f"The output ({output}) of op {op} is skipped in const elimination pass " + f"because its val size ({output.val.size}) is larger than threshold " + f"({self._skip_const_by_size})." + ) + all_outputs_are_replaced = False + break + + res = mb.const( + val=output.val, + before_op=op, + # same var name, but different python + # instance does not violate SSA property. + name=output.name, + ) + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=op, + old_var=output, + new_var=res, + ): + # rename the const output + output.set_name(output.name + "_ignored") + else: + all_outputs_are_replaced = False + else: + all_outputs_are_replaced = False + + if all_outputs_are_replaced: + op.remove_from_block() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py new file mode 100644 index 00000000..bbe6578e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py @@ -0,0 +1,79 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class dead_code_elimination(AbstractGraphPass): + """ + Eliminate unused ops in program. Ops whose outputs do not contribute to final outputs will be + deleted. + + .. code-block:: + + # Before dead_code_elimination pass. + main(%x: (2, 4, fp32)) { + block0() { + %const_2: (4, 2, fp32)* = const(val=[...]) + %const_3: (4, fp32)* = const(val=[...]) + %tx_0: (bool)* = const(val=False) + %ty_0: (bool)* = const(val=False) + %matmul_0: (2, 2, fp32) = matmul(x=%x, y=%const_2, transpose_x=%tx_0, transpose_y=%ty_0) + %linear_0: (2, 4, fp32) = linear(x=%x, weight=%const_2, bias=%const_3) + } -> (%linear_0) + } + + # After dead_code_elimination pass. + main(%x: (2, 4, fp32)) { + block0() { + %const_2: (4, 2, fp32)* = const(val=[...]) + %const_3: (4, fp32)* = const(val=[...]) + %linear_0: (2, 4, fp32) = linear(x=%x, weight=%const_2, bias=%const_3) + } -> (%linear_0) + } + + In the example above, ``%matmul_0`` is an op that is not used in the computation. This op and + its input ops (``%tx_0`` and ``%ty_0``) are eliminated in this pass. 
+ """ + + def apply(self, prog: Program): + for f in prog.functions.values(): + self._dead_code_elimination_block(f) + + @staticmethod + def _dead_code_elimination_block(block): + used_vars = set() + ops_to_remove = list() + + # mark block's outputs to used + used_vars.update(block.outputs) + + for op in reversed(block.operations): + # if none of op's output is used, delete op + if not set(op.outputs).intersection(used_vars): + ops_to_remove.append(op) + continue + + # mark all op's inputs to used + for _, input_var in op.inputs.items(): + if isinstance(input_var, (tuple, list)): + used_vars.update(list(input_var)) + else: + used_vars.update([input_var]) + + for b in op.blocks: + used_in_block = dead_code_elimination._dead_code_elimination_block(b) + used_vars.update(used_in_block) + + for op in ops_to_remove: + logger.info('Removing op "{}" (type: {})'.format(op.name, op.op_type)) + op.remove_from_block() + + return used_vars diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py new file mode 100644 index 00000000..f2067552 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py @@ -0,0 +1,94 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import collections +import itertools + +from coremltools.converters.mil.mil import Function +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class dedup_op_and_var_names(AbstractGraphPass): + """ + For each function, this pass renames ops and variables with the same name + as any preceding ops/variables across all scopes in the given function, + where the precedence is implementation-specific. Note that an op name and + variable names are tracked separately, so an op may have the same name as + a variable. + + The pass preserves input and output name. Raises ValueError if we cannot + dedup without changing the input/output var names. + + .. code-block:: + + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop") + x = mb.square(x=x, name="square_last") + return x + + # Before dedup pass, the op names are ["castop", "castop", "square_last"]. + # After dedup pass, the op names are ["castop", "castop_1", "square_last"]. 
+ """ + + def apply(self, prog): + for func in prog.functions.values(): + # Handle function input/outputs as they cannot be changed (to maintain user interface) + inputs = list(func.function_inputs) + io_vars = set(inputs + func.outputs) + self._ensure_unique_var_names(io_vars) + seen_var_names = set([v.name for v in io_vars]) + seen_op_names = set() + self._deduplicate_block(func, set(func.outputs), seen_var_names, seen_op_names) + + @staticmethod + def _gen_new_name(seen_names, curr_name): + if curr_name not in seen_names: + return curr_name + # make sure the name is unique + for i in itertools.count(start=1): # loop from 1 to infinity + # rename duplicated name start from 1: 'xxx_1' + new_name = curr_name + "_" + str(i) + if new_name not in seen_names: + return new_name + + def _deduplicate_block(self, block, func_outputs, seen_var_names, seen_op_names): + """ + seen_var_names: set[str] + seen_op_names: set[str] + """ + # Add block input (function input is handled separately) + if not isinstance(block, Function): + for v in block.inputs: + v.name = self._gen_new_name(seen_var_names, v.name) + seen_var_names.add(v.name) + + for op in list(block.operations): + for b in op.blocks: + self._deduplicate_block(b, func_outputs, seen_var_names, seen_op_names) + if op.name is not None: + op.name = self._gen_new_name(seen_op_names, op.name) + seen_op_names.add(op.name) + for v in op.outputs: + if v in func_outputs: + # func output is never renamed + continue + v.name = self._gen_new_name(seen_var_names, v.name) + seen_var_names.add(v.name) + + @staticmethod + def _ensure_unique_var_names(v_set): + """ + v_set: set[Variable] + + All variables in v_set should have different names. Raise ValueError + otherwise + """ + names = [v.name for v in v_set] + dup_names = [name for name, count in collections.Counter(names).items() if count > 1] + if len(dup_names) > 0: + raise ValueError(f"Var names {dup_names} is used both as function's input and output") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py new file mode 100644 index 00000000..815c6076 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py @@ -0,0 +1,123 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_child_op_type, + _check_var_scalar_value, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +@register_pass(namespace="common") +class fuse_reduce_mean(AbstractGraphPass): + """ + Detect the "``reduce_sum``--->``mul/real_div``" pattern than can be mapped to ``reduce_mean``. + That is, the operation ``reduce_sum/count == reduce_mean``. + + .. 
code-block:: + + Input graph: + + const (scalar) + | + input ----> reduce_sum ----> mul/real_div -----------> output + + Output graph: + + input --------> reduce_mean ---------> output + + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_reduce_mean_block(f) + + @staticmethod + def _try_to_transform(reduce_sum_op, block): + + ops_to_remove = [] + + # check that the dimensions in the shape of the input to the reduce_sum op, + # over which the reduction operation is being performed, are known + input_shape = reduce_sum_op.x.shape + if input_shape is None: + return False + axes = None + if reduce_sum_op.axes is not None: + axes = reduce_sum_op.axes.val + if axes is None: + return False + count = 1 + for dim in axes: + if is_symbolic(input_shape[dim]): + return False + count *= input_shape[dim] + + # check that output of reduce_sum is not a block output + if reduce_sum_op.outputs[0] in block.outputs: + return False + ops_to_remove.append(reduce_sum_op) + + # check that reduce_sum op is followed by either: + # - mul op with scalar value 1/count + # or + # - real_div op with scalar value count + if _check_child_op_type(reduce_sum_op, "mul"): + child_op = list(reduce_sum_op.outputs[0].child_ops)[0] + other_input = child_op.x if child_op.y == reduce_sum_op.outputs[0] else child_op.y + if not _check_var_scalar_value(other_input, 1.0 / count, 1e-6): + return False + elif _check_child_op_type(reduce_sum_op, "real_div"): + child_op = list(reduce_sum_op.outputs[0].child_ops)[0] + if child_op.x != reduce_sum_op.outputs[0]: + return False + other_input = child_op.y + if not _check_var_scalar_value(other_input, count, 1e-2): + return False + else: + return False + + ops_to_remove.append(child_op) + + # remove all the ops, and replace with a reduce_mean op + out_name = child_op.outputs[0].name + x = mb.reduce_mean( + x=reduce_sum_op.x, + axes=reduce_sum_op.axes.val, + keep_dims=reduce_sum_op.keep_dims.val, + name=out_name, + before_op=child_op, + ) + child_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=child_op, old_var=child_op.outputs[0], new_var=x + ) + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _fuse_reduce_mean_block(self, block): + fusion_status = False + for i, op in enumerate(list(block.operations)): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_reduce_mean_block(b) + if len(op.blocks) > 0: + continue + + # start pattern match if mul op is encountered + if op.op_type == "reduce_sum": + fusion_status = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py new file mode 100644 index 00000000..774c6b20 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py @@ -0,0 +1,169 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class loop_invariant_elimination(AbstractGraphPass): + """ + When a block does not modify a block input var, eliminate that block + input var and use the corresponding var in the outer scope. Example: + + .. code-block:: + + # Before loop_invariant_elimination pass. + # Notice that ``%b.x`` is constant through while loop iterates. + main(%a: (1, 2, fp32), + %b: (1, 2, fp32)) { + block0() { + %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \ + while_loop(loop_vars=(%a, %b)) + loop_cond(%a.x, %b.x) { + %cond_var: (bool) = some_op(x=%a.x, y=%b.x) + } -> (%cond_var) + loop_body(%a.x, %b.x) { + %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x) + } -> (%add_0, %b.x) + } -> (%loop:0, %loop:1) + } + + # After loop_invariant_elimination pass. + main(%a: (1, 2, fp32), + %b: (1, 2, fp32)) { + block0() { + %loop:1: (1, 2, fp32) = identity(x=%b) + %loop:0: (1, 2, fp32) = \ + while_loop(loop_vars=(%a)) + loop_cond(%a.x) { + %cond_var: (bool) = some_op(x=%a.x, y=%b) + } -> (%cond_var) + loop_body(%a.x) { + %add_0: (1, 2, fp32) = add(x=%a.x, y=%b) + } -> (%add_0) + } -> (%loop:0, %loop:1) + } + + where we eliminate loop invariant ``%b.x`` from ``while_loop``, which returns 1 + instead of 2 outputs. We also preserve the return var names with identity. + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._loop_invariant_elimination_block(f) + + @staticmethod + def _detect_loop_invariants(while_op): + block = while_op.blocks[1] # body block + loop_invariant_ids = [] # list of index in op.loop_vars, block.inputs + for i, vx_in in enumerate(block.inputs): + vx_out = block.outputs[i] # first output is cond var. + return_input_as_output = vx_in == vx_out + # this block output is a var from outside of the block + + enclosing_block = while_op.enclosing_block + while_op_id = enclosing_block.find_op_id_in_block(while_op) + output_from_outside_of_block = ( + True + if enclosing_block.is_var_visible_in_block(vx_out, upto_op_with_id=while_op_id) + else False + ) + if return_input_as_output or output_from_outside_of_block: + loop_invariant_ids.append(i) + + # TODO: All outputs that depend on only invariants are invariant. We + # need to move computation out of while loop. + return loop_invariant_ids + + @block_context_manager + def _loop_invariant_elimination_block(self, block): + # Phase 1: Find vars needed to be renamed. + # + # while_loop outputs need to be renamed if the output will be eliminated + # (due to loop invariant) and is returned as block output (which would + # change the return var name and the program interface). 
+ # + # list[(v_src, v_tgt, before_op)]: will rename v_src to v_tgt before + # before_op (a while_loop) + output_rename = [] + for op in list(block.operations): + for b in op.blocks: + self._loop_invariant_elimination_block(b) + + if op.op_type != "while_loop": + continue + + loop_invariant_ids = self._detect_loop_invariants(op) + for i in loop_invariant_ids: + output_rename.append((op.loop_vars[i], op.outputs[i], op)) + if len(loop_invariant_ids) > 0: + # Avoid the following case: + # %a, %b = while_loop(..., name="b") + # becomes + # %b = identity(..., name="b") + # %a = while_loop(..., name="b") + # (two ops with the same name -> name collision) + op.name = op.name + "_renamed" + + # Phase 2: insert rename ops. This changes block.operations + for v_src, v_tgt, op in output_rename: + if v_tgt in block.outputs: + # rename the loop output to existing block output names + res = mb.identity(x=v_src, before_op=op, name=v_tgt.name) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=v_tgt, new_var=res + ) + + # Phase 3: Perform loop invariant elimination without fear! + for op in list(block.operations): + if op.op_type != "while_loop": + continue + loop_invariant_ids = self._detect_loop_invariants(op) + + # replace uses of loop_invariants with its source from outside of the + # while_loop op. + for i in loop_invariant_ids: + for block in op.blocks: + block.replace_uses_of_var_after_op( + anchor_op=None, old_var=block.inputs[i], new_var=op.loop_vars[i] + ) + + # replace block inputs + for block in op.blocks: + block.remove_inputs([block.inputs[i] for i in loop_invariant_ids]) + + # remove invariants from while_loop loop_vars + for i in loop_invariant_ids: + # replace usage of while_loop outputs that we'll eliminate. + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[i], new_var=op.loop_vars[i] + ) + + # Remove after replacing to ensure program is valid + for i in loop_invariant_ids: + op.loop_vars[i].remove_child_op(op) + + op.loop_vars = tuple( + v for i, v in enumerate(op.loop_vars) if i not in loop_invariant_ids + ) + op._input_vars["loop_vars"] = op.loop_vars + + # remove invariants from while_loop body_block outputs + body_block = op.blocks[1] + body_block.set_outputs( + [v for i, v in enumerate(body_block.outputs) if i not in loop_invariant_ids] + ) + + # op._output_vars doesn't include cond var + op._output_vars = [ + v for i, v in enumerate(op._output_vars) if i not in loop_invariant_ids + ] + + # check healthy state + op.enclosing_block.validate() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py new file mode 100644 index 00000000..0e9aac55 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py @@ -0,0 +1,243 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +import numpy as np + +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class noop_elimination(AbstractGraphPass): + """ + Remove ops that have no effect. + + .. code-block:: + + Given: + %1 (1, 96, 128, 64, fp32) = ... + %2 (1, 96, 128, 64, fp32) = reshape(%1) + ... + %3 (1, 96, 128, 64, fp32) = add(%2, constant) + ... + + Result: + %1 (1, 96, 128, 64, fp32) = ... + %3 (1, 96, 128, 64, fp32) = add(%1, constant) + ... + """ + + _SUPPORTED_OPS = { + "add", + "mul", + "floor_div", + "pow", + "real_div", + "sub", + "reshape", + "split", + "slice_by_index", + "slice_by_size", + "pad", + "tile", + "transpose", + "upsample_nearest_neighbor", + "upsample_bilinear", + "resize_bilinear", + "crop", + "linear_activation", + } + + def apply(self, prog): + for f in prog.functions.values(): + self._noop_elimination_block_wrapper(f) + + @staticmethod + def _match_pattern(op, block): + def _remove_elementwise_binary(op, x, y): + # We remove the ops that has op.x == x or op.y == y + def has_all_elements_equal_to(var, value): + if value is None: + return False + + if var.val is not None: + return np.all(var.val == value) + elif var.op is not None and var.op.op_type == "fill": + fill_value = var.op.value.val + return fill_value is not None and (fill_value == value) + else: + return False + + if has_all_elements_equal_to(op.x, x): + input_var = op.y + input_op = input_var.op + elif has_all_elements_equal_to(op.y, y): + input_var = op.x + input_op = input_var.op + else: + return False + + input_shape = input_var.sym_type + output_shape = op.outputs[0].sym_type + + # We might be using elementwise as broadcasting + if input_shape != output_shape: + return False + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_elementwise(op, block): + if op.op_type in {"add"}: + return _remove_elementwise_binary(op, 0, 0) + elif op.op_type in {"mul"}: + return _remove_elementwise_binary(op, 1, 1) + elif op.op_type in {"floor_div", "pow", "real_div"}: + return _remove_elementwise_binary(op, None, 1) + elif op.op_type in {"sub"}: + return _remove_elementwise_binary(op, None, 0) + else: + return False + + def remove_slice_by_index(op, block): + input_shape = op.x.sym_type + output_shape = op.outputs[0].sym_type + + if input_shape != output_shape: + return False + + if op.stride is not None and op.stride.val is not None: + stride = op.stride.val.flatten().tolist() + if any([x < 0 for x in stride]): + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_same_shape(op, block): + input_shape = op.x.sym_type + output_shape = op.outputs[0].sym_type + + if input_shape != output_shape: + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + 
new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_linear(op, block): + if op.alpha.val != 1 or op.beta.val != 0: + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_transpose(op, block): + perm = np.array([p if p >= 0 else p + len(op.perm.val) for p in op.perm.val]) + sorted_perm = np.sort(perm) + if (perm != sorted_perm).any(): + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + op_to_removal_fn = { + "add": remove_elementwise, + "mul": remove_elementwise, + "floor_div": remove_elementwise, + "pow": remove_elementwise, + "real_div": remove_elementwise, + "sub": remove_elementwise, + "reshape": remove_same_shape, + "split": remove_same_shape, + "slice_by_index": remove_slice_by_index, + "slice_by_size": remove_same_shape, + "pad": remove_same_shape, + "tile": remove_same_shape, + "transpose": remove_transpose, + "upsample_nearest_neighbor": remove_same_shape, + "upsample_bilinear": remove_same_shape, + "resize_bilinear": remove_same_shape, + "crop": remove_same_shape, + "linear_activation": remove_linear, + } + + # abort if op output is a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + + if op.op_type in noop_elimination._SUPPORTED_OPS: + + if len(op.outputs) != 1: + return None + return op_to_removal_fn[op.op_type] + + return None + + @block_context_manager + def _noop_elimination_block_wrapper(self, block): + def _noop_elimination_block(block): + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = _noop_elimination_block(b) + if len(op.blocks) > 0: + continue + + remove_fn = noop_elimination._match_pattern(op, block) + if remove_fn is not None: + status = remove_fn(op, block) + # has to break as the downstream iterator is affected. + if status: + return status + return False + + block_changed = True + while block_changed: + block_changed = _noop_elimination_block(block) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py new file mode 100644 index 00000000..2c0905e0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py @@ -0,0 +1,196 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import collections
+
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import _are_ops_identical, block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class remove_redundant_ops(AbstractGraphPass):
+    """
+    If there are multiple ops with "identical" inputs, they are redundant and all but one of them can be removed.
+    This pass checks for and removes such ops.
+
+    Since all inputs to ops in MIL are named, two ops with the same ``op_type`` can be compared by comparing their
+    correspondingly named inputs. Inputs are treated as identical if one of the following is true:
+
+    - The input is a constant var, in which case its value should have the same dtype and numerical value.
+    - The input is a non-constant var, in which case it should be the same var object.
+
+    This pass iterates over the ops, takes each op's first output var, and builds a candidate op list from the child
+    ops of this var.
+    This candidate ops list contains ops of the same ``op_type``, arranged in topological order.
+    The second, third, and subsequent ops in the list are pairwise compared with the first op,
+    and if identical to it, they are removed. For example:
+
+    .. code-block::
+
+        Input:
+            %0 = op0(...)
+            %1 = op1(...)
+            %2 = const(val=4.5)
+            %3 = const(val=4.5)
+            %4 = op2(%1, %0, %2)
+            %5 = op3(%1, %0, %3)
+
+        Output:
+            %0 = op0(...)
+            %1 = op1(...)
+            %2 = const(val=4.5)
+            %3 = const(val=4.5) # this will get removed later by the dead code elimination pass
+            %4 = op2(%1, %0, %2)
+
+    In the example above, ``op3`` is removed and all uses of ``%5`` are replaced by ``%4``.
+    For more examples, see "TestRemoveRedundantOpsPass".
+    """
+
+    _NON_REDUNDANT_OPS = tuple()
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._remove_redundant_ops_in_block_wrapper(f)
+
+    @staticmethod
+    def _is_op_eligible_to_be_removed(op):
+        if (
+            len(op.blocks) != 0
+            or op.op_type.startswith("random")
+            or op.op_type in remove_redundant_ops._NON_REDUNDANT_OPS
+        ):
+            return False
+        else:
+            return True
+
+    @staticmethod
+    def _get_candidate_ops_list(prospective_ops_list):
+        od = collections.OrderedDict()
+        enclosing_block = [op.enclosing_block for op in prospective_ops_list]
+        if len(set(enclosing_block)) > 1:  # all candidate ops must belong to the same block
+            return []
+        for op in prospective_ops_list:
+            if remove_redundant_ops._is_op_eligible_to_be_removed(op):
+                od[op] = enclosing_block[0].operations.index(op)
+        # Sort the ops according to their index of appearance in block.operations, which is
+        # topologically sorted.
+        return [x[0] for x in sorted(od.items(), key=lambda t: t[1])]
+
+    @staticmethod
+    def _get_candidate_ops_lists_from_var(var):
+        """
+        Return a list of lists.
+        Each element is a list of a subset of the child ops of var, which satisfies the following conditions:
+        - They are of the same op_type.
+        - Ops are not repeated in it. (The .child_ops property of a var may sometimes contain an op repeated more than once.)
+        - The ops are ordered based on the order in which they appear in the block.operations list (which is topologically sorted),
+          with ops appearing earlier in that list appearing first here.
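+
+        For illustration, suppose ``var.child_ops`` is ``[add_1, mul_1, add_2, add_1]``
+        (hypothetical ops). The result would be ``[[add_1, add_2]]``: the repeated
+        ``add_1`` is dropped, and ``mul_1`` forms no list because it has no
+        same-type sibling.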
+        """
+        candidate_ops_lists = []
+
+        op_types_to_ops = collections.OrderedDict()
+        for op in var.child_ops:
+            if op.op_type in op_types_to_ops:
+                op_types_to_ops[op.op_type].append(op)
+            else:
+                op_types_to_ops[op.op_type] = [op]
+
+        for v in op_types_to_ops.values():
+            if len(v) > 1:
+                candidate_ops_list = remove_redundant_ops._get_candidate_ops_list(v)
+                if len(candidate_ops_list) > 1:
+                    candidate_ops_lists.append(candidate_ops_list)
+
+        return candidate_ops_lists
+
+    @staticmethod
+    def _try_to_remove_ops(candidate_ops_list):
+        # candidate_ops_list contains ops in topological order.
+        # All the ops in candidate_ops_list will be compared to the first op, and removed if identical to it.
+        # Removing ops later in the topological order is much easier, as their output vars
+        # can simply be replaced by the output var of the first_op. This doesn't require
+        # changing any op order in the block.
+        if len(candidate_ops_list) < 2:
+            return False
+        first_op = candidate_ops_list[0]
+        block = first_op.enclosing_block
+
+        # Currently, we only consider the cases when the op has 1 output.
+        # The replace var logic below only handles the single output case.
+        if len(first_op.outputs) > 1:
+            return False
+
+        ops_to_remove = []
+        for op in candidate_ops_list[1:]:
+            if op.outputs[0] not in block.outputs:  # to make sure we don't remove an output op
+                if _are_ops_identical(first_op, op):
+                    ops_to_remove.append(op)
+
+        if len(ops_to_remove) == 0:
+            return False
+
+        # Remove uses of the output vars of the ops to be removed.
+        # This can be safely done, since all the ops in ops_to_remove
+        # appear after first_op; hence the first_op.outputs[0] variable is in
+        # scope before each op's output var.
+        for op in ops_to_remove:
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=first_op.outputs[0]
+            )
+        block.remove_ops(ops_to_remove)
+        return True
+
+    @staticmethod
+    def _try_to_transform(parent_var):
+        """
+        Scan the child ops of parent_var to find and remove identical ops, if any.
+        Returns True if successful in finding such redundant ops.
+        """
+        candidate_ops_lists = remove_redundant_ops._get_candidate_ops_lists_from_var(parent_var)
+        block_changed = False
+        for ops_list in candidate_ops_lists:
+            if remove_redundant_ops._try_to_remove_ops(ops_list):
+                block_changed = True
+        return block_changed
+
+    @block_context_manager
+    def _remove_redundant_ops_in_block_wrapper(self, block):
+        def _remove_redundant_ops_in_block(block):
+            if isinstance(block.inputs, dict):
+                block_input_var_list = list(block.inputs.values())
+            elif isinstance(block.inputs, (list, tuple)):
+                block_input_var_list = block.inputs
+            else:
+                raise ValueError("Unrecognized type of block.inputs: it is neither a list nor a dict.")
+
+            # iterate over the block inputs
+            for input_var in block_input_var_list:
+                if len(input_var.child_ops) > 1:
+                    self._try_to_transform(input_var)
+
+            # iterate over the ops in the block
+            graph_updated = False
+            for op in block.operations:
+                if op.op_type == "const":
+                    continue
+
+                for b in op.blocks:
+                    block_changed = True
+                    while block_changed:
+                        block_changed = _remove_redundant_ops_in_block(b)
+
+                if len(op.outputs) > 0 and len(op.outputs[0].child_ops) > 1:
+                    # Currently, we only check the first output of the op.
+                    # This can be extended, if required, to check the other outputs.
+                    graph_updated = self._try_to_transform(op.outputs[0])
+                    # has to break as the downstream iterator is affected.
+ if graph_updated: + return graph_updated + return graph_updated + + block_changed = True + while block_changed: + block_changed = _remove_redundant_ops_in_block(block) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py new file mode 100644 index 00000000..60db0130 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py @@ -0,0 +1,95 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import any_variadic, is_symbolic, num_symbolic + + +@register_pass(namespace="common") +class remove_symbolic_reshape(AbstractGraphPass): + """ + Convert symbolic shape in ``reshape`` to integers. + + Note: This does not perform any optimization, but simply + replaces symbols with positive integers if solved from volumetric + constraint, or -1. Therefore, this pass fails if more than one symbol + needs to be resolved to -1. + + .. code-block:: + + # Before remove_symbolic_reshape pass. + main(%x: (s0, 4, fp32)) { + block0() { + %reshape_0_shape_0: (3,i32)^ = const(val=(s0, s1, 2)) + %reshape_0: (s0, 2, 2, fp32) = reshape(x=%x, shape=%reshape_0_shape_0) + } -> (%reshape_0) + } + + # After remove_symbolic_reshape pass. + main(%x: (s0, 4, fp32)) { + block0() { + %reshape_0_shape_0x: (3,i32)* = const(val=[-1, 2, 2]) + %reshape_0: (-1, 2, 2, fp32) = reshape(x=%x, shape=%reshape_0_shape_0x) + } -> (%reshape_0) + } + + TODO (rdar://59165842): Use expand_dims, squeeze etc to use 0 instead of dynamic reshape with -1. + """ + + def apply(self, prog: Program): + for f in prog.functions.values(): + num_changes = self._remove_symbolic_reshape_block(f) + msg = "remove_symbolic_reshape: changed {} reshapes." + logger.info(msg.format(num_changes)) + + @block_context_manager + def _remove_symbolic_reshape_block(self, block): + num_changes = 0 + for op in list(block.operations): + for b in op.blocks: + num_changes += self._remove_symbolic_reshape_block(b) + if op.op_type != "reshape": + continue + if op.shape.val is not None: + # shape does not contain symbol. + continue + if op.shape.sym_val is None: + # shape is runtime determined. + continue + if len(op.shape.child_ops) > 1: + continue + # Use output shape as `shape` + shape = op.outputs[0].shape + if any_variadic(shape): + msg = ( + "Cannot reshape to variadic from a compile time " + + "shape argument. Variadic shape can only be achieved " + + "via runtime shape argument. 
op: {}"
+                )
+                raise ValueError(msg.format(op))
+            num_symbols = num_symbolic(shape)
+            if num_symbols > 1:
+                continue
+            # Convert the one symbol to -1
+            integer_shape = [-1 if is_symbolic(i) else i for i in shape]
+            shape_const = mb.const(
+                val=integer_shape,
+                name=op.shape.name + "x",
+                before_op=op,
+            )
+            reshaped = mb.reshape(x=op.x, shape=shape_const, name=op.name, before_op=op)
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=reshaped
+            )
+            # Remove all the ops at once
+            block.remove_ops([op, op.shape.op])
+            num_changes += 1
+        return num_changes
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py
new file mode 100644
index 00000000..afbc88ee
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class topological_reorder(AbstractGraphPass):
+    """
+    Topologically reorders the list of operations in a program by placing each operation closer to its
+    first use, or at the end if it is not consumed by any other operation.
+
+    Currently, this pass reorders only ``cast`` and ``transpose`` operations.
+
+    .. code-block::
+
+        # Example: input program
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+            x3 = mb.log(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+        } -> x2, x4, x7, x8
+
+        # After moving `cast` ops, it becomes
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x3 = mb.log(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+        } -> x2, x4, x7, x8
+
+        # After moving `transpose` ops, it becomes
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x3 = mb.log(x=x)
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+        } -> x2, x4, x7, x8
+    """
+
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            self._move_operations_to_the_end_block(f, ["cast", "transpose"])
+
+    @staticmethod
+    @block_context_manager
+    def _move_operations_to_the_end_block(block, op_type_to_move):
+        # Moves ops with `op_type_to_move` in `block.operations` (list) to the end of the program.
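+        # (Each matched op is re-created right before the first consumer of one of
+        # its outputs, or at the end of the block if it has no consumer; the
+        # original op is deleted once the whole block has been visited.)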
+ # Note: ops with `op_type_to_move` and is dead code are moved toward end, which can be eliminated + # later with dead-code-elimination pass. + # + # Inputs: + # - block (mil.Block): block to be modified in-place + # - op_type_to_move (List[str]) + # Returns: + # - set[Var]: Set of vars consumed in block (or returned as block output) + + # first_use maps var to (index, op) representing the first op in block.operation that consumes this var. + first_use = {} # var -> op + ops_to_remove = [] # list of ops to be deleted at the end of pass + for index, op in enumerate(reversed(block.operations[:])): + current_op = op + + if op.op_type in op_type_to_move: + # Mark op for deletion + ops_to_remove.append(op) + + # Create list of operations consuming each output of current operation + first_consumers = [first_use[v] for v in op.outputs if v in first_use] + + before_op = None # None means adding at the end of block + if len(first_consumers) > 0: + # Current op should be moved right before this first consumer of one of it's output. + # 1. Find indices for all the consumer ops of outputs + # 2. Move current op right before first consumer i.e. smallest index in block.operations + first_use_indices = [ + block.operations.index(first_use_op) for first_use_op in first_consumers + ] + before_op = block.operations[min(first_use_indices)] + + # Create new copy of current operation + new_var = getattr(mb, op.op_type)(**op.inputs, before_op=before_op) + + if not isinstance(new_var, (list, tuple)): + new_var = [new_var] + + # Override current_op to be newly created op to ensure `first_use` + # points to newly created op instead of old one. + current_op = new_var[0].op + + for old_output_var, new_output_var in zip(op.outputs, new_var): + block.replace_uses_of_var_after_op( + anchor_op=None, old_var=old_output_var, new_var=new_output_var + ) + + # Collect input vars from sub-block if present + relevant_inputs = set() + for b in current_op.blocks: + relevant_inputs |= topological_reorder._move_operations_to_the_end_block( + b, op_type_to_move + ) + + # Collect vars from operation input + for v in current_op.inputs.values(): + if isinstance(v, (tuple, list)): + relevant_inputs |= set(v) + continue + relevant_inputs.add(v) + + # Mark current op as first use for all the input vars + # a) of it's sub-block + # b) of current op + for v in relevant_inputs: + # input is seen for the first time or + # current_op is first_use i.e. appears before earlier recorded first_use. + # Note: since ops are moved to the end, it's possible that an op is moved right after + # earlier recorded first_use and in such cases, first_use should not be modified. 
+ # + # == Example == + # main( %x: (10, 20, fp32)(Tensor)) { + # block0() { + # %cast_0: (10, 20, fp16)(Tensor) = cast(x= %x, dtype = "fp16", name = "cast_0") + # %cast_1: (10, 20, fp32)(Tensor) = cast(x= %cast_0, dtype = "fp32", name = "cast_1") + # %transpose_0: (20, 10, fp16)(Tensor) = transpose(x= %cast_0, perm = [1, 0], name = "transpose_0") + # %transpose_1: (10, 20, fp16)(Tensor) = transpose(x= %transpose_0, perm = [1, 0], name = "transpose_1") + # } -> (% cast_1, % transpose_1) + # } + # In above example, `%cast_1` will be moved to the end of the block and first_use info for `%cast_0` + # should point to `%transpose_0` and not to `%cast_1` + if v not in first_use or block.operations.index( + first_use[v] + ) > block.operations.index(current_op): + first_use[v] = current_op + + # Remove ops that are reordered + block.remove_ops(ops_to_remove) + + # Returns set of vars consumed in current block + vars_consumed_in_block = set([v for v in first_use]) + vars_consumed_in_block.update(block.outputs) + return vars_consumed_in_block diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py new file mode 100644 index 00000000..64943197 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py @@ -0,0 +1,552 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +This file contains a pass for lowering complex dialect ops into core ops. + +Steps for adding a new complex dialect op: +1. Add a dialect op in complex_dialect_ops.py +2. Add a corresponding lowering function + +In Step 2, notice that when implementing lower functions, we need to specify before_op during +lowering to core ops. It's for both correctness as well as SSA graph's readability, because the +generated core ops should be placed before the ops which were placed after that dialect op. +More specifically, here is the SSA graph before lowering: + block0() { + %1 = complex_dialect_op(data=%input) + %2 = core_op1(x=%1) + %3 = core_op2(x=%2) + } -> (%3) +During lowering `complex_dialect_op`, we want all newly generated core ops are placed before the +`core_op1`. +""" + +import functools +from typing import Callable, Dict, Optional, Tuple + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs.complex_dialect_ops import ( + fft_canonicalize_length_dim, + fft_canonicalize_shapes_dims, +) +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.var import ComplexVar, Var + + +class LowerComplex: + # The map recording each complex dialect op's lowering function. 
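+    # After the registrations below, it holds entries such as
+    # {"complex": _lower_complex, "complex_fft": _lower_complex_fft, ...}.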
+ _lower_map: Dict[str, Callable] = dict() + + @staticmethod + def register_lower_func(op_type: str) -> Callable: + """Register lowering function for complex dialect ops.""" + + def lower_func_wrapper(func): + @functools.wraps(func) + def wrapper_inner(*args, **kwargs): + return func(*args, **kwargs) + + if op_type in LowerComplex._lower_map: + raise ValueError(f"The op {op_type} already got lowering function registered.") + LowerComplex._lower_map[op_type] = func + return wrapper_inner + + return lower_func_wrapper + + @staticmethod + def has_lower_func(op_type: str) -> bool: + """Check if the complex dialect op has corresponding lowering function.""" + return op_type in LowerComplex._lower_map + + @staticmethod + def get_lower_func(op_type: str) -> Callable: + """Get the complex dialect op's lowering function.""" + if not LowerComplex.has_lower_func(op_type): + raise ValueError(f"The op {op_type} doesn't have any lowering function registered.") + return LowerComplex._lower_map[op_type] + + +def _resize_data(input_data: Var, dims: Tuple[int], sizes: Tuple[int], before_op: Operation) -> Var: + """ + For each dim in `dims`, resize the input data size to corresponding size in `sizes`. + If the `size` is smaller than the data's size at `dim`, trim the data to `size`. + If the `size` is larger, pad zeros to make the data reaches `size`. + """ + for (dim, size) in zip(dims, sizes): + if size < input_data.shape[dim]: + indices = mb.range_1d(start=0, end=size, step=1, before_op=before_op) + input_data = mb.gather(x=input_data, indices=indices, axis=dim, before_op=before_op) + elif size > input_data.shape[dim]: + zero_shape = list(input_data.shape) + zero_shape[dim] = size - input_data.shape[dim] + zero_data = mb.fill(shape=zero_shape, value=0.0, before_op=before_op) + input_data = mb.concat(values=[input_data, zero_data], axis=dim, before_op=before_op) + + return input_data + + +def _restore_conj( + input_data: ComplexVar, n: Var, dim: Var, before_op: Operation +) -> Tuple[Var, Var]: + """ + The input is interpreted as a one-sided Hermitian signal in the Fourier domain, as produced + by rfft(). So we need to restore it to the full matrix by following X[i] = conj(X[-i]). + Real part's conj is itself, and imaginary part's conj is negative of the original value. + For odd number n, the last element is also included in mirroring input. 
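+
+    For example, with an even n = 6 the one-sided input holds the bins
+    [X0, X1, X2, X3], and the restored full signal is
+    [X0, X1, X2, X3, conj(X2), conj(X1)]; with an odd n = 5, input
+    [X0, X1, X2] restores to [X0, X1, X2, conj(X2), conj(X1)].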
+ """ + real_data: Var = input_data.real + imag_data: Var = input_data.imag + + size = 2 * (input_data.real.shape[dim.val] - 1) + if n is not None and n.val is not None: + size = n.val + real_data = _resize_data( + real_data, dims=(dim.val,), sizes=(size // 2 + 1,), before_op=before_op + ) + imag_data = _resize_data( + imag_data, dims=(dim.val,), sizes=(size // 2 + 1,), before_op=before_op + ) + + range_end = real_data.shape[dim.val] - 2 if size % 2 == 0 else real_data.shape[dim.val] - 1 + if range_end > 0: + mirror_indices = mb.range_1d(start=range_end, end=0, step=-1, before_op=before_op) + real_part_mirror_values = mb.gather( + x=real_data, indices=mirror_indices, axis=dim.val, before_op=before_op + ) + imag_part_mirror_values = mb.gather( + x=imag_data, indices=mirror_indices, axis=dim.val, before_op=before_op + ) + imag_part_mirror_values = mb.mul(x=imag_part_mirror_values, y=-1.0, before_op=before_op) + + real_data = mb.concat( + values=[real_data, real_part_mirror_values], + axis=dim.val, + before_op=before_op, + ) + imag_data = mb.concat( + values=[imag_data, imag_part_mirror_values], + axis=dim.val, + before_op=before_op, + ) + + return real_data, imag_data + + +def _fft_1d( + input_real: Var, + input_imag: Var, + n: Optional[Var], + dim: Optional[Var], + norm: Optional[Var], + before_op: Operation, + inverse: bool = False, # For inverse FFT. +) -> Tuple[Var, Var]: + """ + 1-D FFT by DFT Matrix Multiplication. + + The core issue is how to derive the DFT matrix. As the DFT matrix is consist of different powers + of `w`, where w=e^(2pi/N i), we need to separate the real and imaginary part of w. To achieve + that, we need to find a way to construct the following matrix (from the power of `w` in DFT): + 0 0 0 ... 0 + 0 1 2 ... N-1 + 0 2 4 ... 2(N-1) + ... .... ... + 0 N-1 2(N-1) ... (N-1)(N-1) + This matrix could be derived by outer product of two range tensors. + + After getting that base matrix, we can take sin and cos to get the corresponding `sin_base` and + `cos_base` matrix. Now based on some math formulas including: + * The addition of complex numbers is: (a+bi)+(c+di)=(a+c)+(b+d)i. + * The multiplication of complex numbers is: (a+bi)(c+di)=ac+adi+bci−bd=(ac−bd)+(ad+bc)i. + * Euler’s formula: e^xi=cosx+isinx. + * Cosine is an even function: cos(−x)=cosx. + * Sine is an odd function: sin(−x)=−(sinx). + We can get + * The real part output is: cos_base * input_real + sin_base * input_imag + * The imaginary part output is: - (sin_base * input_real - cos_base * input_imag) + That's how we calculate the real and imaginary part separately for the FFT. + """ + n, dim = fft_canonicalize_length_dim(input_real, n, dim) + + # Swaps target dim axis to the first axis. + axes = list(range(len(input_real.shape))) + axes[0] = dim + axes[dim] = 0 + transposed_input_real = mb.transpose(x=input_real, perm=axes, before_op=before_op) + transposed_input_imag = mb.transpose(x=input_imag, perm=axes, before_op=before_op) + + # Trim or pad input according to n. + transposed_input_real = _resize_data( + input_data=transposed_input_real, + dims=(0,), + sizes=(n,), + before_op=before_op, + ) + transposed_input_imag = _resize_data( + input_data=transposed_input_imag, + dims=(0,), + sizes=(n,), + before_op=before_op, + ) + + # Calculate DFT matrix. 
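+    # Equivalent NumPy sketch of the matrix built below (illustration only,
+    # not executed here):
+    #   k = np.arange(N)
+    #   base = np.outer(k, k) * (2 * np.pi / N)
+    #   cos_base, sin_base = np.cos(base), np.sin(base)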
+ original_shape = transposed_input_real.shape + N = transposed_input_real.shape[0] + reshaped_input_real = mb.reshape(x=transposed_input_real, shape=[N, -1], before_op=before_op) + reshaped_input_imag = mb.reshape(x=transposed_input_imag, shape=[N, -1], before_op=before_op) + tmp = mb.range_1d(start=0, end=N, step=1, before_op=before_op) + # Use MIL ops to calculate base = torch.outer(tmp, tmp) * (2 * torch.pi / N). + tmp_x = mb.reshape(x=tmp, shape=[-1, 1], before_op=before_op) + tmp_y = mb.reshape(x=tmp, shape=[1, -1], before_op=before_op) + base = mb.matmul(x=tmp_x, y=tmp_y, before_op=before_op) + base = mb.cast(x=base, dtype="fp32", before_op=before_op) + base = mb.mul(x=base, y=2 * np.pi, before_op=before_op) + N = mb.cast(x=N, dtype="fp32", before_op=before_op) + base = mb.real_div(x=base, y=N, before_op=before_op) + # Get real part and imaginary part separately. + cos_base = mb.cos(x=base, before_op=before_op) + sin_base = mb.sin(x=base, before_op=before_op) + + if not inverse: + real_part = mb.add( + x=mb.matmul(x=cos_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=sin_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.sub( + x=mb.matmul(x=sin_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=cos_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.mul(x=imag_part, y=-1.0, before_op=before_op) + else: + real_part = mb.sub( + x=mb.matmul(x=cos_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=sin_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.add( + x=mb.matmul(x=sin_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=cos_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + + real_part = mb.reshape(x=real_part, shape=original_shape, before_op=before_op) + imag_part = mb.reshape(x=imag_part, shape=original_shape, before_op=before_op) + + # Swaps dim back. + real_part = mb.transpose(x=real_part, perm=axes, before_op=before_op) + imag_part = mb.transpose(x=imag_part, perm=axes, before_op=before_op) + + # Normalization if needed. + apply_scale = False + scale = 1 + if norm.val is not None: + # For FFT, "forward" means normalize 1/N, while in IFFT, "backward" means normalize 1/N. + if (not inverse) and (norm.val in ["forward", "ortho"]): + apply_scale = True + scale = N if norm.val == "forward" else mb.sqrt(x=N, before_op=before_op) + if inverse and (norm.val in ["backward", "ortho"]): + apply_scale = True + scale = N if norm.val == "backward" else mb.sqrt(x=N, before_op=before_op) + if apply_scale: + real_part = mb.real_div(x=real_part, y=scale, before_op=before_op) + imag_part = mb.real_div(x=imag_part, y=scale, before_op=before_op) + + return real_part, imag_part + + +def _rfft_1d( + input_real: Var, + n: Optional[Var], + dim: Optional[Var], + norm: Optional[Var], + before_op: Operation, +) -> Tuple[Var, Var]: + """ + It's similar to fft, but as the input is real data, the redundant info (the conjugate part) is + removed in the result. 
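+
+    For example, a length-4 real signal has the full FFT bins
+    [X0, X1, X2, conj(X1)]; rfft keeps only the first n // 2 + 1 = 3 bins,
+    [X0, X1, X2].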
+ """ + input_imag = mb.fill( + shape=mb.shape(x=input_real, before_op=before_op), + value=0.0, + before_op=before_op, + ) + real_data, imag_data = _fft_1d(input_real, input_imag, n, dim, norm, before_op=before_op) + remain_len = real_data.shape[dim.val] // 2 + 1 + remain_indices = mb.range_1d(start=0, end=remain_len, step=1, before_op=before_op) + real_data = mb.gather(x=real_data, indices=remain_indices, axis=dim.val, before_op=before_op) + imag_data = mb.gather(x=imag_data, indices=remain_indices, axis=dim.val, before_op=before_op) + + return real_data, imag_data + + +def _wrap_complex_output(original_output: Var, real_data: Var, imag_data: Var) -> ComplexVar: + return ComplexVar( + name=original_output.name + "_lowered", + sym_type=original_output.sym_type, + real=real_data, + imag=imag_data, + ) + + +@LowerComplex.register_lower_func(op_type="complex") +def _lower_complex(op: Operation): + return _wrap_complex_output(op.outputs[0], op.real_data, op.imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_real") +def _lower_complex_real(op: Operation): + complex_input: ComplexVar = op.data + # Use an identity op to avoid the block's input name inconsistency issue. If we directly use + # complex_input.real, the var's name could be inconsistent with the block's input name. + result = mb.identity(x=complex_input.real, before_op=op) + return result + + +@LowerComplex.register_lower_func(op_type="complex_imag") +def _lower_complex_imag(op: Operation): + complex_input: ComplexVar = op.data + # Use an identity op to avoid the block's input name inconsistency issue. If we directly use + # complex_input.imag, the var's name could be inconsistent with the block's input name. + result = mb.identity(x=complex_input.imag, before_op=op) + return result + + +@LowerComplex.register_lower_func(op_type="complex_fft") +def _lower_complex_fft(op: Operation): + if types.is_complex(op.data.dtype): + real_data = op.data.real + imag_data = op.data.imag + else: + real_data = op.data + imag_data = mb.fill( + shape=mb.shape(x=real_data, before_op=op), + value=mb.cast( + x=mb.const(val=0.0, before_op=op), + dtype=real_data.dtype.__name__, + before_op=op, + ), + before_op=op, + ) + real_data, imag_data = _fft_1d( + real_data, + imag_data, + op.n, + op.dim, + op.norm, + before_op=op, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_fftn") +def _lower_complex_fftn(op: Operation): + if types.is_complex(op.data.dtype): + real_data = op.data.real + imag_data = op.data.imag + else: + real_data = op.data + imag_data = mb.fill( + shape=mb.shape(x=real_data, before_op=op), + value=mb.cast( + x=mb.const(val=0.0, before_op=op), + dtype=real_data.dtype.__name__, + before_op=op, + ), + before_op=op, + ) + + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims) + for shape, dim in zip(shapes, dims): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + ) + + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_rfft") +def _lower_complex_rfft(op: Operation): + real_data, imag_data = _rfft_1d(op.data, op.n, op.dim, op.norm, before_op=op) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_rfftn") +def _lower_complex_rfftn(op: Operation): + shapes, dims = 
fft_canonicalize_shapes_dims(op.data, op.shapes, op.dims) + real_data, imag_data = _rfft_1d( + op.data, + mb.const(val=shapes[-1], before_op=op), + mb.const(val=dims[-1], before_op=op), + op.norm, + before_op=op, + ) + for shape, dim in zip(shapes[:-1], dims[:-1]): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_ifft") +def _lower_complex_ifft(op: Operation): + real_data, imag_data = _fft_1d( + op.data.real, op.data.imag, op.n, op.dim, op.norm, before_op=op, inverse=True + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_ifftn") +def _lower_complex_ifftn(op: Operation): + real_data = op.data.real + imag_data = op.data.imag + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims) + for shape, dim in zip(shapes, dims): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + inverse=True, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_irfft") +def _lower_complex_irfft(op: Operation): + real_data, imag_data = _restore_conj(op.data, op.n, op.dim, before_op=op) + n, dim = fft_canonicalize_length_dim(op.data, op.n, op.dim, c2r=True) + real_data, imag_data = _fft_1d( + real_data, + imag_data, + mb.const(val=n, before_op=op), + mb.const(val=dim, before_op=op), + op.norm, + before_op=op, + inverse=True, + ) + return real_data + + +@LowerComplex.register_lower_func(op_type="complex_irfftn") +def _lower_complex_irfftn(op: Operation): + real_data = op.data.real + imag_data = op.data.imag + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims, c2r=True) + + # For all but last dim/shape, do N-D IFFT. + for shape, dim in zip(shapes[:-1], dims[:-1]): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + inverse=True, + ) + + # For the last dim/shape, do 1-D IRFFT. 
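+    # (Only the last dim is stored one-sided by rfftn, so only it needs the
+    # conjugate-restoring step before the inverse transform.)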
+    n: Var = mb.const(val=shapes[-1], before_op=op)
+    dim: Var = mb.const(val=dims[-1], before_op=op)
+    real_data, imag_data = _restore_conj(
+        input_data=_wrap_complex_output(op.outputs[0], real_data, imag_data),
+        n=n,
+        dim=dim,
+        before_op=op,
+    )
+    real_data, imag_data = _fft_1d(
+        real_data, imag_data, n, dim, op.norm, before_op=op, inverse=True
+    )
+    real_data = _resize_data(real_data, dims=(dim.val,), sizes=(n.val,), before_op=op)
+
+    return real_data
+
+
+@LowerComplex.register_lower_func(op_type="complex_shape")
+def _lower_complex_shape(op: Operation):
+    return mb.shape(x=op.data.real, before_op=op)
+
+
+def _match_and_replace_dialect_op(block, op):
+    if not LowerComplex.has_lower_func(op.op_type):
+        return False
+
+    lower_res = LowerComplex.get_lower_func(op.op_type)(op)
+
+    if not op.enclosing_block.try_replace_uses_of_var_after_op(
+        anchor_op=op,
+        old_var=op.outputs[0],
+        new_var=lower_res,
+    ):
+        raise ValueError(f"Unable to lower complex dialect op {op}")
+    block.remove_ops([op])
+    return True
+
+
+@block_context_manager
+def _lower_complex_dialect_ops_in_block(block):
+    def help_lower_complex_dialect_ops(block):
+        for op in list(block.operations):
+            if _match_and_replace_dialect_op(block, op):
+                return True
+        return False
+
+    block_changed = True
+    while block_changed:
+        block_changed = help_lower_complex_dialect_ops(block)
+
+
+@register_pass(namespace="common")
+class lower_complex_dialect_ops(AbstractGraphPass):
+    """
+    Identify complex-data-related ops and replace them by using the real and imaginary parts separately.
+    The goal of this pass is to lower complex dialect ops into core ops.
+
+    This pass also checks if the output is complex. As Core ML doesn't support complex data yet,
+    it errors out early when detecting a complex output.
+
+    Input graph (`complex` and `complex_real` are complex dialect ops):
+        %complex_data = complex(real_data=%real_data, imag_data=%imag_data)
+        %real_data = complex_real(data=%complex_data)
+        return %real_data
+
+    Output graph (only core ops, no complex dialect ops):
+        %complex_data_real = identity(x=%real_data)
+        %complex_data_imag = identity(x=%imag_data)
+        %real_data = identity(data=%complex_data_real)
+        return %real_data
+    """
+
+    def apply(self, prog):
+        for block in prog.functions.values():
+            # Early error out for complex data output.
+            for out_var in block.outputs:
+                if types.is_complex(out_var.dtype):
+                    raise ValueError(
+                        "MIL doesn't support complex data as model's output, please "
+                        "extract real and imaginary parts explicitly."
+                    )
+
+            _lower_complex_dialect_ops_in_block(block)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py
new file mode 100644
index 00000000..95ce1ffb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py
@@ -0,0 +1,649 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import ( + fuse_all_blocks, +) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_child_op_type, + _check_var_scalar_value, + _check_var_scalar_value_in_interval, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class fuse_gelu_exact(AbstractGraphPass): + """ + Identify the pattern that corresponds to the exact version of ``gelu``, and replace it with a single + ``gelu`` layer with ``mode=EXACT``. The pattern is ``y = 0.5 * x * (1 + erf (x / srqt (2))``, which + can be represented by one of the following: + + .. code-block:: + + (1) + [...] ----> div (1.414) ---> erf ---> add (1) -----> mul (0.5) ---> mul ---> [...] + | ^ + | | + |------------------------------------------------------------------- + + (2) + [...] ----> div (1.414) ---> erf ---> add (1) -----> mul ---> mul (0.5) ---> [...] + | ^ + | | + |---------------------------------------------------- + + (3) + [...] ----> div (1.414) ---> erf ---> add (1) -----> mul ------> [...] + | ^ + | | + |---------------> mul(0.5) -------------------------- + + All of them are converted to: + [...] ----> gelu (mode=EXACT) ---> [...] + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_gelu_exact_block(f) + + @staticmethod + def _try_to_transform(op, block): + ops_to_remove = [] + if op.x.val is None and op.y.val is None: + return False + + # check either the op is mul(1/sqrt(2)) or real_div(sqrt(2)) + root_var = op.x if op.y.val is not None else op.y + if op.op_type == "real_div": + if not _check_var_scalar_value(op.y, 2**0.5): + return False + elif op.op_type == "mul": + if not ( + _check_var_scalar_value(op.x, 2**-0.5) or _check_var_scalar_value(op.y, 2**-0.5) + ): + return False + ops_to_remove.append(op) + + # check if the child op is erf + if not _check_child_op_type(op, "erf"): + return False + erf_op = list(op.outputs[0].child_ops)[0] + ops_to_remove.append(erf_op) + + # check if the child op is add + if not _check_child_op_type(erf_op, "add"): + return False + add_op = list(erf_op.outputs[0].child_ops)[0] + if not (_check_var_scalar_value(add_op.x, 1) or _check_var_scalar_value(add_op.y, 1)): + return False + ops_to_remove.append(add_op) + + # check if the child op is mul + if not _check_child_op_type(add_op, "mul"): + return False + mul_op = list(add_op.outputs[0].child_ops)[0] + + # now we have two case: + # (1) first mul by 0.5 and by the root var + if _check_var_scalar_value(mul_op.x, 0.5) or _check_var_scalar_value(mul_op.y, 0.5): + ops_to_remove.append(mul_op) + if not _check_child_op_type(mul_op, "mul"): + return False + mul_op_2 = list(mul_op.outputs[0].child_ops)[0] + if not (mul_op_2.x == root_var or mul_op_2.y == root_var): + return False + ops_to_remove.append(mul_op_2) + + # (2) first mul by the root var and then mul by 0.5 + elif mul_op.x == root_var or mul_op.y == root_var: + ops_to_remove.append(mul_op) + if not _check_child_op_type(mul_op, "mul"): + return 
False + mul_op_2 = list(mul_op.outputs[0].child_ops)[0] + if not ( + _check_var_scalar_value(mul_op_2.x, 0.5) or _check_var_scalar_value(mul_op_2.y, 0.5) + ): + return False + ops_to_remove.append(mul_op_2) + + else: + other_parent_op = mul_op.x.op if mul_op.y == add_op.outputs[0] else mul_op.y.op + if other_parent_op.op_type != "mul": + return False + if not ( + _check_var_scalar_value(other_parent_op.x, 0.5) + or _check_var_scalar_value(other_parent_op.y, 0.5) + ): + return False + if not (other_parent_op.x == root_var or other_parent_op.y == root_var): + return False + ops_to_remove.append(other_parent_op) + ops_to_remove.append(mul_op) + mul_op_2 = mul_op + + # check that none of the op in this pattern is connected to the output + # (except the last mul op) + for op in ops_to_remove[:-1]: + for out in op.outputs: + if out in block.outputs: + return False + + # remove all the ops, and replace with a gelu op + out_name = mul_op_2.outputs[0].name + x = mb.gelu(x=root_var, mode="EXACT", name=out_name, before_op=op) + + mul_op_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=mul_op_2, old_var=mul_op_2.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _fuse_gelu_exact_block(self, block): + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_gelu_exact_block(b) + if len(op.blocks) > 0: + # This op can't be real_div or mul + continue + + if op.op_type in ["mul", "real_div"]: + fusion_occurred = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred + + +@register_pass(namespace="common") +class fuse_gelu_tanh_approximation(AbstractGraphPass): + """ + Identify the pattern that corresponds to the ``tanh`` approximate version of ``gelu``, and replace it + with a single ``gelu`` layer with ``mode=TANH_APPROXIMATION``. + + The implementation of this pass uses the generic graph pattern matching and transform algorithm + implemented in ``coremltools.converters.mil.experimental.passes.generic_pass_infrastructure`` and + documented in ``coremltools/converters/mil/experimental/passes/readme.md``. 
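+
+    For reference, the function being detected is the standard tanh approximation
+    of GELU (shown here as illustrative pseudocode, not MIL):
+
+    .. code-block::
+
+        y = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))
+
+    where sqrt(2 / pi) corresponds to the constant 0.79788 checked below.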
+ """ + + def apply(self, prog): + fuse_all_blocks( + ops_arrangement=self.get_gelu_pattern1(), + var_constraints=self.is_var_constraint_satisifed, + transform_pattern=self.transform_pattern, + prog=prog, + ) + + fuse_all_blocks( + ops_arrangement=self.get_gelu_pattern2(), + var_constraints=self.is_var_constraint_satisifed, + transform_pattern=self.transform_pattern, + prog=prog, + ) + + @staticmethod + def is_var_constraint_satisifed(pattern): + + passed = _check_var_scalar_value(pattern.mul.y, 0.5) or _check_var_scalar_value( + pattern.mul.x, 0.5 + ) + passed = passed and _check_var_scalar_value(pattern.pow.y, 3.0) + + passed = passed and ( + _check_var_scalar_value(pattern.mul_1.y, 0.044715) + or _check_var_scalar_value(pattern.mul_1.x, 0.044715) + ) + + passed = passed and ( + _check_var_scalar_value(pattern.mul_2.y, 0.79788) + or _check_var_scalar_value(pattern.mul_2.x, 0.79788) + ) + + passed = passed and ( + _check_var_scalar_value(pattern.add_1.y, 1) + or _check_var_scalar_value(pattern.add_1.x, 1) + ) + + return passed + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a gelu op + out_name = pattern.mul_3.outputs[0].name + x = mb.gelu( + x=pattern.root_var, mode="TANH_APPROXIMATION", name=out_name, before_op=pattern.mul + ) + + pattern.mul_3.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.mul_3, old_var=pattern.mul_3.outputs[0], new_var=x + ) + + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_gelu_pattern1(): + """ + ``y = x * (0.5 * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1))`` + + .. code-block:: + + [...] -----> pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) ----> mul (0.5) -----> mul ---> [...] + | ^ ^ + | | | + |------------------------------------------------------------------------------------------------------------------------ + + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(), get_new_symbol()])), + ] + ) + def gelu_to_detect_1(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + pow = mb.pow(x=x, y=3.0, name="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x=0.5, y=add_1, name="mul") + mul_3 = mb.mul(x=mul, y=x, name="mul_3") + return mul_3 + + return gelu_to_detect_1 + + @staticmethod + def get_gelu_pattern2(): + """ + ``y = (0.5 * x) * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1)`` + + .. code-block:: + + --------------------------------------------------------------------------------------------------------- + ^ | + | V + [...] -----> mul(0.5) pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) -----> mul ---> [...] 
+ | ^ ^ + | | | + |------------------------------------------------------------ + + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(), get_new_symbol()])), + ] + ) + def gelu_to_detect_2(x): + pow = mb.pow(x=x, y=3.0, name="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x=0.5, y=x, name="mul") + mul_3 = mb.mul(x=mul, y=add_1, name="mul_3") + return mul_3 + + return gelu_to_detect_2 + + +@register_pass(namespace="common") +class fuse_leaky_relu(AbstractGraphPass): + """ + Detect the ``mul`` ---> ``max`` pattern than can be mapped to ``leaky_relu``. + + .. code-block:: + + In code form: + ------------ + + Input: + %2 = const(value = alpha) # where 0 <= alpha <= 1 + %3 = mul(%1, %2) # alpha * x + %4 = max(%3, %1) # max(alpha * x, x) + + Output: + %4 = leaky_relu(x=%1, alpha=%2) + + + In graphical form: + ----------------- + + Input graph: + + const (val = alpha) + | + input ----> mul ---------------> maximum -----------> output + | | + |---------------------------------- + + Output graph: + + input --------> leaky_relu ---------> output + + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_leaky_relu_block(f) + + @staticmethod + def _try_to_transform(mul_op, block): + + ops_to_remove = [] + + # check that one of the inputs of the mul op is a constant that is between 0 and 1 + if _check_var_scalar_value_in_interval(mul_op.x, 0, 1): + alpha_input_var = mul_op.x + parent_var = mul_op.y + elif _check_var_scalar_value_in_interval(mul_op.y, 0, 1): + alpha_input_var = mul_op.y + parent_var = mul_op.x + else: + return False + + # check that output of mul is not a block output + if mul_op.outputs[0] in block.outputs: + return False + ops_to_remove.append(mul_op) + + # check if the child op of the mul op is maximum + if not _check_child_op_type(mul_op, "maximum"): + return False + + # check that the other input of the max op is same as the parent of the mul op + max_op = list(mul_op.outputs[0].child_ops)[0] + if not ( + (max_op.x == mul_op.outputs[0] and max_op.y == parent_var) + or (max_op.y == mul_op.outputs[0] and max_op.x == parent_var) + ): + return False + ops_to_remove.append(max_op) + + # remove all the ops, and replace with a leaky relu op + out_name = max_op.outputs[0].name + x = mb.leaky_relu(x=parent_var, alpha=alpha_input_var.val, name=out_name, before_op=max_op) + max_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=max_op, old_var=max_op.outputs[0], new_var=x + ) + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _fuse_leaky_relu_block(self, block): + fusion_status = False + for i, op in enumerate(list(block.operations)): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_leaky_relu_block(b) + if len(op.blocks) > 0: + continue + + # start pattern match if mul op is encountered + if op.op_type == "mul": + fusion_status = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. 
+ if fusion_status: + return fusion_status + return fusion_status + + +class FusePreluPattern1: + @staticmethod + def is_var_constraint_satisifed(pattern): + # input must be rank 4 + if pattern.root_var.rank != 4: + return False + # output must be rank 4 + if pattern.out_op.outputs[0].rank != 4: + return False + if not ( + _check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1) + ): + return False + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + elif pattern.alpha_mul.y.val is not None: + alpha = pattern.alpha_mul.y.val + else: + return False + # alpha must be of shape (1, C, 1, 1) or (C, 1, 1) + if len(alpha.shape) not in (3, 4): + return False + if alpha.size != alpha.shape[-3]: + return False + + return True + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a prelu op + out_var = pattern.out_op.outputs[0] + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + else: + alpha = pattern.alpha_mul.y.val + + alpha_vector = -1 * alpha.flatten() + x = mb.prelu( + x=pattern.root_var, alpha=alpha_vector, name=out_var.name, before_op=pattern.out_op + ) + pattern.out_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.out_op, old_var=out_var, new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_prelu_pattern(): + """ + ``y = a * relu(-1 * x) + relu(x)`` + + When ``x`` is rank 4, and ``a`` is of shape ``(1, C, 1, 1)`` or ``(C, 1, 1)``, + this is equivalent to ``prelu`` with ``alpha = -a.flatten()``. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec( + shape=([get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()]) + ), + ] + ) + def prelu_pattern(x): + return fuse_prelu._prelu_pattern(x) + + return prelu_pattern + + +class FusePreluPattern2: + @staticmethod + def is_var_constraint_satisifed(pattern): + perm = pattern.transpose.perm.val + if not np.array_equal(perm, np.array([0, 2, 3, 1])): + return False + # output must be rank 4 + if pattern.out_op.outputs[0].rank != 4: + return False + if not ( + _check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1) + ): + return False + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + elif pattern.alpha_mul.y.val is not None: + alpha = pattern.alpha_mul.y.val + else: + return False + # alpha must be of shape (C,) or (1,C) or (1,1,C) or (1,1,1,C) + if alpha.size != alpha.shape[-1]: + return False + + return True + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a prelu op + transpose op + perm = pattern.transpose.perm.val + out_var = pattern.out_op.outputs[0] + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + else: + alpha = pattern.alpha_mul.y.val + + alpha_vector = -1 * alpha.flatten() + x = mb.prelu(x=pattern.root_var, alpha=alpha_vector, before_op=pattern.out_op) + x = mb.transpose(x=x, perm=perm, name=out_var.name, before_op=pattern.out_op) + pattern.out_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.out_op, old_var=out_var, new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_prelu_pattern(): + """ + ``x1 = transpose(perm=(0,2,3,1))(x)`` + + ``y = a * relu(-1 * x1) + relu(x1)`` + + When ``x`` is rank 4, and ``a`` is of shape (``C,)``, ``(1, C)``, ``(1,1,C)``, or ``(1,1,1,C)``, + this is equivalent to ``prelu`` 
with ``alpha = -a.flatten()``, followed by a ``transpose`` + with ``perm (0,2,3,1)``. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec( + shape=([get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()]) + ), + ] + ) + def prelu_pattern(x): + # perm value can be anything, it will be checked in "is_var_constraint_satisifed" method + x = mb.transpose(x=x, perm=[0, 1, 2, 3], name="transpose") + return fuse_prelu._prelu_pattern(x) + + return prelu_pattern + + +@register_pass(namespace="common") +class fuse_prelu(AbstractGraphPass): + """ + Detect the following patterns that can be mapped to a ``prelu`` op. + Essentially, the ``prelu`` op can be broken down into the following ops: + + ``y = a * relu(-1 * x) + relu(x)`` + + .. code-block:: + + Pattern 1: + + + | ------------> relu --------------------| + | V + x (BCHW) ------| add -----> y (BCHW) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(name=a, shape=(C,1,1) or (1,C,1,1)) + + This will be mapped to: + x (BCHW) ------> prelu(alpha=a, shape=(C,)) ---------> y (BCHW) + + + Pattern 2: + + | ------------> relu --------------------| + | V + x (BCHW) -->transpose(BHWC)---->| add -----> y (BHWC) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(shape=(C,) or (1,C) or (1,1,C) or (1,1,1,C)) + + This will be mapped to: + x (BCHW) ------> prelu ---------> transpose ------> y (BHWC) + """ + + def apply(self, prog): + for pattern in (FusePreluPattern1, FusePreluPattern2): + fuse_all_blocks( + ops_arrangement=pattern.get_prelu_pattern(), + var_constraints=pattern.is_var_constraint_satisifed, + transform_pattern=pattern.transform_pattern, + prog=prog, + ) + + @staticmethod + def _prelu_pattern(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + neg = mb.mul(x=x, y=-1.0, name="neg") + relu1 = mb.relu(x=neg, name="relu1") + # Use any constant here to match, rank and shape will be verified in + # `is_var_constraint_satisifed`. + mul = mb.mul(x=relu1, y=np.random.rand(2, 2, 2, 2), name="alpha_mul") + relu2 = mb.relu(x=x, name="relu2") + out = mb.add(x=relu2, y=mul, name="out_op") + return out + + +@register_pass(namespace="common") +class prelu_to_lrelu(AbstractGraphPass): + """ + If ``prelu`` has the same leakage factor across all channels, it will be converted to ``leaky_relu``. + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._prelu_to_lrelu_block(f) + + @block_context_manager + def _prelu_to_lrelu_block(self, block): + for op in list(block.operations): + for b in op.blocks: + self._prelu_to_lrelu_block(b) + if len(op.blocks) > 0: + # This op can't be prelu. 
+ continue + + if op.op_type == "prelu": + alpha_val = op.alpha.val + common_leakage_factor = True + for c in range(1, op.alpha.val.shape[0]): + if alpha_val[c] != alpha_val[0]: + common_leakage_factor = False + break + if common_leakage_factor: + lrelu_out = mb.leaky_relu( + x=op.x, alpha=alpha_val[0], name=op.outputs[0].name, before_op=op + ) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=lrelu_out + ) + block.remove_ops([op]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py new file mode 100644 index 00000000..a6359d25 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py @@ -0,0 +1,1142 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_child_op_type, + _check_no_output_connection, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +@register_pass(namespace="common") +class add_conv_transpose_output_shape(AbstractGraphPass): + """ + The ``conv_transpose`` input ``output_shape`` is an optional input. + Since we can infer the output shape from ``type_inference``, we add + ``output_shape`` input whenever it is known to be constant at + compile time. For example: + + .. code-block:: + + Given: + %1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input. + + Result: + %2: (3, i32) = const(val=[1,5,39]) + %3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2) + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._handle_block(f) + + @staticmethod + def _match_pattern(op): + return ( + op.op_type == "conv_transpose" + and op.output_shape is None + and not any_symbolic(op.outputs[0].shape) + ) + + @block_context_manager + def _handle_block(self, block): + for op in list(block.operations): + for b in op.blocks: + self._handle_block(b) + + if not self._match_pattern(op): + continue + + # matched pattern + x = mb.conv_transpose( + **op.inputs, + output_shape=op.outputs[0].shape, + name=op.name + "_has_output_shape", + before_op=op, + ) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=x + ) + block.remove_ops([op]) + + +@register_pass(namespace="common") +class compose_conv1d(AbstractGraphPass): + """ + In `TensorFlow `_, + ``tf.keras.layers.Conv1D`` is a composite op: + + .. code-block:: + + expand a dummy dim -> Conv2D -> squeeze the dummy dim + + In `PyTorch `_, + this is also true for some backends (``mkldnn`` and ``xpu``). + + This decomposition wrecks the coremltools ``conv1d`` graph passes, + so we should recompose the fragments back to MIL ``conv``, which natively supports ``conv1d``: + + .. 
code-block:: + + Pattern 1: + Given: + %2 = expand_dims(%1, axes=-2) or expand_dims(%1, axes=2), %1.rank = 3 + %3 = conv(%2) + %4 = squeeze(%3, axes=-2) or squeeze(%3, axes=2) + ... + + Result: + %4 = conv(%1) + ... + + Pattern 2 (TensorFlow channel_last): + Given: + %2 = expand_dims(%1, axes=-3) or expand_dims(%1, axes=1), %1.rank = 3 + %3 = transpose(%2, perm=(0, 3, 1, 2)) + %4 = conv(%3) + %5 = transpose(%4, perm=(0, 2, 3, 1)) + %6 = squeeze(%5, axes=-3) or squeeze(%5, axes=1) + ... + + Result: + %3 = transpose(%1, perm=(0, 2, 1)) + %4 = conv(%3) + %6 = transpose(%4, perm=(0, 2, 1)) + ... + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._compose_conv1d_block(f) + + @block_context_manager + def _compose_conv1d_block(self, block: Block): + def help_compose_conv1d_block(block: Block) -> bool: + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = help_compose_conv1d_block(b) + + # must start with expanding a 3-D tensor, + # who has batch, channel, length dimensions + if op.op_type != "expand_dims" or op.x.rank != 3: + continue + + # try pattern `expand_dim` -> `conv2d` -> `squeeze` + if self._try_match_and_transform_pattern(op, block): + # has to break as the downstream iterator is affected + return True + + # try pattern `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze` + if self._try_match_and_transform_pattern_channel_last(op, block): + # has to break as the downstream iterator is affected + return True + + return False + + block_changed = True + while block_changed: + block_changed = help_compose_conv1d_block(block) + + def _try_match_and_transform_pattern(self, expand_op: Operation, block: Block) -> bool: + """ + identify the pattern: `expand_dim` -> `conv2d` -> `squeeze` + """ + # abort composition if dummy dimension is not added as height + if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-2, 2): + return False + + # `expand_dims` -> `conv` + if not _check_child_op_type(expand_op, "conv"): + return False + conv_op = expand_op.outputs[0].child_ops[0] + + # `conv` -> `squeeze` + if not _check_child_op_type(conv_op, "squeeze"): + return False + squeeze_op = conv_op.outputs[0].child_ops[0] + + # abort composition if not squeezing the dummy height + if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-2, 2): + return False + + # everything looks good + return self._try_apply_transform(expand_op, conv_op, squeeze_op, block) + + def _try_match_and_transform_pattern_channel_last( + self, expand_op: Operation, block: Block + ) -> bool: + """ + identify the pattern: `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze` + """ + # abort composition if dummy dimension is not added as height + if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-3, 1): + return False + + # `expand_dims` -> `transpose` + if not _check_child_op_type(expand_op, "transpose"): + return False + transpose1_op = expand_op.outputs[0].child_ops[0] + + # abort composition if permutation is not (0, 3, 1, 2) + perm1 = transpose1_op.perm.val.copy() + perm1[np.where(perm1 < 0)] += 4 + if np.any(perm1 != (0, 3, 1, 2)): + return False + + # `transpose` -> `conv` + if not _check_child_op_type(transpose1_op, "conv"): + return False + conv_op = transpose1_op.outputs[0].child_ops[0] + + # `conv` -> `transpose` + if not _check_child_op_type(conv_op, "transpose"): + return False + transpose2_op = conv_op.outputs[0].child_ops[0] + + # abort composition if permutation is not (0, 2, 
3, 1) + perm2 = transpose2_op.perm.val.copy() + perm2[np.where(perm2 < 0)] += 4 + if np.any(perm2 != (0, 2, 3, 1)): + return False + + # `transpose` -> `squeeze` + if not _check_child_op_type(transpose2_op, "squeeze"): + return False + squeeze_op = transpose2_op.outputs[0].child_ops[0] + + # abort composition if not squeezing the dummy height + if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-3, 1): + return False + + # everything looks good + return self._try_apply_transform_channel_last( + expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op, block + ) + + @staticmethod + def _try_apply_transform( + expand_op: Operation, conv_op: Operation, squeeze_op: Operation, block: Block + ) -> bool: + ops_to_remove = [expand_op, conv_op, squeeze_op] + if not _check_no_output_connection(block, ops_to_remove): + return False + + # prepare `conv1d` + conv_kwargs = {"name": squeeze_op.outputs[0].name, "before_op": conv_op} + + # inherit `x` from `expand_dim` + conv_kwargs["x"] = expand_op.x + + # inherit `pad_type`, `groups`, `bias` from `conv2d` + conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val + conv_kwargs["groups"] = conv_op.inputs["groups"].val + bias = conv_op.inputs.get("bias", None) + if bias is not None: + conv_kwargs["bias"] = bias + + # squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d` + conv_kwargs["weight"] = mb.squeeze( + x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op + ) + conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],) + conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1]) + conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],) + + # compose `conv1d` + out = mb.conv(**conv_kwargs) + + # try replacing `expand_dim` -> `conv2d` -> `squeeze` output + # with the new `conv1d` output + if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=out + ): + # remove `expand_dim` -> `conv2d` -> `squeeze` + block.remove_ops(ops_to_remove) + return True + return False + + @staticmethod + def _try_apply_transform_channel_last( + expand_op: Operation, + transpose1_op: Operation, + conv_op: Operation, + transpose2_op: Operation, + squeeze_op: Operation, + block: Block, + ) -> bool: + ops_to_remove = [expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op] + if not _check_no_output_connection(block, ops_to_remove): + return False + + # create `transpose1` + transpose1_out = mb.transpose( + x=expand_op.x, perm=(0, 2, 1), name=transpose1_op.outputs[0].name, before_op=expand_op + ) + + # prepare `conv1d` + conv_kwargs = {"name": conv_op.outputs[0].name, "x": transpose1_out, "before_op": conv_op} + + # inherit `pad_type`, `groups`, `bias` from `conv2d` + conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val + conv_kwargs["groups"] = conv_op.inputs["groups"].val + bias = conv_op.inputs.get("bias", None) + if bias is not None: + conv_kwargs["bias"] = bias + + # squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d` + conv_kwargs["weight"] = mb.squeeze( + x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op + ) + conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],) + conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1]) + conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],) + + # compose `conv1d` + conv_out = mb.conv(**conv_kwargs) + + # create `transpose2` + transpose2_out = mb.transpose( + x=conv_out, perm=(0, 2, 1), name=squeeze_op.outputs[0].name, 
+            before_op=transpose2_op
+        )
+
+        # try replacing `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze` output
+        # with the new `transpose` -> `conv1d` -> `transpose` output
+        if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=transpose2_out
+        ):
+            # remove `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
+            block.remove_ops(ops_to_remove)
+            return True
+        return False
+
+
+@register_pass(namespace="common")
+class fuse_conv_batchnorm(AbstractGraphPass):
+    """
+    Fuse the following ``batch_norm`` layer into ``conv`` and ``conv_transpose``.
+    That is, convert ``conv + batch_norm`` to ``conv``, by modifying the weight and bias in the ``conv`` layer:
+    per output channel, ``new_weight = weight * gamma / sqrt(variance + epsilon)`` and
+    ``new_bias = (bias - mean) * gamma / sqrt(variance + epsilon) + beta``.
+
+    .. code-block::
+
+        Given:
+            %2 = conv(%1)
+            ...
+            %3 = batch_norm(%2)
+            ...
+
+        Result:
+            %3 = conv(%1)
+            ...
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_conv_batchnorm_block(f)
+
+    @staticmethod
+    def _try_to_transform(conv_op, bn_op):
+        # get parameters from batch_norm layer
+        gamma = bn_op.gamma.val
+        beta = bn_op.beta.val
+        mean = bn_op.mean.val
+        variance = bn_op.variance.val
+        epsilon = bn_op.epsilon.val
+
+        # get weight, bias, and groups from conv layer
+        if conv_op.weight.val is None:
+            return False
+        conv_weight = conv_op.weight.val
+        conv_bias = conv_op.bias
+        groups = conv_op.groups.val
+
+        # get type of the conv layer
+        is_deconv = conv_op.op_type == "conv_transpose"
+        # The deconv weight transpose axes are determined by the dimension of the convolution:
+        # Conv1d should be [1, 0, 2], Conv2d should be [1, 0, 2, 3], Conv3d should be [1, 0, 2, 3, 4]
+        if not 3 <= len(conv_weight.shape) <= 5:
+            raise AssertionError(
+                f"Only supports Conv1/2/3d, which means the weight's dimension should "
+                f"be between 3 and 5, but got weight with {len(conv_weight.shape)} "
+                f"dimensions."
" + ) + deconv_weight_transpose_axes = [1, 0] + [axis for axis in range(2, len(conv_weight.shape))] + + # D_in denotes the spatial dimensions for conv kernel weight + # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in] + # for conv, conv_weight has shape [Cout, Cin / groups, *D_in] + if is_deconv: + Cout = conv_weight.shape[1] * groups + Cin = conv_weight.shape[0] + else: + Cout = conv_weight.shape[0] + Cin = conv_weight.shape[1] * groups + + # get the type of the conv weight + conv_weight_type = conv_weight.dtype + + # create bias for conv if not exist + if conv_bias is None: + conv_bias = np.zeros(Cout) + else: + conv_bias = conv_bias.val + conv_bias = conv_bias.astype(conv_weight_type) + + # get the original shape of weight and bias + origin_weight_shape = conv_weight.shape + origin_bias_shape = conv_bias.shape + + # update the weight for conv layer + new_conv_weight = [] + new_conv_bias = [] + + if is_deconv: + conv_weight = np.transpose(conv_weight, deconv_weight_transpose_axes) + conv_weight = np.reshape( + conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]) + ) + + for i in range(Cout): + # get batch norm parameters for each channel + _gamma = gamma[i] + _beta = beta[i] + _mean = mean[i] + _variance = variance[i] + _scale = _gamma / np.sqrt(_variance + epsilon) + + # get conv weight and bias for each channel + _conv_weight = conv_weight[i] + _conv_bias = conv_bias[i] + + # update the conv weight and bias + _conv_weight = _conv_weight * _scale + _conv_bias = _scale * (_conv_bias - _mean) + _beta + new_conv_weight.append(_conv_weight) + new_conv_bias.append(_conv_bias) + + new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type) + new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type) + + if is_deconv: + new_conv_weight = np.reshape( + new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]) + ) + new_conv_weight = np.transpose(new_conv_weight, deconv_weight_transpose_axes) + + # make sure the updated weight and bias have the same shape as the original ones + if new_conv_weight.shape != origin_weight_shape: + raise AssertionError( + "conv weight should have the same shape before and after the fuse_" + "conv_batchnorm pass. " + ) + if new_conv_bias.shape != origin_bias_shape: + raise AssertionError( + "conv bias should have the same shape before and after the fuse_" + "conv_batchnorm pass. 
" + ) + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = bn_op.outputs[0].name + conv_kargs = { + "weight": new_conv_weight, + "bias": new_conv_bias, + "name": out_name, + "before_op": conv_op, + } + + for k, v in conv_op.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + if bn_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=bn_op, + old_var=bn_op.outputs[0], + new_var=x, + ): + bn_op.enclosing_block.remove_ops([conv_op, bn_op]) + return True + return False + + @block_context_manager + def _fuse_conv_batchnorm_block(self, block): + def _match_pattern(op): + if op.op_type == "conv" or op.op_type == "conv_transpose": + # abort fusion if op output is also a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + # find batch_norm op + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + bn_op_candidate = list(child_ops)[0] + if bn_op_candidate.op_type == "batch_norm": + return bn_op_candidate + return None + + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_conv_batchnorm_block(b) + if len(op.blocks) > 0: + # This op can't be conv or conv_transpose + continue + + bn_op = _match_pattern(op) + if bn_op is not None: + fusion_occurred = self._try_to_transform(op, bn_op) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred + + +@register_pass(namespace="common") +class fuse_conv_bias(AbstractGraphPass): + """ + Fold ``add``/``sub`` into ``bias`` of ``conv`` and ``conv_transpose``. + That is, convert ``conv + add/sub`` to ``conv``, when ``add``/``sub`` is adding a constant. + + Two patterns are supported: + + .. code-block:: + + Pattern 1: + Given: + %2 = conv(%1) + ... + %3 = add(%2, constant) # where constant has shape (1,C,1)/(C,1) for 1d conv, (1,C,1,1)/(C,1,1) for 2d conv etc + ... + + Result: + %3 = conv(%1) + ... + + + Pattern 2: + Given: + %2 = conv(%1) + %3 = transpose(%2) + ... + %4 = add(%3, constant) # where constant has a broacasable shape + ... + + Result: + %2 = conv(%1) + %4 = transpose(%2) + ... 
+ """ + + child_op_types = ["add", "sub"] + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_conv_bias_block(f) + + def _match_pattern(self, op): + if op.op_type == "conv" or op.op_type == "conv_transpose": + # abort fusion if op output is also a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + # find add + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + add_op_candidate = list(child_ops)[0] + if add_op_candidate.op_type in self.child_op_types: + return add_op_candidate + return None + + @staticmethod + def _try_to_transform_transpose_pattern(conv_op, block): + + ops_to_remove = [] + + # conv layer + if conv_op.op_type != "conv" and conv_op.op_type != "conv_transpose": + return False + is_deconv = conv_op.op_type == "conv_transpose" + ops_to_remove.append(conv_op) + + # transpose layer + if not _check_child_op_type(conv_op, "transpose"): + return False + transpose_op = list(conv_op.outputs[0].child_ops)[0] + ops_to_remove.append(transpose_op) + + # add/sub layer + if not _check_child_op_type(transpose_op, "add") and not _check_child_op_type( + transpose_op, "sub" + ): + return False + add_or_sub_op = list(transpose_op.outputs[0].child_ops)[0] + + ops_to_remove.append(add_or_sub_op) + + # get the bias + if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None: + return False + bias = add_or_sub_op.x.val if add_or_sub_op.x.val is not None else add_or_sub_op.y.val + is_first_input = add_or_sub_op.y.val is not None + is_sub = add_or_sub_op.op_type == "sub" + + # get the conv bias/weight + conv_shape = conv_op.outputs[0].shape + Cout = conv_shape[1] + conv_weight = conv_op.weight.val + conv_weight_type = conv_weight.dtype + conv_bias = ( + np.zeros(Cout).astype(conv_weight_type) if conv_op.bias is None else conv_op.bias.val + ) + + # check if the bias is compatible for fusion + is_bias_scalar = True + if isinstance(bias, np.ndarray): + if bias.shape == (): + bias = bias.tolist() + elif np.prod(bias.shape) == 1: + bias = np.squeeze(bias).tolist() + else: + is_bias_scalar = False + + if not is_bias_scalar: + if np.prod(bias.shape) != Cout: + return False + rank = transpose_op.outputs[0].rank + cout_dim = transpose_op.perm.val.tolist().index(1) - rank + if bias.shape[cout_dim] != Cout: + return False + bias = np.reshape(bias, (Cout)) + + # compute the new bias + if is_sub: + if is_first_input: + bias = -bias + else: + conv_bias = -conv_bias + + new_bias = conv_bias + bias + + # compute the new weight + if is_sub and not is_first_input: + new_weight = -conv_weight + else: + new_weight = conv_weight + + if not _check_no_output_connection(block, ops_to_remove): + return False + + # create a new conv op with the new weight, bias value, copying rest of the attributes + conv_kargs = {"weight": new_weight, "bias": new_bias, "before_op": conv_op} + + for k, v in conv_op.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + # create a new transpose op + out_name = add_or_sub_op.outputs[0].name + tranpose_kargs = {"x": x, "name": out_name, "before_op": transpose_op} + for k, v in transpose_op.inputs.items(): + if k == "x": + continue + tranpose_kargs[k] = v + x = mb.transpose(**tranpose_kargs) + + if add_or_sub_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=add_or_sub_op, + old_var=add_or_sub_op.outputs[0], + new_var=x, + ): + 
add_or_sub_op.enclosing_block.remove_ops(ops_to_remove) + return True + return False + + @staticmethod + def _try_to_transform(conv_op, add_op): + + if add_op.op_type == "sub": + bias_var = add_op.y + else: + bias_var = add_op.x if add_op.x.val is not None else add_op.y + bias_value = bias_var.val + + is_conv_op = conv_op.op_type == "conv" + + # check that the bias value is a constant array or a scalar constant + if not isinstance(bias_value, (np.ndarray, np.generic)): + return False + + is_bias_scalar = False + if not isinstance(bias_value, np.ndarray): + is_bias_scalar = True + + # find rank of the conv input + rank = conv_op.x.rank + if rank is None: + return False + if not (rank == 3 or rank == 4 or rank == 5): + return False + + # check compatibility of bias value with the rank of the conv op + # either bias value should be a scalar or: + # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1) + # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1) + # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1) + + if is_bias_scalar: + bias_value = np.array([bias_value]) + else: + # check that there is at most one dimension in the shape that is not 1 + if len(np.squeeze(bias_value).shape) > 1: + return False + # check that addition is not happening on the batch dimension + if len(bias_value.shape) == rank: + if bias_value.shape[0] != 1: + return False + # check that last rank-2 entries in the shape vector are all 1s + if np.prod(bias_value.shape[-(rank - 2) :]) != 1: + return False + bias_value = np.squeeze(bias_value) + + if add_op.op_type == "sub": + bias_value *= -1 + + # everything looks good, now find the new updated bias + old_bias = conv_op.inputs.get("bias", None) + old_bias_value = None + if old_bias is not None and old_bias.val is not None: + old_bias_value = old_bias.val + if old_bias is None: + # need to create a fresh numpy array for bias + if np.prod(bias_value.shape) == 1: + # its a scalar bias + # need to find the value of Cout to form a new bias + if conv_op.weight.val is None: + return False + # conv_transpose has weight format [K, C_out, spatial dims] + # conv has weight format [C_out, K, spatial dims] + Cout = conv_op.weight.val.shape[0 if is_conv_op else 1] + new_bias_value = np.broadcast_to(bias_value, (Cout,)) + else: + new_bias_value = bias_value + else: + # just need to update the existing bias array + try: + new_bias_value = old_bias_value + bias_value + except: + return False + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = add_op.outputs[0].name + if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16: + # cast the bias to match the weight type + weight_np_type = types.nptype_from_builtin( + conv_op.inputs["weight"].sym_type.get_primitive() + ) + logger.warning( + "conv_bias_fusion pass: casting bias " + "from {} to {} to match the dtype of the weight of the conv layer".format( + new_bias_value.dtype, weight_np_type + ) + ) + new_bias_value = new_bias_value.astype(weight_np_type) + new_bias_var = mb.const(val=new_bias_value, before_op=conv_op) + + conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": conv_op} + + for k, v in conv_op.inputs.items(): + if k == "bias": + continue + conv_kargs[k] = v + + if is_conv_op: + x = mb.conv(**conv_kargs) + else: + x = mb.conv_transpose(**conv_kargs) + + if add_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=add_op, + old_var=add_op.outputs[0], + new_var=x, + ): + 
add_op.enclosing_block.remove_ops([conv_op, add_op]) + return True + return False + + @block_context_manager + def _fuse_conv_bias_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_conv_bias_block(b) + if len(op.blocks) > 0: + # This op can't be conv or conv_transpose + continue + + # pattern 1 : conv + add/sub + add_op = self._match_pattern(op) + if add_op is not None: + fusion_status = self._try_to_transform(op, add_op) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + + # pattern 2 : conv + transpose + add/sub + fusion_status = self._try_to_transform_transpose_pattern(op, block) + if fusion_status: + return fusion_status + + return fusion_status + + +@register_pass(namespace="common") +class fuse_conv_scale(AbstractGraphPass): + """ + Fold ``mul``/``div`` into ``conv``/``conv_transpose`` by updating the weight/bias of the convolution layers. + + The scale ``const`` can be a single number (scalar) or a vector with a broadcastable shape. + For example, if the output of the ``conv``/``deconv`` layer is ``(B, Cout, H, W)``, + ``const`` of shape ``(Cout, 1, 1)`` and ``(1, Cout, 1, 1)`` are allowed. + + .. code-block:: + + Given: + %2 = conv(%1) + ... + %3 = mul(%2, constant) # where constant is the scale constant + ... + + Result: + %3 = conv(%1) + ... + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_conv_scale_block(f) + + @staticmethod + def _try_to_transform(conv_op, scale_op): + # get the scale + if scale_op.x.val is None and scale_op.y.val is None: + return False + scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y + scale = scale_var.val + + # for the scalar case, the scalar can be either + # 1. a python int/float + # 2. a 0d numpy array + # 3. 
a 1d numpy array with shape (1,)
+
+        is_scalar = True
+        if isinstance(scale, np.ndarray):
+            if scale.shape == ():
+                scale = scale.tolist()
+            elif scale.shape == (1,):
+                scale = scale[0]
+            else:
+                is_scalar = False
+
+        # get weight, bias, and groups from conv layer
+        if conv_op.weight.val is None:
+            return False
+        conv_weight = conv_op.weight.val
+        conv_bias = conv_op.bias
+        groups = conv_op.groups.val
+
+        # get type of the conv layer
+        is_deconv = conv_op.op_type == "conv_transpose"
+        is_conv_1d = len(conv_weight.shape) == 3
+
+        # D_in denotes the spatial dimensions for conv kernel weight
+        # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
+        # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
+        if is_deconv:
+            Cout = conv_weight.shape[1] * groups
+            Cin = conv_weight.shape[0]
+        else:
+            Cout = conv_weight.shape[0]
+            Cin = conv_weight.shape[1] * groups
+
+        # for the vector scale case, check if the shape is broadcastable
+        if not is_scalar:
+            if not np.prod(scale.shape) == Cout:
+                return False
+            if len(scale.shape) == len(conv_weight.shape):
+                if not scale.shape[1] == Cout:
+                    return False
+            elif len(scale.shape) == len(conv_weight.shape) - 1:
+                if not scale.shape[0] == Cout:
+                    return False
+            else:
+                return False
+
+        # transform the scale to 1./scale for the real_div case
+        if scale_op.op_type == "real_div":
+            scale = 1.0 / scale
+
+        # get the type of the conv weight
+        conv_weight_type = conv_weight.dtype
+
+        # create a zero bias for conv if it does not exist
+        if conv_bias is None:
+            conv_bias = np.zeros(Cout)
+        else:
+            conv_bias = conv_bias.val
+        conv_bias = conv_bias.astype(conv_weight_type)
+
+        # get the original shape of weight and bias
+        origin_weight_shape = conv_weight.shape
+        origin_bias_shape = conv_bias.shape
+
+        # update the weight/bias for conv layer
+        if is_scalar:
+            new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
+            new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
+        else:
+            scale = np.reshape(scale, (Cout))
+            new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
+            new_conv_weight = []
+            if is_deconv:
+                conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
+                conv_weight = np.reshape(
+                    conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])
+                )
+
+            for i in range(Cout):
+                _conv_weight = conv_weight[i] * scale[i]
+                new_conv_weight.append(_conv_weight)
+            new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
+
+            if is_deconv:
+                new_conv_weight = np.reshape(
+                    new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])
+                )
+                new_conv_weight = np.transpose(
+                    new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]
+                )
+
+        # make sure the updated weight and bias have the same shape as the original ones
+        assert (
+            new_conv_weight.shape == origin_weight_shape
+        ), "conv weight should have the same shape before and after the fuse_conv_scale pass."
+        assert (
+            new_conv_bias.shape == origin_bias_shape
+        ), "conv bias should have the same shape before and after the fuse_conv_scale pass."
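+
+        # At this point the fold is plain constant arithmetic: for a per-channel scale s,
+        # conv(x; W, b) * s == conv(x; W * s, b * s), and for real_div the inverse scale
+        # 1/s computed above plays the same role.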
+
+        # create a new conv op with the new weight and bias values, copying the rest of the attributes
+        out_name = scale_op.outputs[0].name
+        conv_kargs = {
+            "weight": new_conv_weight,
+            "bias": new_conv_bias,
+            "name": out_name,
+            "before_op": conv_op,
+        }
+
+        for k, v in conv_op.inputs.items():
+            if k in ["weight", "bias"]:
+                continue
+            conv_kargs[k] = v
+
+        if is_deconv:
+            x = mb.conv_transpose(**conv_kargs)
+        else:
+            x = mb.conv(**conv_kargs)
+
+        if scale_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=scale_op,
+            old_var=scale_op.outputs[0],
+            new_var=x,
+        ):
+            scale_op.enclosing_block.remove_ops([conv_op, scale_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_conv_scale_block(self, block):
+        def _match_pattern(op):
+            if op.op_type == "conv" or op.op_type == "conv_transpose":
+                # abort fusion if op output is also a block output
+                if op.outputs[0] in op.enclosing_block.outputs:
+                    return None
+                # find mul/real_div op
+                child_ops = op.outputs[0].child_ops
+                if len(child_ops) == 1:
+                    scale_op_candidate = list(child_ops)[0]
+                    if scale_op_candidate.op_type in ["mul", "real_div"]:
+                        return scale_op_candidate
+            return None
+
+        fusion_occurred = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_conv_scale_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be conv or conv_transpose
+                continue
+
+            scale_op = _match_pattern(op)
+
+            if scale_op is not None:
+                fusion_occurred = self._try_to_transform(op, scale_op)
+                # has to break as the downstream iterator is affected.
+                if fusion_occurred:
+                    return fusion_occurred
+        return fusion_occurred
+
+
+@register_pass(namespace="common")
+class fuse_pad_conv(AbstractGraphPass):
+    """
+    When we observe ``pad -> transpose -> conv``, we move the ``pad`` to be next to ``conv``.
+    This allows us to meld ``pad + conv`` if possible.
+
+    .. code-block::
+
+        Given:
+            %1 = pad(%0, ...)
+            %2 = transpose(%1, ...)
+            %3 = conv(%2, ...)
+            ...
+
+        Result:
+            %1.a = transpose(%0, ...)
+            %2.a = pad(%1.a, ...)
+            %3 = conv(%2.a)
+            ...
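+
+    The ``pad`` amounts are re-indexed to follow their axes through the ``transpose``. For
+    example, with ``%0`` of rank 4, ``pad = [0, 0, 0, 0, 1, 1, 2, 2]``, and
+    ``perm = (0, 2, 3, 1)``, the re-created pad becomes ``[0, 0, 1, 1, 2, 2, 0, 0]``.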
+ """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._pad_conv_connect_block(f) + + @staticmethod + def _match_pattern(op): + ret = set([]) + child_ops = op.outputs[0].child_ops + + for child_op in child_ops: + if child_op.op_type != "transpose": + continue + skip_ops = child_op.outputs[0].child_ops + for skip_op in skip_ops: + if "conv" not in skip_op.op_type: + continue + ret.update([child_op]) + + return ret if len(ret) != 0 else None + + @staticmethod + def _try_to_transform(pad_op, transpose_ops, block): + def _compute_new_pad_values(transpose_op): + if pad_op.inputs["pad"].val is None: + return None + pad_amounts = np.reshape(pad_op.inputs["pad"].val, [-1, 2]) + transpose_axes = transpose_op.inputs["perm"].val + rank_diff = len(transpose_axes) - pad_amounts.shape[0] + pad_amounts_new = copy.deepcopy(pad_amounts) + # append "rank_diff" rows of zeros to the top + pad_amounts_new = np.concatenate( + (np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts_new) + ) + pad_amounts_new = pad_amounts_new.astype(pad_amounts.dtype) + pad_amounts = np.concatenate((np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts)) + for i, axis in enumerate(transpose_axes): + pad_amounts_new[i][0] = pad_amounts[axis][0] + pad_amounts_new[i][1] = pad_amounts[axis][1] + + # get the top "rank_diff" rows + top_rows = pad_amounts_new[:rank_diff, :] + if not np.all(top_rows == 0): + return False + # cut "rank_diff" from the top + pad_amounts_new = pad_amounts_new[rank_diff:, :] + pad_amounts_new = pad_amounts_new.flatten() + return pad_amounts_new + + if pad_op.outputs[0] in pad_op.enclosing_block.outputs: + return False + if len(set(pad_op.outputs[0].child_ops)) != len(transpose_ops): + return False + + for transpose_op in transpose_ops: + pad_amounts_new = _compute_new_pad_values(transpose_op) + if pad_amounts_new is None: + continue + + with pad_op.enclosing_block: + new_transpose_var = mb.transpose( + x=pad_op.inputs["x"], + perm=transpose_op.inputs["perm"].val, + before_op=transpose_op, + ) + new_pad_inputs = {"x": new_transpose_var, "pad": pad_amounts_new} + for k, v in pad_op.inputs.items(): + if k not in new_pad_inputs: + new_pad_inputs[k] = v + new_pad_var = mb.pad(before_op=transpose_op, **new_pad_inputs) + pad_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=transpose_op, old_var=transpose_op.outputs[0], new_var=new_pad_var + ) + + pad_op.enclosing_block.remove_ops(list(transpose_ops) + [pad_op]) + + return True + + @block_context_manager + def _pad_conv_connect_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._pad_conv_connect_block(b) + + if op.op_type != "pad": + continue + + transpose_ops = self._match_pattern(op) + if transpose_ops is not None: + fusion_status = self._try_to_transform(op, transpose_ops, block) + # has to break as the downstream iterator is affected. 
+ if fusion_status: + return fusion_status + return fusion_status diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py new file mode 100644 index 00000000..1b0fd507 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py @@ -0,0 +1,321 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types as _types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class divide_to_multiply(AbstractGraphPass): + """ + Convert divide into multiply if the divisor is ``const``. + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._divide_to_multiply_block(f) + + @block_context_manager + def _divide_to_multiply_block(self, block): + for op in list(block.operations): + for b in op.blocks: + self._divide_to_multiply_block(b) + if len(op.blocks) > 0: + # This op can't be divided. + continue + + # If real_div has integer input, the result is an integer (following TensorFlow spec). + # Hence, this pass needs disabled if the input is not float, since it translates y + # to a floating point number. If x or y was originally an integer, and y becomes + # a floating point number, then the original type + # signature (with integer output) would not be preserved. + if op.op_type == "real_div" and op.y.val is not None and _types.is_float(op.x.dtype): + new_y_val = np.array(1.0, dtype=op.y.val.dtype) / op.y.val + if not np.isfinite(new_y_val).all(): + continue + + x = mb.mul(x=op.x, y=new_y_val, name="_inversed_" + op.name, before_op=op) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=x + ) + block.remove_ops([op]) + + +@register_pass(namespace="common") +class fuse_elementwise_to_batchnorm(AbstractGraphPass): + """ + Fold ``mul`` + ``add`` into a ``batchnorm`` + if the ``const`` feeding into the ``mul``/``add`` is of shape ``(1,C,1,1)`` or ``(C,1,1)`` + and input to ``mul`` is of rank 4. + + .. code-block:: + + Given: + [Const] [Const] + | | + V V + [...] --> [Mul] --> [Add] --> [...] + + That is, + + %2 = op1(%1) + %3 = mul(%2, constant) + %4 = add(%3, constant) + %5 = op2(%4) + ... + + Result: + + [...] --> [BatchNorm] --> [...] + + That is, + %2 = op1(%1) + %4 = batchnorm(%2) + %5 = op2(%4) + ... 
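+
+    The replacement is exact up to the op's small default epsilon: with ``mean = 0`` and
+    ``variance = 1``, ``batch_norm`` computes ``gamma * x / sqrt(1 + eps) + beta``, which
+    reduces to the original ``mul`` followed by ``add``.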
+ """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_elementwise_to_batchnorm_block(f) + + @staticmethod + def _match_pattern(op): + if op.outputs[0] in op.enclosing_block.outputs: + return None + + if op.op_type == "mul": + # find add + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + add_op_candidate = list(child_ops)[0] + if add_op_candidate.op_type == "add": + return add_op_candidate + return None + + @staticmethod + def _try_to_transform(mul_op, add_op, block): + def _find_const_input_val(op): + if op.x.val is not None: + return op.x.val + if op.y.val is not None: + return op.y.val + return None + + def _check_shape(arr): + """ + return True if shape is of form + (1,C,1,1) or (C,1,1) + """ + rank = len(arr.shape) + if not (rank == 3 or rank == 4): + return False + C = arr.shape[-3] + if not (arr.shape == (1, C, 1, 1) or arr.shape == (C, 1, 1)): + return False + return True + + non_const_input_mul = mul_op.x if mul_op.x.val is None else mul_op.y + if non_const_input_mul.rank != 4: + return False + + gamma = _find_const_input_val(mul_op) + beta = _find_const_input_val(add_op) + if gamma is None or beta is None: + return False + + if not (isinstance(gamma, np.ndarray) and isinstance(beta, np.ndarray)): + return False + + # check that gamma and beta have shape (1,C,1,1) or (C,1,1) + # that is they are doing vector addition on the axis=-3, which is what the + # batchnorm layer does (batchnorm layer only works on rank 4 input tensors) + if not (_check_shape(gamma) and _check_shape(beta)): + return False + + C = gamma.shape[-3] + if C == 1: + return False + + out_name = add_op.outputs[0].name + x = mb.batch_norm( + x=non_const_input_mul, + mean=np.zeros((C,), np.float32), + variance=np.ones((C,), np.float32), + gamma=np.squeeze(gamma), + beta=np.squeeze(beta), + name=out_name, + before_op=mul_op, + ) + + add_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=add_op, old_var=add_op.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops([mul_op, add_op]) + return True + + @block_context_manager + def _fuse_elementwise_to_batchnorm_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_elementwise_to_batchnorm_block(b) + if len(op.blocks) > 0: + # This op can't be mul + continue + + add_op = self._match_pattern(op) + if add_op is not None: + fusion_status = self._try_to_transform(op, add_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="common") +class rank0_expand_dims_swap(AbstractGraphPass): + """ + Identify the pattern of a ``rank-0`` binary elementwise operation followed by an ``expand_dims`` op. + In the MIL backend, the output of the ``elementwise`` op becomes rank 1. Hence, an ``expand_dims`` op + should be added after both of the ``rank-0`` tensors, and the final ``expand_dims`` should be removed. + If the output var of the binary elementwise op is consumed by more than one op, a ``squeeze`` op + is inserted. + + .. code-block:: + + Input: + + [...](rank-0) --> sub --> expand_dims (axes=[0]) --> [...] + ^ | + | |--> op2 + | | + | |--> op3 + | + [scalar const] + + Output: + [...](rank-0) --> expand_dims (axes=[0]) --> sub --> [...] 
+ ^ | + | |--> squeeze ---> op2 + | | + | |--> op3 + | + expand_dims (axes=[0]) + ^ + | + | + [scalar const] + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._rank0_expand_dims_swap(f) + + @staticmethod + def _try_to_transform(op, block): + op_type = op.op_type + ops_to_remove = [] + if op.x.rank != 0 or op.y.rank != 0: + return False + + # One and only one input is a scalar const + if (op.x.val is None) == (op.y.val is None): + return False + + var_1, var_2 = op.x, op.y + ops_to_remove.append(op) + + # check if the output is consumed by exact one expand_dims op and other ops + expand_dims_ops = [] + other_ops = [] + child_ops = list(op.outputs[0].child_ops) + for child_op in child_ops: + if child_op.op_type == "expand_dims": + expand_dims_ops.append(child_op) + else: + other_ops.append(child_op) + if len(expand_dims_ops) != 1: + return False + + # check the expand_dim op has axes = [0] + expand_dims_op = expand_dims_ops[0] + if expand_dims_op.axes.val != [0]: + return False + ops_to_remove.append(expand_dims_op) + ops_to_remove += other_ops + + for out in op.outputs: + if out in block.outputs: + return False + + # add a expand_dims op after each rank-0 tensor + var_1_expand = mb.expand_dims(x=var_1, axes=[0], before_op=op) + var_2_expand = mb.expand_dims(x=var_2, axes=[0], before_op=op) + + # add a new elementwise binary op + elem_op = getattr(mb, op_type) + + # replace var for the expand_dims op + x = elem_op( + x=var_1_expand, y=var_2_expand, name=expand_dims_op.outputs[0].name, before_op=op + ) + expand_dims_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=expand_dims_op, old_var=expand_dims_op.outputs[0], new_var=x + ) + + # replace var for other ops + if len(other_ops) >= 1: + elem_op_output = op.outputs[0] + squeeze = mb.squeeze(x=x, before_op=op) + for other_op in other_ops: + new_op = getattr(mb, other_op.op_type) + kargs = {} + for k, v in other_op.inputs.items(): + if v == elem_op_output: + kargs[k] = squeeze + else: + kargs[k] = v + kargs["name"] = other_op.name + kargs["before_op"] = other_op + new_var = new_op(**kargs) + other_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=other_op, old_var=other_op.outputs[0], new_var=new_var + ) + + # Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _rank0_expand_dims_swap(self, block): + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._rank0_expand_dims_swap(b) + if len(op.blocks) > 0: + # This op can't be elementwise binary ops + continue + + if op.op_type in ["add", "sub", "mul", "real_div", "floor_div"]: + fusion_occurred = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py new file mode 100644 index 00000000..b72f30f7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Program
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class fuse_linear_bias(AbstractGraphPass):
+    """
+    Convert ``linear + add/sub`` to a single ``linear`` by updating the weight and bias of the ``linear`` layer.
+
+    .. code-block::
+
+        Example 1:
+            Original:
+                %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                                       # %3 is a rank-1 const tensor (bias)
+                ...
+                %6 = add(x=%4, y=%5)  # %5 is a const tensor with same shape as %3
+
+            Result:
+                %8 = linear(x=%1, weight=%2, bias=%7)  # where %7 is a new const tensor with value
+                                                       # %7 = %3 + %5
+
+        Example 2:
+            Original:
+                %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
+                                                       # %3 is a rank-1 const tensor (bias)
+                ...
+                %6 = sub(x=%5, y=%4)  # %5 is a const tensor with a broadcastable shape with %3,
+                                      # i.e. if %3 has shape (Dout), %5 could be (1, Dout).
+
+            Result:
+                %9 = linear(x=%1, weight=%7, bias=%8)  # where %7 is a new const tensor with value %7 = -%2
+                                                       # %8 = %5 - %3
+    """
+
+    def apply(self, prog: Program):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_linear_bias_block(f)
+
+    @staticmethod
+    def _try_to_transform(linear_op, add_or_sub_op, block):
+        if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None:
+            return False
+
+        is_sub = add_or_sub_op.op_type == "sub"
+        is_first_input = add_or_sub_op.x == linear_op.outputs[0]
+
+        # compute the new bias
+        linear_bias = linear_op.bias.val
+        bias = add_or_sub_op.y.val if is_first_input else add_or_sub_op.x.val
+
+        # check if the shape is broadcastable
+        if np.prod(linear_bias.shape) != np.prod(bias.shape):
+            return False
+        Dout = linear_bias.shape[0]
+        if bias.shape[-1] != Dout:
+            return False
+        bias = np.reshape(bias, (Dout,))
+
+        if is_sub:
+            if is_first_input:
+                bias = -bias
+            else:
+                linear_bias = -linear_bias
+
+        new_bias = linear_bias + bias
+
+        # compute the new weight
+        if is_sub and not is_first_input:
+            new_weight = -linear_op.weight.val
+        else:
+            new_weight = linear_op.weight.val
+
+        # create a new linear op with the new weight and bias values, copying the rest of the attributes
+        out_name = add_or_sub_op.outputs[0].name
+        linear_kargs = {
+            "weight": new_weight,
+            "bias": new_bias,
+            "name": out_name,
+            "before_op": linear_op,
+        }
+
+        for k, v in linear_op.inputs.items():
+            if k in ["weight", "bias"]:
+                continue
+            linear_kargs[k] = v
+
+        x = mb.linear(**linear_kargs)
+
+        if add_or_sub_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=add_or_sub_op,
+            old_var=add_or_sub_op.outputs[0],
+            new_var=x,
+        ):
+            add_or_sub_op.enclosing_block.remove_ops([linear_op, add_or_sub_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_linear_bias_block(self, block):
+        def _find_candicate_op(op):
+            if op.op_type != "linear":
+                return None
+            # abort fusion if op output is also a block output
+            if op.outputs[0] in op.enclosing_block.outputs:
+                return None
+            # find add/sub op
+            child_ops = op.outputs[0].child_ops
+            if len(child_ops) == 1:
op_candidate = list(child_ops)[0] + if op_candidate.op_type in ["add", "sub"]: + return op_candidate + + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_linear_bias_block(b) + if len(op.blocks) > 0: + # This op can't be conv or conv_transpose + continue + + add_or_sub_op = _find_candicate_op(op) + if add_or_sub_op is not None: + fusion_occurred = self._try_to_transform(op, add_or_sub_op, block) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred + + +@register_pass(namespace="common") +class fuse_matmul_weight_bias(AbstractGraphPass): + """ + Convert ``matmul + add/sub`` to ``linear`` whenever possible. + + .. code-block:: + + Given: + %3 = matmul(x=%1, y=%2) # %1 or %2 is const and rank 2 (weight) + ... + %5 = add(x=%3, y=%4) # %4 is const. add(x=%4, y=%3) is equivalent + # sub is similar. + + Result: + # assuming %2 above is const and rank 2 + %5 = linear(x=%1, weight=%2, bias=%4) + """ + + def apply(self, prog: Program): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_matmul_weight_bias_block(f) + + @staticmethod + def _find_candidate_op(op): + _CHILD_OP_TYPES = ["add", "sub"] + + if op.op_type != "matmul": + return None + # find add + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + add_op_candidate = list(child_ops)[0] + if add_op_candidate.op_type in _CHILD_OP_TYPES: + return add_op_candidate + + @staticmethod + def _transpose(v, before_op, name=None): + """ + Transpose the last 2 dims. + + - ``v``: (Var, must be a tensor). + - ``before_op``: (Operation) The op right before the newly added ``transpose`` op. + - ``name``: Name for the ``transpose`` op if provided. + """ + perm = list(range(v.rank)) + perm[-2], perm[-1] = perm[-1], perm[-2] + + if name is None: + return mb.transpose(x=v, perm=perm, before_op=before_op) + else: + return mb.transpose(x=v, perm=perm, before_op=before_op, name=name) + + def _try_to_transform(self, matmul_op, add_op, block): + if matmul_op.x.val is None and matmul_op.y.val is None: + # This is a dynamic matmul. + return False + if add_op.x.val is None and add_op.y.val is None: + # This is a dynamic add. + return False + + x_is_weight = matmul_op.x.val is not None + if x_is_weight: + weight, linear_x = matmul_op.x, matmul_op.y + transpose_weight = matmul_op.transpose_x.val + transpose_x = matmul_op.transpose_y.val + else: + weight, linear_x = matmul_op.y, matmul_op.x + transpose_weight = matmul_op.transpose_y.val + transpose_x = matmul_op.transpose_x.val + + # We potentially are going to transpose the weight, so if the weight itself is not removable, we skip this path + if len(weight.nonreplaceable_vars_upstream) > 0: + return False + + if linear_x.rank < 2 or weight.rank != 2: + # We don't support these cases yet. + return False + + # For those weights which are the input for more than one op, + # we don't do the fusion. + # The reason is that it might cause memory explosion by adding + # those weight as a numpy array in the inner product or + # the batch_mat_mul kernel. 
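+        # For orientation: with a constant W of shape (K, Dout) and both transpose flags
+        # False, x @ W + b is rewritten below as linear(x, weight=W^T, bias=b); when the
+        # weight is the first matmul input, the linear is wrapped in two transposes instead.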
+ if len(weight.child_ops) > 1: + return False + + d_out = weight.shape[1] if not transpose_weight else weight.shape[0] + bias = add_op.x.val if add_op.x.val is not None else add_op.y.val + if len(bias.shape) > 1: + if any([d != 1 for d in bias.shape[:-1]]): + return # cannot transform + + # squeeze leading dims of size 1 + bias = np.squeeze(bias) + + if len(bias.shape) != 1 or bias.shape[0] != d_out: + return # cannot transform + + if add_op.op_type == "sub": + bias = -bias + out_name = add_op.outputs[0].name + + if x_is_weight: + # If transpose_x == transpose_weight == False: + # w*x = (x^T w^T)^T = linear(x^T, w)^T + x_transposed = ( + self._transpose(linear_x, before_op=matmul_op) if not transpose_x else linear_x + ) + w_no_transpose = ( + weight if not transpose_weight else self._transpose(weight, before_op=matmul_op) + ) + x = mb.linear(x=x_transposed, weight=w_no_transpose, bias=bias, before_op=matmul_op) + x = self._transpose(x, before_op=matmul_op, name=out_name) + else: + # If transpose_x == transpose_weight == False + # x*w = x*(w^T)^T = linear(x, w^T) + x_no_transpose = ( + self._transpose(linear_x, before_op=matmul_op) if transpose_x else linear_x + ) + w_transposed = ( + weight if transpose_weight else self._transpose(weight, before_op=matmul_op) + ) + x = mb.linear( + x=x_no_transpose, + weight=w_transposed, + bias=bias, + before_op=matmul_op, + name=out_name, + ) + + if add_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=add_op, + old_var=add_op.outputs[0], + new_var=x, + ): + add_op.enclosing_block.remove_ops([matmul_op, add_op]) + return True + return False + + @block_context_manager + def _fuse_matmul_weight_bias_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_matmul_weight_bias_block(b) + if len(op.blocks) > 0: + # This op can't be matmul + continue + + add_op = self._find_candidate_op(op) + + if add_op is not None: + fusion_status = self._try_to_transform(op, add_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py new file mode 100644 index 00000000..79720a23 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py @@ -0,0 +1,851 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from typing import List, Optional + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Operation, Program, Var +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_no_output_connection, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class fuse_layernorm_or_instancenorm(AbstractGraphPass): + """ + A graph optimization pass on PyMIL to detect and fuse several variants of ``layer_norm`` or + ``instance_norm``. Pattern 1 corresponds to either ``layer_norm`` or ``instance_norm``. Patterns 2-4 + are ``instance_norm``. + """ + + _DEBUG = False # set to true to plot the block before and after the transformation + + def apply(self, prog: Program): + for f in prog.functions.values(): + block_changed = True + while block_changed: + if self._DEBUG: + import graphviz + + graphviz.Source( + f.get_dot_string( + highlight_debug_op_types=["instance_norm"], + ) + ).view(filename="/tmp/block_before_fuse_layernorm_or_instancenorm") + logger.debug("Block before fuse_layernorm_or_instancenorm transform:\n{}".format(f)) + + block_changed = self._fuse_layernorm_or_instancenorm_block(f) + + if self._DEBUG: + graphviz.Source( + f.get_dot_string( + highlight_debug_op_types=["instance_norm"], + ) + ).view(filename="/tmp/block_after_fuse_layernorm_or_instancenorm") + + logger.debug("Block after fuse_layernorm_or_instancenorm transform:\n{}".format(f)) + + @staticmethod + def _check_reduce_op(reduce_op: Operation, mode: str = "reduce_mean") -> bool: + """ + Check whether or not the ``reduction`` op satisfies following conditions: + + - Mode is expected. + - Does not change rank (``keep_dims`` is ``True``). + - The ``axes`` is known at compile time. + + Parameters + ---------- + + param reduce_op : ``reduce_op`` to check on. + + param mode : ``reduce`` mode + + """ + if reduce_op is None: + return False + if reduce_op.op_type != mode: + return False + if reduce_op.keep_dims is None or reduce_op.keep_dims.val is None: + return False + if reduce_op.keep_dims.val is False: + return False + if reduce_op.axes is None or reduce_op.axes.val is None: + return False + return True + + @staticmethod + def _check_child_op_types( + op: Operation, child_op_types: List[str], check_order: bool = True + ) -> bool: + """ + Returns ``True`` for child op types matching ``child_op_types``, otherwise returns ``False``. + + Parameters + ---------- + + param op : Current op. + + param child_op_type : Expected child op type. + + param check_order : Ensure child in given order, defaults to ``True``. 
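+
+        For example, if ``op`` has a single output consumed by one ``mul`` and one ``add``,
+        ``_check_child_op_types(op, ["add", "mul"], check_order=False)`` returns ``True``.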
+ """ + if op is None or len(op.outputs) != 1: + return False + child_ops = list(op.outputs[0].child_ops) + if len(child_ops) != len(child_op_types): + return False + ops_types = [c.op_type for c in child_ops] + if check_order is False: + ops_types = sorted(ops_types) + child_op_types = sorted(child_op_types) + return ops_types == child_op_types + + @staticmethod + def _try_get_child_op_type( + op: Operation, child_op_type: str, index: int = 0 + ) -> Optional[Operation]: + """ + Returns child op if type matches, otherwise returns ``None``. + + Parameters + ---------- + + param op : Current op. + + param child_op_type : Expected child op type. + + param index : Child op index. + """ + if op is None: + return None + if len(op.outputs) != 1: + return None + child_ops = list(op.outputs[0].child_ops) + if index >= len(child_ops): + return None + if child_ops[index].op_type != child_op_type: + return None + return child_ops[index] + + @staticmethod + def _try_apply_transform( + reduce_op: Operation, + block: Block, + gamma_var: Var, + beta_var: Var, + epsilon_var: Var, + end_op: Operation, + ops_to_remove: List[Operation], + ) -> bool: + """ + Insert instance_norm / layer_norm and delete all ops. + + :param reduce_op: Start operation of the pattern. + :param block: Block + :param gamma_var: Gamma variable. + :param beta_var: Beta variable. + :param epsilon_var: Epsilon variable. + :param end_op: End operation of the pattern. + :param ops_to_remove: Operations to remove. + """ + if not _check_no_output_connection(block, ops_to_remove): + return False + + axes = reduce_op.axes.val + rank = len(reduce_op.x.shape) + + # check whether the pattern is instance_norm or layer_norm + is_layernorm = False + is_instancenorm = False + is_require_rank4_transpose = False + + negative_axes = [a - rank if a >= 0 else a for a in axes] + negative_axes.sort() + + if len(gamma_var.val.shape) == len(axes) and len(beta_var.val.shape) == len(axes): + # axes for layer_norm must be [-1] or [-1, -2] or [-1, -2, -3] and so on + if negative_axes == list(range(-len(negative_axes), 0)): + is_layernorm = True + + if rank == 4 and (negative_axes == [-2, -1] or negative_axes == [-3, -2]): + if ( + len(np.squeeze(gamma_var.val).shape) == 1 + and len(np.squeeze(beta_var.val).shape) == 1 + ): + is_instancenorm = True + if negative_axes == [-3, -2]: + is_require_rank4_transpose = True + + if not (is_instancenorm or is_layernorm): + return False + + # remove all the ops, and replace with a layer_norm or instance_norm op + out_name = end_op.outputs[0].name + + if is_require_rank4_transpose: + x = mb.transpose( + x=reduce_op.x, + perm=[0, 3, 1, 2], + name=out_name + "_transpose_nhwc_nchw", + before_op=end_op, + ) + if is_instancenorm: + x = mb.instance_norm( + x=x if is_require_rank4_transpose else reduce_op.x, + gamma=np.squeeze(gamma_var.val), + beta=np.squeeze(beta_var.val), + epsilon=epsilon_var, + name=out_name + "_instancenorm" if is_require_rank4_transpose else out_name, + before_op=end_op, + ) + else: # is_layernorm + x = mb.layer_norm( + x=x if is_require_rank4_transpose else reduce_op.x, + axes=axes, + gamma=gamma_var, + beta=beta_var, + epsilon=epsilon_var, + name=out_name + "_layernorm" if is_require_rank4_transpose else out_name, + before_op=end_op, + ) + if is_require_rank4_transpose: + x = mb.transpose( + x=x, + perm=[0, 2, 3, 1], + name=out_name + "_transpose_nchw_nhwc", + before_op=end_op, + ) + + end_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=end_op, old_var=end_op.outputs[0], new_var=x + ) + # 
Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + def _try_match_and_transform_pattern_1(self, reduce_op, block) -> bool: + """ + Identify the pattern: + + ``y = gamma * (x - mean) / sqrt(variance + epsilon) + beta`` + + ``y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])`` + + .. code-block:: + + x --> reduce_mean --> sub --> square --> reduce_mean --> add(epsilon) --> rsqrt + | | ^ | + | | | V + |----------------------- mul (gamma) + | | | + | | --------|--------- + | | | | + | | | V + | |----------------------------------------------------------------> mul + | | | + | V | + |--------------------------------------------------------------> mul | + | V + | sub (beta) --> add --> [...] + | ^ + |------------------------------- + + This pattern corresponds to either ``layer_norm`` or ``instance_norm``. + + It is ``instance_norm`` if all of the following are true: + - ``input`` is rank 4. + - ``axes`` of ``reduce_mean`` is ``[-2, -1]`` or ``[-3, -2]`` + (when ``[-3, -2]``, a channel first to channel last transpose would be inserted). + - ``gamma`` and ``beta`` are rank 1, after ``squeeze``. + + It is ``layer_norm`` if all of the following are true: + - ``axes`` is either ``[-1]``, ``[-1, -2]``, or ``[-1, -2, -3]``, and so on. + - ``rank`` of ``gamma`` and ``beta`` is equal to the length of the ``axes``. + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(list(root_var.child_ops)) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["reduce_mean", "sub", "mul"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "mul"], check_order=False): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + op_a = child_ops_reduce_mean[0] + op_b = child_ops_reduce_mean[1] + sub_op1 = op_a if op_a.op_type == "sub" else op_b + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op1, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_op1 = self._try_get_child_op_type(reduce_op2, "add") + if add_op1 is None: + return False + epsilon_var = add_op1.y if add_op1.x == reduce_op2.outputs[0] else add_op1.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_op1) + + # check rsqrt + rsqrt_op = self._try_get_child_op_type(add_op1, "rsqrt") + if rsqrt_op is None: + return False + ops_to_remove.append(rsqrt_op) + + # check mul (gamma) + mul_op1 = self._try_get_child_op_type(rsqrt_op, "mul") + if mul_op1 is None: + return False + gamma_var = mul_op1.y if mul_op1.x == rsqrt_op.outputs[0] else mul_op1.x + if gamma_var.val is None: + return False + ops_to_remove.append(mul_op1) + + # check 2 muls after the gamma mul + if not self._check_child_op_types(mul_op1, ["mul", "mul"]): + return False + child_ops = list(mul_op1.outputs[0].child_ops) 
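+        # Of the two muls fed by the gamma mul, one multiplies the root input x and
+        # the other multiplies the mean; their order in child_ops is not guaranteed,
+        # so both assignments are tried below.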
+        mul_op2 = child_ops[0]
+        mul_op3 = child_ops[1]
+        mul_op2_other_var = mul_op2.x if mul_op2.y == mul_op1.outputs[0] else mul_op2.y
+        mul_op3_other_var = mul_op3.x if mul_op3.y == mul_op1.outputs[0] else mul_op3.y
+        if not (
+            (mul_op2_other_var == root_var and mul_op3_other_var == reduce_op.outputs[0])
+            or (mul_op2_other_var == reduce_op.outputs[0] and mul_op3_other_var == root_var)
+        ):
+            return False
+        if mul_op2_other_var == root_var:
+            mul_root_op = mul_op2
+            mul_mean_op = mul_op3
+        else:
+            mul_root_op = mul_op3
+            mul_mean_op = mul_op2
+        ops_to_remove.append(mul_mean_op)
+        ops_to_remove.append(mul_root_op)
+
+        # check sub with beta
+        sub_op2 = self._try_get_child_op_type(mul_mean_op, "sub")
+        if sub_op2 is None:
+            return False
+        if sub_op2.y != mul_mean_op.outputs[0]:
+            return False
+        beta_var = sub_op2.x
+        if beta_var.val is None:
+            return False
+        ops_to_remove.append(sub_op2)
+
+        # check last add op
+        add_op2 = self._try_get_child_op_type(sub_op2, "add")
+        if add_op2 is None:
+            return False
+        if not (add_op2.x == mul_root_op.outputs[0] or add_op2.y == mul_root_op.outputs[0]):
+            return False
+        ops_to_remove.append(add_op2)
+
+        return self._try_apply_transform(
+            reduce_op, block, gamma_var, beta_var, epsilon_var, add_op2, ops_to_remove
+        )
+
+    def _try_match_and_transform_pattern_2(self, reduce_op, block) -> bool:
+        """
+        Identify the pattern:
+
+        ``y = (x - mean) / pow(variance + epsilon, 0.5) * gamma + beta``
+
+        This pattern corresponds to, and should be fused as, ``instance_norm``.
+
+        All of the following conditions must be satisfied:
+
+        1. ``input`` is a rank 4 tensor.
+        2. ``reduce`` operates on spatial dimensions ``axes=[-2, -1]``, or ``axes=[-3, -2]`` (a
+           channel first to channel last transpose would be inserted in such cases).
+        3. ``gamma`` and ``beta`` are both of shape ``(C,)`` after ``squeeze``, where ``C`` is the number of channels.
+
+        .. code-block::
+
+            |----> sub -----|                          const (0.5)
+            |      ^        |                              |
+            |      |        V                              V
+            x ---> mean  square --> mean1 --> add_eps ---> pow   const_gamma   const_beta
+            |      |                                       |          |            |
+            |      V                                       V          V            V
+            |----> sub1 -----------------------------> real_div --> mul_gamma --> add_beta --> ...
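+
+        Note that ``pow(variance + epsilon, 0.5)`` equals ``sqrt(variance + epsilon)``,
+        so this computes the same normalization as pattern 1; the matcher checks that
+        the ``pow`` exponent is 0.5.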
+ """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(root_var.child_ops) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["reduce_mean", "sub", "sub"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "sub"]): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + reduce_mean_child_op_a = child_ops_reduce_mean[0] + reduce_mean_child_op_b = child_ops_reduce_mean[1] + # One of sub op directly goes square, the other one goes real_div + if list(reduce_mean_child_op_a.outputs[0].child_ops)[0].op_type == "square": + sub_op0 = reduce_mean_child_op_a + sub_op1 = reduce_mean_child_op_b + else: + sub_op0 = reduce_mean_child_op_b + sub_op1 = reduce_mean_child_op_a + if not (sub_op0.x == root_var and sub_op0.y == reduce_op.outputs[0]): + return False + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op0) + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op0, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_eps_op = self._try_get_child_op_type(reduce_op2, "add") + if add_eps_op is None: + return False + epsilon_var = add_eps_op.y if add_eps_op.x == reduce_op2.outputs[0] else add_eps_op.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_eps_op) + + # check pow + pow_op = self._try_get_child_op_type(add_eps_op, "pow") + if pow_op is None: + return False + if pow_op.y.val is None or not np.isclose(pow_op.y.val, 0.5): + return False + ops_to_remove.append(pow_op) + + # check real_div + real_div_op = self._try_get_child_op_type(pow_op, "real_div") + if real_div_op is None: + return False + if not (real_div_op.x == sub_op1.outputs[0] and real_div_op.y == pow_op.outputs[0]): + return False + ops_to_remove.append(real_div_op) + + # check mul with gamma + mul_gamma_op = self._try_get_child_op_type(real_div_op, "mul") + if mul_gamma_op is None: + return False + gamma_var = mul_gamma_op.y if mul_gamma_op.x == real_div_op.outputs[0] else mul_gamma_op.x + if gamma_var.val is None: + return False + ops_to_remove.append(mul_gamma_op) + + # check add with beta + add_beta_op = self._try_get_child_op_type(mul_gamma_op, "add") + if add_beta_op is None: + return False + beta_var = add_beta_op.y if add_beta_op.x == mul_gamma_op.outputs[0] else add_beta_op.x + if beta_var.val is None: + return False + ops_to_remove.append(add_beta_op) + + return self._try_apply_transform( + reduce_op, block, gamma_var, beta_var, epsilon_var, add_beta_op, ops_to_remove + ) + + def _try_match_and_transform_pattern_3(self, reduce_op, block) -> bool: + """ + Detect ``InstanceNorm`` pattern in TensorFlow-Addons. + + This pattern corresponds to, and should be fused as, ``instance_norm``. + + All of the following conditions must be satisfied: + + 1. ``input`` is rank 4 tensor. + 2. 
``reduce`` operates on spatial dimensions ``axes=[-2, -1]``, or ``axes=[-3, -2]`` (a + channel first to channel last transpose would be inserted in such cases). + 3. ``gamma`` and ``beta`` are absent. Default values for ``gamma`` and ``beta`` would be used. + + .. code-block:: + + |-------------------------------------------------| + | | + | V + x --> mean square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----| | | + | V V + |--------------------------------------------> mul1 -------------> add --> ... + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(root_var.child_ops) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, ["sub", "mul", "reduce_mean"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "mul"], check_order=False): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + reduce_mean_child_op_a = child_ops_reduce_mean[0] + reduce_mean_child_op_b = child_ops_reduce_mean[1] + sub_op1 = ( + reduce_mean_child_op_a + if reduce_mean_child_op_a.op_type == "sub" + else reduce_mean_child_op_b + ) + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op1, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if reduce_op2 is None or not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_eps_op = self._try_get_child_op_type(reduce_op2, "add") + if add_eps_op is None: + return False + epsilon_var = add_eps_op.y if add_eps_op.x == reduce_op2.outputs[0] else add_eps_op.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_eps_op) + + # check rsqrt + rsqrt_op = self._try_get_child_op_type(add_eps_op, "rsqrt") + if rsqrt_op is None: + return False + ops_to_remove.append(rsqrt_op) + + # check mul 1 + mul_op1 = self._try_get_child_op_type(rsqrt_op, "mul") + if mul_op1 is None: + return False + if not ( + (mul_op1.x == root_var and mul_op1.y == rsqrt_op.outputs[0]) + or (mul_op1.x == rsqrt_op.outputs[0] and mul_op1.y == root_var) + ): + return False + ops_to_remove.append(mul_op1) + + # check mul 2 + mul_op2 = self._try_get_child_op_type(rsqrt_op, "mul", index=1) + if mul_op2 is None: + return False + if not ( + (mul_op2.x == reduce_op.outputs[0] and mul_op2.y == rsqrt_op.outputs[0]) + or (mul_op2.x == rsqrt_op.outputs[0] and mul_op2.y == reduce_op.outputs[0]) + ): + return False + ops_to_remove.append(mul_op2) + + # check mul (sub) + mul_sub_op = self._try_get_child_op_type(mul_op2, "mul") + if mul_sub_op is None: + return False + if mul_sub_op.y.val is None or mul_sub_op.y.val != -1: + return False + ops_to_remove.append(mul_sub_op) + + # check last add op + add_op = self._try_get_child_op_type(mul_sub_op, "add") + if add_op is None: + return False + if not ( + (add_op.x == mul_op1.outputs[0] and add_op.y == mul_sub_op.outputs[0]) + or (add_op.x == mul_sub_op.outputs[0] and add_op.y == 
mul_op1.outputs[0]) + ): + return False + ops_to_remove.append(add_op) + + gamma_var = mb.const( + val=np.ones(shape=(1, root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_gamma", + ) + beta_var = mb.const( + val=np.zeros(shape=(1, root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_beta", + ) + + return self._try_apply_transform( + reduce_op, block, gamma_var, beta_var, epsilon_var, add_op, ops_to_remove + ) + + def _try_match_and_transform_pattern_4(self, reduce_op: Operation, block: Block) -> bool: + """ + Identify the pattern: + + ``y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])`` + + This pattern corresponds to, and should be fused as, ``instance_norm``. + + All of the following conditions must be satisfied: + + 1. ``input`` is rank 4 tensor. + 2. ``reduce`` operates on spatial dimensions ``axes=[-2, -1]`` or ``axes=[-3, -2]`` (a + channel first to channel last transpose would be inserted in such cases). + 3. ``gamma`` and ``beta`` are both shape ``(C,)`` after ``squeeze``, where ``C`` is number of channels. + + .. code-block:: + + |-----------| + | V + |------> mul_square1 -----> sum1 -----> mul_mean1 + | | + | V + x --> sum --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |----------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] + | ^ + |---------------------------| + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 4 ops + if len(root_var.child_ops) != 4: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["mul", "mul", "reduce_sum", "mul"] + ): + return False + + # check 1st reduce_sum op + if not self._check_reduce_op(reduce_op, mode="reduce_sum"): + return False + ops_to_remove.append(reduce_op) + + # check mul (mean) op + mul_mean_op = self._try_get_child_op_type(reduce_op, "mul") + if mul_mean_op is None: + return False + if mul_mean_op.y.shape != (): + return False + ops_to_remove.append(mul_mean_op) + + # check 1st mul (square) op + if not self._check_child_op_types(mul_mean_op, child_op_types=["mul", "mul", "mul"]): + return False + # both 0 and 1 should be mul square op + mul_square_op = self._try_get_child_op_type(mul_mean_op, "mul") + if mul_square_op is None: + return False + if self._try_get_child_op_type(mul_mean_op, "mul", index=1) is None: + return False + ops_to_remove.append(mul_square_op) + + # Check another branch + + # check 2nd mul (square) op + # both 0 and 1 should be mul square op 1 + mul_square_op2 = list(root_var.child_ops)[0] + ops_to_remove.append(mul_square_op2) + + # check 2nd reduce sum + reduce_op2 = self._try_get_child_op_type(mul_square_op2, child_op_type="reduce_sum") + if not self._check_reduce_op(reduce_op2, "reduce_sum"): + return False + ops_to_remove.append(reduce_op2) + + # check mul after 2nd reduce op + mul_mean_op2 = self._try_get_child_op_type(reduce_op2, "mul") + if mul_mean_op2 is None: + return False + if mul_mean_op2.y.shape != (): + return False + ops_to_remove.append(mul_mean_op2) + + # check sub (variance) + sub_variance_op = self._try_get_child_op_type(mul_mean_op2, "sub") + if sub_variance_op is None: + return False + if sub_variance_op.y != mul_square_op.outputs[0]: + return False + 
ops_to_remove.append(sub_variance_op)
+
+        # check add op (epsilon)
+        add_eps_op = self._try_get_child_op_type(sub_variance_op, "add")
+        if add_eps_op is None:
+            return False
+        epsilon_var = add_eps_op.y if add_eps_op.x == sub_variance_op.outputs[0] else add_eps_op.x
+        if epsilon_var.val is None or len(epsilon_var.val.shape) != 0:
+            return False  # must be scalar
+        ops_to_remove.append(add_eps_op)
+
+        # check rsqrt
+        rsqrt_op = self._try_get_child_op_type(add_eps_op, "rsqrt")
+        if rsqrt_op is None:
+            return False
+        ops_to_remove.append(rsqrt_op)
+
+        # check mul (gamma)
+        mul_gamma_op = self._try_get_child_op_type(rsqrt_op, "mul")
+        if mul_gamma_op is None:
+            return False
+        gamma_var = mul_gamma_op.y if mul_gamma_op.x == rsqrt_op.outputs[0] else mul_gamma_op.x
+        if gamma_var.val is None:
+            return False
+        ops_to_remove.append(mul_gamma_op)
+
+        # check 2 muls after the gamma mul
+        if not self._check_child_op_types(mul_gamma_op, ["mul", "mul"]):
+            return False
+        mul_gamma_child_ops = list(mul_gamma_op.outputs[0].child_ops)
+        mul_op1 = mul_gamma_child_ops[0]
+        mul_op2 = mul_gamma_child_ops[1]
+        mul_op1_other_var = mul_op1.x if mul_op1.y == mul_gamma_op.outputs[0] else mul_op1.y
+        mul_op2_other_var = mul_op2.x if mul_op2.y == mul_gamma_op.outputs[0] else mul_op2.y
+        if not (
+            (mul_op1_other_var == root_var and mul_op2_other_var == mul_square_op.x)
+            or (mul_op1_other_var == mul_square_op.x and mul_op2_other_var == root_var)
+        ):
+            return False
+        if mul_op1_other_var != root_var:
+            # ensure mul_op1 consumes the root input and mul_op2 consumes the mean
+            mul_op1, mul_op2 = mul_op2, mul_op1
+        ops_to_remove.append(mul_op1)
+        ops_to_remove.append(mul_op2)
+
+        # check sub with beta
+        sub_beta_op = self._try_get_child_op_type(mul_op2, "sub")
+        if sub_beta_op is None:
+            return False
+        if sub_beta_op.y != mul_op2.outputs[0]:
+            return False
+        beta_var = sub_beta_op.x
+        if beta_var.val is None:
+            return False
+        ops_to_remove.append(sub_beta_op)
+
+        # check last add op
+        add_op = self._try_get_child_op_type(sub_beta_op, "add")
+        if add_op is None:
+            return False
+        if not (
+            (add_op.x == mul_op1.outputs[0] and add_op.y == sub_beta_op.outputs[0])
+            or (add_op.y == mul_op1.outputs[0] and add_op.x == sub_beta_op.outputs[0])
+        ):
+            return False
+        ops_to_remove.append(add_op)
+
+        return self._try_apply_transform(
+            reduce_op, block, gamma_var, beta_var, epsilon_var, add_op, ops_to_remove
+        )
+
+    @block_context_manager
+    def _fuse_layernorm_or_instancenorm_block(self, block: Block):
+        fusion_status = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_layernorm_or_instancenorm_block(b)
+            if len(op.blocks) > 0:
+                continue
+
+            # start pattern match if reduce_mean op is encountered
+            if op.op_type == "reduce_mean":
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_1(op, block)
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_2(op, block)
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_3(op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+            elif op.op_type == "reduce_sum":
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_4(op, block)
+                # has to break as the downstream iterator is affected.
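+                # Returning as soon as a fusion happened lets the caller re-enter with
+                # a fresh iterator over block.operations, which the transform mutated.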
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py
new file mode 100644
index 00000000..e7f7c95c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py
@@ -0,0 +1,1755 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+from collections import defaultdict
+from typing import List, Text
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import _check_child_op_type, block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+from coremltools.converters.mil.mil.var import Var
+
+
+@register_pass(namespace="common")
+class merge_consecutive_paddings(AbstractGraphPass):
+    """
+    Identify two consecutive ``pad`` layers which could be merged into a single ``pad`` layer.
+
+    This is possible only if one of the following conditions is satisfied:
+
+    - The paddings are "constant" and have the same ``constant_val``.
+    - The paddings act along different axes.
+
+    .. code-block::
+
+        Input graph:
+        input(1, 2, 6, 8) ------> pad([1, 1], mode='reflect') -----> pad([1, 1, 0, 0], mode='reflect') ---> out(1, 2, 8, 10)
+
+        Output graph:
+        input(1, 2, 6, 8) ------> pad([1, 1, 1, 1], mode='reflect') ---> out(1, 2, 8, 10)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._merge_padding_block(f)
+
+    def _match_pattern(self, block, padding_op):
+
+        if padding_op.op_type != "pad":
+            return False
+
+        if not _check_child_op_type(padding_op, "pad"):
+            return False
+
+        child_padding_op = list(padding_op.outputs[0].child_ops)[0]
+
+        if padding_op.inputs["mode"].val != child_padding_op.inputs["mode"].val:
+            return False
+
+        # Ensure the paddings have the same length by prepending zeros to the shorter one
+        first_pad = padding_op.inputs["pad"].val
+        child_pad = child_padding_op.inputs["pad"].val
+        if len(first_pad) > len(child_pad):
+            child_pad = np.insert(child_pad, 0, [0] * (len(first_pad) - len(child_pad)))
+        elif len(child_pad) > len(first_pad):
+            first_pad = np.insert(first_pad, 0, [0] * (len(child_pad) - len(first_pad)))
+        final_pad = child_pad + first_pad
+
+        if padding_op.inputs["mode"].val == "constant":
+            # if the padding is constant, then the values need to be equal
+            if padding_op.inputs["constant_val"].val != child_padding_op.inputs["constant_val"].val:
+                return False
+        else:
+            # if the padding is not constant, then we can't merge if both pads affected the same
+            # side of the image
+            if any(i != 0 and j != 0 for (i, j) in zip(first_pad, child_pad)):
+                return False
+
+        return self._replace_ops(block, padding_op, child_padding_op, final_pad)
+
+    @staticmethod
+    def _replace_ops(block, padding_op, child_padding_op, final_pad):
+        mode = padding_op.inputs["mode"].val
+        x = mb.pad(
+            x=padding_op.inputs["x"],
+
pad=final_pad, + mode=mode, + constant_val=padding_op.inputs["constant_val"].val, + before_op=padding_op, + ) + padding_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=padding_op, old_var=child_padding_op.outputs[0], new_var=x + ) + block.remove_ops([padding_op, child_padding_op]) + return True + + @block_context_manager + def _merge_padding_block(self, block): + for op in list(block.operations): + result = self._match_pattern(block, op) + if result: + return True + return False + +@register_pass(namespace="common") +class merge_consecutive_transposes(AbstractGraphPass): + """ + Identify consecutive 'transpose' layers which could be merged into a single 'transpose' layer. + + .. code-block:: + + Input graph: + input ------> transpose -----> 1 or more transpose layers ---> out + + Output graph: + input ------> transpose ---> out + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_transposes_in_block(f) + + def _match_and_replace_pattern(self, block, transpose_op): + if not (transpose_op.op_type == "transpose" and _check_child_op_type(transpose_op, "transpose")): + return False + if transpose_op.outputs[0] in block.outputs: + return False + + child_transpose_op = list(transpose_op.outputs[0].child_ops)[0] + return self._replace_ops(block, transpose_op, child_transpose_op) + + @staticmethod + def _replace_ops(block, transpose_op, child_transpose_op): + perm = transpose_op.perm.val + new_perm = [perm[i] for i in child_transpose_op.perm.val] + x = mb.transpose(x=transpose_op.x, perm=new_perm, before_op=transpose_op) + if transpose_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=transpose_op, old_var=child_transpose_op.outputs[0], new_var=x, + ): + block.remove_ops([transpose_op, child_transpose_op]) + return True + return False + + @block_context_manager + def _merge_transposes_in_block(self, block): + def help_merge_transpose_ops(block): + for op in list(block.operations): + if self._match_and_replace_pattern(block, op): + return True + return False + + block_changed = True + while block_changed: + block_changed = help_merge_transpose_ops(block) + + +@register_pass(namespace="common") +class merge_consecutive_relus(AbstractGraphPass): + """ + Identify consecutive ``relu`` layers which could be merged into a single ``relu`` layer. + + .. 
code-block:: + + Input graph: + input ------> relu -----> 1 or more relu layers ---> out + + Output graph: + input ------> relu ---> out + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_relus_in_block(f) + + def _match_and_replace_pattern(self, block, relu_op): + if not (relu_op.op_type == "relu" and _check_child_op_type(relu_op, "relu")): + return False + + child_relu_op = list(relu_op.outputs[0].child_ops)[0] + return self._replace_ops(block, relu_op, child_relu_op) + + @staticmethod + def _replace_ops(block, relu_op, child_relu_op): + if relu_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=relu_op, old_var=child_relu_op.outputs[0], new_var=relu_op.outputs[0] + ): + block.remove_ops([child_relu_op]) + return True + return False + + @block_context_manager + def _merge_relus_in_block(self, block): + def help_merge_relu_ops(block): + for op in list(block.operations): + if self._match_and_replace_pattern(block, op): + return True + return False + + block_changed = True + while block_changed: + block_changed = help_merge_relu_ops(block) + + +@register_pass(namespace="common") +class merge_consecutive_reshapes(AbstractGraphPass): + """ + Identify consecutive ``reshape`` ops which could be merged into a single ``reshape``. + + .. code-block:: + + Input graph: + input -> reshape -> 1 or more reshapes -> output + + Output graph: + input -> reshape -> output + """ + + # TODO (rdar://105227587): merge a tree of consecutive reshapes + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_consecutive_reshapes_block(f) + + @staticmethod + def _match_pattern(reshape_op): + """ + Given a ``reshape`` op, + consider it as the head of a sequence of ``reshape`` ops, and + then end the sequence at a non-removable ``reshape`` op. + Return this sequence as a list. + """ + res = [] + op = reshape_op + + while op.op_type == "reshape": + res.append(op) + + # current reshape has 0 or 2+ child ops: + # * no child: this is the end of graph + # * 2+ children: only pattern of sequential reshape ops (1 child) + # is supported for now. 
For more general cases, please see TODO below + if len(op.outputs[0].child_ops) != 1: + break + # current reshape output is a block output, so it is non-removable + if op.outputs[0] in op.enclosing_block.outputs: + break + + op = op.outputs[0].child_ops[0] + + return res + + @block_context_manager + def _merge_consecutive_reshapes_block(self, block): + def help_merge_consecutive_reshapes_block(block): + for op in block.operations: + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = help_merge_consecutive_reshapes_block(b) + # move on to the next op if this op is not reshape + if op.op_type != "reshape": + continue + + reshape_ops = self._match_pattern(op) + # merge the list of consecutive reshape ops + if len(reshape_ops) > 1: + # create a new reshape op + reshape_out = mb.reshape( + x=reshape_ops[0].x, + shape=reshape_ops[-1].shape, + name=reshape_ops[-1].outputs[0].name, + before_op=reshape_ops[-1], + ) + # replace the consecutive reshape ops with the new reshape op + reshape_ops[-1].enclosing_block.replace_uses_of_var_after_op( + anchor_op=reshape_ops[-1], + old_var=reshape_ops[-1].outputs[0], + new_var=reshape_out, + ) + reshape_ops[-1].enclosing_block.remove_ops(reshape_ops) + return True + + return False + + block_changed = True + while block_changed: + block_changed = help_merge_consecutive_reshapes_block(block) + + +class CastOptimizationNode: + def __init__(self, op_type, match_criterion=None): + """ + Parameters + ---------- + + param op_type : Type of an operation. + param match_criterion : A callable function that matches a MIL op and returns a boolean. + + Examples + -------- + + .. sourcecode:: python + + CastOptimizationNode("mul"), + CastOptimizationNode("round"), + CastOptimizationNode("add", lambda op: op.y.val == 0), + CastOptimizationNode("clip", lambda op: op.alpha.val == -128 and op.beta.val == 127), + CastOptimizationNode("cast", lambda op: op.dtype.val == "int8"), + CastOptimizationNode("cast", lambda op: op.dtype.val == "fp32"), + + """ + + self.op_type = op_type + if not match_criterion: + match_criterion = lambda op: True + + self.match_criterion = match_criterion + + +@register_pass(namespace="common") +class cast_optimization(AbstractGraphPass): + """ + This optimization pass performs the following: + + - Removes redundant ``cast`` op; that is, ``cast`` where source and destination tensors have same dtypes. + - Either cancels or fuses any two consecutive `cast` ops, repeatedly. + + After this pass, there can't be any consecutive `cast` ops present in the program. + For examples, see ``TestCastOptimization``. + This is a non-algebraic translation which assumes that the upcasting doesn't change the user's intent. + + For example: + + .. code-block:: + + Input graph: + input -----> cast(dtype="fp16") -----> cast(dtype="fp32") ----> square ---> out + + Output graph: + input -----> square -----> out + + The input graph has a maximum precision of fp16 while the output graph has fp32 precision. + + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._fuse_or_cancel_consecutive_casts_block_wrapper(f, {}) + + # main function's output_vars are treated differently, which are not handled by the method + # above, "_fuse_or_cancel_consecutive_casts_block". 
+ # For that, we invoke another method + block_changed = True + while block_changed: + block_changed = self._cancel_consecutive_casts_connected_to_outputs( + prog.functions["main"] + ) + + def _match_linear_pattern(self, root, pattern): + """ + Use Depth First Search to match the pattern + + :param root: operation + :param pattern: List[CastOptimizationNode] + :return: Return List[operation] if pattern matches entirely else [] + """ + op = root + if not pattern or len(op.outputs) != 1: + return [] + + node = pattern[0] + if op.op_type != node.op_type: + return [] + + if not node.match_criterion(op): + return [] + + for child in op.outputs[0].child_ops: + op_list = [op] + self._match_linear_pattern(child, pattern[1:]) + if len(op_list) == len(pattern): + return op_list + + return [] + + def _try_to_transform(self, root_op, cached_vars): + block = root_op.enclosing_block + + # Scenario: Redundant cast when source and destination dtype are same. + if root_op.op_type == "cast" and root_op.x.is_tensor_or_scalar_of(dtype=root_op.dtype.val): + block.replace_uses_of_var_after_op( + anchor_op=root_op, + old_var=root_op.outputs[0], + new_var=root_op.x, + ) + block.remove_ops([root_op]) + return True + + # Scenario: Consecutive casts + list_of_ops_in_pattern = self._match_linear_pattern( + root_op, + [ + CastOptimizationNode("cast"), + CastOptimizationNode("cast"), + ], + ) + + if not list_of_ops_in_pattern: + return False + + cast_1, cast_2 = list_of_ops_in_pattern + + fused_output_var_name = cast_1.x.name + "_to_{}".format(cast_2.dtype.val) + + if cast_1.x.is_tensor_or_scalar_of(dtype=cast_2.dtype.val): + # when consecutive casts cancel each other + # Please check out: test_linear_consecutive_cast_ops_cancellation in TestCastOptimization + new_output_var = cast_1.x + elif fused_output_var_name in cached_vars: + # When the output of 1 cast goes into multiple casts of same configuration + # Please check out: test_consecutive_fusable_casts_on_all_branches in TestCastOptimization + new_output_var = cached_vars[fused_output_var_name] + else: + new_output_var = mb.cast( + x=cast_1.x, + dtype=cast_2.dtype, + name=fused_output_var_name, + before_op=cast_2, + ) + cached_vars[fused_output_var_name] = new_output_var + + # It's important to use `cast_2.enclosing_block` over `block` since `cast_2` might be present in + # a block nested under `block` + cast_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=cast_2, + old_var=cast_2.outputs[0], + new_var=new_output_var, + ) + + # Remove just the last cast op and let dce eliminate the rest of the ops if needed, + # The reason is that first cast op could be feeding into other non-cast ops. + cast_2.enclosing_block.remove_ops([cast_2]) + return True + + @block_context_manager + def _fuse_or_cancel_consecutive_casts_block_wrapper(self, block, cached_vars): + def _fuse_or_cancel_consecutive_casts_block(block, cached_vars): + block_changed = False + for i, op in enumerate(list(block.operations)): + for b in op.blocks: + nested_block_changed = True + nested_block_cached_vars = {} + nested_block_cached_vars.update(cached_vars) + self._fuse_or_cancel_consecutive_casts_block_wrapper( + b, nested_block_cached_vars + ) + + if len(op.blocks) > 0: + continue + + # start pattern match if cast op is encountered + if op.op_type == "cast": + block_changed = self._try_to_transform(op, cached_vars) + # has to break as the downstream iterator is affected. 
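+                    # Note: cached_vars persists across these restarts, so a fused cast
+                    # created on an earlier iteration is reused rather than re-created.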
+ if block_changed: + return block_changed + return block_changed + + block_changed = True + """ + Cached vars are used when `all` of the following conditions are met: + + 1. The output of a ``cast`` is fed into multiple ``cast`` ops of same configuration. + 2. These 2 consecutive ``cast`` ops can be fused into a single ``cast``. + + When these conditions are satisfied, we create a `new` fused ``cast`` op `only` once, and + the output of all these consecutive ``cast`` ops are replaced with the ouptut of this fused ``cast``. + + .. code-block:: + + Input graph: + |---->cast(dtype="fp16")---->square--->out_1 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2 + | + |---->cast(dtype="fp16")---->log--->out_3 + + Output graph: + |---->square--->out_1 + | + input---->new_fused_cast(dtype="fp16")---->relu--->out_2 + | + |---->log--->out_3 + + """ + while block_changed: + block_changed = _fuse_or_cancel_consecutive_casts_block(block, cached_vars) + + @staticmethod + def _cancel_consecutive_casts_connected_to_outputs(block): + """ + Lets say the ops in the block have the following pattern + "some_op"---->{var1}---->"cast_op1"---->"cast_op2"--->{var2} + , where var2 is one of the outputs in block.outputs + + If cast_op1 and cast_op2 can be cancelled, this means, var1 and var2 are duplicates + of each other. The program can then be updated to + "some_op"---->{var1} + where var1 replaces var2 in block.outputs + This also requires replacing var1's name with var2's so that the model output names remain unchanged + """ + new_output_vars = [] + block_changed = False + for output_var in block.outputs: + cast_op2 = output_var.op + if cast_op2 is None: + continue + if cast_op2.op_type != "cast": + new_output_vars.append(output_var) + continue + cast_op1 = cast_op2.x.op + if cast_op1 is None: + new_output_vars.append(output_var) + continue + if cast_op1.op_type != "cast": + new_output_vars.append(output_var) + continue + var1 = cast_op1.x + if var1.op is None or var1.dtype != output_var.dtype: + new_output_vars.append(output_var) + continue + var1.set_name(output_var.name) + new_output_vars.append(var1) + block_changed = True + + if block_changed: + block.set_outputs(new_output_vars) + + return block_changed + + +class TransformAxisUpdateOps: + """ + Parent class for every axis update op's class + + An axis update op is an op that can be updated, such that it can allow a transpose layer to "pass" through it. + That is, + + op(transpose(x)) == transpose(op_updated(x)) + + where + "op" : original op, + "op_updated": op after being updated. + + Example: + + if x is a tensor of rank 2, and transpose has perm=[1,0], + then + + reduce_mean[axis=1](transpose(x)) == transpose(reduce_mean[axis=0](x)) + + here reduce_mean op with axis=1 can be updated to a reduce_mean op with axis=0, + to allow the transpose to "pass" through it, i.e. get applied after it. + + """ + + def __init__(self, op, transpose_axes, var_to_hypothetical_value_dict=None): + self.op = op + self.transpose_axes = transpose_axes + self.var_to_hypothetical_value_dict = var_to_hypothetical_value_dict + + def can_transpose_pass(self): + """ + Each "axis" op must determine whether it can act like a unary op + and allow the transpose to pass through. + Return True if it can allow the transpose to pass through, otherwise return False. 
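+        For example, ``concat`` can let a transpose pass through by remapping its
+        ``axis`` input through the permutation, whereas an op whose semantics depend
+        on the exact layout, such as ``reshape``, generally cannot.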
+ + :return: bool + """ + raise NotImplementedError("This function must be implemented by each op") + + def update(self): + """ + A method that updates some attribute of the axis op, + based on the transpose axes value. + This method only gets called if "can_transpose_pass" returns True. + + Update the op such that the output %i2 should be equal to %o2 + + Before: + %i_1 = transpose_op(%i_0, perm=transpose_axes) + %i2 = op(%i1) + + After: + %o1 = op_updated(%i0) + %o2 = transpose_op(%o1, perm=transpose_axes) + + :return: None + """ + raise NotImplementedError("This function must be implemented by each op") + + @staticmethod + def _find_transpose_compliment(perm): + """ + return the permutation value that when applied will reverse the + effect of the given permutation. + + e.g.: if perm == (1, 2, 3, 0), then return (3, 0, 1, 2), which will undo the + first permutation's effect + """ + rank = len(perm) + all_positive_perm = [p + rank if p < 0 else p for p in perm] + perm_inverse = [0] * rank + for i in range(rank): + perm_inverse[i] = all_positive_perm.index(i) + return perm_inverse + + +class _HypotheticalValue: + """ + A hypothetical value that simply wraps a Var. Actual Var it wraps doesn't really matter, as + its mainly for debugging. + This class really exists to differentiate a "_LazyTransposeHypotheticalValue" type with a + non-"_LazyTransposeHypotheticalValue" type. + """ + + def __init__(self, var=None): + self.value = var # type : Var + + +class _LazyTransposeHypotheticalValue: + """ + A hypothetical value that represents a transpose op on top of a hypothetical value, or a + collection of transpose_ops, which have the same "perm" parameter. + """ + + def __init__(self, hypothetical_value, transpose_ops, perm): + # Input hypothetical value to the transpose op. + # When there are multiple transpose ops, this is the incoming hypothetical value to any one of those + self.wrapped_hypothetical_value = hypothetical_value # type : _HypotheticalValue + + if not isinstance(hypothetical_value, _HypotheticalValue): + raise ValueError( + "transpose optimization pass: incorrect type passed for hypothetical_value" + ) + + for op in transpose_ops: + if op.op_type != "transpose": + raise ValueError( + "transpose optimization pass: _LazyTransposeHypotheticalValue can only be made with transpose ops" + ) + perm_op = list(op.inputs["perm"].val) + if perm_op != perm: + raise ValueError( + "transpose optimization pass: _LazyTransposeHypotheticalValue can only be made with transpose ops with the same 'perm' values" + ) + + self.perm = perm # type : list[int], perm parameter of all the transpose ops + self.transpose_ops = transpose_ops # type : Set(op) + + +class _TransposeOptimization: + _DEBUG = False # Set to true to plot the block before and after the transformation. + + # Dictionary from axis update op to its class + # This is filled in by child classes of the class "TransformAxisUpdateOps". + _AXIS_UPDATE_OPS = dict() + + # TODO: instead of a hard-coded set, use op-traits + # These are the ops that satisfy the following property: + # - single non constant input + # - single output + # - non rank changing + # - doesn't need to be updated of a transpose passes through it. i.e. 
+ # Transpose(op(x)) == op(Transpose(x)) + _UNARY_LIKE_OP_TYPES = { + "relu", + "log", + "relu6", + "abs", + "acos", + "asin", + "atan", + "atanh", + "ceil", + "clip", + "cos", + "cosh", + "erf", + "exp", + "exp2", + "floor", + "identity", + "logical_not", + "round", + "rsqrt", + "sign", + "sin", + "sinh", + "sqrt", + "square", + "pow", + "tan", + "tanh", + "threshold", + "clamped_relu", + "elu", + "gelu", + "leaky_relu", + "linear_activation", + "scaled_tanh", + "sigmoid", + "sigmoid_hard", + "softplus", + "softplus_parametric", + "softsign", + "thresholded_relu", + } + + def __init__(self, block): + self.block = block + + # for each var in the block, this dictionary stores the hypothetical value that is assigned to it during + # graph traversal + self.var_to_hypothetical_value = ( + {} + ) # type : var : _HypotheticalValue or _LazyTransposeHypotheticalValue + # start out by filling this dictionary with all the inputs of the block + for _, input_var in block.inputs.items(): + self.var_to_hypothetical_value[input_var] = _HypotheticalValue(input_var) + + # Dictionaries below are used to store transpose cancellation/fusion information. + # These are filled during the traversal of the graph, + # after which they are used by the `_apply_transform` method + + # transpose op to the list of transpose ops that are its compliments and can be cancelled away with it + self.transpose_op_to_cancel_ops = defaultdict(lambda: []) # type : op : List[op] + + # transpose op to the list of ops before which it has to materialize, i.e. the root transpose op + # can be moved downstream in the graph, as far as these materialize ops + self.transpose_op_to_materialize_ops = defaultdict( + lambda: [] + ) # type : op : List[Tuple(op, Var)] + + # list of the ops that need to be updated (either their axis parameter or one of their constant inputs) + # if the transpose op is fused away or moved downstream in the graph + self.transpose_op_to_axis_update_ops = defaultdict(lambda: []) # type : op : List[op] + + # for book keeping + self.ops_updated = set() + self.materialized_ops_handled = set() + self.transpose_ops_removed = set() + + # save the output sinks' information + self.old_output_vars = [] + self.output_sink_ops = [] + + # We modify the graph temporarily for outputs + self._add_output_sinks() + + def _add_output_sinks(self): + # We add an identity sink for all outputs. 
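+        # This guarantees that every block output has a consuming op, so outputs can be
+        # treated uniformly as materialization points during traversal. The sinks are
+        # stripped again at the end of apply_transform().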
+ self.old_output_vars = {var: var.name for var in self.block.outputs} + new_outputs = [] + output_sinks_var = {} + for out_var in self.block.outputs: + if out_var not in output_sinks_var: + out_sink = mb.identity(x=out_var) + output_sinks_var[out_var] = out_sink + else: + out_sink = output_sinks_var[out_var] + new_outputs.append(out_sink) + self.output_sink_ops.append(out_sink.op) + self.block.set_outputs(new_outputs) + + def _visit_unary_like_op(self, op, input_var=None): + # pass the input var's hypothetical_value to the output var's, since shape invariant ops do + # not modify the incoming hypothetical_value + + if input_var is None: + input_var = op.inputs["x"] + + if len(op.outputs) > 1: + msg = ( + "transpose optimization pass: op '{}', of type = '{}', has multiple outputs, hence it" + "cannot be handled like a unary op" + ) + raise ValueError(msg.format(op.name, op.op_type)) + self.var_to_hypothetical_value[op.outputs[0]] = self.var_to_hypothetical_value[input_var] + + def _visit_materialize_op(self, op): + # this is the catch all category of ops + # these are the "not-lazy-transpose-pass-through" kind of ops + # output hypothetical_value is same as the vars + for out_var in op.outputs: + self.var_to_hypothetical_value[out_var] = _HypotheticalValue(out_var) + + # check for the inputs + # if there is a lazy transpose hypothetical value as an input, + # all the transpose ops it hold, + # need to be materialized here now, i.e., we should update "transpose_op_to_materialize_ops" + for input_var in self._get_input_vars(op): + input_hypothetical_value = self.var_to_hypothetical_value[input_var] + if isinstance(input_hypothetical_value, _LazyTransposeHypotheticalValue): + all_lazy_transpose_ops = input_hypothetical_value.transpose_ops + for transpose_op in all_lazy_transpose_ops: + self.transpose_op_to_materialize_ops[transpose_op].append((op, input_var)) + + def _visit_axis_update_op(self, op): + """ + Check: + - at least one of the non-constant inputs to this op is of type _LazyTransposeHypotheticalValue + - for all non-constant inputs, that are of type _LazyTransposeHypotheticalValue, they + have the same perm value. + These checks are common for all "axis update" ops. 
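+
+        If either check fails, the op is handled as a materialize op instead.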
+ """ + input_vars = self._get_input_vars(op, only_nonconst_vars=True) + perm = None + num_lazy_input_vars = 0 + for var in input_vars: + hypothetical_value = self.var_to_hypothetical_value[var] + if isinstance(hypothetical_value, _LazyTransposeHypotheticalValue): + num_lazy_input_vars += 1 + if perm is None: + perm = hypothetical_value.perm + elif perm != hypothetical_value.perm: + self._visit_materialize_op(op) + return + + if num_lazy_input_vars == 0: + self._visit_materialize_op(op) + return + + # checks specific to the op type + op_cls = self._AXIS_UPDATE_OPS.get(op.op_type, None) + if op_cls is None: + raise ValueError("Transform class for op of type '{}' not found".format(op.op_type)) + + if not op_cls( + **{ + "op": op, + "transpose_axes": perm, + "var_to_hypothetical_value_dict": self.var_to_hypothetical_value, + } + ).can_transpose_pass(): + self._visit_materialize_op(op) + return + + # add this op to the dictionary "transpose_op_to_axis_update_ops" + # and update self.var_to_hypothetical_value[op.outputs[0]] + all_lazy_transpose_ops = set() + wrapped_hypothetical_value = None + for var in input_vars: + input_hypothetical_value = self.var_to_hypothetical_value[var] + if isinstance(input_hypothetical_value, _LazyTransposeHypotheticalValue): + all_lazy_transpose_ops.update(input_hypothetical_value.transpose_ops) + wrapped_hypothetical_value = input_hypothetical_value.wrapped_hypothetical_value + + for transpose_op in all_lazy_transpose_ops: + self.transpose_op_to_axis_update_ops[transpose_op].append(op) + + for output in op.outputs: + self.var_to_hypothetical_value[output] = _LazyTransposeHypotheticalValue( + wrapped_hypothetical_value, + all_lazy_transpose_ops, + perm, + ) + + @staticmethod + def _do_transposes_cancel(perm1, perm2): + if len(perm1) != len(perm2): + return False + x = list(range(len(perm1))) + x1 = [x[i] for i in perm1] + x2 = [x1[i] for i in perm2] + if x == x2: + return True + return False + + def _visit_transpose_op(self, op): + input_var = op.inputs["x"] + if op.inputs["perm"].val is None: + self._visit_materialize_op(op) + return + perm = list(op.inputs["perm"].val) + input_hypothetical_value = self.var_to_hypothetical_value[input_var] + + """ + There are 3 cases to handle: + + 1. input type == _HypotheticalValue + 2. input type == _LazyTransposeHypotheticalValue and this op is the transpose compliment of it + 3. input type == _LazyTransposeHypotheticalValue and this op is NOT the transpose compliment of it + """ + + if isinstance(input_hypothetical_value, _HypotheticalValue): + # case 1 + # the input is not a lazy transpose. + # Since the current node is a transpose, there are two sub-cases. + # a) It's a output node. We materialize it directly. + # b) It might get cancelled downstream, so make the output var's + # hypothetical_value a lazy transpose + if op.outputs[0] in self.old_output_vars: + self._visit_materialize_op(op) + else: + self.var_to_hypothetical_value[op.outputs[0]] = _LazyTransposeHypotheticalValue( + input_hypothetical_value, set([op]), perm + ) + return + + # input is a Lazy transpose hypothetical value. 
Lets first check whether the current + # transpose cancels it or not + do_cancel = self._do_transposes_cancel(input_hypothetical_value.perm, perm) + if do_cancel: + # case 2 + # transposes cancel, so now the hypothetical_value of the output will + # be same as the hypothetical value wrapped inside the upstream lazy transpose + self.var_to_hypothetical_value[ + op.outputs[0] + ] = input_hypothetical_value.wrapped_hypothetical_value + # also update the dictionary "transpose_op_to_cancel_ops" + all_lazy_transpose_ops = input_hypothetical_value.transpose_ops + for transpose_op in all_lazy_transpose_ops: + self.transpose_op_to_cancel_ops[transpose_op].append(op) + else: + # case 3 + # transposes don't cancel + # this is same as a materialize op then + self._visit_materialize_op(op) + + def _visit_op(self, op): + + input_vars = self._get_input_vars(op) + for var in input_vars: + assert ( + var in self.var_to_hypothetical_value + ), "transpose optimization pass: hypothetical value for var '{}', not found".format( + var.name + ) + + if op in self.output_sink_ops: + self._visit_materialize_op(op) + elif op.op_type in self._UNARY_LIKE_OP_TYPES: + self._visit_unary_like_op(op) + elif op.op_type in self._AXIS_UPDATE_OPS: + self._visit_axis_update_op(op) + elif op.op_type == "transpose": + self._visit_transpose_op(op) + elif op.op_type == "const": + self.var_to_hypothetical_value[op.outputs[0]] = _HypotheticalValue(op.outputs[0]) + else: + self._visit_materialize_op(op) + + def block_traversal(self): + + # Since the ops are already organized in a topological manner, + # simply iterate through all the ops + + for op in self.block.operations: + self._visit_op(op) + + def _verify_cancellable_transposes(self): + + # invert "transpose_op_to_cancel_ops" + transpose_cancel_ops_to_starting_transpose_set = defaultdict(lambda: set()) + for op, cancel_ops_list in self.transpose_op_to_cancel_ops.items(): + for cancel_op in cancel_ops_list: + transpose_cancel_ops_to_starting_transpose_set[cancel_op].update(set([op])) + + for op in transpose_cancel_ops_to_starting_transpose_set: + assert ( + op not in self.transpose_op_to_cancel_ops + ), "transpose reduction optimization: transpose op '{}' cannot be both a starting and cancel op".format( + op.name + ) + + # invert "transpose_op_to_materialize_ops" + materizalize_ops_to_starting_transpose_set = defaultdict(lambda: set()) + for op, materialize_ops in self.transpose_op_to_materialize_ops.items(): + for materialize_op, edge in materialize_ops: + materizalize_ops_to_starting_transpose_set[materialize_op].update(set([op])) + + # the starting transpose op may not be in "transpose_op_to_cancel_ops" + # but it needs to be removed if it materializes later, hence we need to add it + # to the "transpose_op_to_cancel_ops", with an empty value, i.e. no other ops to cancel because of it + if op not in self.transpose_op_to_cancel_ops: + self.transpose_op_to_cancel_ops[op] = [] + + # (starting transpose ops) and (transpose cancel ops + materialize ops) form a bipartite graph. 
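+        # The removal decision must be made per connected component: a starting
+        # transpose can only be removed if all of its cancel ops are removed, and
+        # vice versa, so each component is kept or dropped as a whole.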
+ # Find the connected components of this graph, by doing a BFS traversal + connected_components = [] # List[(Set(op), Set(op)), Set(op)] + visited = {} + for op in list(self.transpose_op_to_cancel_ops.keys()): + if op in visited: + continue + visited[op] = 1 + set_a = set([op]) # set of starting transpose ops + set_b1 = set() # set of transpose cancel ops connected to set_a + set_b2 = set() # set of materialize ops connected to set_a + queue = [] + queue.extend(self.transpose_op_to_cancel_ops[op]) + if op in self.transpose_op_to_materialize_ops: + materialize_ops_list = list(list(zip(*self.transpose_op_to_materialize_ops[op]))[0]) + queue.extend(materialize_ops_list) + while len(queue) > 0: + o = queue.pop(0) + visited[o] = 1 + # enque nodes connected to o + if o in self.transpose_op_to_cancel_ops: + set_a.update(set([o])) + for neighbor_op in self.transpose_op_to_cancel_ops[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + if o in self.transpose_op_to_materialize_ops: + materialize_ops_list = list( + list(zip(*self.transpose_op_to_materialize_ops[o]))[0] + ) + for neighbor_op in materialize_ops_list: + if neighbor_op not in visited: + queue.append(neighbor_op) + elif o in transpose_cancel_ops_to_starting_transpose_set: + set_b1.update(set([o])) + for neighbor_op in transpose_cancel_ops_to_starting_transpose_set[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + else: + set_b2.update(set([o])) + for neighbor_op in materizalize_ops_to_starting_transpose_set[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + connected_components.append((set_a, set_b1, set_b2)) + + starting_ops_to_remove = set() # starting ops to remove from the optimization list + + # now for each connected component, make a decision whether to cancel it or not + # (either all transpose ops in a set get cancelled or they don't) + for op_set, op_cancel_set, materialize_op_set in connected_components: + + block_output = False + # check that output is not directly connected to a starting transpose op + for op in op_set: + if op.outputs[0] in self.block.outputs: + starting_ops_to_remove.update(op_set) + block_output = True + break + if block_output: + continue + + materizalize_set = set(list(materialize_op_set)) + if len(materizalize_set) >= len(op_set) + len(op_cancel_set): + starting_ops_to_remove.update(op_set) + + # remove ops + for op in starting_ops_to_remove: + self.transpose_op_to_cancel_ops.pop(op, None) + + def _remove_transpose_ops(self, starting_transpose_op): + + perm = list(starting_transpose_op.inputs["perm"].val) + starting_transpose_op_out_var = starting_transpose_op.outputs[0] + starting_transpose_op_input_var = starting_transpose_op.inputs["x"] + + # update all the "axis_update" ops + for op in self.transpose_op_to_axis_update_ops.get(starting_transpose_op, []): + if op not in self.ops_updated: + op_cls = self._AXIS_UPDATE_OPS.get(op.op_type, None) + op_cls( + **{ + "op": op, + "transpose_axes": perm, + "var_to_hypothetical_value_dict": self.var_to_hypothetical_value, + } + ).update() + self.ops_updated.add(op) + + # short circuit starting_transpose_op and its cancel ops + + to_be_removed_ops = [] + name_changed_vars = set() + + for op in [starting_transpose_op] + self.transpose_op_to_cancel_ops[starting_transpose_op]: + if op in self.transpose_ops_removed: + continue + + to_be_removed_ops.append(op) + self.transpose_ops_removed.add(op) + + input_var = op.inputs["x"] # input to the transpose op + output_var = op.outputs[0] # output of the transpose op + 
+            parent_op = input_var.op  # parent op of the transpose op
+
+            if output_var in self.old_output_vars:
+                # output is a block output, so this must be one of the "edge" transpose complement ops.
+                # We need to set `input_var` as the block output var.
+                # Change the name of the input_var to match the block output if input_var is not changed.
+                # If the same input_var appears in the outputs twice, we can't rename it twice,
+                # so we insert an identity op to match the name.
+                if input_var in self.block.inputs.values():
+                    input_var = mb.identity(x=input_var, before_op=op, name=output_var.name)
+                    parent_op = None  # set anchor op as None.
+                elif input_var not in name_changed_vars:
+                    input_var.name = output_var.name
+                    input_var.op.name = output_var.op.name
+                    name_changed_vars.update([input_var])
+                else:
+                    input_var = mb.identity(x=input_var, before_op=op, name=output_var.name)
+                    parent_op = input_var.op
+
+            # connect all the child ops of the output_var to the parent of the transpose op.
+            self.block.replace_uses_of_var_after_op(
+                anchor_op=parent_op,
+                old_var=output_var,
+                new_var=input_var,
+                no_check_var_types=True,
+            )
+
+        """
+        Insert a transpose op JUST before each one of the materialize ops
+        i.e.
+        Given: %i1 = op(...)
+               ...
+               ... = materialize_op(..., %i1 ,...)
+               ...
+
+        Result: %i1 = op(...)
+                ...
+                %i2 = transpose_op(%i1, %perm)
+                ... = materialize_op(..., %i2 ,...)
+                ...
+        """
+        for op, input_var in self.transpose_op_to_materialize_ops.get(starting_transpose_op, []):
+            if (op, input_var) in self.materialized_ops_handled:
+                continue
+
+            self.materialized_ops_handled.add((op, input_var))
+            if input_var == starting_transpose_op_out_var:
+                # materialize op is connected to the starting transpose op;
+                # in this case, connect to its parent
+                if op in self.output_sink_ops:
+                    continue
+                i1 = starting_transpose_op_input_var
+            else:
+                i1 = input_var
+
+            if op in self.output_sink_ops:
+                # The input_var of an output sink is itself an output. We can safely
+                # modify the name of the input_var since it should only be consumed
+                # by the block output here.
+                if i1 not in name_changed_vars:
+                    x = mb.transpose(x=i1, perm=perm, before_op=op, name=i1.name)
+                    i1.name = "_before_transpose_op_" + x.op.name
+                    i1.op.name = "_before_transpose_op_" + x.op.name
+                else:
+                    x = mb.transpose(x=i1, perm=perm, before_op=op, name=self.old_output_vars[i1])
+            else:
+                x = mb.transpose(x=i1, perm=perm, before_op=op)
+
+            self.block.replace_uses_of_var_after_op(
+                anchor_op=x.op,
+                end_op=op,
+                old_var=i1,
+                new_var=x,
+                no_check_var_types=True,
+            )
+
+        self.block.remove_ops(to_be_removed_ops)
+
+    def apply_transform(self):
+        """
+        Take in the data collected during graph traversal
+        and transform the graph by cancelling out transpose ops that can be removed.
+        """
+
+        logger.debug("Block before optimize transpose transform:\n{}".format(self.block))
+        if self._DEBUG:
+            import graphviz
+
+            graphviz.Source(
+                self.block.get_dot_string(
+                    highlight_debug_op_names=[], highlight_debug_op_types=["transpose"]
+                )
+            ).view(filename="/tmp/block_before_reduce_transpose")
+
+        """
+        First check which transposes can be cancelled.
+        After this function call we get an updated dictionary "transpose_op_to_cancel_ops"
+        with only the transpose ops that can really be cancelled in the graph.
+        Reasons not to cancel:
+        - the materialize ops outnumber the cancel ops, so removing the transpose would instead
+          end up increasing the total count of transposes
+        - removing a transpose op can only succeed if all of its cancel ops are removed; removing
+          all the cancel ops only succeeds if all of their starting transpose ops are removed, and
+          so on. This check is also done in "_verify_cancellable_transposes()"
+        """
+        self._verify_cancellable_transposes()
+
+        # apply transform
+        for transpose_op in self.transpose_op_to_cancel_ops:
+            self._remove_transpose_ops(transpose_op)
+        self.block.set_outputs([sink_op.x for sink_op in self.output_sink_ops])
+        self.block.remove_ops(list(self.output_sink_ops))
+
+        if self._DEBUG:
+            graphviz.Source(
+                self.block.get_dot_string(
+                    highlight_debug_op_names=[], highlight_debug_op_types=["transpose"]
+                )
+            ).view(filename="/tmp/block_after_reduce_transpose")
+
+        logger.debug("Block after optimize transpose transform:\n{}".format(self.block))
+
+        for op in self.block.operations:
+            op.type_value_inference(overwrite_output=True)
+
+    @staticmethod
+    def register_axis_update_op(ops: List[Text]):
+        """
+        :param ops: Ops that will be registered. For example: the class "_TransformReduceMean" can
+            be used to register ops including "reduce_prod", "reduce_sum", etc.
+        """
+
+        def class_wrapper(op_update_cls):
+            for op_type in ops:
+                if op_type in _TransposeOptimization._AXIS_UPDATE_OPS:
+                    raise ValueError(
+                        "Update class for op of type '{}' already defined".format(op_type)
+                    )
+                _TransposeOptimization._AXIS_UPDATE_OPS[op_type] = op_update_cls
+            return op_update_cls
+
+        return class_wrapper
+
+    @staticmethod
+    def _get_input_vars(op, only_nonconst_vars=False) -> List[Var]:
+        input_vars = []
+        for name, val in op.inputs.items():
+            if isinstance(val, Var):
+                if only_nonconst_vars:
+                    if val.op and val.op.op_type == "const":
+                        continue
+                input_vars.append(val)
+            elif isinstance(val, (list, tuple)):
+                for var in val:
+                    if not isinstance(var, Var):
+                        raise ValueError(
+                            f"transpose optimization pass: unrecognized input type of "
+                            f"op='{op.name}', input='{name}'"
+                        )
+                    if only_nonconst_vars:
+                        if var.op and var.op.op_type == "const":
+                            continue
+                    input_vars.append(var)
+            else:
+                raise ValueError(
+                    f"transpose optimization pass: unrecognized input type of "
+                    f"op='{op.name}', input='{name}'"
+                )
+        return input_vars
+
+
+@_TransposeOptimization.register_axis_update_op(ops=["concat"])
+class _TransformConcat(TransformAxisUpdateOps):
+    def __init__(self, **kwargs):
+        super(_TransformConcat, self).__init__(**kwargs)
+        self.axis_var = self.op.inputs["axis"]
+
+    def can_transpose_pass(self):
+        # Check that all non-const inputs are of type _LazyTransposeHypotheticalValue.
+        # That they all have the same perm value has already been checked before.
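+        # (Illustrative, hypothetical values: if every non-const input reached
+        # this concat through transpose(perm=[0, 2, 3, 1]), removing those
+        # transposes means a concat on axis=1 must become a concat on
+        # axis=transpose_axes[1] == 2, which is the remapping update() performs.)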
+        input_vars = _TransposeOptimization._get_input_vars(self.op, only_nonconst_vars=True)
+        for var in input_vars:
+            hypothetical_value = self.var_to_hypothetical_value_dict[var]
+            if not isinstance(hypothetical_value, _LazyTransposeHypotheticalValue):
+                return False
+        if self.axis_var.val is not None:
+            return True
+        return False
+
+    def update(self):
+        new_axis_val = self.transpose_axes[self.axis_var.val]
+
+        # needed if there are constant inputs to the concat op
+        self._update_const_inputs()
+
+        # insert a new constant for the new axis, JUST before the op
+        with self.op.enclosing_block:
+            new_axis_var = mb.const(val=new_axis_val, before_op=self.op)
+
+        self.op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=new_axis_var.op,
+            end_op=self.op,
+            old_var=self.axis_var,
+            new_var=new_axis_var,
+            no_check_var_types=True,
+        )
+
+    def _update_const_inputs(self):
+        transpose_perm_for_const = [0] * len(self.transpose_axes)
+        for i, axis in enumerate(self.transpose_axes):
+            transpose_perm_for_const[axis] = i
+
+        # if there is a constant input, transpose it
+        inputs = list(self.op.inputs["values"])
+        for input_var in inputs:
+            if input_var.op.op_type == "const":
+                const_val = input_var.val
+                new_const_val = np.transpose(const_val, transpose_perm_for_const)
+                # insert a new constant JUST before the op
+                with self.op.enclosing_block:
+                    new_const_input_var = mb.const(val=new_const_val, before_op=self.op)
+                self.op.enclosing_block.replace_uses_of_var_after_op(
+                    anchor_op=new_const_input_var.op,
+                    end_op=self.op,
+                    old_var=input_var,
+                    new_var=new_const_input_var,
+                    no_check_var_types=True,
+                )
+
+
+@_TransposeOptimization.register_axis_update_op(ops=["split"])
+class _TransformSplit(_TransformConcat):
+    def __init__(self, **kwargs):
+        super(_TransformSplit, self).__init__(**kwargs)
+
+    # The split op is handled the same as the concat op, except it does not need
+    # to transform const inputs
+    def _update_const_inputs(self):
+        pass
+
+
+@_TransposeOptimization.register_axis_update_op(ops=["pad"])
+class _TransformPad(TransformAxisUpdateOps):
+    def __init__(self, **kwargs):
+        super(_TransformPad, self).__init__(**kwargs)
+        self.pad_var = self.op.inputs["pad"]
+        self.pad_op = self.pad_var.op
+        self.mode = self.op.mode.val
+        self.pad_amounts_new = None
+
+    def _compute_new_pad_values(self):
+        pad_amounts = np.reshape(self.pad_var.val, [-1, 2])
+        rank_diff = len(self.transpose_axes) - pad_amounts.shape[0]
+        self.pad_amounts_new = copy.deepcopy(pad_amounts)
+        # append "rank_diff" rows of zeros to the top
+        self.pad_amounts_new = np.concatenate(
+            (np.zeros((2 * rank_diff)).reshape(-1, 2), self.pad_amounts_new)
+        )
+        self.pad_amounts_new = self.pad_amounts_new.astype(pad_amounts.dtype)
+        pad_amounts = np.concatenate((np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts))
+        for i, axis in enumerate(self.transpose_axes):
+            self.pad_amounts_new[axis][0] = pad_amounts[i][0]
+            self.pad_amounts_new[axis][1] = pad_amounts[i][1]
+        # get the top "rank_diff" rows
+        top_rows = self.pad_amounts_new[:rank_diff, :]
+        if not np.all(top_rows == 0):
+            return False
+        # cut "rank_diff" rows from the top
+        self.pad_amounts_new = self.pad_amounts_new[rank_diff:, :]
+        self.pad_amounts_new = self.pad_amounts_new.flatten()
+        return True
+
+    def can_transpose_pass(self):
+        if (
+            len(_TransposeOptimization._get_input_vars(self.op, only_nonconst_vars=True)) != 1
+            or self.pad_op.op_type != "const"
+        ):
+            return False
+        if len(self.transpose_axes) < 2:
+            return False
+        if not self._compute_new_pad_values():
+            return False
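+        # (Illustrative, hypothetical numbers: with transpose_axes=[0, 3, 1, 2]
+        # and pad=[0, 0, 1, 1, 2, 2, 0, 0], _compute_new_pad_values() moves each
+        # (before, after) pair back to its pre-transpose axis, producing
+        # pad_amounts_new=[0, 0, 2, 2, 0, 0, 1, 1].)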
+        # check that if the mode is not constant, the updated padding
+        # stays limited to the last 2 axes
+        if self.mode != "constant" and not np.all(self.pad_amounts_new[:-4] == 0):
+            return False
+        return True
+
+    def update(self):
+        self._compute_new_pad_values()
+        # insert a new constant for the pad val, JUST before the op
+        with self.op.enclosing_block:
+            new_pad_var = mb.const(val=self.pad_amounts_new, before_op=self.op)
+        self.op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=new_pad_var.op,
+            end_op=self.op,
+            old_var=self.pad_var,
+            new_var=new_pad_var,
+            no_check_var_types=True,
+        )
+
+
+@_TransposeOptimization.register_axis_update_op(
+    ops=[
+        "reduce_l1_norm",
+        "reduce_l2_norm",
+        "reduce_max",
+        "reduce_log_sum",
+        "reduce_log_sum_exp",
+        "reduce_mean",
+        "reduce_min",
+        "reduce_prod",
+        "reduce_sum",
+        "reduce_sum_square",
+    ]
+)
+class _TransformReduceMean(TransformAxisUpdateOps):
+    def __init__(self, **kwargs):
+        super(_TransformReduceMean, self).__init__(**kwargs)
+        self.axes_var = self.op.inputs["axes"]
+        self.axes_op = self.axes_var.op
+
+    def can_transpose_pass(self):
+        # allow the transpose to push through only if keep_dims is True, since that doesn't change the rank
+        if self.op.inputs["keep_dims"].val:
+            if self.axes_op.op_type == "const":
+                return True
+        return False
+
+    def update(self):
+        # update the axes of the op
+        old_axes_val = self.axes_var.val
+        new_axes_val = [0] * len(old_axes_val)
+        for i, axis in enumerate(old_axes_val):
+            new_axes_val[i] = self.transpose_axes[axis]
+
+        # insert a new constant for the axes, JUST before the op
+        with self.op.enclosing_block:
+            new_axis_var = mb.const(val=new_axes_val, before_op=self.op)
+
+        self.op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=new_axis_var.op,
+            end_op=self.op,
+            old_var=self.axes_var,
+            new_var=new_axis_var,
+            no_check_var_types=True,
+        )
+
+
+@_TransposeOptimization.register_axis_update_op(
+    ops=["add", "mul", "sub", "real_div", "maximum", "minimum"]
+)
+class _TransformAdd(TransformAxisUpdateOps):
+    def __init__(self, **kwargs):
+        super(_TransformAdd, self).__init__(**kwargs)
+        # self.tranpose_input: the input coming from an upstream transpose op. If both inputs are
+        #     connected to an upstream transpose, this will be set to one of them.
+        # self.other_input: the other input, which is not coming from a transpose.
+        is_x_input_lazy_transpose = isinstance(
+            self.var_to_hypothetical_value_dict[self.op.x], _LazyTransposeHypotheticalValue
+        )
+        is_y_input_lazy_transpose = isinstance(
+            self.var_to_hypothetical_value_dict[self.op.y], _LazyTransposeHypotheticalValue
+        )
+        if is_x_input_lazy_transpose and is_y_input_lazy_transpose:
+            self.other_input = None
+            self.tranpose_input = self.op.x
+        elif is_y_input_lazy_transpose and not is_x_input_lazy_transpose:
+            self.other_input = self.op.x
+            self.tranpose_input = self.op.y
+        elif is_x_input_lazy_transpose and not is_y_input_lazy_transpose:
+            self.other_input = self.op.y
+            self.tranpose_input = self.op.x
+        else:
+            # we should not be here, since this class is only invoked
+            # when there is at least one input var of type _LazyTransposeHypotheticalValue
+            self.tranpose_input = None
+            self.other_input = None
+
+    def can_transpose_pass(self):
+        """
+        Return True if one of the following is true:
+        - (scenario 1) both inputs are of type _LazyTransposeHypotheticalValue, with the same perm value
+        - one input is of type _LazyTransposeHypotheticalValue and the other satisfies one of the following:
+            - (scenario 2) it is constant.
+              In this case, the constant can be updated accordingly to allow the transpose to pass through
+            - (scenario 3) if it is non-constant, then all of the following must be true:
+                - its shape is fully defined
+                - the transpose complement operation on the other input can be expressed via a reshape. This can
+                  be done if there is only 1 non-unit dimension in its shape, or, if there is more than 1 non-unit dim,
+                  the transpose complement operation only permutes the unit dimensions.
+
+        In scenario 3, the transpose will be removed by adding an extra static reshape.
+        This is based on the assumption that a static reshape op will be less expensive than a transpose.
+        An example of scenario 3 is displayed below:
+
+        Input pattern:
+
+        (shape=(10, 20, 30))
+        |
+        |
+        V
+        Transpose op
+        (shape = (20, 30, 10))
+        |
+        |
+        V
+        this op <--------- (shape = (10,)) (other non const input)
+        |
+        V
+
+
+        After transpose passes through:
+
+        (shape=(10, 20, 30))
+        |
+        |
+        V
+        this op <--------- (shape = (10, 1, 1)) Reshape op <---------- (shape = (10,)) (other non const input)
+        |
+        V
+        Transpose op
+        (shape = (20, 30, 10))
+        |
+        V
+
+        """
+
+        # ---------------------
+        # check for scenario 1
+        # ---------------------
+        # are both inputs _LazyTransposeHypotheticalValue?
+        if self.other_input is None:
+            return True
+
+        # ---------------------
+        # check for scenario 2
+        # ---------------------
+        # is the second input a constant?
+        rank = len(self.tranpose_input.shape)
+        if len(self.transpose_axes) != rank:
+            return False
+        other_input_shape = self.other_input.shape
+        if any_symbolic(other_input_shape):
+            return False
+        if len(other_input_shape) > rank:
+            return False
+        if isinstance(self.other_input.val, (np.ndarray, np.generic)):
+            return True
+
+        # ---------------------
+        # check for scenario 3
+        # ---------------------
+        # can the other input be "reshaped" to allow the transpose to pass through?
+        if any_symbolic(self.other_input.shape):
+            return False
+        transpose_compliment_perm = self._find_transpose_compliment(self.transpose_axes)
+        # make the rank of the other input the same as that of the transpose input,
+        # by broadcasting
+        if len(other_input_shape) < rank:
+            other_input_shape = [1] * (rank - len(other_input_shape)) + list(other_input_shape)
+
+        # how many non-unit dimensions are in the other input's shape?
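+        # (Example with hypothetical shapes: for rank == 3, a broadcastable
+        # shape like (10,) is padded to [1, 1, 10]; it has at most one non-unit
+        # dim, so count(1) is rank or rank - 1 and the transpose complement can
+        # be realized as a plain reshape.)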
+ if other_input_shape.count(1) in [rank, rank - 1]: + # 0 or 1 non unit dimension + return True + else: + # more than 1 non unit dimensions in other input + # check if transpose is moving only dimensions that have values 1 + # if true, then the transpose compliment can be expressed via a reshape + for i, axis in enumerate(transpose_compliment_perm): + if i != axis and other_input_shape[axis] != 1: + return False + return True + + def update(self): + # ---------------------- + # update for scenario 1 + # ---------------------- + if self.other_input is None: + # nothing to update + return + + # -------------------------- + # update for scenario 2 & 3 + # -------------------------- + if len(self.other_input.shape) == 0: + # other input is a scalar, no need to modify it + return + + # broadcast the shape of other input to match the rank + rank = len(self.tranpose_input.shape) + other_input_shape = self.other_input.shape + if len(other_input_shape) < rank: + other_input_shape = [1] * (rank - len(other_input_shape)) + list(other_input_shape) + + # find new shape after transpose compliment + transpose_compliment_perm = self._find_transpose_compliment(self.transpose_axes) + new_shape = [0] * rank + for i, axis in enumerate(transpose_compliment_perm): + new_shape[i] = other_input_shape[axis] + + if self.other_input.val is not None: + # update the const (scenario 2) + const_value = self.other_input.val + new_const_val = np.transpose( + const_value.reshape(other_input_shape), transpose_compliment_perm + ) + # insert a new constant JUST before the op + with self.op.enclosing_block: + new_const_var = mb.const(val=new_const_val, before_op=self.op) + + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_const_var.op, + end_op=self.op, + old_var=self.other_input, + new_var=new_const_var, + no_check_var_types=True, + ) + else: + # insert a reshape (scenario 3) + with self.op.enclosing_block: + new_other_var = mb.reshape(x=self.other_input, shape=new_shape, before_op=self.op) + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_other_var.op, + end_op=self.op, + old_var=self.other_input, + new_var=new_other_var, + no_check_var_types=True, + ) + + +@register_pass(namespace="common") +class reduce_transposes(AbstractGraphPass): + """ + Reduce transposes when it is applicable. For example: + + .. code-block:: + + # Example 1 + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ---> out + + Output graph: + input -----> identity -----> out + + # Example 2 + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->transpose(axis=[0,2,3,1])--->out + + Output graph: + input----->relu----->out + + # Example 3 + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,3,1])----->relu---->pool----->out1 + | + | + --->relu----->log---->transpose(axis=[0,3,1,2])---->out2 + + Output graph: + input(shape=10,2,3,5)----->relu---->transpose(axis=[0,2,3,1])---->pool----->out1 + | + | + --->relu----->log---->out2 + + Please see ``TransposeOptimizationPass`` for more details. + + Notes + ----- + + This pass is divided into 3 phases: + + `1st phase:` Information gathering. + + - Plug in Identity ops for all output nodes. This allows us to treat all ops uniformly during traversal. + - Block is traversed in the topological order, starting from the ops connected to the inputs. + - During the traversal, a value is associated with every var in the block. + This value can be either of type ``_HypotheticalValue`` or ``_LazyTransposeHypotheticalValue``. 
+      The main purpose of type ``_HypotheticalValue`` is to indicate that it is `not` of type ``_LazyTransposeHypotheticalValue``.
+    - ``_LazyTransposeHypotheticalValue`` represents either one or multiple transpose ops with the same perm value. This information
+      is stored in this class. It also wraps a ``_HypotheticalValue`` that was the last hypothetical value which was generated
+      prior to the origin of ``_LazyTransposeHypotheticalValue``.
+    - Each op decides which type of hypothetical value to associate with its output vars, based on its op type,
+      attributes, and the types of the hypothetical values of its input vars.
+    - Ops are classified into 4 categories: `unary like`, `axis update`, `transpose`, and `materialize` (for all the rest).
+    - Transpose ops are the ops from which a ``_LazyTransposeHypotheticalValue`` originates.
+        - If the input to it is a ``_HypotheticalValue``, its output will be a ``_LazyTransposeHypotheticalValue``,
+          indicating that this ``transpose`` op is available to get cancelled downstream.
+        - If the input to it is a ``_LazyTransposeHypotheticalValue``, then it is checked whether this op cancels it or not.
+            - If the op cancels it, a ``_HypotheticalValue`` value is generated at the output and the information about this ``transpose`` cancellation
+              is recorded in the dictionary ``transpose_op_to_cancel_ops``.
+            - If the op does not cancel, the current ``transpose`` op is categorized as a `materialize` op. Therefore, the information in
+              dictionary ``transpose_op_to_materialize_ops`` is updated accordingly. The output of the op is now mapped to a
+              ``_HypotheticalValue``.
+    - Unary like ops: These simply transfer their input hypothetical value type to the output.
+    - Axis update ops: If a ``transpose`` can pass through them, they are treated like a unary op and the dictionary
+      ``transpose_op_to_axis_update_ops`` is updated. If the op cannot be updated in any manner to
+      allow a ``transpose`` to pass through, this op is then categorized as a `materialize` op and handled accordingly.
+    - Materialize ops: All ``_LazyTransposeHypotheticalValue`` input vars, if present, materialize here. The output of this op
+      is always of type ``_HypotheticalValue``. If the input is a ``_LazyTransposeHypotheticalValue``, update the dictionary
+      ``transpose_op_to_materialize_ops``.
+    - To treat an op like a unary op, add its type to ``_UNARY_LIKE_OP_TYPES``. In future changes we want to make this process
+      automatic by detecting an op as `unary like` by its "traits".
+    - To treat an op like an `axis update` op, add a class specific to the op implementing the class ``TransformAxisUpdateOps``.
+      For examples, see classes ``_TransformConcat``, ``_TransformPad``, and so on. The dictionary ``AXIS_UPDATE_OPS`` is automatically filled
+      in by the decorator ``_TransposeOptimization.register_axis_update_op``.
+
+    `2nd phase:` Determining which ``transpose`` ops to remove from the graph.
+
+    All ``transpose`` ops that have a corresponding complement op in dict ``transpose_op_to_cancel_ops`` are candidates.
+    However, you need to ensure the following:
+
+    - If a ``transpose`` op is removed, then all of its ``cancel`` ops in ``transpose_op_to_cancel_ops`` must also be removed,
+      to ensure correctness of the graph. The same is true in the reverse direction as well;
+      that is, for every ``cancel`` op that is removed, all its parent ``transpose`` ops upstream must also be removed.
+    - ``transpose`` ops should be removed only if the number of ``cancel`` ops is greater than the number of ``transpose`` ops
+      that would get freshly introduced to the block as a result of materialization ops. Currently in the algorithm,
+      each materialization op/output var (dicts ``transpose_op_to_materialize_ops``/``old_output_vars``)
+      results in one more ``transpose`` op, although this can be further optimized in the future.
+
+    To resolve this, we recognize that nodes consisting of sets ``(a)`` and ``(b)`` form a bipartite graph, where
+    ``(a) ==`` starting ``transpose`` ops (originators of ``_LazyTransposeHypotheticalValue``)
+    and ``(b) ==`` set of ``transpose`` ``cancel`` ops and ``materialize`` ops.
+
+    - In this bipartite graph, we find all the connected components. For each connected component,
+      either the entire set of ``transpose`` ops in it is removed/materialized, or none
+      of them is touched.
+    - Thus for each set, a determination is made based on counting the number of ``cancel`` ops and ``materialize`` ops.
+    - Based on this determination, the final set of ``transpose`` ops to be removed is updated.
+
+    `3rd phase:` Transforming the graph.
+
+    - ``transpose`` starting ops and the ``cancel`` ops are removed.
+    - Axis update ops, affected by these ``transpose`` ops, are updated.
+    - Transposes are materialized; that is, added just before the ``materialize`` ops, which are linked to the starting ``transpose`` ops.
+      The starting ``transpose`` op can be materialized (inserted) multiple times, before each of the ``materialize`` ops downstream.
+    - Block outputs are handled in a similar fashion as the `materialize` ops.
+    - Type inference on all ops is invoked after all the transformations.
+    - All Identity ops that are plugged into the graph to treat outputs as materialized are removed.
+
+    `Debugging`
+
+    If the ``debug`` flag is set to ``True``, the block before and after the transformation is plotted,
+    with transpose nodes highlighted.
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._reduce_transposes_block(f)
+
+    @staticmethod
+    def _reduce_transposes_block(block):
+        """
+        Only apply the optimization if the block is flat,
+        i.e., it does not contain any op which contains a sub-block.
+        TODO:
+        Removing transposes and transpose complements requires re-running
+        type inference for the set of ops in between the fused transpose ops,
+        which is simpler to do when all the ops in the block are free of sub-blocks.
+        The case of transpose fusion with ops that contain sub-blocks needs to be handled with more care and test cases.
+        """
+        for op in list(block.operations):
+            if len(op.blocks) > 0:
+                return
+
+        with block:
+            opt_transposes = _TransposeOptimization(block)
+            opt_transposes.block_traversal()
+            opt_transposes.apply_transform()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py
new file mode 100644
index 00000000..82ac2dec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py
@@ -0,0 +1,831 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import (
+    _check_child_op_type,
+    _check_var_scalar_value,
+    block_context_manager,
+)
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+@register_pass(namespace="common")
+class expand_high_rank_reshape_and_transpose(AbstractGraphPass):
+    """
+    Detect the pattern ``reshape_1-->transpose-->reshape_2``, where ``reshape_1`` has
+    an output tensor with rank >= 6, and ``reshape_2`` produces a tensor with rank <= 5.
+
+    In general, we can expand this pattern into a sequence of rank 4 ``reshape`` and ``transpose`` ops,
+    which is supported by the Core ML runtime.
+
+    .. code-block::
+
+        Given:
+            %1 = reshape(%x, shape=(d1, d2, d3, d4, ..., dn))
+            %2 = transpose(%1, perm=(p1, p2, ..., pn))
+            %3 = reshape(%2, shape=(o1, o2, o3, o4, o5))
+
+        Result:
+            %t1 = reshape(%x, shape=(y11, y12, y13, y14))
+            %h1 = transpose(%t1, perm=[0, 2, 1, 3])
+            %t2 = reshape(%h1, shape=(y21, y22, y23, y24))
+            %h2 = transpose(%t2, perm=[0, 2, 1, 3])
+            ....
+            %hn = transpose(%tn, perm=[0, 2, 1, 3])
+            %3 = reshape(%hn, shape=(o1, o2, o3, o4, o5))
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self.expand_high_rank_reshape_and_transpose_block(f)
+
+    @staticmethod
+    def _match_pattern(op):
+        # We are detecting the
+        # reshape(>= rank 6) -> transpose -> reshape(<= rank 5) pattern
+        ops = [op]
+        if op.op_type != "reshape":
+            return None
+        if op.outputs[0].rank <= 5:
+            return None
+        if any_symbolic(op.outputs[0].shape):
+            return None
+
+        if not _check_child_op_type(op, "transpose"):
+            return None
+        transpose_op = op.outputs[0].child_ops[0]
+        ops.append(transpose_op)
+
+        if not _check_child_op_type(transpose_op, "reshape"):
+            return None
+        reshape_op = transpose_op.outputs[0].child_ops[0]
+        ops.append(reshape_op)
+        if reshape_op.outputs[0].rank >= 6:
+            return None
+
+        for candidate_op in ops[:-1]:
+            if candidate_op.outputs[0] in op.enclosing_block.outputs:
+                return None
+        return ops
+
+    @staticmethod
+    def _try_to_transform(ops, block):
+        def _get_prod(start, end, arr, skip_indices):
+            res = 1
+            for i in range(start, end):
+                if i in skip_indices:
+                    continue
+                res *= arr[i]
+            return res
+
+        reshape_op, transpose_op, last_reshape_op = ops[0], ops[1], ops[2]
+        original_shape = reshape_op.outputs[0].shape
+        original_perm = transpose_op.perm.val.tolist()
+
+        # Group the consecutive axes in the perm; sometimes this can directly lower the
+        # rank under 6.
+        #
+        # For instance:
+        #
+        # reshape = mb.reshape(x=x, shape=[1, 2, 3, 4, 5, 6])
+        # transpose = mb.transpose(x=reshape, perm=[4, 5, 3, 2, 0, 1])
+        # output = mb.reshape(x=transpose, shape=[6, 20, 6])
+        #
+        # There are 4 groups of axes: [4, 5], [3], [2], [0, 1]
+        # We can transform the ops to
+        #
+        # new_reshape = mb.reshape(x=x, shape=[1*2, 3, 4, 5*6])
+        # new_transpose = mb.transpose(x=reshape, perm=[3, 2, 1, 0])
+        # output = mb.reshape(x=new_transpose, shape=[6, 20, 6])
+        #
+        # Note that the output of new_transpose has a different rank than that of transpose;
+        # however, they have the same data layout, so the final output is still unchanged.
+        group_axes = []
+        res = []
+        for i in range(len(original_perm)):
+            if i > 0 and original_perm[i] == original_perm[i-1] + 1:
+                res.append(original_perm[i])
+            else:
+                if len(res) > 0:
+                    group_axes.append(res)
+                res = [original_perm[i]]
+            if i == len(original_perm) - 1:
+                group_axes.append(res)
+
+        group_shape = []
+        for axes in group_axes:
+            start, end = axes[0], axes[-1] + 1
+            group_shape.append(_get_prod(start, end, original_shape, set()))
+
+        start_group_axis = [axes[0] for axes in group_axes]
+        group_axis_order = np.argsort(start_group_axis)
+        shape = np.array(group_shape)[group_axis_order].tolist()
+
+        sorted_start_group_axis = np.sort(start_group_axis).tolist()
+        perm = [sorted_start_group_axis.index(i) for i in start_group_axis]
+
+        rank = len(perm)
+        x = reshape_op.x
+
+        if rank < 6:
+            # If the intermediate tensors have rank < 6,
+            # we can directly use them to replace the original pattern
+            x = mb.reshape(x=x, shape=shape, before_op=reshape_op)
+            x = mb.transpose(x=x, perm=perm, before_op=reshape_op)
+
+        else:
+            # Otherwise, we need to expand the rank-N tensor into N reshape and N transpose ops.
+            # Note that all intermediate tensors have rank 4.
+            #
+            # The algorithm is as follows:
+            #
+            # reshape shape: [d_1, d_2, ..., d_n]
+            # transpose perm: [p_1, p_2, ..., p_n]
+            #
+            # reshape to [1, d_1*d_2*...*d_(p_1-1), d_(p_1), d_(p_1+1)*...*d_n]
+            # transpose to [1, d_(p_1), d_1*d_2*...*d_(p_1-1), d_(p_1+1)*...*d_n]
+            #
+            # reshape to [d_(p_1), d_1*d_2*...*d_(p_2-1), d_(p_2), d_(p_2+1)*...*d_n]
+            # transpose to [d_(p_1), d_(p_2), d_1*d_2*...*d_(p_2-1), d_(p_2+1)*...*d_n]
+            #
+            # reshape to [d_(p_1)*d_(p_2), d_1*d_2*...*d_(p_3-1), d_(p_3), d_(p_3+1)*...*d_n]
+            # ....
+ # so on and so forth + leading_dim = 1 + memo = set() + for i in range(rank): + axis = perm[i] + dim = shape[axis] + memo.add(axis) + reshape_shape = [ + leading_dim, + _get_prod(0, axis, shape, memo), + dim, + _get_prod(axis + 1, rank, shape, memo) + ] + x = mb.reshape(x=x, shape=reshape_shape, before_op=reshape_op) + x = mb.transpose(x=x, perm=[0, 2, 1, 3], before_op=reshape_op) + leading_dim *= dim + + + x = mb.reshape(x=x, shape=last_reshape_op.shape.val, before_op=reshape_op) + if reshape_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=reshape_op, old_var=last_reshape_op.outputs[0], new_var=x, + ): + # Remove all the ops at once + block.remove_ops(ops) + return True + return False + + @block_context_manager + def expand_high_rank_reshape_and_transpose_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self.expand_high_rank_reshape_and_transpose_block(b) + if len(op.blocks) > 0: + continue + + ops = self._match_pattern(op) + if ops is not None: + fusion_status = self._try_to_transform(ops, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + +@register_pass(namespace="common") +class concat_to_pixel_shuffle(AbstractGraphPass): + """ + Identify nested, interleaved ``concat`` ops which can be replaced by a single ``concat`` and a `pixel shuffle` layer. + + This pattern occurs with the faster up-convolution from the FCRN model (Laina et al., 2016). + + .. code-block:: + + # Before the concat_to_pixel_shuffle pass. + input(N, C, H, W) ------------------- + | + v + input(N, C, H, W) -----> concat(axis=2, interleave=True) -----> concat(axis=3, interleave=True) ----> output + ^ + | + input(N, C, H, W) -----> concat(axis=2, interleave=True) -------------------- + | ^ + | | + input(N, C, H, W) ------------------- + + + # After the concat_to_pixel_shuffle pass. 
+ input(N, C, H, W) --------------- + | + v + input(N, C, H, W) -----> concat(axis=1, interleave=True) -----> pixel_shuffle(upscale_factor=2) ----> output + ^ + | + input(N, C, H, W) --------------| + | + | + input(N, C, H, W) --------------- + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._concat_to_pixel_shuffle_block(f) + + @staticmethod + def _match_pattern(op): + + # Identify if this is an op we can transform + if op.op_type != "concat": + return None + + w_concat = op + if w_concat.inputs["values"][0].rank != 4: + return None + + if w_concat.inputs["axis"].val != 3: + return None + if not w_concat.inputs["interleave"].val: + return None + + inputs = list(w_concat.inputs["values"]) + if len(inputs) != 2: + return None + + if not inputs[0].op or not inputs[1].op: + return None + + if inputs[0].op.op_type != "concat" or inputs[1].op.op_type != "concat": + return None + + h_concat_0 = inputs[0].op + if not h_concat_0.inputs["interleave"].val: + return None + + h_concat_0_inputs = list(h_concat_0.inputs["values"]) + if len(h_concat_0_inputs) != 2: + return None + + h_concat_1 = inputs[1].op + if not h_concat_1.inputs["interleave"].val: + return None + + h_concat_1_inputs = list(h_concat_1.inputs["values"]) + if len(h_concat_1_inputs) != 2: + return None + + if h_concat_0.inputs["axis"].val != 2 or h_concat_1.inputs["axis"].val != 2: + return None + + return w_concat, h_concat_0, h_concat_1 + + @staticmethod + def _replace_ops(block, w_concat, h_concat_0, h_concat_1): + + h_concat_0_inputs = list(h_concat_0.inputs["values"]) + h_concat_1_inputs = list(h_concat_1.inputs["values"]) + + all_inputs = [ + h_concat_0_inputs[0], + h_concat_1_inputs[0], + h_concat_0_inputs[1], + h_concat_1_inputs[1], + ] + + # Concatenate all 4 inputs on the channel axis + x = mb.concat(values=all_inputs, axis=1, before_op=h_concat_0, interleave=True) + # Shuffle into place + x = mb.pixel_shuffle(x=x, upscale_factor=2, before_op=h_concat_0) + + w_concat.enclosing_block.replace_uses_of_var_after_op( + anchor_op=h_concat_0, old_var=w_concat.outputs[0], new_var=x + ) + + block.remove_ops([w_concat, h_concat_0, h_concat_1]) + + @block_context_manager + def _concat_to_pixel_shuffle_block(self, block): + for op in list(block.operations): + layers = self._match_pattern(op) + if layers: + self._replace_ops(block, layers[0], layers[1], layers[2]) + + +@register_pass(namespace="common") +class detect_concat_interleave(AbstractGraphPass): + """ + Detect the pattern ``concat-->reshape--->transpose--->reshape``, where ``concat`` is + along the channel axis ``(axis=-3)``, and map this pattern to the ``concat`` with ``interleave`` op. + + This pattern occurs, for example, in the ``shufflenet`` model in ``torchvision``. + + .. 
code-block::
+
+        Given:
+            %3 = concat(%1.a, %1.b, ..., axis=-3, interleave=False) # shape = (B, n*C, H, W)
+            %4 = reshape(%3) # shape = (B, n, C, H, W)
+            %5 = transpose(%4, perm=[0, 2, 1, 3, 4]) # shape = (B, C, n, H, W)
+            %6 = reshape(%5) # shape = (B, C*n, H, W)
+
+        Result:
+            %6 = concat(%1.a, %1.b, ..., axis=-3, interleave=True)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_concat_interleave(f)
+
+    @staticmethod
+    def _match_pattern(op):
+        if op.outputs[0] in op.enclosing_block.outputs:
+            return None
+
+        if op.op_type == "concat":
+            if op.interleave.val:
+                return None
+
+            # check that the axis is -3 and the rank is 4
+            rank = op.values[0].rank
+            if rank != 4:
+                return None
+            axis = op.axis.val
+            if axis > 0:
+                axis = axis - rank
+            if axis != -3:
+                return None
+
+            # check that all inputs to concat have fully defined shapes
+            for in_ in op.values:
+                if any_symbolic(in_.shape):
+                    return None
+
+            # check that all inputs to concat have the same shape
+            inshape = list(op.values[0].shape)
+            for v in op.values[1:]:
+                for i in range(rank):
+                    if inshape[i] != v.shape[i]:
+                        return None
+
+            # check that this concat is connected to exactly 1 reshape op
+            child_ops = list(op.outputs[0].child_ops)
+            if len(child_ops) == 1:
+                if list(child_ops)[0].op_type == "reshape":
+                    return op
+        return None
+
+    @staticmethod
+    def _try_to_transform(concat_op, add_op, block):
+        all_ops = [concat_op]
+        B, C, H, W = list(concat_op.values[0].shape)
+        n = len(concat_op.values)
+
+        # check that the reshape shapes the input to (B, n, C, H, W)
+        reshape_op1 = concat_op.outputs[0].child_ops[0]
+        reshape_shape1 = reshape_op1.shape.val
+        if reshape_shape1 is None:
+            return False
+        if not isinstance(reshape_shape1, np.ndarray):
+            return False
+        reshape_shape1 = list(reshape_shape1)
+        if reshape_shape1 != [B, n, C, H, W]:
+            return False
+        all_ops.append(reshape_op1)
+
+        # check that after the reshape there is a transpose op with perm=[0, 2, 1, 3, 4]
+        if len(list(reshape_op1.outputs[0].child_ops)) != 1:
+            return False
+        transpose_op = list(reshape_op1.outputs[0].child_ops)[0]
+        if transpose_op.op_type != "transpose":
+            return False
+        perm = transpose_op.perm.val
+        if perm is None:
+            return False
+        if list(perm) != [0, 2, 1, 3, 4]:
+            return False
+        all_ops.append(transpose_op)
+
+        # check that after the transpose there is another reshape, with shape [B, ., H, W]
+        if len(list(transpose_op.outputs[0].child_ops)) != 1:
+            return False
+        reshape_op2 = list(transpose_op.outputs[0].child_ops)[0]
+        if reshape_op2.op_type != "reshape":
+            return False
+        reshape_shape2 = reshape_op2.shape.val
+        if reshape_shape2 is None:
+            return False
+        if not isinstance(reshape_shape2, np.ndarray):
+            return False
+        reshape_shape2 = list(reshape_shape2)
+        if len(reshape_shape2) != 4:
+            return False
+        if [reshape_shape2[0], reshape_shape2[-2], reshape_shape2[-1]] != [B, H, W]:
+            return False
+        all_ops.append(reshape_op2)
+
+        # check that none of the ops in this pattern are connected to the output
+        # (except the last reshape op)
+        for i, op in enumerate(all_ops):
+            if i == len(all_ops) - 1:
+                continue
+            for out in op.outputs:
+                if out in block.outputs:
+                    return False
+
+        # add a new concat op
+        out_name = reshape_op2.outputs[0].name
+        x = mb.concat(
+            values=concat_op.values,
+            axis=concat_op.axis.val,
+            interleave=True,
+            name=out_name,
+            before_op=concat_op,
+        )
+
+        reshape_op2.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=reshape_op2, old_var=reshape_op2.outputs[0], new_var=x
+        )
+
+        # Remove all the ops at once
+        block.remove_ops(all_ops)
+        return True
+
+    @block_context_manager
+    def _fuse_concat_interleave(self, block):
+        fusion_status = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_concat_interleave(b)
+            if len(op.blocks) > 0:
+                continue
+
+            concat_op = self._match_pattern(op)
+            if concat_op is not None:
+                fusion_status = self._try_to_transform(op, concat_op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
+
+
+@register_pass(namespace="common")
+class fuse_onehot_matmul_to_gather(AbstractGraphPass):
+    """
+    Detect if ``onehot (axis=-1, on_value=1, off_value=0)`` is followed by a ``matmul`` op (no bias).
+    If so, they can be replaced by a ``gather`` op.
+
+    .. code-block::
+
+        Input:
+            %2 = one_hot(%1, on_value=1, off_value=0, axis=-1)
+            %3 = const() # rank 2
+            %4 = matmul(%2, %3)
+
+        Output:
+            %4 = gather(%3, %2, axis=0)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_onehot_matmul_to_gather_block(f)
+
+    @staticmethod
+    def _try_to_transform(onehot_op, block):
+        root_var = onehot_op.indices
+
+        # check that the output of the onehot op is not a block output
+        if onehot_op.outputs[0] in block.outputs:
+            return False
+
+        # check that the onehot op has axis=-1, on_value=1 and off_value=0,
+        # and a constant one_hot_vector_size
+        axis = onehot_op.axis.val
+        if axis is None:
+            return False
+        if onehot_op.indices.shape is None:
+            return False
+        rank = len(onehot_op.indices.shape)
+        if axis >= 0:
+            axis -= rank
+        if axis != -1:
+            return False
+        if not _check_var_scalar_value(onehot_op.on_value, 1):
+            return False
+        if not _check_var_scalar_value(onehot_op.off_value, 0):
+            return False
+        if onehot_op.one_hot_vector_size.val is None:
+            return False
+
+        # checks for the following matmul op
+        if not _check_child_op_type(onehot_op, "matmul"):
+            return False
+        matmul_op = list(onehot_op.outputs[0].child_ops)[0]
+        if matmul_op.x != onehot_op.outputs[0]:
+            return False
+        if matmul_op.transpose_x.val or matmul_op.transpose_y.val:
+            return False
+        W_var = matmul_op.y
+        if W_var.val is None:
+            return False
+        if len(W_var.val.shape) != 2:
+            return False
+
+        # remove the onehot and matmul ops and replace them with a gather op
+        out_name = matmul_op.outputs[0].name
+        x = mb.gather(x=W_var, indices=root_var, axis=0, name=out_name, before_op=matmul_op)
+
+        matmul_op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=matmul_op, old_var=matmul_op.outputs[0], new_var=x
+        )
+        # Remove all the ops at once
+        block.remove_ops([onehot_op, matmul_op])
+        return True
+
+    @block_context_manager
+    def _fuse_onehot_matmul_to_gather_block(self, block):
+        fusion_status = False
+        for i, op in enumerate(list(block.operations)):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_onehot_matmul_to_gather_block(b)
+            if len(op.blocks) > 0:
+                # ops that contain sub-blocks are not matched by this pattern
+                continue
+
+            # start pattern match if a one_hot op is encountered
+            if op.op_type == "one_hot":
+                fusion_status = self._try_to_transform(op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
+
+
+@register_pass(namespace="common")
+class replace_stack_reshape(AbstractGraphPass):
+    """
+    A stack followed by a reshape layer can be replaced by a ``concat`` if the reshape
+    simply removes the new axis and doubles the size of one of the axes next to it.
+
+    If the new axis is reshaped to the "right" (that is, the axis just after it is
+    doubled), then we can use a ``concat``. If it is reshaped to the "left" (the axis
+    just before it is doubled), then the ``concat`` needs to set the ``interleaved`` flag.
+
+    Examples:
+
+    ..
code-block:: + + Given: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %3 = stack((%1,%2), axis=2) # shape = (1, 5, 2, 3, 4) + %4 = reshape(%3, shape=[1, 10, 3, 4]) + + Result: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %4 = concat((%1,%2), axis=1, interleave=True) # shape = (1, 10, 3, 4) + + Given: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %3 = stack((%1, %2), axis=1) # shape = (1, 2, 5, 3, 4) + %4 = reshape(%3, shape=[1, 10, 3, 4]) + + Result: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %4 = concat((%1, %2), axis = 1) # shape = (1, 10, 3, 4) + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._replace_stack_reshape_block(f) + + @staticmethod + def _match_operation(stack_op): + + # Identify if this is an op we can transform + if stack_op.op_type != "stack": + return None, None + + child_ops = stack_op.outputs[0].child_ops + if len(child_ops) != 1: + return None, None + + if child_ops[0].op_type != "reshape": + return None, None + + stack_axis = stack_op.inputs["axis"] + if not stack_axis: + return None, None + stack_axis_val = stack_axis.val + + reshape_op = child_ops[0] + + # Now, op is a stack op followed by a reshape op + # So we need to check that the stack really gets eliminated + stack_output_rank = len(stack_op.outputs[0].shape) + reshape_output_rank = len(reshape_op.outputs[0].shape) + + if stack_output_rank != (reshape_output_rank + 1): + return None, None + + # Compare the input to stack to the output from reshape + # These shapes should differ in either the stack_axis_val place (by a factor of 2), + # or in the stack_axis_val-1 place by the same factor + input_shape = list(stack_op.inputs["values"][0].shape) + concat_axis = [ + idx + for idx, (x, y) in enumerate(zip(input_shape, reshape_op.outputs[0].shape)) + if x != y + ] + if len(concat_axis) != 1: + return None, None + + concat_axis = concat_axis[0] + + if input_shape[concat_axis] * 2 != reshape_op.outputs[0].shape[concat_axis]: + return None, None + + if concat_axis != stack_axis_val and concat_axis != stack_axis_val - 1: + return None, None + + return stack_op, reshape_op + + @staticmethod + def _replace_stack_reshape_ops(block, stack_op, reshape_op): + + stack_axis = stack_op.inputs["axis"] + if not stack_axis: + return None, None + stack_axis_val = stack_axis.val + + input_shape = list(stack_op.outputs[0].shape) + input_shape.pop(stack_axis_val) + + concat_axis = [ + idx + for idx, (x, y) in enumerate(zip(input_shape, reshape_op.outputs[0].shape)) + if x != y + ] + if len(concat_axis) != 1: + return + concat_axis = concat_axis[0] + + interleave = concat_axis == stack_axis_val - 1 + + x = mb.concat( + values=stack_op.values, axis=concat_axis, before_op=stack_op, interleave=interleave + ) + + reshape_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=stack_op, old_var=reshape_op.outputs[0], new_var=x + ) + block.remove_ops([stack_op, reshape_op]) + + @block_context_manager + def _replace_stack_reshape_block(self, block): + for op in list(block.operations): + + stack_op, reshape_op = self._match_operation(op) + + if stack_op: + self._replace_stack_reshape_ops(block, stack_op, reshape_op) + + +@register_pass(namespace="common") +class use_reflection_padding(AbstractGraphPass): + """ + Identify a reflection padding layer composed out of `slices` and `concats`. + + .. 
code-block::
+
+        Input graph:
+        ------------------------------------------------------------------------------------- |
+        |                                                                                      v
+        input(1, 2, 6, 8) ------> slice_by_index(begin=[0, 0, 0, 1], end=[0, 0, 0, 2]) -----> concat(axis=3) ---> out(1, 2, 6, 10)
+        |                                                                                      ^
+        ----------------> slice_by_index(begin=[0, 0, 0, -2], end=[0, 0, 0, -1]) -------------|
+
+        Output graph:
+        input(1, 2, 6, 8) -----> pad(mode=reflect, size=[0, 0, 1, 1]) -----> out(1, 2, 6, 10)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._reflection_padding_block(f)
+
+    @staticmethod
+    def _match_pattern(concat_op, block):
+        if concat_op.op_type != "concat":
+            return False
+
+        concat_inputs = list(concat_op.inputs["values"])
+        # There needs to be an odd number of inputs, greater than one (at least one
+        # model has been seen with a concat that has a single input)
+        if len(concat_inputs) % 2 != 1 or len(concat_inputs) == 1:
+            return False
+
+        # The original input will need to be in the middle of the concatenated inputs
+        original_input = concat_inputs[len(concat_inputs) // 2]
+
+        axis = None
+        slice_ops_out = []
+        end_mask = None
+        begin_index = len(concat_inputs) // 2
+
+        for slice_op in concat_inputs:
+
+            # one of the concat inputs is the original input (to the slices)
+            if slice_op == original_input:
+                # We'll now start checking indices from the end
+                begin_index = begin_index - 2
+                continue
+
+            slice_op = slice_op.op
+            if not slice_op:
+                return False
+
+            if slice_op.op_type != "slice_by_index":
+                return False
+
+            # check that the input to the slice op is the original input
+            if slice_op.inputs["x"] != original_input:
+                return False
+
+            # the slice must not be a block output
+            if slice_op.outputs[0] in block.outputs:
+                return False
+
+            if end_mask is None:
+                end_mask = slice_op.inputs["end_mask"].val
+                axis = list(end_mask).index(False, 0, len(end_mask))
+
+            if end_mask is None:
+                return False
+
+            if axis != list(end_mask).index(False, 0, len(end_mask)):
+                return False
+
+            # Check that we're only taking a slice of size 1
+            end = slice_op.inputs["end"].val
+            begin = slice_op.inputs["begin"].val
+            if end[axis] - begin[axis] != 1:
+                return False
+
+            input_shape = original_input.shape
+            # Check that the slices are in order
+            if begin[axis] != begin_index and begin[axis] != begin_index + input_shape[axis]:
+                return False
+            begin_index = begin_index - 1
+
+            slice_ops_out.append(slice_op)
+
+        if axis is None:
+            return False
+
+        return use_reflection_padding._replace_ops(
+            block, concat_op, slice_ops_out, axis - len(end_mask)
+        )
+
+    @staticmethod
+    def _replace_ops(block, concat_op, slice_ops, axis):
+
+        pad_size = len(slice_ops) // 2
+        if axis == -1:
+            pad = [pad_size, pad_size]
+        elif axis == -2:
+            pad = [pad_size, pad_size, 0, 0]
+        else:
+            return False
+
+        x = mb.pad(x=slice_ops[0].inputs["x"], pad=pad, mode="reflect", before_op=concat_op)
+        concat_op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=concat_op, old_var=concat_op.outputs[0], new_var=x
+        )
+
+        block.remove_ops([concat_op] + slice_ops)
+        return True
+
+    @block_context_manager
+    def _reflection_padding_block(self, block):
+        for op in list(block.operations):
+            self._match_pattern(op, block)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py
new file mode 100644
index 00000000..6add95b6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py
@@ -0,0 +1,362 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import re
+import warnings
+from collections import OrderedDict
+
+from coremltools.converters.mil.input_types import EnumeratedShapes, ImageType, Shape
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class image_input_preprocess(AbstractGraphPass):
+    """
+    Plug in a ``transpose`` op to convert image inputs from the NHWC format to the NCHW format.
+
+    Follow these steps:
+
+    1. Check whether there are any inputs that the user specified as ImageType.
+    2. Check the channel's dimension for all inputs that are ImageType.
+
+        a) ``channel_first == True``
+           We do not modify this input, since ``channel_first`` is the intended
+           behaviour for feeding images for optimal performance.
+        b) ``channel_first == False``
+           We convert the input into a "channel_first" input, and plug in a
+           ``transpose`` for the input to maintain the remaining graph's dimensionality.
+    """
+
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            if f_name == "main":
+                # We need to make sure "main" exists and start here.
+                self._image_input_preprocess(prog)
+
+    @staticmethod
+    def _image_input_preprocess(prog):
+        def _transform_to_channel_first(shape):
+            if isinstance(shape, tuple):
+                shape = list(shape)
+                return tuple(shape[:-3] + [shape[-1]] + shape[-3:-1])
+            else:
+                return shape[:-3] + [shape[-1]] + shape[-3:-1]
+
+        main_input_types = list(prog.main_input_types)
+        for idx, input_type in enumerate(main_input_types):
+            if isinstance(input_type, ImageType) and not input_type.channel_first:
+                name = input_type.name
+                # Build a new ImageType to change the data layout
+                if isinstance(input_type.shape, Shape):
+                    new_shape = _transform_to_channel_first(input_type.shape.shape)
+                    new_default = _transform_to_channel_first(input_type.shape.default)
+                    shape_type = Shape(shape=new_shape, default=new_default)
+                elif isinstance(input_type.shape, EnumeratedShapes):
+                    shape_list = []
+                    for shape in input_type.shape.shapes:
+                        if isinstance(shape, Shape):
+                            shape_list.append(_transform_to_channel_first(shape.shape))
+                        else:
+                            shape_list.append(_transform_to_channel_first(shape))
+                    shape_type = EnumeratedShapes(
+                        shapes=shape_list,
+                        default=_transform_to_channel_first(input_type.shape.default),
+                    )
+                new_image_type = ImageType(
+                    name=name,
+                    shape=shape_type,
+                    bias=input_type.bias,
+                    scale=input_type.scale,
+                    color_layout=input_type.color_layout,
+                    channel_first=True,
+                )
+                main_input_types[idx] = new_image_type
+
+                # Reconstruct the Placeholder of the Function inputs.
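+                # (Illustrative: for an NHWC input of shape (1, 224, 224, 3),
+                # _transform_to_channel_first returns (1, 3, 224, 224); the
+                # placeholder below is rebuilt with this NCHW shape.)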
+ placeholder_op = prog.functions["main"].placeholder_inputs[name] + old_var = placeholder_op.outputs[0] + nchw_shape = _transform_to_channel_first(placeholder_op.sym_shape) + placeholder_op.__init__( + nchw_shape, dtype=placeholder_op.dtype, name=placeholder_op.name + ) + + # Update Function input var + prog.functions["main"]._input_dict[name] = placeholder_op.outputs[0] + prog.functions["main"].function_inputs = tuple( + prog.functions["main"]._input_dict.values() + ) + + # Add transpose into graph (Transpose from NCHW back to NHWC) + curr_block = prog.functions["main"] + curr_var = prog.functions["main"].inputs[name] + + perm = list(range(curr_var.rank)) + perm = perm[:-3] + [perm[-2], perm[-1], perm[-3]] + with curr_block: + new_input = mb.transpose( + x=curr_var, + perm=perm, + before_op=prog.functions["main"].operations[0], + name=curr_var.name + "__transpose_from_nchw__", + ) + curr_block.replace_uses_of_var_after_op( + anchor_op=None, old_var=old_var, new_var=new_input + ) + prog.main_input_types = tuple(main_input_types) + + +class NameSanitizer: + def __init__(self, prefix=None): + # to hold all names encountered, + # to make sure that all new names are unique + self.all_names = set() + self.prefix = "_" if prefix is None else prefix + + def sanitize_name(self, name): + """ + Sanitize the input string and return it back. + Input string should be of the format: [a-zA-Z_][a-zA-Z0-9_]* + + If it is not, then it is sanitized in the following manner: + - first, any character that is not [a-zA-Z0-9_] is replaced with "_" + - if the starting character is not [a-zA-Z_], it is prefixed with self.prefix + - the resulting string must be unique. If it has been encountered before, + it is appended by "_0" or "_1" and so on, until it becomes unique. + + :name: str + current name + + :return: str + updated name. Returns the same string, if sanitization not required. 
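+
+        Example (illustrative, hypothetical names): with the default prefix,
+        "conv/1:0" becomes "conv_1_0", and "1x1_conv" becomes "_1x1_conv"
+        because of its leading digit. A repeated "conv_1_0" would be renamed
+        "conv_1_0_0" to keep names unique.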
+ """ + + # replace any character that is not [a-zA-Z0-9_] with an underscore + new_name = re.sub("[^a-zA-Z0-9_]", "_", name) + + # now check if the name starts with anything but [A-Za-z_] + # if so, then add the prefix + if re.match("[^a-zA-Z_]", new_name): + new_name = self.prefix + new_name + + reserved_names = [ + "any", + "bool", + "program", + "func", + "tensor", + "list", + "dict", + "tuple", + "true", + "false", + "string", + "bf16", + "fp16", + "fp32", + "fp64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + if new_name in reserved_names: + new_name += "_workaround" + + if new_name == name: + # return if nothing has changed + self.all_names.add(name) + return name + else: + # name has changed + # make sure it is unique, then return + if new_name in self.all_names: + idx = 0 + new_name += "_" + str(idx) + while new_name in self.all_names: + idx += 1 + new_name += "_" + str(idx) + # now we have a unique name + self.all_names.add(new_name) + return new_name + + @staticmethod + def sanitize_block( + block, + sanitizer_vars, + sanitizer_ops, + main_input_types=None, + sanitize_model_inputs_outputs_only=False, + ): + """ + Sanitize the vars and op names inside the block to adhere to the format [a-zA-Z_][a-zA-Z0-9_]* + """ + + if sanitize_model_inputs_outputs_only: + NameSanitizer._sanitize_block_input_vars( + block, sanitizer_vars, main_input_types, sanitize_main_input_only=True + ) + NameSanitizer._sanitize_main_outputs_only(block, sanitizer_vars) + else: + NameSanitizer._sanitize_block_input_vars(block, sanitizer_vars, main_input_types) + NameSanitizer._sanitize_output_vars_and_nested_blocks( + block, sanitizer_vars, sanitizer_ops + ) + NameSanitizer._sanitize_op_names(block, sanitizer_ops) + + @staticmethod + def _sanitize_block_input_vars( + block, sanitizer_vars, main_input_types, sanitize_main_input_only=False + ): + + # iterate over all the block input vars and sanitize the names + if isinstance(block, Function): + # this is the "main" block + # block.inputs is a dict from input names to input vars + # iterate over the input vars of the main program and sanitize their names + new_input_dict = OrderedDict() + input_name_updated = False + for input_name, var in block.inputs.items(): + msg = "Main block's input name, '{}', is different from its corresponding var's name, '{}'." + assert input_name == var.name, msg.format(input_name, var.name) + new_name = sanitizer_vars.sanitize_name(var.name) + new_input_dict[new_name] = var + if new_name != var.name: + msg = "Input, '{}', of the source model, has been renamed to '{}' in the Core ML model." 
+ warnings.warn(msg.format(var.name, new_name)) + if var.name in block.placeholder_inputs: + block.placeholder_inputs[new_name] = block.placeholder_inputs.pop(var.name) + block.placeholder_inputs[new_name].set_name(new_name) + var.set_name(new_name) + input_name_updated = True + if main_input_types is not None: + # update prog's main_input_types, since we are updating the name of a model input here + for i in range(len(main_input_types)): + if main_input_types[i].name == input_name: + main_input_types[i].name = new_name + break + if input_name_updated: + block._input_dict = new_input_dict + elif not sanitize_main_input_only: + # in this case block is not the "main" function + # in this case block.inputs is a list of input vars of the block + for var in block.inputs: + new_name = sanitizer_vars.sanitize_name(var.name) + if new_name != var.name: + var.set_name(new_name) + + @staticmethod + def _sanitize_var_names(var, sanitizer_vars, emit_warning=False): + new_name = sanitizer_vars.sanitize_name(var.name) + if new_name != var.name: + if emit_warning: + msg = "Output, '{}', of the source model, has been renamed to '{}' in the Core ML model." + warnings.warn(msg.format(var.name, new_name)) + var.set_name(new_name) + + @staticmethod + def _sanitize_op_names(block, sanitizer_ops): + # iterate over all the ops and sanitize the op names + for op in list(block.operations): + if op.name is not None: + op.name = sanitizer_ops.sanitize_name(op.name) + + @staticmethod + def _sanitize_output_vars_and_nested_blocks(block, sanitizer_vars, sanitizer_ops): + for op in list(block.operations): + for b in op.blocks: + NameSanitizer.sanitize_block(b, sanitizer_vars, sanitizer_ops) + + for var in op.outputs: + if isinstance(block, Function) and var in block.outputs: + NameSanitizer._sanitize_var_names(var, sanitizer_vars, emit_warning=True) + else: + NameSanitizer._sanitize_var_names(var, sanitizer_vars) + + @staticmethod + def _sanitize_main_outputs_only(block, sanitizer_vars): + for op in list(block.operations): + for var in op.outputs: + if isinstance(block, Function) and var in block.outputs: + NameSanitizer._sanitize_var_names(var, sanitizer_vars, emit_warning=True) + + +@register_pass(namespace="common") +class sanitize_input_output_names(AbstractGraphPass): + """ + Sanitize the names of model input and output vars to make sure + that they are of the format as described in the NameSanitizer class; that is, + of the format ``[a-zA-Z_][a-zA-Z0-9_]*``. + """ + + def apply(self, prog): + sanitizer_vars = NameSanitizer(prefix="var_") + sanitizer_ops = NameSanitizer(prefix="op_") + + # sanitize the input/output of the main block + NameSanitizer.sanitize_block( + prog.functions["main"], + sanitizer_vars, + sanitizer_ops, + prog.main_input_types, + sanitize_model_inputs_outputs_only=True, + ) + + +@register_pass(namespace="common") +class update_output_dtypes(AbstractGraphPass): + """ + Update the dtypes of output vars of the main block to match the dtypes + provided in ``prog.main_output_types``, which in turn is populated by the + ``outputs`` argument provided by the user in the ``coremltools.convert()`` API. + This graph pass assumes that the list of outputs in ``prog.main_output_types`` (if not ``None``), + are in the same order as the output vars. 
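To see where ``prog.main_output_types`` comes from, here is a hedged conversion sketch; ``traced_model`` is a stand-in for any source model, and the ``outputs`` argument is the public ``coremltools.convert()`` parameter referenced above:

.. sourcecode:: python

    import numpy as np
    import coremltools as ct

    # Requesting a float16 output makes this pass insert a cast op on the
    # corresponding output var of the main function.
    mlmodel = ct.convert(
        traced_model,
        inputs=[ct.TensorType(name="x", shape=(1, 3, 224, 224))],
        outputs=[ct.TensorType(name="y", dtype=np.float16)],
        convert_to="mlprogram",
    )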
+ """ + + def apply(self, prog): + user_provided_output_types = prog.main_output_types + main_func = prog.functions["main"] + output_vars = main_func.outputs + if user_provided_output_types is None or len(user_provided_output_types) == 0: + return + if len(output_vars) != len(user_provided_output_types): + msg = ( + "Number of outputs provided by the user, which is {}, " + "does not match the number of outputs generated by the model, which is {}" + ) + raise ValueError(msg.format(len(user_provided_output_types), len(output_vars))) + + new_outputs = [] + for i, output_type in enumerate(user_provided_output_types): + required_output_dtype = output_type.dtype + output_var = output_vars[i] + if ( + required_output_dtype is None + or not ( + types.is_tensor(output_var.sym_type) or types.is_scalar(output_var.sym_type) + ) + or required_output_dtype == output_var.dtype + ): + # no need to update the output var's dtype in this case + new_outputs.append(output_var) + else: + output_var_name = output_var.name + output_var.set_name( + output_var_name + "_type_" + types.builtin_to_string(output_var.dtype) + ) + with main_func: + output_var = mb.cast( + x=output_var, dtype=types.builtin_to_string(required_output_dtype) + ) + output_var.set_name(output_var_name) + new_outputs.append(output_var) + + main_func.set_outputs(new_outputs) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py new file mode 100644 index 00000000..1a8383aa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py @@ -0,0 +1,857 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from enum import Enum as _Enum +from typing import Set, Text + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.backend.mil.load import should_use_weight_file +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.defs.iOS16 import ( + constexpr_affine_dequantize, + constexpr_lut_to_dense, + constexpr_sparse_to_dense, +) +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.program import Program +from coremltools.converters.mil.mil.types.type_mapping import ( + is_builtin, + nptype_from_builtin, + numpy_type_to_builtin_type, +) +from coremltools.models.neural_network.quantization_utils import _get_kmeans_lookup_table_and_weight + + +class ComputePrecision(_Enum): + FLOAT16 = "float16" + FLOAT32 = "float32" + + +class AbstractQuantizationPass(AbstractGraphPass): + """ + Base class for Post-Training Quantization transforms. 
+
+    A derived class needs to implement the following two methods:
+        - is_valid_op(op)
+        - transform_op(op)
+    """
+
+    type_eps = {}
+    type_min = {}
+    type_negmin = {}
+
+    def __init__(self, op_selector=None):
+        super().__init__()
+        if op_selector is not None and not callable(op_selector):
+            raise TypeError(
+                "Argument `op_selector` needs to be a callable function which accepts "
+                "a MIL operation object and returns a boolean value."
+            )
+        self.op_selector = op_selector
+
+    def apply(self, prog):
+        """
+        Walks over each operation in the graph and performs the following two steps:
+        1. Checks whether an operation is valid for this quantization transform using the `is_valid_op` method.
+        2. If yes, calls the `transform_op` method of the derived quantization transform class.
+
+        :param prog: MIL program
+        :return: Transformed MIL program
+        """
+        if not isinstance(prog, Program):
+            raise TypeError('Transform "{}" can only be applied on PyMIL programs.'.format(self))
+
+        if getattr(self, "skip_ops_by_type", set()) and self.op_selector is not None:
+            raise ValueError(
+                "The graph pass option `skip_ops_by_type` cannot be set along with "
+                "the `op_selector` in FP16ComputePrecision. Please only use one "
+                "method to control which ops to operate on."
+            )
+
+        @block_context_manager
+        def apply_block(block):
+            for op in list(block.operations):
+                for b in op.blocks:
+                    apply_block(b)
+
+                if self.is_valid_op(op):
+                    need_transform: bool
+                    if self.op_selector is not None:
+                        need_transform = self.op_selector(op)
+                    else:
+                        need_transform = op.op_type not in getattr(self, "skip_ops_by_type", set())
+                    if need_transform:
+                        self.transform_op(op)
+
+        for f in prog.functions.values():
+            apply_block(f)
+
+    def transform_op(self, op):
+        """
+        Replaces an op with a transformed op.
+
+        :param op: MIL operation
+        :return: None
+        """
+        raise NotImplementedError(
+            'Op transformation for quantization mode "{}" not implemented.'.format(self)
+        )
+
+    def is_valid_op(self, op):
+        """
+        Checks whether an operation is valid for the given quantization transform.
+
+        :param op: MIL operation
+        :return: True | False
+        """
+        raise NotImplementedError(
+            'Operation Preconditions for quantization mode "{}" not implemented.'.format(self)
+        )
+
+    @classmethod
+    def _close_to_zero(cls, val, np_type):
+        if np_type not in cls.type_eps:
+            cls.type_eps[np_type] = np.finfo(np_type).eps
+            cls.type_min[np_type] = np.nextafter(0.0, 1.0, dtype=np_type)
+            cls.type_negmin[np_type] = np.nextafter(0.0, -1.0, dtype=np_type)
+
+        return np.isclose(val, 0, atol=cls.type_min[np_type], rtol=cls.type_eps[np_type])
+
+    def __repr__(self):
+        return str(self)
+
+    def __str__(self):
+        return type(self).__name__
+
+
+class FP16ComputePrecision(AbstractQuantizationPass):
+    """
+    This transform does the following for each valid op, if the "op_selector" returns True:
+    - For each input of dtype float32, inject a "cast" op to change it to float16 dtype
+    - For each output of dtype float16, inject a "cast" op to change it back to float32
+    """
+
+    def __init__(self, op_selector=None):
+        super(FP16ComputePrecision, self).__init__(op_selector=op_selector)
+        self.target_dtype = "fp16"
+
+        # A var that feeds into multiple ops is cast once and cached in this dict.
+        # For reference: check out test_single_input_to_multiple_operations in `TestFP16CastTransform`.
+        self.cache_vars = {}
+
+    def fp16_overflow(self, op):
+        # Constants with values greater than 65504 or less than -65504 overflow in FP16.
+        for _, inputs in op.inputs.items():
+            is_list_input = isinstance(inputs, (list, tuple))
+            if not is_list_input:
+                inputs = [inputs]
+            for var in inputs:
+                if (
+                    var.op is not None
+                    and var.op.op_type == "const"
+                    and var.is_tensor_or_scalar_of(dtype="fp32")
+                ):
+                    if np.max(np.abs(var.op.val.val), initial=0.0) > 65504:
+                        return True
+        return False
+
+    def is_valid_op(self, op):
+
+        if op.op_type in ["cast", "while_loop", "cond"]:
+            return False
+
+        if op.op_type in [
+            "make_list",
+            "list_gather",
+            "list_scatter",
+            "list_read",
+            "list_write",
+            "list_length",
+        ]:
+            return False  # rdar://74458192
+
+        if op.op_type in ["gru", "rnn", "lstm"]:
+            return False
+
+        if self.fp16_overflow(op):
+            return False
+
+        return True
+
+    def is_valid_parameter(self, op, param_name):
+        type_domain = getattr(op.input_spec.input_types[param_name], "type_domain", None)
+        if type_domain is not None:
+            if len(type_domain) == 0:
+                return True
+            return types.fp16 in type_domain
+        return True
+
+    def _check_underflow_to_zero(self, new_var, var):
+        # Check whether any cast values become 0, which is not ideal for eps purposes.
+        # However, we skip arrays with more than 400 elements, in case we compare through a large sparse matrix.
+        if (
+            new_var.val is not None
+            and len(var.val.flatten()) < 400
+            and self._close_to_zero(new_var.val, np.float16).any()
+        ):
+            value_modified = False
+            original_val = var.val.flatten()
+            new_val = new_var.val.flatten()
+
+            for idx in range(len(original_val)):
+                if not self._close_to_zero(original_val[idx], np.float32) and self._close_to_zero(
+                    new_val[idx], np.float16
+                ):
+                    new_val[idx] = (
+                        self.type_min[np.float16]
+                        if np.sign(original_val[idx]) > 0
+                        else self.type_negmin[np.float16]
+                    )
+                    value_modified = True
+
+            if value_modified:
+                if np.isscalar(new_var.val):
+                    new_var._sym_val.val = new_val[0]
+                else:
+                    new_var._sym_val.val = new_val.reshape(new_var.val.shape)
+
+    def transform_op(self, op):
+        block = op.enclosing_block
+        casted_inputs = {}
+        inputs_modified = False
+
+        for param, inputs in op.inputs.items():
+            # First loop: iterates over all the input parameters of an operation.
+            if not self.is_valid_parameter(op, param):
+                continue
+
+            is_list_input = isinstance(inputs, (list, tuple))
+            if not is_list_input:
+                inputs = [inputs]
+
+            casted_inputs[param] = list(inputs[:])
+            for i, var in enumerate(inputs):
+                # Second loop: iterates over all the vars of a python list corresponding to an input parameter.
+                if not var.is_tensor_or_scalar_of(dtype="fp32"):
+                    continue
+
+                inputs_modified = True
+                casted_var_name = var.name + "_to_fp16"
+                if (
+                    len(var._child_ops) > 1
+                    and casted_var_name in self.cache_vars
+                    and (block.is_var_visible_in_block(self.cache_vars[casted_var_name]))
+                ):
+                    casted_inputs[param][i] = self.cache_vars[casted_var_name]
+                else:
+                    x = mb.cast(x=var, dtype="fp16", name=casted_var_name, before_op=op)
+                    self._check_underflow_to_zero(x, var)
+
+                    casted_inputs[param][i] = x
+                    if len(var._child_ops) > 1:
+                        self.cache_vars[casted_var_name] = casted_inputs[param][i]
+
+            if not is_list_input:
+                casted_inputs[param] = casted_inputs[param][0]
+
+        if inputs_modified:
+            casted_inputs.update({k: v for k, v in op.inputs.items() if k not in casted_inputs})
+            casted_inputs["name"] = op.name + "_cast"
+            casted_inputs["before_op"] = op
+            quant_output = getattr(mb, op.op_type)(**casted_inputs)
+
+            if not isinstance(quant_output, (list, tuple)):
+                quant_output = [quant_output]
+
+            for old_output_var, new_output_var in zip(op.outputs, quant_output):
+                if old_output_var.is_tensor_or_scalar_of(dtype="fp32") and (
+                    not new_output_var.is_tensor_or_scalar_of(dtype="fp32")
+                ):
+                    x = mb.cast(
+                        x=new_output_var,
+                        dtype="fp32",
+                        name=new_output_var.name + "_to_fp32",
+                        before_op=op,
+                    )
+                    op.enclosing_block.replace_uses_of_var_after_op(
+                        anchor_op=op,
+                        old_var=old_output_var,
+                        new_var=x,
+                        force_replace=True,
+                    )
+                else:
+                    op.enclosing_block.replace_uses_of_var_after_op(
+                        anchor_op=op,
+                        old_var=old_output_var,
+                        new_var=new_output_var,
+                        force_replace=True,
+                    )
+
+            block.remove_ops([op])
+
+
+@register_pass(namespace="common")
+class add_fp16_cast(FP16ComputePrecision):
+    """
+    For each input of dtype float32, inject a ``cast`` op to change it to float16 dtype.
+
+    For each output of dtype float16, inject a ``cast`` op to change it back to float32.
+
+    This pass is the registered interface for FP16ComputePrecision, which makes it consistent with
+    other passes' interfaces.
+
+    Support options:
+
+    - ``skip_ops_by_type``: Skip op types specified by a comma-separated string; for example, ``"mul,const"``.
+    """
+
+    _skip_ops_by_type: Set[Text] = set()
+
+    @property
+    def skip_ops_by_type(self):
+        return self._skip_ops_by_type
+
+    @skip_ops_by_type.setter
+    def skip_ops_by_type(self, criteria: Text):
+        self._skip_ops_by_type = set(criteria.split(","))
+
+
+class SparseParams:
+    def __init__(self, nonzero_data=None, mask=None, shape=None):
+        self.nonzero_data = nonzero_data
+        self.mask = mask
+        self.shape = shape
+
+
+class WeightSparsifier(AbstractQuantizationPass):
+    """
+    This transform does the following for each const op, if the "op_selector" returns True:
+    - Values with the smallest absolute magnitude are zeroed out, selected either by a fixed
+      threshold or by a target percentile (see the two modes below).
+    - If fake_compression=False, the zeroed-out value is encoded via the constexpr_sparse_to_dense op
+    - If fake_compression=True, the zeroed-out value is encoded via the const op
+    - The old const is replaced by a new operation with the zeroed-out value.
+    """
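The two selection modes reduce to simple numpy arithmetic; a small worked example (values chosen arbitrarily):

.. sourcecode:: python

    import numpy as np

    w = np.array([0.02, -0.5, 0.003, 1.2], dtype=np.float32)

    # THRESHOLD_BASED, threshold=0.05: zero out |w| <= 0.05
    np.where(np.abs(w) <= 0.05, 0, w)   # [0., -0.5, 0., 1.2]

    # PERCENTILE_BASED, target_percentile=0.5: zero out the smallest 50% by magnitude
    q = np.percentile(np.abs(w), 50)    # 0.26
    np.where(np.abs(w) <= q, 0, w)      # [0., -0.5, 0., 1.2]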
+ """ + + WEIGHT_SPARSIFICATION_MODES = ("THRESHOLD_BASED", "PERCENTILE_BASED") + + def __init__( + self, + mode="threshold_based", + threshold=1e-3, + target_percentile=1.0, + fake_compression=False, + op_selector=None, + ): + super().__init__(op_selector=op_selector) + self.fake_compression = fake_compression + self.mode = mode.upper() + self.threshold = threshold + self.target_percentile = target_percentile + + if not self.mode in WeightSparsifier.WEIGHT_SPARSIFICATION_MODES: + msg = "Only mode {} supported for weight sparsification. Got mode {}.".format( + WeightSparsifier.WEIGHT_SPARSIFICATION_MODES, self.mode + ) + raise ValueError(msg) + + if self.mode == "PERCENTILE_BASED" and ( + self.target_percentile < 0 or self.target_percentile > 1 + ): + raise ValueError( + "Invalid value of target_percentile: {}. Needs to be in [0, 1]".format( + self.target_percentile + ) + ) + + if self.mode == "THRESHOLD_BASED" and self.threshold < 0: + raise ValueError( + "Invalid value of threshold: {}. Needs to be in [0, inf)".format(self.threshold) + ) + + def is_valid_op(self, op): + if op.op_type == "const" and should_use_weight_file(op.val.val): + return True + return False + + @staticmethod + def compress(val, mode, target_percentile=None, threshold=None): + + mode = mode.upper() + + def sparsify_with_percentile(val, target_percentile): + q = target_percentile * 100 + return np.where(np.abs(val) <= np.percentile(np.abs(val), q), 0, val) + + def sparsify_with_thresohld(val, threshold): + return np.where(np.abs(val) <= threshold, 0, val) + + if not isinstance(val, (np.ndarray, np.generic)): + raise ValueError("Only numpy arrays are supported") + + flattened_val = val.flatten() + + if mode == "PERCENTILE_BASED": + flattened_val = sparsify_with_percentile(flattened_val, target_percentile) + elif mode == "THRESHOLD_BASED": + flattened_val = sparsify_with_thresohld(flattened_val, threshold) + + params = SparseParams() + params.nonzero_data = flattened_val[np.where(flattened_val != 0)] + params.mask = np.packbits(np.where(flattened_val != 0, 1, 0), bitorder="little") + params.shape = val.shape + return params + + @staticmethod + def decompress(params): + if not isinstance(params, SparseParams): + raise ValueError("Invalid type of params") + return constexpr_sparse_to_dense.decompress(params.nonzero_data, params.mask, params.shape) + + def transform_op(self, op): + block = op.enclosing_block + sparse_params = self.compress(op.val.val, self.mode, self.target_percentile, self.threshold) + + if not self.fake_compression: + new_var = mb.constexpr_sparse_to_dense( + nonzero_data=sparse_params.nonzero_data, + mask=sparse_params.mask, + shape=np.uint32(sparse_params.shape), + before_op=op, + name=op.name + "_sparsified", + ) + else: + decompressed_val = self.decompress(sparse_params) + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name + "_fake_sparsified", + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + ) + + block.remove_ops([op]) + + +class LutParams: + def __init__(self, lut=None, indices=None, shape=None): + self.lut = lut + self.indices = indices + self.shape = shape + + +class WeightPalettizer(AbstractQuantizationPass): + """ + This transform does the following, for each const op and if the "op_selector" return True: + - A linear look up table with 2**(nbits) entries is created and value is represented via indexing into this look up table. 
+
+    WEIGHT_PALETTIZATION_MODES = ("KMEANS", "UNIFORM", "UNIQUE", "CUSTOM")
+
+    def __init__(
+        self, nbits, fake_compression=False, op_selector=None, mode="kmeans", lut_function=None
+    ):
+        super().__init__(op_selector=op_selector)
+        self.fake_compression = fake_compression
+        self.nbits = nbits
+        self.mode = mode.upper()
+        self.lut_function = lut_function
+
+        if not self.mode in WeightPalettizer.WEIGHT_PALETTIZATION_MODES:
+            msg = "Only mode {} supported for weight palettization. Got mode {}.".format(
+                WeightPalettizer.WEIGHT_PALETTIZATION_MODES, self.mode
+            )
+            raise ValueError(msg)
+
+        if nbits is None and self.mode in ("KMEANS", "UNIFORM"):
+            msg = "nbits must be provided for mode {}".format(mode)
+            raise ValueError(msg)
+
+        if nbits is not None and self.mode in ("UNIQUE", "CUSTOM"):
+            msg = "nbits must NOT be provided for mode {}".format(mode)
+            raise ValueError(msg)
+
+        if self.nbits is not None and self.nbits not in (1, 2, 4, 6, 8):
+            raise ValueError(
+                "Invalid value of nbits ({}) for palettization. Supported bits are {{1, 2, 4, 6, 8}}".format(
+                    nbits
+                )
+            )
+
+        if (self.mode == "CUSTOM") ^ (lut_function is not None):
+            msg = "lut_function must be None unless the mode is CUSTOM, and it must be provided when the mode is CUSTOM."
+            raise ValueError(msg)
+
+        if self.mode == "CUSTOM" and not callable(self.lut_function):
+            msg = "A function object must be provided as lut_function. Got lut_function of type {}".format(
+                type(self.lut_function)
+            )
+            raise ValueError(msg)
+
+    def is_valid_op(self, op):
+        if op.op_type == "const" and should_use_weight_file(op.val.val):
+            return True
+        return False
+
+    @staticmethod
+    def compress(val, mode, nbits=None, lut_function=None):
+
+        mode = mode.upper()
+
+        def compress_kmeans(val, nbits):
+            lut, indices = _get_kmeans_lookup_table_and_weight(nbits, val)
+            lut = lut.astype(val.dtype)
+            indices = indices.astype(np.uint8)
+            return lut, indices
+
+        def compress_uniform(val, nbits):
+            val = val.flatten()
+            val_min = np.amin(val)
+            val_max = np.amax(val)
+            scale = (val_max - val_min) / ((1 << nbits) - 1)
+            indices = np.round(((val - val_min) / (val_max - val_min)) * ((1 << nbits) - 1)).astype(
+                np.uint8
+            )
+            lut = np.array(range(0, 1 << nbits)) * scale + val_min
+            lut = lut.astype(val.dtype)
+            return lut, indices
+
+        def get_nbits_for_unique_mode(val):
+            val = val.flatten()
+            unique_vals = np.unique(val).tolist()
+            for nbits in (1, 2, 4, 6, 8):
+                if len(unique_vals) <= 1 << nbits:
+                    return nbits
+            msg = "weight value cannot be represented in an 8-bit palettization. Skipped."
+            logger.warning(msg)
+            return None
+
+        def compress_unique(val, nbits):
+            val = val.flatten()
+            unique_vals = np.unique(val).tolist()
+            if len(unique_vals) > 1 << nbits:
+                msg = "Too many unique values {} in the weight. Couldn't be represented in {} bits.".format(
+                    len(unique_vals), nbits
+                )
+                raise ValueError(msg)
+            lut = [0] * (1 << nbits)
+            lut[: len(unique_vals)] = unique_vals
+            indices = np.zeros((len(val),))
+            for i, k in enumerate(lut[: len(unique_vals)]):
+                indices += (i + 1) * (val == k).astype(np.int32)
+            indices = indices - 1
+            assert (
+                len(np.where(indices == -1)[0]) == 0
+            ), "each weight value must map to an existing LUT index"
+
+            lut = np.array(lut).astype(val.dtype)
+            indices = indices.astype(np.uint8)
+            return lut, indices
+
+        def pack_indices_into_bytes_array(indices, nbits):
+            bitarray = np.unpackbits(indices.reshape(-1, 1), bitorder="little", axis=-1)[:, :nbits]
+            return np.packbits(bitarray.flatten(), bitorder="little")
+
+        def check_lut_parameters_are_valid(val, lut, indices):
+            if not isinstance(lut, np.ndarray) or not isinstance(indices, np.ndarray):
+                raise ValueError("LUT and indices must be of numpy array type.")
+
+            if indices.size != val.size:
+                msg = "Indices size ({}) mismatched with the original weight ({}).".format(
+                    indices.size, val.size
+                )
+                raise ValueError(msg)
+
+            if len(indices.shape) != 1 or indices.dtype != np.uint8:
+                msg = "Indices must be a numpy vector of type uint8. Found shape {} with type {}".format(
+                    indices.shape, indices.dtype
+                )
+                raise ValueError(msg)
+
+            if lut.dtype != val.dtype:
+                msg = "Dtype mismatched between LUT ({}) and weight ({})".format(
+                    lut.dtype, val.dtype
+                )
+                raise ValueError(msg)
+
+        if not isinstance(val, (np.ndarray, np.generic)):
+            raise ValueError("Only numpy arrays are supported")
+
+        if mode == "KMEANS":
+            lut, indices = compress_kmeans(val, nbits)
+        elif mode == "UNIFORM":
+            lut, indices = compress_uniform(val, nbits)
+        elif mode == "UNIQUE":
+            nbits = get_nbits_for_unique_mode(val)
+            if nbits is None:
+                return None
+            lut, indices = compress_unique(val, nbits)
+        elif mode == "CUSTOM":
+            lut, indices = lut_function(val)
+
+        check_lut_parameters_are_valid(val, lut, indices)
+
+        params = LutParams()
+        params.lut = lut
+        params.shape = val.shape
+        params.indices = pack_indices_into_bytes_array(indices, int(np.log2(lut.shape[0])))
+        return params
+
+    @staticmethod
+    def decompress(params):
+        if not isinstance(params, LutParams):
+            raise ValueError("Invalid type of params")
+        return constexpr_lut_to_dense.decompress(params.lut, params.indices, params.shape)
+
+    def transform_op(self, op):
+        block = op.enclosing_block
+        lut_params = self.compress(op.val.val, self.mode, self.nbits, self.lut_function)
+
+        if lut_params is None:
+            return
+
+        if not self.fake_compression:
+            new_var = mb.constexpr_lut_to_dense(
+                indices=lut_params.indices,
+                lut=lut_params.lut,
+                shape=np.uint32(lut_params.shape),
+                before_op=op,
+                name=op.name + "_palettized",
+            )
+        else:
+            decompressed_val = self.decompress(lut_params)
+            new_var = mb.const(
+                val=decompressed_val,
+                before_op=op,
+                name=op.name + "_fake_palettized",
+            )
+
+        op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=op,
+            old_var=op.outputs[0],
+            new_var=new_var,
+            no_check_var_types=True,
+        )
+
+        block.remove_ops([op])
+
+
+class AffineQuantParams:
+    def __init__(self, quantized_data=None, zero_point=None, scale=None, axis=None):
+        self.quantized_data = quantized_data
+        self.zero_point = zero_point
+        self.scale = scale
+        self.axis = axis
+
+
+class WeightAffineQuantizer(AbstractQuantizationPass):
+    """
+    This transform does the following for each const op, if the "op_selector" returns True:
+    - Values are linearly quantized into signed or unsigned 8-bit integers.
+    - If fake_compression=False, the compressed value is encoded via the constexpr_affine_dequantize op
+    - If fake_compression=True, the compressed value is decompressed and then encoded via the const op
+    - The old const is replaced by a newly created operation.
+    """
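The scale/zero-point arithmetic used by compress() below, worked through for "LINEAR" mode with uint8 (numbers chosen arbitrarily):

.. sourcecode:: python

    import numpy as np

    w = np.array([-1.0, 0.0, 2.0], dtype=np.float32)
    q_min, q_max = 0, 255                # uint8, "LINEAR"
    val_min = min(0.0, float(w.min()))   # -1.0 (range must contain 0)
    val_max = max(0.0, float(w.max()))   #  2.0
    scale = (val_max - val_min) / (q_max - q_min)                                  # 3/255
    zero_point = round((q_min * val_max - q_max * val_min) / (val_max - val_min))  # 85
    q = np.round(w * (q_max - q_min) / (val_max - val_min)) + zero_point           # [0., 85., 255.]
    (q - zero_point) * scale             # dequantized: [-1., 0., 2.]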
+
+    WEIGHT_AFFINE_QUANTIZATION_MODES = ("LINEAR_SYMMETRIC", "LINEAR")
+    WEIGHT_AFFINE_DTYPES = (types.int8, types.uint8)
+
+    def __init__(self, fake_compression=False, op_selector=None, mode="linear", dtype=np.int8):
+        super().__init__(op_selector=op_selector)
+        self.fake_compression = fake_compression
+        self.mode = mode.upper()
+
+        # check mode
+        if not self.mode in WeightAffineQuantizer.WEIGHT_AFFINE_QUANTIZATION_MODES:
+            msg = "Only mode {} supported for weight affine quantization. Got mode {}.".format(
+                WeightAffineQuantizer.WEIGHT_AFFINE_QUANTIZATION_MODES, self.mode
+            )
+            raise ValueError(msg)
+
+        # check dtype
+        msg = f"dtype={dtype} is unsupported for affine_quantize_weights."
+        if is_builtin(dtype):
+            self.dtype = dtype
+        else:
+            try:
+                self.dtype = numpy_type_to_builtin_type(dtype)
+            except TypeError:
+                raise ValueError(msg)
+
+        if self.dtype not in WeightAffineQuantizer.WEIGHT_AFFINE_DTYPES:
+            raise ValueError(msg)
+
+    def is_valid_op(self, op):
+        if op.op_type == "const" and should_use_weight_file(op.val.val):
+            return True
+        return False
+
+    @staticmethod
+    def _get_axis(op):
+        axis = 0
+        var = op.outputs[0]
+        if len(var.child_ops) == 1 and var.child_ops[0].op_type == "conv_transpose":
+            axis = 1
+        return axis
+
+    @staticmethod
+    def compress(val, axis, mode, dtype):
+        def _ensure_numerical_range_and_cast(val, low, high, np_dtype):
+            '''
+            In some cases, the computed quantized data might exceed the data range.
+            For instance, after rounding and addition, we might get `128` for int8 quantization.
+            This utility function clamps the value to the valid data range before doing the cast.
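A one-line illustration of the boundary case mentioned above:

.. sourcecode:: python

    import numpy as np

    q = np.round(np.float32(127.6))     # 128.0, one past the int8 maximum
    np.minimum(q, 127).astype(np.int8)  # clamped to 127 before the cast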
+ ''' + val = np.minimum(val, high) + val = np.maximum(val, low) + return val.astype(np_dtype) + + mode = mode.upper() + mode_dtype_to_range = { + (types.int8, "LINEAR"): (-128, 127), + (types.int8, "LINEAR_SYMMETRIC"): (-127, 127), + (types.uint8, "LINEAR"): (0, 255), + (types.uint8, "LINEAR_SYMMETRIC"): (0, 254), + } + + if not isinstance(val, (np.ndarray, np.generic)): + raise ValueError("Only numpy arrays are supported") + + params = AffineQuantParams() + axes = tuple([i for i in range(len(val.shape)) if i != axis]) + val_min = np.amin(val, axis=axes, keepdims=True) + val_max = np.amax(val, axis=axes, keepdims=True) + + if mode == "LINEAR_SYMMETRIC": + # For the linear_symmetric mode, the range is symmetrical to 0 + max_abs = np.maximum(np.abs(val_min), np.abs(val_max)) + val_min = -max_abs + val_max = max_abs + else: + assert mode == "LINEAR" + # For the linear mode, we need to make sure the data range contains `0` + val_min = np.minimum(0.0, val_min) + val_max = np.maximum(0.0, val_max) + + q_val_min, q_val_max = mode_dtype_to_range[(dtype, mode)] + + # Set the zero point to symmetric mode + np_dtype = nptype_from_builtin(dtype) + if mode == "LINEAR_SYMMETRIC": + if dtype == types.int8: + params.zero_point = (0 * np.ones(val_min.shape)).astype(np.int8) + else: + assert dtype == types.uint8 + params.zero_point = (127 * np.ones(val_min.shape)).astype(np.uint8) + else: + assert mode == "LINEAR" + params.zero_point = (q_val_min * val_max - q_val_max * val_min) / (val_max - val_min) + params.zero_point = np.round(params.zero_point) + params.zero_point = _ensure_numerical_range_and_cast(params.zero_point, q_val_min, q_val_max, np_dtype) + + # compute the params + params.scale = (val_max - val_min) / (q_val_max - q_val_min) + params.scale = params.scale.astype(val.dtype).squeeze() + + params.quantized_data = np.round( + val * (q_val_max - q_val_min) / (val_max - val_min) + ) + params.quantized_data = (params.quantized_data + params.zero_point) + params.quantized_data = _ensure_numerical_range_and_cast(params.quantized_data, q_val_min, q_val_max, np_dtype) + + params.zero_point = params.zero_point.squeeze() + params.axis = axis + + return params + + @staticmethod + def decompress(params): + if not isinstance(params, AffineQuantParams): + raise ValueError("Invalid type of params") + return constexpr_affine_dequantize.decompress( + params.quantized_data, params.zero_point, params.scale, params.axis + ) + + def transform_op(self, op): + block = op.enclosing_block + quant_params = self.compress(op.val.val, self._get_axis(op), self.mode, self.dtype) + + if not self.fake_compression: + new_var = mb.constexpr_affine_dequantize( + quantized_data=quant_params.quantized_data, + zero_point=quant_params.zero_point, + scale=quant_params.scale, + axis=quant_params.axis, + before_op=op, + name=op.name + "_affine_quantized", + ) + else: + decompressed_val = self.decompress(quant_params) + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name + "_fake_affine_quantized", + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + ) + + block.remove_ops([op]) + + +class WeightDecompressor(AbstractQuantizationPass): + """ + This graph pass transforms the constexpr ops back into mb.const op. 
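A hedged usage sketch (``prog`` stands in for any PyMIL program containing constexpr weight ops):

.. sourcecode:: python

    # Decompress every constexpr weight op back into a plain const.
    WeightDecompressor(op_selector=lambda op: True).apply(prog)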
+    The supported constexpr ops include:
+    (1) constexpr_affine_dequantize
+    (2) constexpr_lut_to_dense
+    (3) constexpr_sparse_to_dense
+    """
+
+    def __init__(self, op_selector):
+        super().__init__(op_selector=op_selector)
+
+    def is_valid_op(self, op):
+        return op.op_type in (
+            "constexpr_affine_dequantize",
+            "constexpr_lut_to_dense",
+            "constexpr_sparse_to_dense",
+        )
+
+    def transform_op(self, op):
+        block = op.enclosing_block
+
+        decompressed_val = op.value_inference()
+        new_var = mb.const(
+            val=decompressed_val,
+            before_op=op,
+            name=op.name,
+        )
+
+        op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=op,
+            old_var=op.outputs[0],
+            new_var=new_var,
+            no_check_var_types=True,
+            force_replace=True,
+        )
+
+        block.remove_ops([op])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py
new file mode 100644
index 00000000..14869c50
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from abc import ABC, abstractmethod
+from typing import Callable, List, Optional, Text, Union
+
+from coremltools.converters.mil import Operation, Program
+
+
+class PassOption:
+    """
+    An option that will be applied in a graph pass.
+
+    Each graph pass needs its own implementation to support the corresponding option.
+    Available options are documented in each pass's docstring.
+    """
+
+    # The Callable option_val is for op_selector backward compatibility only.
+    def __init__(self, option_name: Text, option_val: Union[Text, Callable[[Operation], bool]]):
+        if not isinstance(option_name, Text):
+            raise ValueError(f"The option name should be text, but got {type(option_name)}")
+        if not isinstance(option_val, Text) and not isinstance(option_val, Callable):
+            raise ValueError(
+                f"The option value should be text or callable, but got {type(option_val)}"
+            )
+        self._option_name = option_name
+        self._option_val = option_val
+
+    def __str__(self):
+        return f"{self.option_name}: {self.option_val}"
+
+    @property
+    def option_name(self):
+        return self._option_name
+
+    @property
+    def option_val(self):
+        return self._option_val
+
+
+class AbstractGraphPass(ABC):
+    """
+    Base class for a graph pass.
+
+    Each graph pass should be a subclass of this and implement the `apply` method.
+    Each graph pass can also implement its own supported options.
+    See the examples of `skip_ops_by_type` in `add_fp16_cast` and `skip_const_by_size` in
+    `const_elimination` for how to support new options in a pass.
+    """
+
+    def __call__(self, prog: Program):
+        if not prog.skip_all_passes:
+            self.apply(prog)
+
+    def __str__(self):
+        return type(self).__name__
+
+    @abstractmethod
+    def apply(self, prog: Program):
+        pass
+
+    def set_options(self, pass_options: Optional[List[PassOption]] = None):
+        """Set pass options."""
+        if pass_options is not None:
+            for pass_option in pass_options:
+                option_name = pass_option.option_name
+                if not hasattr(self, option_name):
+                    raise NotImplementedError(
+                        f"The graph pass `{self}` doesn't support option `{option_name}`."
+                    )
+                setattr(self, option_name, pass_option.option_val)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py
new file mode 100644
index 00000000..d9dddca8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from typing import List
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Block, Operation, Var
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+
+
+def block_context_manager(func):
+    """
+    This decorator executes a function under the context manager `with block`.
+    For instance, given a function `func` with an input block and other arguments:
+
+    def func(block, *args):
+        ...
+        with block:
+            op_1 = mb.add(...)
+        ...
+        with block:
+            op_2 = mb.relu...()
+
+    It can be streamlined as:
+
+    @block_context_manager
+    def func(block, *args):
+        ...
+        op_1 = mb.add(...)
+        ...
+        op_2 = mb.relu...()
+
+    Note that the first argument of the function must have type Block.
+    It is highly recommended to decorate a function with block_context_manager if it enters
+    `with block` multiple times, because an expensive _propagate_nonreplaceable_vars() is
+    invoked each time the code exits a `block` context. Overall, the decorator reduces the
+    number of `with block` entries and exits.
+    """
+    def wrapper(*args):
+        # Make it compatible with class methods.
+        if isinstance(args[0], AbstractGraphPass):
+            block = args[1]
+        else:
+            block = args[0]
+
+        if not isinstance(block, Block):
+            raise ValueError(
+                "The function decorated with block_context_manager must have a Block "
+                "type argument as the first input."
+            )
+        with block:
+            return func(*args)
+    return wrapper
+
+
+def _check_child_op_type(op, child_op_type):
+    """
+    :param op: operation
+    :param child_op_type: str
+    :return: Return True if op has exactly 1 child and the type of that child matches child_op_type
+    """
+    if len(op.outputs) != 1:
+        return False
+    child_ops = list(op.outputs[0].child_ops)
+    if len(child_ops) != 1:
+        return False
+    if child_ops[0].op_type == child_op_type:
+        return True
+    return False
+
+
+def _check_no_output_connection(block: Block, ops: List[Operation]) -> bool:
+    """
+    Check that none of the ops in this pattern (except the last one) is connected
+    to the block outputs.
+
+    :param block: Block
+    :param ops: List of operations to check.
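A hypothetical fragment showing how a fusion pass might combine these helpers (``conv_op`` and ``relu_op`` are stand-ins):

.. sourcecode:: python

    # Fuse only when the conv feeds exactly one relu and no intermediate
    # result escapes to the block outputs.
    if _check_child_op_type(conv_op, "relu") and _check_no_output_connection(
        block, [conv_op, relu_op]
    ):
        ...  # safe to rewrite the pattern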
+ """ + for op in ops[:-1]: + for out in op.outputs: + if out in block.outputs: + return False + return True + + +def _check_var_scalar_value_in_interval(x, lower_bound, upper_bound): + """ + :param x: var + :param lower_bound: a scalar value + :param upper_bound: a scalar value + :return: True if the value of var is in the interval [lower_bound, upper_bound] + """ + if x.val is None: + return False + if not isinstance(x.val, (np.ndarray, np.generic)): + return False + + if isinstance(x.val, np.ndarray): + if x.val.size != 1: + return False + x_val = x.val[:][0] if len(x.val.shape) > 0 else x.val[()] + else: + x_val = x.val + + if x_val >= lower_bound and x_val <= upper_bound: + return True + return False + + +def _check_var_scalar_value(x, val, tol=1e-3): + """ + :param x: var + :param val: a scalar value + :return: True if x.val is equal to val otherwise return False + """ + if x.val is None: + return False + if not isinstance(x.val, np.ndarray) and not np.isscalar(x.val): + return False + + if isinstance(x.val, np.ndarray): + if x.val.size != 1: + return False + if len(x.val.shape) == 0: + x_val = x.val + else: + x_val = x.val[:][0] if len(x.val.shape) > 0 else x.val[()] + else: + x_val = x.val + + if abs(x_val - val) < tol: + return True + return False + +def _are_ops_identical(op1, op2): + ''' + Return True, if all inputs of op1 and op2 are identical. + non-constant inputs must refer to the same object, and constant inputs must have the same value + ''' + + def _are_values_identical(val1, val2): + np_arr1 = np.array(val1) + np_arr2 = np.array(val2) + return np.array_equal(np_arr1, np_arr2) + + def _are_vars_identical(var1, var2): + if var1.val is None and var2.val is None: + if var1 != var2: + return False + elif var1.val is not None and var2.val is not None: + if var1.dtype != var2.dtype: + return False + if not _are_values_identical(var1.val, var2.val): + return False + else: + return False + return True + + if op1 == op2: + return True + if op1.op_type != op2.op_type: + return False + if len(op1.inputs) != len(op2.inputs): + return False + + for key, value1 in op1.inputs.items(): + if key not in op2.inputs: + return False + value2 = op2.inputs[key] + if isinstance(value1, Var) and isinstance(value2, Var): + if not _are_vars_identical(value1, value2): + return False + elif isinstance(value1, (list, tuple)) and isinstance(value2, (list, tuple)): + if len(value1) != len(value2): + return False + else: + for i, v in enumerate(value1): + if not _are_vars_identical(v, value2[i]): + return False + else: + return False + + assert len(op1.blocks) == 0, "this method does not handle ops that have blocks in it" + assert len(op2.blocks) == 0, "this method does not handle ops that have blocks in it" + return True diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py new file mode 100644 index 00000000..359876e9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py @@ -0,0 +1,380 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from __future__ import annotations + +from typing import Dict, List, Optional, Set, Text, Union + +from tqdm import tqdm + +from coremltools import _logger as logger +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import PassOption +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY + +_COMMON_PASSES: List[Text] = [ + "common::lower_complex_dialect_ops", + "common::update_output_dtypes", + "common::cast_optimization", + "common::const_elimination", + "common::sanitize_input_output_names", + "common::divide_to_multiply", + "common::add_conv_transpose_output_shape", + "common::const_elimination", + "common::loop_invariant_elimination", + "common::remove_symbolic_reshape", + "common::noop_elimination", + "common::fuse_matmul_weight_bias", + "common::fuse_linear_bias", + "common::fuse_gelu_tanh_approximation", + "common::fuse_gelu_exact", + "common::fuse_leaky_relu", + "common::rank0_expand_dims_swap", + "common::compose_conv1d", # compose conv1d before any other conv passes + "common::use_reflection_padding", + "common::merge_consecutive_paddings", + # Should come after use_reflection_padding, which will introduce new padding layers + "common::fuse_pad_conv", # Should come after merge_consecutive_paddings + "common::image_input_preprocess", + "common::replace_stack_reshape", + # should come before detect_concat_interleave since it may add concat + "common::reduce_transposes", + "common::fuse_conv_scale", + "common::fuse_conv_bias", + "common::fuse_onehot_matmul_to_gather", + "common::fuse_layernorm_or_instancenorm", + # should come after reduce_transposes, to detect instance_norm + "common::fuse_elementwise_to_batchnorm", # should come after fuse_layernorm_or_instancenorm + "common::fuse_reduce_mean", # should come after fuse_layernorm_or_instancenorm + "common::fuse_conv_batchnorm", # should come after fuse_elementwise_to_batchnorm + "common::fuse_conv_scale", + # Re-run the fuse conv scale pass after the conv and batch_norm are fused + "common::fuse_conv_bias", + # Re-run the fuse conv bias pass after the conv and batch_norm are fused + "common::fuse_conv_batchnorm", + # In some cases, we need to run conv / batch_norm fusion again after the fuse_conv_scale and fuse_conv_bias passes + "common::detect_concat_interleave", + "common::concat_to_pixel_shuffle", + # should come after detect_concat_interleave and after replace_stack_reshape + "common::fuse_prelu", + # reduce_transpose pass should run before and after this pass (the one after will be run during the cleanup passes stage) + "common::prelu_to_lrelu", + "common::merge_consecutive_relus", + "common::merge_consecutive_reshapes", + "common::merge_consecutive_transposes", + # "expand_high_rank_reshape_and_transpose" must come after "common::merge_consecutive_transposes" + "common::expand_high_rank_reshape_and_transpose", + "common::reduce_transposes", + # "remove_redundant_ops" pass should be applied towards the end, once other graph passes have done their optimizations. 
+ # For instance, it should come after passes such as "reduce_transpose" that can introduce redundant transposes + # in the network (while reducing the total number of transposes), and after passes such as "fuse_layernorm_or_instancenorm" + # which detects patterns that involve redundant ops ("sub") etc. + "common::remove_redundant_ops", + "common::add_fp16_cast", # Will be removed if compute precision is not FP16. + "common::dead_code_elimination", # always end with dce +] + +_CLEANUP_PASSES: List[Text] = [ + "common::dead_code_elimination", + "common::const_elimination", + "common::cast_optimization", + "common::const_elimination", + "common::loop_invariant_elimination", + "common::noop_elimination", + "common::dedup_op_and_var_names", + "common::reduce_transposes", # fuse_layernorm_or_instancenorm can potentially add transposes + "common::remove_redundant_ops", + "common::topological_reorder", + "common::dead_code_elimination", # always end with dce +] + +_FRONTEND_TORCH_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + "common::dead_code_elimination", + "torch::torch_upsample_to_core_upsample", + "torch::torch_tensor_assign_to_core", +] + +_FRONTEND_TF1_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + "tensorflow::backfill_make_list_elem_type", + # DCE to reduce tf_lstm_block outputs and allow lstm_rewrite to + # ssa lstm + "common::dead_code_elimination", + # tensorflow::tf_lstm_to_core_lstm must come before + # tensorflow::expand_tf_lstm + "tensorflow::tf_lstm_to_core_lstm", + "tensorflow::expand_tf_lstm", +] + +_FRONTEND_TF2_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + # tensorflow2::remove_vacuous_cond should come before + # tensorflow::backfill_make_list_elem_type. + "tensorflow2::remove_vacuous_cond", + "tensorflow::backfill_make_list_elem_type", + # DCE to reduce tf_lstm_block outputs and allow lstm_rewrite to + # ssa lstm + "common::dead_code_elimination", + # tensorflow::tf_lstm_to_core_lstm must come before + # tensorflow::expand_tf_lstm + "tensorflow::tf_lstm_to_core_lstm", + "tensorflow::expand_tf_lstm", +] + +_BACKEND_MIL_PASSES = [ + "common::const_elimination", + "mil_backend::adjust_io_to_supported_types", + "mil_backend::insert_image_preprocessing_ops", + "mil_backend::fuse_activation_silu", + "common::const_elimination", # rank0_expand_dims_swap might introduce some new const tensor + "common::cast_optimization", + "common::dead_code_elimination", + "mil_backend::sanitize_name_strings", + "common::dedup_op_and_var_names", + "nn_backend::handle_unused_inputs", # must come after dce. +] + +_BACKEND_NN_PASSES = [ + "nn_backend::decompose_conv1d", # at the beginning of nn pass + "nn_backend::commingle_loop_vars", + "nn_backend::handle_return_inputs_as_outputs", + "common::const_elimination", + # "remove_redundant_ops" pass should be applied towards the end, once other graph passes have done their optimizations. + # For instance, it should come after passes such as "reduce_transpose" that can introduce redundant transposes + # in the network (while reducing the total number of transposes), and after passes such as "fuse_layernorm_or_instancenorm" + # which detects patterns that involve redundant ops ("sub") etc. + "common::remove_redundant_ops", + "common::dead_code_elimination", + "nn_backend::handle_unused_inputs", # must come after dce. + "nn_backend::alert_return_type_cast", # must be at the end. 
+] + + +class PassPipeline: + """ + A pipeline that contains graph passes. + + Create a default pipeline (with all default graph passes that will operate on the program): + + .. sourcecode:: python + + pipeline = PassPipeline() + + Create an empty pipeline (this will result in no graph passes being applied to the model): + + .. sourcecode:: python + + pipeline = PassPipeline.get_empty_pipeline() + + Add passes to pipeline: + + .. sourcecode:: python + + pipeline=ct.PassPipeline() + pipeline.append_pass("common::reduce_transposes") + pipeline.insert_pass(index=0, pass_name="common::reduce_transposes") + # Can also specify all passes by setting the passes of the pipeline. + pipeline.passes = ["common::reduce_transposes", "common::add_fp16_cast"] + + Remove passes: + + .. sourcecode:: python + + # Remove a pass at a specific index. + pipeline.remove_pass(index=10) + # Remove passes by names. + pipeline.remove_passes({"common::add_fp16_cast", "common::reduce_transposes"}) + + Inspect passes in the pipeline: + + .. sourcecode:: python + + # Get all passes. + pass_names = pipeline.passes + # Find indexes of a specific pass. + pass_indexes = [idx for idx, pass_name in enumerate(pass_names) if pass_names[idx] == "common::reduce_transposes"] + + Set options for a specific pass: + + .. sourcecode:: python + + pipeline=ct.PassPipeline() + pipeline.set_options(pass_name="common::const_elimination", options={"skip_const_by_size": + "100000"}, override=False) + """ + + _PIPELINE_NAME_TO_PASSES = { + "default": _COMMON_PASSES + _CLEANUP_PASSES, + "empty": [], + # Frontend pipelines. + "frontend_milinternal": [], + "frontend_pytorch": _FRONTEND_TORCH_PASSES, + "frontend_tensorflow": _FRONTEND_TF1_PASSES, + "frontend_tensorflow2": _FRONTEND_TF2_PASSES, + # Backend pipelines. 
+ "backend_mlprogram": _BACKEND_MIL_PASSES, + "backend_neuralnetwork": _BACKEND_NN_PASSES, + "backend_milinternal": [], + } + + def __init__(self, pass_names=None, pipeline_name="default"): + if pass_names is None: + pass_names = _COMMON_PASSES + _CLEANUP_PASSES + self._pass_names: List[Text] = pass_names + self._pass_options: Dict[Text, List[PassOption]] = dict() + self._pipeline_name = pipeline_name + + def __str__(self): + return self._pipeline_name + + @property + def passes(self): + return self._pass_names + + @passes.setter + def passes(self, passes: List[Text]): + for pass_name in passes: + if pass_name not in PASS_REGISTRY: + raise ValueError(f"The pass {pass_name} is not registered.") + self._pass_names = list(passes) + + @property + def pipeline_name(self): + return self._pipeline_name + + @pipeline_name.setter + def pipeline_name(self, pipeline_name: Text): + self._pipeline_name = pipeline_name + + def append_pass(self, pass_name: Text): + """Append a pass at the end of the current passes in the pipeline.""" + if pass_name not in PASS_REGISTRY: + raise ValueError(f"The pass {pass_name} is not registered.") + self._pass_names.append(pass_name) + + def insert_pass(self, index: int, pass_name: Text) -> None: + """Adds a pass at a specific index""" + if pass_name not in PASS_REGISTRY: + raise ValueError(f"The pass {pass_name} is not registered.") + self._pass_names.insert(index, pass_name) + + def remove_pass(self, index: int) -> None: + """Removes a pass at a specific index.""" + del self._pass_names[index] + + def remove_passes(self, passes_names: Union[Set[Text], List[Text]]) -> None: + """Removes all passes with specific name.""" + self._pass_names = [ + pass_name for pass_name in self._pass_names if pass_name not in passes_names + ] + + def get_options(self, pass_name: Text) -> Optional[List[PassOption]]: + """ + Gets options of a pass that has been set by the user. Return None if the pass doesn't have + any associated option set by the user. + """ + return self._pass_options.get(pass_name, None) + + def get_all_options(self) -> Dict[Text, List[PassOption]]: + """Gets all options in the pipeline.""" + return self._pass_options + + def set_options(self, pass_name: Text, options: Dict[Text, Text], override: bool = False): + """Sets options for a specific pass.""" + if self._pass_options.get(pass_name, None) and not override: + raise ValueError(f"The pass {pass_name} already has associated options.") + pass_options: List[PassOption] = [] + for option_name, option_val in options.items(): + if not (isinstance(option_name, str) and isinstance(option_val, str)): + raise ValueError( + f"The options must be specified by Dict[Text, Text], but got " + f"Dict[{type(option_name)}, {type(option_val)}]" + ) + pass_option = PassOption(option_name=option_name, option_val=option_val) + pass_options.append(pass_option) + self._pass_options[pass_name] = pass_options + + def set_options_by_another_pipeline(self, other_pipeline: PassPipeline): + """ + Convenience method for setting options from another pipeline's options. + For each option in other_pipeline, set it if it's also applicable to this pipeline. 
+ """ + for pass_name, options in other_pipeline.get_all_options().items(): + if pass_name in self.passes: + self._pass_options[pass_name] = options + + def validate(self): + """Validates the pipeline (including options).""" + pass_names_set = set(self._pass_names) + for pass_name in self._pass_options.keys(): + if pass_name not in pass_names_set: + raise ValueError( + f"This pass pipeline is not valid. The pass {pass_name} has " + f"associated options but it's not in the passes. Passes in this " + f"pipeline: {self._pass_names}" + ) + + @staticmethod + def get_empty_pipeline() -> PassPipeline: + """Creates an empty pipeline without any pass.""" + return PassPipeline(pass_names=[]) + + @staticmethod + def get_pipeline(pipeline_name: Text) -> PassPipeline: + """ + Gets a pipeline based on the name. Raises an error if no pipeline is found. + Available Pipelines: + - "default": _COMMON_PASSES + _CLEANUP_PASSES + - "empty": empty + - "frontend_pytorch": _FRONTEND_TORCH_PASSES + - "frontend_tensorflow": _FRONTEND_TF1_PASSES + - "frontend_tensorflow2": _FRONTEND_TF2_PASSES + - "frontend_milinternal": empty + - "backend_mlprogram": _BACKEND_MIL_PASSES + - "backend_neuralnetwork": _BACKEND_NN_PASSES + - "backend_milinternal": empty + """ + if pipeline_name not in PassPipeline._PIPELINE_NAME_TO_PASSES: + raise ValueError( + f"There is no pipeline for `{pipeline_name}`. " + f"Available pipelines: {PassPipeline._PIPELINE_NAME_TO_PASSES.keys()}" + ) + return PassPipeline(PassPipeline._PIPELINE_NAME_TO_PASSES[pipeline_name], pipeline_name) + + +class PipelineManager: + @staticmethod + @_profile + def apply_pipeline(prog: Program, pass_pipeline: PassPipeline): + """Apply a pass pipeline to a program, which modifies the program in-place.""" + if pass_pipeline is None: + raise ValueError("The pass_pipeline cannot be None.") + + pass_pipeline.validate() + prog.validate() + + logger.debug(f"Program before {pass_pipeline} pipeline:\n{prog}") + for pass_name in tqdm( + pass_pipeline.passes, + desc=f"Running MIL {pass_pipeline} pipeline", + unit=" passes", + ): + logger.info(f'Performing pass: "{pass_name}"') + pass_options = pass_pipeline.get_options(pass_name) + if pass_options is not None: + logger.debug( + f"The graph pass options for {pass_name} is set to {pass_options}. " + f"It will change the pass behavior. Make sure the option is intended." + ) + graph_pass = PASS_REGISTRY[pass_name] + graph_pass.set_options(pass_options) + graph_pass(prog) + prog.validate() + logger.debug(f"Program after {pass_pipeline} pipeline:\n{prog}") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py new file mode 100644 index 00000000..c562bcca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py @@ -0,0 +1,65 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import inspect +from typing import Dict, Optional, Text, Type + +from coremltools import _logger as logger +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass + + +class PassRegistry: + def __init__(self): + """ + Store the pass class instead of instance to avoid the same instance got modified by several + callers. 
+ """ + self.passes: Dict[Text, Type[AbstractGraphPass]] = {} + + def __getitem__(self, pass_id: Text) -> AbstractGraphPass: + """ + pass_id: namespace::func_name (e.g., 'common::const_elimination') + """ + if pass_id not in self.passes: + raise KeyError(f"Pass {pass_id} not found") + current_pass = self.passes[pass_id] + # The current_pass could be a PassContainer instance if registered by register_generic_pass. + return current_pass() if inspect.isclass(current_pass) else current_pass + + def __contains__(self, pass_id: Text) -> bool: + return pass_id in self.passes + + def add( + self, + namespace: Text, + pass_cls: Type[AbstractGraphPass], + override: bool, + name: Optional[Text], + ): + cls_name = pass_cls.__name__ if name is None else name + pass_id = namespace + "::" + cls_name + logger.debug(f"Registering pass {pass_id}") + if pass_id in self.passes and not override: + raise KeyError(f"Pass {pass_id} already registered.") + self.passes[pass_id] = pass_cls + + +PASS_REGISTRY = PassRegistry() + + +def register_pass(namespace: Text, override: bool = False, name: Optional[Text] = None): + """ + namespaces like {'common', 'nn_backend', , } + + Params: + override: indicate the graph pass can override an existing pass with the same name. + name: name of the graph pass. Default to class name if not provided + """ + + def class_wrapper(pass_cls: Type[AbstractGraphPass]): + PASS_REGISTRY.add(namespace, pass_cls, override, name) + return pass_cls + + return class_wrapper diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py new file mode 100644 index 00000000..25c7d28c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py new file mode 100644 index 00000000..a7a03fd1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py @@ -0,0 +1,56 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + get_op_types_in_program, +) + +np.random.seed(9) + + +class TestLowerComplexDialectOps: + def test_lower_complex_real(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + complex_data = mb.complex(real_data=x, imag_data=x) + real_data = mb.complex_real(data=complex_data) + return real_data + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::lower_complex_dialect_ops") + assert get_op_types_in_program(prev_prog) == ["complex", "complex_real"] + assert get_op_types_in_program(prog) == ["identity"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_lower_fft(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + fft_res = mb.complex_fft(data=x) + real_data = mb.complex_real(data=fft_res) + return real_data + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::lower_complex_dialect_ops") + assert get_op_types_in_program(prev_prog) == ["complex_fft", "complex_real"] + after_pass_op_types_set = set(get_op_types_in_program(prog)) + # Verifies that the complex dialect ops got lowered to core ops. + assert "complex_fft" not in after_pass_op_types_set + assert "complex_real" not in after_pass_op_types_set + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py new file mode 100644 index 00000000..553f6072 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py @@ -0,0 +1,113 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline, PipelineManager +from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program + +np.random.seed(1984) + + +class TestPassPipeline: + def test_add_pass(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))]) + def prog(x): + x = mb.relu(x=x) + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + pipeline = PassPipeline.get_empty_pipeline() + pipeline.append_pass("common::merge_consecutive_relus") + assert pipeline.passes == ["common::merge_consecutive_relus"] + PipelineManager.apply_pipeline(prog, pipeline) + assert get_op_types_in_program(prog) == ["relu", "add"] + + inputs = {"x": (2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={prog.functions["main"].outputs[0].name: (2, 3)}, + ) + + def test_insert_pass_at_index(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.insert_pass(index=0, pass_name="common::merge_consecutive_relus") + pipeline.insert_pass(index=0, pass_name="common::noop_elimination") + pipeline.insert_pass(index=1, pass_name="common::noop_elimination") + pipeline.insert_pass(index=1, pass_name="common::merge_consecutive_reshapes") + assert pipeline.passes == [ + "common::noop_elimination", + "common::merge_consecutive_reshapes", + "common::noop_elimination", + "common::merge_consecutive_relus", + ] + + def test_insert_invalid_pass(self): + pipeline = PassPipeline.get_empty_pipeline() + with pytest.raises(ValueError, match="The pass test_pass is not registered."): + pipeline.append_pass("test_pass") + with pytest.raises(ValueError, match="The pass test_pass is not registered."): + pipeline.insert_pass(0, "test_pass") + with pytest.raises(ValueError, match="The pass invalid_pass is not registered."): + pipeline.passes = ["invalid_pass"] + + def test_remove_passes(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.passes = [ + "common::noop_elimination", + "common::merge_consecutive_reshapes", + "common::noop_elimination", + "common::merge_consecutive_relus", + ] + pipeline.remove_passes(passes_names=["common::noop_elimination"]) + assert pipeline.passes == [ + "common::merge_consecutive_reshapes", + "common::merge_consecutive_relus", + ] + pipeline.remove_pass(index=1) + assert pipeline.passes == ["common::merge_consecutive_reshapes"] + + def test_set_pass_options(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.append_pass("common::add_fp16_cast") + assert pipeline.get_options("common::add_fp16_cast") is None + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"}) + assert len(pipeline.get_options("common::add_fp16_cast")) == 1 + assert pipeline.get_options("common::add_fp16_cast")[0].option_name == "skip_ops_by_type" + assert pipeline.get_options("common::add_fp16_cast")[0].option_val == "matmul,const" + + def test_set_pass_options_already_exist(self): + pipeline = PassPipeline() + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"}) + with pytest.raises( + ValueError, match="The pass common::add_fp16_cast already has associated options." 
+        ):
+            pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "concat"})
+        # Override the options.
+        pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "concat"}, override=True)
+        assert pipeline.get_options("common::add_fp16_cast")[0].option_name == "skip_ops_by_type"
+        assert pipeline.get_options("common::add_fp16_cast")[0].option_val == "concat"
+
+    def test_set_pass_options_for_pass_not_in_pipeline(self):
+        pipeline = PassPipeline.get_empty_pipeline()
+        pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"})
+        with pytest.raises(
+            ValueError,
+            match="This pass pipeline is not valid. The pass common::add_fp16_cast "
+            "has associated options but it's not in the passes.",
+        ):
+            pipeline.validate()
+
+    def test_get_invalid_pipeline(self):
+        with pytest.raises(
+            ValueError,
+            match="There is no pipeline for `invalid`.",
+        ):
+            PassPipeline.get_pipeline("invalid")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py
new file mode 100644
index 00000000..aa7b1671
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py
@@ -0,0 +1,7475 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+import itertools
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+from coremltools._deps import _IS_MACOS
+from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import (
+    register_generic_pass,
+)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, Program, Symbol, get_new_symbol, types
+from coremltools.converters.mil.mil.passes.defs import quantization
+from coremltools.converters.mil.mil.passes.defs.cleanup import topological_reorder
+from coremltools.converters.mil.mil.passes.helper import _check_var_scalar_value
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.mil.types import numpy_type_to_builtin_type
+from coremltools.converters.mil.testing_reqs import backends
+from coremltools.converters.mil.testing_utils import (
+    apply_pass_and_basic_check,
+    assert_model_is_valid,
+    assert_op_count_match,
+    assert_same_output_names,
+    get_op_names_in_program,
+    get_op_types_in_program,
+)
+
+np.random.seed(1984)
+_VALIDATE_MODEL = True
+
+
+class TestConstElimination:
+    def test_const_elimination(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
+        def prog(x):
+            a = np.random.rand(2, 4).astype(np.float32)
+            double_a = mb.add(x=a, y=a)
+            return mb.add(x=x, y=double_a)
+
+        assert_op_count_match(prog, expect=2, op="const")
+        prev_prog = copy.deepcopy(prog)
+        PASS_REGISTRY["common::const_elimination"](prog)
+        assert_same_output_names(prev_prog, prog)
+        assert_op_count_match(prog, expect=3, op="const")
+
+        if _VALIDATE_MODEL:
+            assert_model_is_valid(prog, {"x": (2, 4)})
+
+    def test_const_elimination_nonreplaceable(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
+        def prog(x):
+            a = np.random.rand(2, 4).astype(np.float16)
+            constexpr_a = mb.constexpr_cast(source_val=a, output_dtype="fp32")
+            double_a = mb.add(x=constexpr_a, y=a.astype(np.float32))
+            return mb.add(x=x, y=double_a)
+
+        prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prev_prog) == ["constexpr_cast", "add", "add"]
+        # Not folded into a const because the upstream constexpr_cast op is non-replaceable.
+        assert get_op_types_in_program(prog) == ["constexpr_cast", "add", "add"]
+
+    @patch(
+        "coremltools.converters.mil.mil.passes.defs.cleanup.const_elimination._skip_const_by_size",
+        1000,
+    )
+    def test_const_elimination_larger_than_threshold(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
+        def prog(x):
+            # Construct a 10 x 10 matrix (100 elements), which is smaller than the threshold (1000).
+            tmp = mb.range_1d(start=0, end=10, step=1)
+            tmp_x = mb.reshape(x=tmp, shape=[-1, 1])
+            tmp_y = mb.reshape(x=tmp, shape=[1, -1])
+            return mb.matmul(x=tmp_x, y=tmp_y)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
+        def prog_large_const_size(x):
+            # Construct a 100 x 100 matrix (10000 elements), which is larger than the threshold (1000).
+            tmp = mb.range_1d(start=0, end=100, step=1)
+            tmp_x = mb.reshape(x=tmp, shape=[-1, 1])
+            tmp_y = mb.reshape(x=tmp, shape=[1, -1])
+            return mb.matmul(x=tmp_x, y=tmp_y)
+
+        prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prev_prog) == [
+            "range_1d",
+            "reshape",
+            "reshape",
+            "matmul",
+        ]
+        # All ops (range_1d, reshape, matmul) constructing that 10x10 matrix are folded into consts.
+        assert get_op_types_in_program(prog) == []
+
+        prev_prog_large_const_size, _, _ = apply_pass_and_basic_check(
+            prog_large_const_size, "common::const_elimination"
+        )
+        assert get_op_types_in_program(prev_prog_large_const_size) == [
+            "range_1d",
+            "reshape",
+            "reshape",
+            "matmul",
+        ]
+        # The matmul op constructing the large matrix is kept because its output size exceeds the threshold.
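+        # (The 1000-element threshold is the _skip_const_by_size value patched in
+        # above; const folding is skipped for outputs with more elements than that.)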
+ assert get_op_types_in_program(prog_large_const_size) == ["matmul"] + + +class TestDeadCodeElimination: + def test_dead_code_elimination(self): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(2, 4)), + mb.TensorSpec(shape=(2, 4)), + ] + ) + def program0(x, y): + # following three unused op should be eliminated + a = mb.const(val=np.zeros(shape=(1,))) + b = mb.const(val=np.zeros(shape=(1,))) + _ = mb.add(x=a, y=b) + return mb.add(x=x, y=y) + + assert_op_count_match(program0, expect=4) + prev_prog = copy.deepcopy(program0) + PASS_REGISTRY["common::dead_code_elimination"](program0) + assert_same_output_names(prev_prog, program0) + assert_op_count_match(program0, expect=1) + + if _VALIDATE_MODEL: + assert_model_is_valid(program0, {"x": (2, 4), "y": (2, 4)}) + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def program1(x): + weights_val = np.random.rand(4, 2).T.astype(np.float32) + weights = mb.const(val=weights_val) + bias_val = np.random.rand(2).astype(np.float32) + bias = mb.const(val=bias_val) + + # unused op and its inputs should be eliminated + weights_for_matmul = mb.transpose(x=weights, perm=[1, 0]) + mb.matmul(x=x, y=weights_for_matmul) + + return mb.linear(x=x, weight=weights, bias=bias) + + assert_op_count_match(program1, expect=8) + prev_prog = copy.deepcopy(program1) + PASS_REGISTRY["common::dead_code_elimination"](program1) + assert_same_output_names(prev_prog, program1) + assert_op_count_match(program1, expect=3) + + if _VALIDATE_MODEL: + assert_model_is_valid(program1, {"x": (2, 4)}) + + +class TestDedupOpAndVarNames(unittest.TestCase): + def test_unchanged(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + x = mb.reshape(x=x, shape=(1, 8), name="reshape") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + + self.assertEqual(get_op_types_in_program(prev_prog), ["reshape"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["reshape"]) + + self.assertEqual(get_op_types_in_program(prog), ["reshape"]) + self.assertEqual(get_op_names_in_program(prog), ["reshape"]) + + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 8)}, + ) + + def test_op_name_duplicated_once(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop") + x = mb.square(x=x, name="square_last") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + + self.assertEqual(get_op_types_in_program(prev_prog), ["cast", "cast", "square"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["castop", "castop", "square_last"]) + + self.assertEqual(get_op_types_in_program(prog), ["cast", "cast", "square"]) + self.assertEqual(get_op_names_in_program(prog), ["castop", "castop_1", "square_last"]) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + def test_op_name_duplicated_many(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="int32", name="castop_2") + x = mb.cast(x=x, dtype="int64", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop_2") + x = mb.square(x=x, name="square") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, 
"common::dedup_op_and_var_names") + + self.assertEqual( + get_op_types_in_program(prev_prog), ["cast", "cast", "cast", "cast", "cast", "square"] + ) + self.assertEqual( + get_op_names_in_program(prev_prog), + ["castop", "castop", "castop_2", "castop", "castop_2", "square"], + ) + + self.assertEqual( + get_op_types_in_program(prog), ["cast", "cast", "cast", "cast", "cast", "square"] + ) + self.assertEqual( + get_op_names_in_program(prog), + ["castop", "castop_1", "castop_2", "castop_3", "castop_2_1", "square"], + ) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + def test_input_name_shadow(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + # op name "x" results in output var name "x", which shadows prog + # input var name "x" + x = mb.transpose(x=x, perm=[1, 0], name="x") + x = mb.relu(x=x, name="relu") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "relu"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["x", "relu"]) + + self.assertEqual(get_op_types_in_program(prog), ["transpose", "relu"]) + self.assertEqual(get_op_names_in_program(prog), ["x", "relu"]) + + op = prog["main"].find_ops(op_type="transpose")[0] + self.assertEqual("x_1", op.outputs[0].name) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (20, 10)}, + ) + + def test_nested_block(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,))]) + def prog(x): + def true_fn(): + # returns var with name x shadows input 'x' + return mb.add(x=x, y=1.0, name="x") + + def false_fn(): + # two ops with name "x" + return mb.add(x=x, y=-1.0, name="x") + + pred = mb.equal(x=mb.squeeze(x=x), y=1.0) + return mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn) + + cond_op = prog.functions["main"].operations[-1] + assert cond_op.blocks[0].outputs[0].name == "x" + assert cond_op.blocks[1].outputs[0].name == "x" + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + cond_op = prog.functions["main"].operations[-1] + assert cond_op.blocks[0].outputs[0].name == "x_1" + assert cond_op.blocks[1].outputs[0].name == "x_2" + + assert_model_is_valid( + prog, + {"x": (1,)}, + expected_output_shapes={block.outputs[0].name: (1,)}, + ) + + +class TestAddConvTransposeOutputShape: + def test_add_conv_transpose_output_shape(self): + """ + Given: + %1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input. 
+
+        Result:
+            %2: (3, i32) = const(val=[1,5,39])
+            %3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2)
+        """
+        N, C_in, C_out, D1 = 1, 3, 5, 20
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, D1))])
+        def prog(x):
+            weight = np.random.rand(C_in, C_out, D1).astype(np.float32)
+            return mb.conv_transpose(x=x, weight=weight)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::add_conv_transpose_output_shape"
+        )
+        assert get_op_types_in_program(prev_prog) == ["conv_transpose"]
+        assert get_op_types_in_program(prog) == ["conv_transpose"]
+        prev_conv_transpose_op = prev_prog.find_ops(op_type="conv_transpose", exactly_one=True)[0]
+        conv_transpose_op = prog.find_ops(op_type="conv_transpose", exactly_one=True)[0]
+        assert np.all(conv_transpose_op.output_shape.val == prev_conv_transpose_op.outputs[0].shape)
+
+
+class TestNoopElimination:
+    @pytest.mark.parametrize(
+        "op_type, pos, val",
+        itertools.product(
+            ["add", "mul", "floor_div", "pow", "real_div", "sub"],
+            ["x", "y"],
+            [0.0, 1.0, [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
+        ),
+    )
+    def test_elementwise_elimination(self, op_type, pos, val):
+        if "div" in op_type and np.prod(val) == 0:
+            return
+        # Skip pow unless the const operand is the scalar 0 or 1. Note `and`, not
+        # `or`: `val != 0 or val != 1` is always true and would skip every pow case.
+        if "pow" in op_type and (val != 0 and val != 1):
+            return
+
+        test_op = getattr(mb, op_type)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
+        def prog(x):
+            if pos == "x":
+                r1 = test_op(x=val, y=x)
+            else:
+                r1 = test_op(x=x, y=val)
+            return mb.relu(x=r1)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination")
+        original_program = [op_type, "relu"]
+        new_program = original_program
+        if op_type in {"add"}:
+            if val == 0.0 or val == [0.0, 0.0, 0.0, 0.0]:
+                new_program = ["relu"]
+        elif op_type in {"mul"}:
+            if val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]:
+                new_program = ["relu"]
+        elif op_type in {"real_div"}:
+            if pos == "y" and (val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]):
+                new_program = ["relu"]
+        elif op_type in {"pow", "floor_div"}:
+            if pos == "y" and (val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]):
+                new_program = ["relu"]
+        elif op_type in {"sub"}:
+            if pos == "y" and (val == 0.0 or val == [0.0, 0.0, 0.0, 0.0]):
+                new_program = ["relu"]
+
+        assert get_op_types_in_program(prev_prog) == original_program
+        assert get_op_types_in_program(prog) == new_program
+        assert_model_is_valid(
+            prog,
+            {"x": (2, 4)},
+            expected_output_shapes={block.outputs[0].name: (2, 4)},
+        )
+
+    def test_elementwise_broadcast(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=[4])])
+        def prog(x):
+            r1 = mb.add(x=x, y=[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])
+            return mb.relu(x=r1)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination")
+        original_program = ["add", "relu"]
+
+        assert get_op_types_in_program(prev_prog) == original_program
+        assert get_op_types_in_program(prog) == original_program
+        assert_model_is_valid(
+            prog,
+            {"x": [4]},
+            expected_output_shapes={block.outputs[0].name: (2, 4)},
+        )
+
+    def test_elementwise_elimination_fill(self):
+        """
+        When a fill layer with a dynamic shape is fed into an elementwise binary
+        operation, the tensor cannot be materialized at conversion time, but no-op
+        elimination can still be performed based on the fill value.
+        """
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, get_new_symbol()))])
+        def prog(x):
+            shape = mb.shape(x=x)
+            y = mb.fill(value=0.0, shape=shape)
+            x = mb.add(x=x, y=y)
+            return mb.relu(x=x)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, 
"common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["shape", "fill", "add", "relu"] + assert get_op_types_in_program(prog) == ["shape", "fill", "relu"] + + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["relu"] + + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_reshape_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.reshape(x=x, shape=[1, 8]) + mb.reshape(x=r1, shape=[1, 8]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"] + assert get_op_types_in_program(prog) == ["reshape", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 8)}, + ) + + def test_oneway_split_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.split(x=x, num_splits=1, axis=-1) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["split", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_full_split_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.split(x=x, split_sizes=[4], axis=-1) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["split", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebysize_full_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebysize_to_end_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebyindex_full_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + 
expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebyindex_negative_stride(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_index( + x=x, + begin=[0, 0], + end=[0, 0], + stride=[1, -1], + begin_mask=[True, True], + end_mask=[True, True], + ) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["slice_by_index", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + @pytest.mark.parametrize( + "begin_mask, end_mask", + itertools.product( + itertools.product([True, False], [True, False]), + itertools.product([True, False], [True, False]), + ), + ) + def test_slicebyindex_mask_elimination(self, begin_mask, end_mask): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))]) + def prog(x): + begin = [1, 1] + end = [1, 1] + for i in range(2): + if not begin_mask[i]: + begin[i] = 0 + if not end_mask[i]: + end[i] = 4 + r1 = mb.slice_by_index( + x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask + ) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (4, 4)}, + expected_output_shapes={block.outputs[0].name: (4, 4)}, + ) + + def test_pad_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.pad(x=x, pad=[0, 0, 0, 0]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["pad", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_keep_pad(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.pad(x=x, pad=[4, 4, 2, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["pad", "relu"] + assert get_op_types_in_program(prog) == ["pad", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (10, 8)}, + ) + + def test_tile_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.tile(x=x, reps=[1, 1]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["tile", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_keep_tile(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.tile(x=x, reps=[2, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["tile", "relu"] + assert get_op_types_in_program(prog) == ["tile", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (4, 
8)}, + ) + + def test_upsample_nearest_neighbor_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.upsample_nearest_neighbor(x=x) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_upsample_bilinear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.upsample_bilinear(x=x) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_resize_bilinear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_crop_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["crop", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_linear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_transpose_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 4))]) + def prog(x): + r1 = mb.transpose(x=x, perm=[0, 1, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["transpose", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 4)}, + ) + + +class TestRemoveSymbolicReshape: + def test_remove_symbolic_reshape(self): + sym_b = Symbol("s0") + original_shape = (sym_b, Symbol("s1"), 2) + reshape_name = "reshape" + + @mb.program(input_specs=[mb.TensorSpec(shape=(sym_b, 4))]) + def prog(x): + # const cannot represent symbolic values. 
Use _const_symbolic + shape = mb._const_symbolic(val=original_shape) + return mb.reshape(x=x, shape=shape, name=reshape_name) + + reshape_op = prog.find_ops(prefix=reshape_name, op_type="reshape", exactly_one=True)[0] + shape_var = reshape_op.shape + reshaped_var = reshape_op.outputs[0] + assert np.all(shape_var.sym_val == original_shape) + assert np.all(reshaped_var.shape == (sym_b, 2, 2)) + + # Note: we cannot deepcopy prog with symbol. + prev_outputs = [o.name for o in prog["main"].outputs] + PASS_REGISTRY["common::remove_symbolic_reshape"](prog) + curr_outputs = [o.name for o in prog["main"].outputs] + assert curr_outputs == prev_outputs + + reshape_op = prog.find_ops(prefix=reshape_name, op_type="reshape", exactly_one=True)[0] + shape_var = reshape_op.shape + reshaped_var = reshape_op.outputs[0] + # shape param cannot be symbolic after the pass + assert np.all(shape_var.sym_val == (-1, 2, 2)) + # output shape is still symbolic + assert np.all(reshaped_var.shape == (sym_b, 2, 2)) + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (3, 4)}) + + +class TestLoopInvariantElimination: + def test_loop_invariant_elimination1(self): + """ + Invariant pattern: Block input vars are returned as block output vars. + """ + + def body(a, b): + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2)), + mb.TensorSpec(shape=(1, 2)), + ] + ) + def prog(a, b): + # b is loop invariant + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 2 + assert len(while_op.outputs) == 2 + assert len(while_op.loop_vars) == 2 + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::loop_invariant_elimination"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 1 + assert len(while_op.outputs) == 1 + assert len(while_op.loop_vars) == 1 + assert while_op.blocks[0].inputs[0].name == "a_x0" + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + def test_loop_invariant_elimination2(self): + """ + Invariant pattern: Block outputs var from outside of the block + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2)), + mb.TensorSpec(shape=(1, 2)), + ] + ) + def prog(a, b): + def body(a, bx): + return mb.add(x=a, y=b), b + + def cond(a, bx): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=bx, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + # b is loop invariant + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 2 + assert len(while_op.outputs) == 2 + assert len(while_op.loop_vars) == 2 + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::loop_invariant_elimination"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 1 + assert len(while_op.outputs) == 1 + assert 
len(while_op.loop_vars) == 1 + assert while_op.blocks[0].inputs[0].name == "a_x0" + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + +class TestReduceMeanFusion: + def test_valid_pattern1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.mul(x=1.0 / 30, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prev_prog) == ["reduce_sum", "mul"] + assert get_op_types_in_program(prog) == ["reduce_mean"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 1, 1)}, + ) + + def test_valid_pattern2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[0], keep_dims=False) + x1 = mb.real_div(x=x1, y=4.0) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prev_prog) == ["reduce_sum", "real_div"] + assert get_op_types_in_program(prog) == ["reduce_mean"] + assert_model_is_valid( + prog, + {"x": (4, 5)}, + expected_output_shapes={block.outputs[0].name: (5,)}, + ) + + def test_invalid_pattern1(self): + """ + The mul does not correspond to "1/count" + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.mul(x=5.0, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prog) == ["reduce_sum", "mul"] + + def test_invalid_pattern2(self): + """ + The div does not correspond to "count" + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.real_div(x=x1, y=31.0) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern3(self): + """ + One of the reduction dim is symbolic + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, get_new_symbol(), 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.real_div(x=x1, y=30.0) + return x1 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern4(self): + """ + output of reduce_sum is model output + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + y1 = mb.real_div(x=x1, y=30.0) + return y1, x1 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern5(self): + """ + output of reduce_sum is feeding into another op + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + y1 = mb.real_div(x=x1, y=30.0) + y2 = mb.mul(x=x1, y=10.0) + y3 = mb.add(x=y1, y=y2) + return y3 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div", "mul", "add"] + + +class TestRemoveRedundantOps: + def 
test_redundant_ops_just_after_input_valid_pattern_1(self): + """ + Input graph: + input----->transpose(perm=[0, 2, 1])--->add---> add ---> out + | ^ ^ + | | | + |---->transpose(perm=[0, 2, 1])---- | + | | + | | + |---->transpose(perm=[0, 2, 1])------------ + + Output graph: + input----->transpose(perm=[0, 2, 1])--->add---> add ----> out + | ^ ^ + | | | + |------------- | + | | + |-------------------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 1]) + x3 = mb.transpose(x=x, perm=[0, 2, 1]) + z = mb.add(x=x1, y=x2) + z = mb.add(x=z, y=x3) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "transpose", + "transpose", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["transpose", "add", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 5, 3)}, + ) + + def test_redundant_ops_just_after_input_valid_pattern_2(self): + """ + Input graph: + input----->leaky_relu(alpha=0.3)--->add---> add ---> out + | ^ ^ + | | | + |----->leaky_relu(alpha=0.3)--- | + | | + | | + |---->leaky_relu(alpha=0.3)------------ + + Output graph: + input--------->leaky_relu(alpha=0.3)--->add---> add ----> out + | ^ ^ + | | | + |------------- | + | | + |--------------------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.leaky_relu(x=x, alpha=0.3) + x2 = mb.leaky_relu(x=x, alpha=0.3) + x3 = mb.leaky_relu(x=x, alpha=0.3) + z = mb.add(x=x1, y=x2) + z = mb.add(x=z, y=x3) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "leaky_relu", + "leaky_relu", + "leaky_relu", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["leaky_relu", "add", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 5)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_1(self): + """ + input----->transpose(perm=[0, 2, 1])---> reshape(shape=[-1]) -----> add ---> out + | ^ + | | + |---->transpose(perm=[1, 0, 2])----> reshape(shape=[-1])------ + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1]) + x2 = mb.transpose(x=x, perm=[1, 0, 2]) + x1 = mb.reshape(x=x1, shape=[-1]) + x2 = mb.reshape(x=x2, shape=[-1]) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "transpose", + "reshape", + "reshape", + "add", + ] + assert get_op_types_in_program(prog) == [ + "transpose", + "transpose", + "reshape", + "reshape", + "add", + ] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (30,)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_2(self): + """ + input----->leaky_relu(alpha=0.3) -----> add ---> out + | ^ + | | + |---->leaky_relu(alpha=0.4)------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.leaky_relu(x=x, alpha=0.3) + x2 = mb.leaky_relu(x=x, alpha=0.4) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert 
get_op_types_in_program(prev_prog) == ["leaky_relu", "leaky_relu", "add"] + assert get_op_types_in_program(prog) == ["leaky_relu", "leaky_relu", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 5)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_3(self): + """ + test case, when inputs of 1 op is a subset of the inputs of the other op + + input----->layer_norm1 -----> add ---> out + | ^ + | | + |---->layer_norm2------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 2))]) + def prog(x): + x1 = mb.layer_norm(x=x, axes=[2], epsilon=1e-4) + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + x2 = mb.layer_norm(x=x, axes=[2], epsilon=1e-4, gamma=gamma_val, beta=beta_val) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == ["layer_norm", "layer_norm", "add"] + assert get_op_types_in_program(prog) == ["layer_norm", "layer_norm", "add"] + assert_model_is_valid( + prog, + {"x": (1, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + @staticmethod + def _make_repeated_conv_prog(redundant_conv=True): + prog = Program() + func_inputs = {"x": mb.placeholder(shape=[1, 4, 5, 5])} + with Function(func_inputs) as ssa_fun: + x = ssa_fun.inputs["x"] + x = mb.relu(x=x) + W = np.random.rand(8, 4, 3, 3) + if redundant_conv: + bias = np.random.rand(8) + x1 = mb.conv(x=x, weight=W, bias=bias, pad_type="same", strides=[1, 1]) + x2 = mb.conv(x=x, weight=W, bias=bias, pad_type="same", strides=[1, 1]) + else: + x1 = mb.conv(x=x, weight=W, bias=np.random.rand(8), pad_type="same", strides=[1, 1]) + x2 = mb.conv(x=x, weight=W, bias=np.random.rand(8), pad_type="same", strides=[1, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x1 = mb.avg_pool(x=x1, kernel_sizes=[2, 2], strides=[1, 1], pad_type="same") + z = mb.concat(values=(x1, x2), axis=-3) + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + return prog + + def test_redundant_ops_inside_graph_valid_pattern(self): + """ + Input graph: + input--> relu--------->conv------>relu----> pool ---> concat ---> out + | ^ + | | + |---->conv---->relu---------------------------- + + Output graph: + input-> relu--->conv------>relu----> pool ---> concat ---> out + | ^ + | | + |------------------- + """ + prog = self._make_repeated_conv_prog(redundant_conv=True) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + "concat", + ] + assert get_op_types_in_program(prog) == ["relu", "conv", "relu", "avg_pool", "concat"] + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 5, 5)}, + ) + + def test_redundant_ops_inside_graph_invalid_pattern(self): + """ + input--->relu--------->conv1------>relu----> pool ---> concat ---> out + | ^ + | | + |---->conv2---->relu--------------------------- + """ + prog = self._make_repeated_conv_prog(redundant_conv=False) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + "concat", + ] + assert get_op_types_in_program(prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + 
"concat", + ] + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 5, 5)}, + ) + + def test_redundant_op_as_output_valid_pattern_1(self): + """ + Input graph: + input--------->relu------> out1 + | + | + |---->relu---->tanh---> out2 + + Output graph: + input--------->relu------> out1 + | + | + |---->tanh---> out2 + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + return x1, mb.tanh(x=x2) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "tanh"] + assert get_op_types_in_program(prog) == ["relu", "tanh"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (2, 3, 5), + block.outputs[1].name: (2, 3, 5), + }, + ) + + def test_redundant_op_as_output_invalid_pattern_1(self): + """ + Input graph: + input--------->relu------> out1 + | + | + |---->relu---> out2 + + "common::remove_redundant_ops" pass does not remove ops if their outputs + are block outputs. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + return x1, x2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu"] + assert get_op_types_in_program(prog) == ["relu", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (2, 3, 5), + block.outputs[1].name: (2, 3, 5), + }, + ) + + def test_cond_block_program(self): + """ + - Test identical ops within different blocks are not removed. The "relu" op inside true and + false blocks are not removed since they are in different blocks. + - Test ops that have blocks inside them are not removed. There are two cond ops here, + with identical inputs but they are not removed, since they are ops that have nested block + inside them. 
+ """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1,))]) + def prog(x): + x1 = mb.cast(x=x, dtype="bool") + + def true_fn(): + x = mb.shape(x=x1) + x = mb.cast(x=x, dtype="fp32") + return mb.add(x=x, y=1.0) + + def false_fn(): + x = mb.shape(x=x1) + x = mb.cast(x=x, dtype="fp32") + return mb.add(x=x, y=-1.0) + + z1 = mb.cond(pred=x1, _true_fn=true_fn, _false_fn=false_fn) + z2 = mb.cond(pred=x1, _true_fn=true_fn, _false_fn=false_fn) + z = mb.add(x=z1, y=z2) + return z + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == ["cast", "cond", "cond", "add"] + assert get_op_types_in_program(prog) == ["cast", "cond", "cond", "add"] + cond_op = prog.find_ops(op_type="cond")[0] + assert cond_op.blocks[0].operations[0].op_type == "shape" + assert cond_op.blocks[1].operations[0].op_type == "shape" + assert_model_is_valid( + prog, + {"x": (1,)}, + expected_output_shapes={block.outputs[0].name: (1,)}, + ) + + def test_concat_op_pattern(self): + """ + Input graph: + ---------------> concat ------> log ------> out1 + | ^ + | | + input--------->relu------> concat ------> relu----> out2 + | ^ | + | | | + |---->tanh-------------------- + + Output graph: + |------>log ------> out1 + | + | + input--------->relu------> concat ------> relu----> out2 + | ^ + | | + |---->tanh--------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.tanh(x=x) + c1 = mb.concat(values=(x1, x2), axis=0) + c2 = mb.concat(values=(x1, x2), axis=0) + z1 = mb.log(x=c1) + z2 = mb.relu(x=c2) + return z1, z2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "relu", + "tanh", + "concat", + "concat", + "log", + "relu", + ] + assert get_op_types_in_program(prog) == ["relu", "tanh", "concat", "log", "relu"] + assert_model_is_valid( + prog, + {"x": (10, 5)}, + expected_output_shapes={block.outputs[0].name: (20, 5), block.outputs[1].name: (20, 5)}, + ) + + def test_multiple_redundant_child_ops_pattern(self): + """ + Input graph + + input -------------> reshape ----------> add ---------> out1 + | ^ + | | + |-------> reshape --------------- + | + |------> slice_by_size-----> add ----------> out2 + | ^ + | | + |------> slice_by_size ------- + + Output graph + + input -------------> reshape ----------> add ------------> out1 + | | ^ + | | | + | |--------- + | + |------> slice_by_size----------> add -----------------> out2 + | ^ + | | + |--------------------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 5, 4))]) + def prog(x): + x1 = mb.reshape(x=x, shape=[5, 2, -1]) + x2 = mb.reshape(x=x, shape=[5, 2, -1]) + x3 = mb.slice_by_size(x=x, begin=[0, 0, 1], size=[2, 4, 3]) + x4 = mb.slice_by_size(x=x, begin=[0, 0, 1], size=[2, 4, 3]) + z1 = mb.add(x=x1, y=x2) + z2 = mb.add(x=x3, y=x4) + return z1, z2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "reshape", + "reshape", + "slice_by_size", + "slice_by_size", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["reshape", "slice_by_size", "add", "add"] + assert_model_is_valid( + prog, + {"x": (10, 5, 4)}, + expected_output_shapes={ + block.outputs[0].name: (5, 2, 20), + block.outputs[1].name: (2, 4, 3), + }, + ) + + def test_random_distribution_op_invalid_pattern(self): + """ + Identical random ops are not removed + + 
input----->cast---->random_uniform------> add ---> out + | ^ + | | + |---->random_uniform------------ + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3,))]) + def prog(shape): + shape = mb.cast(x=shape, dtype="int32") + x1 = mb.random_uniform(shape=shape, low=0.0, high=1.0, seed=11) + x2 = mb.random_uniform(shape=shape, low=0.0, high=1.0, seed=11) + return mb.add(x=x1, y=x2) + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "cast", + "random_uniform", + "random_uniform", + "add", + ] + assert get_op_types_in_program(prog) == ["cast", "random_uniform", "random_uniform", "add"] + + +class TestTopologicalReorder: + def test_move_sink_casts_to_the_end(self): + """ + Input graph: + x (input) ---> square ---> cast (output) + | + | -----------> log ------> cast (output) + | + | -----------> relu -----> cast ----> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x2 = mb.cast(x=x1, dtype="fp32") + x3 = mb.log(x=x) + x4 = mb.cast(x=x3, dtype="fp32") + x5 = mb.relu(x=x) + x6 = mb.cast(x=x5, dtype="fp32") + x7 = mb.relu(x=x6) + return x2, x4, x7 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "cast", + "log", + "cast", + "relu", + "cast", + "relu", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "cast", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + def test_move_sink_cast_transpose_to_the_end(self): + """ + Input graph: + x (input) ---> square ---> transpose ---> cast (output) + | + | -----------> log ------> transpose ---> cast (output) + | + | -----------> relu -----> cast ----> relu (output) + | + | -----------> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x1_t = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.cast(x=x1_t, dtype="fp32") + x3 = mb.log(x=x) + x3_t = mb.transpose(x=x3, perm=[1, 0]) + x4 = mb.cast(x=x3_t, dtype="fp32") + x5 = mb.relu(x=x) + x6 = mb.cast(x=x5, dtype="fp32") + x7 = mb.relu(x=x6) + x8 = mb.relu(x=x) + return x2, x4, x7, x8 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "transpose", + "cast", + "log", + "transpose", + "cast", + "relu", + "cast", + "relu", + "relu", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (20, 10), + block.outputs[1].name: (20, 10), + block.outputs[2].name: (10, 20), + block.outputs[3].name: (10, 20), + }, + ) + + def test_move_multiple_uses_overlapping(self): + """ + Input graph: + x (input) ---> cast ---> cast (output) + | + |-------> transpose ---> transpose (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def 
prog(x): + x1 = mb.cast(x=x, dtype="fp16") + x2 = mb.cast(x=x1, dtype="fp32") + x3 = mb.transpose(x=x1, perm=[1, 0]) + x4 = mb.transpose(x=x3, perm=[1, 0]) + return x2, x4 + + assert get_op_types_in_program(prog) == ["cast", "cast", "transpose", "transpose"] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "transpose", "transpose", "cast"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + }, + ) + + def test_move_split_to_first_use(self): + """ + Input graph: + x (input) ---> split ---> square ---> add (output) + | | | + | | --------------------| + | + | -----------> square --------------> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + s1, s2 = mb.split(x=x, num_splits=2, axis=0) + x2 = mb.square(x=x) + x3 = mb.relu(x=x2) + s1_1 = mb.square(x=s1) + s3 = mb.add(x=s1_1, y=s2) + return x3, s3 + + assert get_op_types_in_program(prog) == ["split", "square", "relu", "square", "add"] + + block = prog.functions["main"] + # Reorder `split` op to test op with multiple output case + topological_reorder._move_operations_to_the_end_block(block, ["split"]) + assert get_op_types_in_program(prog) == ["square", "relu", "split", "square", "add"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (5, 20), + }, + ) + + def test_move_transpose_before_subblock(self): + """ + Input graph: + x (input) ---> cast ---> transpose ---> cast (output) + | + | -----------> square ------> transpose (x1_t) ---> cast (output) + | + | -----------> squeeze ----> equal ----> squeeze + | + (true) <--- / \ ---> (false) + | | + | /<-(x1_t)->\ | + add <-/ \--> add + |---------> | <---------| + | + add ---> cast (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x1_t = mb.transpose(x=x1, perm=[1, 0]) + + def true_fn(): + return mb.add(x=x1_t, y=np.float16(1), name="x2") + + def false_fn(): + return mb.add(x=x1_t, y=np.float16(2), name="x2") + + is_one = mb.equal(x=mb.squeeze(x=x), y=np.float16(1.0)) + pred = mb.squeeze(x=is_one) + x3 = mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn) + x4 = mb.add(x=x1_t, y=x3) + x5 = mb.cast(x=x4, dtype="fp32") + return x5 + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "squeeze", + "equal", + "squeeze", + "transpose", + "cond", + "add", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (20, 10)}, + ) + + def test_cast_transpose_already_at_the_end(self): + """ + Input graph: + x (input) ---> square ---> transpose ---> cast (output) + | + | -----------> log ------> transpose ---> cast (output) + | + | -----------> relu -----> cast ----> relu (output) + | + | -----------> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x3 = mb.log(x=x) + x5 = mb.relu(x=x) + x6 = mb.cast(x=x5, dtype="fp32") + x7 = mb.relu(x=x6) + x8 = mb.relu(x=x) + x1_t = mb.transpose(x=x1, perm=[1, 
0]) + x2 = mb.cast(x=x1_t, dtype="fp32") + x3_t = mb.transpose(x=x3, perm=[1, 0]) + x4 = mb.cast(x=x3_t, dtype="fp32") + return x2, x4, x7, x8 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (20, 10), + block.outputs[1].name: (20, 10), + block.outputs[2].name: (10, 20), + block.outputs[3].name: (10, 20), + }, + ) + + +class TestChildOrdering: + def test_generic_child_ordering(self): + """ + Checks that the new generic pattern matching infrastructure works + regardless of the ordering of an operation's children + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + power = mb.pow(x=x, y=3.0, name="thepowerop") + add_0 = mb.add(x=power, y=5.0, name="add_0") + sub_0 = mb.sub(x=power, y=5.0, name="sub_0") + mul_0 = mb.mul(x=power, y=5.0, name="mul_0") + add_1 = mb.add(x=add_0, y=mul_0, name="add_1") + add_2 = mb.add(x=sub_0, y=add_1, name="add_2") + return add_2 + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def ops_arrangement(x): + power = mb.pow(x=x, y=3.0, name="thepowerop") + sub_0 = mb.sub(x=power, y=5.0, name="sub_0") + add_0 = mb.add(x=power, y=5.0, name="add_0") + mul_0 = mb.mul(x=power, y=5.0, name="mul_0") + add_1 = mb.add(x=add_0, y=mul_0, name="add_1") + add_2 = mb.add(x=sub_0, y=add_1, name="add_2") + return add_2 + + def var_constraints(pattern): + constraints_passed = True + constraints_passed &= _check_var_scalar_value(pattern.thepowerop.y, 3) + constraints_passed &= _check_var_scalar_value(pattern.sub_0.y, 5) + constraints_passed &= _check_var_scalar_value( + pattern.add_0.x, 5 + ) or _check_var_scalar_value(pattern.add_0.y, 5) + constraints_passed &= _check_var_scalar_value( + pattern.mul_0.x, 5 + ) or _check_var_scalar_value(pattern.mul_0.y, 5) + return constraints_passed + + def transform_pattern(pattern): + out_name = "new operation" + x = mb.gelu( + x=pattern.root_var, + mode="TANH_APPROXIMATION", + name=out_name, + before_op=pattern.thepowerop, + ) + + pattern.add_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_2, old_var=pattern.add_2.outputs[0], new_var=x + ) + + pattern.block.remove_ops(pattern.op_list()) + + register_generic_pass( + ops_arrangement=ops_arrangement, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="test_generic_child_ordering", + namespace="common", + ) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::test_generic_child_ordering" + ) + assert get_op_types_in_program(prev_prog) == [ + "pow", + "add", + "sub", + "mul", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + +class TestGeluFusion: + def test_gelu_tanh_approximation1(self): + """ + Detect gelu tanh approx pattern, found in the TF bert model. 
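+ The general form below can be hand-checked with plain NumPy (a sketch, not part of the test itself):
+ x = 1.0
+ y = 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x ** 3)))
+ # y ~= 0.8412, matching the exact gelu(1.0) ~= 0.8413 to ~1e-4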
+ y = ( tanh( ((.0447)x^3 + x) * sqrt(2/pi) ) + 1 ) * 0.5 * x + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.pow(x=x, y=3.0) + x1 = mb.mul(x=0.044715, y=x1) + x1 = mb.add(x=x1, y=x) + x1 = mb.mul(x=x1, y=np.sqrt(2 / np.pi)) + x1 = mb.tanh(x=x1) + x1 = mb.add(x=1.0, y=x1) + x1 = mb.mul(x=0.5, y=x1) + x1 = mb.mul(x=x, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_gelu_tanh_approximation" + ) + assert get_op_types_in_program(prev_prog) == [ + "pow", + "mul", + "add", + "mul", + "tanh", + "add", + "mul", + "mul", + ] + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + @pytest.mark.parametrize( + "first_op_1, first_op_2, first_op_3, first_op_4, first_op_5, first_op_6", + itertools.product( + [True, False], [True, False], [True, False], [True, False], [True, False], [True, False] + ), + ) + def test_gelu_tanh_approximation2( + self, first_op_1, first_op_2, first_op_3, first_op_4, first_op_5, first_op_6 + ): + """ + Detect gelu tanh approx pattern, found in the TF Sanitized GPT2 model. + y = ( tanh( ((.0447)x^3 + x) * sqrt(2/pi) ) + 1 ) * 0.5 * x + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + firstmul = mb.mul(x=x, y=0.5) if first_op_1 else mb.mul(x=0.5, y=x) + x1 = mb.pow(x=x, y=3.0) + x1 = mb.mul(x=0.044715, y=x1) if first_op_2 else mb.mul(x=x1, y=0.044715) + x1 = mb.add(x=x1, y=x) if first_op_3 else mb.add(x=x, y=x1) + x1 = ( + mb.mul(x=x1, y=np.sqrt(2 / np.pi)) + if first_op_4 + else mb.mul(x=np.sqrt(2 / np.pi), y=x1) + ) + x1 = mb.tanh(x=x1) + x1 = mb.add(x=1.0, y=x1) if first_op_5 else mb.add(x=x1, y=1.0) + x1 = mb.mul(x=firstmul, y=x1) if first_op_6 else mb.mul(x=x1, y=firstmul) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_gelu_tanh_approximation" + ) + assert get_op_types_in_program(prev_prog) == [ + "mul", + "pow", + "mul", + "add", + "mul", + "tanh", + "add", + "mul", + ] + + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + @pytest.mark.parametrize( + "op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first", + itertools.product( + ["real_div", "mul"], + [True, False], + [True, False], + [True, False], + [True, False], + [True, False], + ), + ) + def test_gelu_exact( + self, op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first + ): + """ + Detect gelu exact pattern.
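+ Both placements of the 0.5 factor below are algebraically identical; as a quick hand-check (assuming scipy.special.erf), gelu(1.0) = 0.5 * 1.0 * (1 + erf(1 / sqrt(2))) ~= 0.8413.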
+ y = 0.5 * (x * ( 1 + erf ( x / sqrt(2)))) + or + y = x * (0.5 * ( 1 + erf ( x / sqrt(2)))) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + if op_type == "real_div": + x1 = mb.real_div(x=x, y=2**0.5) + elif op_type == "mul": + x1 = mb.mul(x=x, y=2**-0.5) if is_first_op1 else mb.mul(x=2**-0.5, y=x) + + x2 = mb.erf(x=x1) + x3 = mb.add(x=x2, y=1.0) if is_first_op2 else mb.add(x=1.0, y=x2) + + if const_mul_first: + y1 = mb.const(val=0.5) + y2 = x + else: + y1 = x + y2 = mb.const(val=0.5) + + x4 = mb.mul(x=x3, y=y1) if is_first_op3 else mb.mul(x=y1, y=x3) + x5 = mb.mul(x=x4, y=y2) if is_first_op4 else mb.mul(x=y2, y=x4) + + return x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_gelu_exact") + + assert get_op_types_in_program(prev_prog) == [ + op_type, + "erf", + "add", + "mul", + "mul", + ] + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + @pytest.mark.parametrize( + "is_first_op0, is_first_op4", + itertools.product( + [True, False], + [True, False], + ), + ) + def test_gelu_exact_pattern_2(self, is_first_op0, is_first_op4): + """ + Detect gelu exact pattern. + y = (0.5 * x) * ( 1 + erf ( x / sqrt(2))) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x0 = mb.mul(x=x, y=0.5) if is_first_op0 else mb.mul(x=0.5, y=x) + x1 = mb.mul(x=x, y=2**-0.5) + x2 = mb.erf(x=x1) + x3 = mb.add(x=x2, y=1.0) + x4 = mb.mul(x=x0, y=x3) if is_first_op4 else mb.mul(x=x3, y=x0) + return x4 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_gelu_exact") + + assert get_op_types_in_program(prev_prog) == [ + "mul", + "mul", + "erf", + "add", + "mul", + ] + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + +class TestLeakyReluFusion: + @pytest.mark.parametrize( + "swap_mul_input_order, swap_max_input_order", + itertools.product( + [True, False], + [True, False], + ), + ) + def test_valid_leaky_relu_pattern(self, swap_mul_input_order, swap_max_input_order): + """ + Input graph: + + const (val = 0.3) + | + input ----> mul ---------------> maximum -----------> output + | | + |---------------------------------- + + Output graph: + + input --------> leaky_relu ---------> output + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + if swap_mul_input_order: + x1 = mb.mul(x=x, y=0.3) + else: + x1 = mb.mul(x=0.3, y=x) + if swap_max_input_order: + x1 = mb.maximum(x=x1, y=x) + else: + x1 = mb.maximum(x=x, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_leaky_relu") + assert get_op_types_in_program(prev_prog) == ["mul", "maximum"] + assert get_op_types_in_program(prog) == ["leaky_relu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + def test_invalid_leaky_relu_pattern1(self): + """ + Invalid because the alpha value is greater than 1 + + Input graph: + + const (val = 1.3) + | + input ----> mul ---------------> maximum -----------> output + | | + |---------------------------------- + + Output graph: same as input graph + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.mul(x=x, y=1.3) + x1 = mb.maximum(x=x1, y=x) + return x1 + + prev_prog, prev_block, block =
apply_pass_and_basic_check(prog, "common::fuse_leaky_relu") + assert get_op_types_in_program(prev_prog) == ["mul", "maximum"] + assert get_op_types_in_program(prog) == ["mul", "maximum"] + + def test_invalid_leaky_relu_pattern2(self): + """ + Invalid because the input to the "maximum" op is not the same as the input of the "mul" op + + Input graph: + + const (val = 0.3) + | + input ----> mul ---------------> maximum -----------> output + | + const + + + Output graph: same as input graph + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.mul(x=x, y=0.3) + x1 = mb.maximum(x=x1, y=0.4) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_leaky_relu") + assert get_op_types_in_program(prev_prog) == ["mul", "maximum"] + assert get_op_types_in_program(prog) == ["mul", "maximum"] + + +class TestPreluFusion: + @pytest.mark.parametrize( + "swap_input_order, alpha_rank", + itertools.product( + [True, False], + [3, 4], + ), + ) + def test_channel_first_pattern(self, swap_input_order, alpha_rank): + """ + Input: + | ------------> relu --------------------| + | V + x (BCHW) ------| add -----> y (BCHW) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(name=a, shape=(1,C,1,1)) + + Output: + x (BCHW) ------> prelu(alpha=a, shape=(C,)) ---------> y (BCHW) + """ + B, C, H, W = 2, 3, 5, 6 + + if alpha_rank == 3: + alpha = np.random.rand(C, 1, 1) + elif alpha_rank == 4: + alpha = np.random.rand(1, C, 1, 1) + else: + raise NotImplementedError("alpha rank must be 3 or 4") + + @mb.program(input_specs=[mb.TensorSpec(shape=(B, C, H, W))]) + def prog(x): + if swap_input_order: + neg = mb.mul(x=x, y=-1.0) + else: + neg = mb.mul(x=-1.0, y=x) + relu1 = mb.relu(x=neg) + if swap_input_order: + mul = mb.mul(x=relu1, y=alpha) + else: + mul = mb.mul(x=alpha, y=relu1) + relu2 = mb.relu(x=x) + if swap_input_order: + out = mb.add(x=relu2, y=mul) + else: + out = mb.add(x=mul, y=relu2) + return out + + prev_prog, _, _ = apply_pass_and_basic_check( + prog, + "common::fuse_prelu", + ) + assert get_op_types_in_program(prev_prog) == ["mul", "relu", "mul", "relu", "add"] + assert get_op_types_in_program(prog) == ["prelu"] + + @pytest.mark.parametrize( + "swap_input_order, alpha_rank", + itertools.product( + [True, False], + [1, 2, 3], + ), + ) + def test_channel_last_transpose_pattern(self, swap_input_order, alpha_rank): + """ + Input: + + | ------------> relu --------------------| + | V + x (shape=BCHW)-->transpose(out_shape=BHWC)---->| add -----> y (BHWC) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(shape=(1,1,C)) + + Output: + x (BCHW) ------> prelu ---------> transpose ------> y (BHWC) + """ + B, C, H, W = 2, 3, 5, 6 + if alpha_rank == 1: + alpha = np.random.rand(C) + elif alpha_rank == 2: + alpha = np.random.rand(1, C) + elif alpha_rank == 3: + alpha = np.random.rand(1, 1, C) + else: + raise NotImplementedError("alpha rank must be 1 or 2 or 3") + + @mb.program(input_specs=[mb.TensorSpec(shape=(B, C, H, W))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + if swap_input_order: + neg = mb.mul(x=x, y=-1.0) + else: + neg = mb.mul(x=-1.0, y=x) + relu1 = mb.relu(x=neg) + if swap_input_order: + mul = mb.mul(x=relu1, y=alpha) + else: + mul = mb.mul(x=alpha, y=relu1) + relu2 = mb.relu(x=x) + if swap_input_order: + out = mb.add(x=relu2, y=mul) + else: + out = mb.add(x=mul, y=relu2) + return out + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::fuse_prelu",
+ ) + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "mul", + "relu", + "mul", + "relu", + "add", + ] + assert get_op_types_in_program(prog) == ["prelu", "transpose"] + assert_model_is_valid( + prog, + {"x": (B, C, H, W)}, + expected_output_shapes={block.outputs[0].name: (B, H, W, C)}, + ) + + +class TestPreluToLrelu: + def test_prelu_to_lrelu(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2, 3, 1))]) + def prog(x): + # Not a common leakage factor. + alpha_0 = np.array([1.0, 2.0], dtype=np.float32) + x = mb.prelu(x=x, alpha=alpha_0) + + add_val = np.random.rand(4, 2, 3, 1).astype(np.float32) + x = mb.add(x=x, y=add_val) + + # Common leakage factor. + alpha_1 = np.array([1.5, 1.5], dtype=np.float32) + x = mb.prelu(x=x, alpha=alpha_1) + + return x + + assert_op_count_match(prog, expect=2, op="prelu") + assert_op_count_match(prog, expect=0, op="leaky_relu") + prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::prelu_to_lrelu") + assert_same_output_names(prev_prog, prog) + # The prelu with a common leakage factor becomes leaky_relu. + assert_op_count_match(prog, expect=1, op="prelu") + assert_op_count_match(prog, expect=1, op="leaky_relu") + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (4, 2, 3, 1)}) + + +class TestSkipConstexprOps: + @staticmethod + def _get_constexpr_cast(shape): + val = np.random.rand(*shape).astype(np.float16) + return mb.constexpr_cast(source_val=val, output_dtype="fp32") + + @staticmethod + def _get_constexpr_sparse_to_dense(shape): + val = np.random.rand(*shape) + sparse_params = quantization.WeightSparsifier.compress( + val=val, mode="PERCENTILE_MODE", target_percentile=0.4 + ) + return mb.constexpr_sparse_to_dense( + nonzero_data=sparse_params.nonzero_data, + mask=sparse_params.mask, + shape=np.uint32(sparse_params.shape), + ) + + @staticmethod + def _get_constexpr_lut_to_dense(shape): + val = np.random.rand(*shape) + lut_params = quantization.WeightPalettizer.compress(val=val, nbits=4, mode="UNIFORM") + return mb.constexpr_lut_to_dense( + indices=lut_params.indices, + lut=lut_params.lut, + shape=np.uint32(lut_params.shape), + ) + + @staticmethod + def _get_constexpr_affine_dequantize(shape): + val = np.random.rand(*shape) + quant_params = quantization.WeightAffineQuantizer.compress( + val=val, axis=0, mode="LINEAR_SYMMETRIC", dtype=types.uint8 + ) + return mb.constexpr_affine_dequantize( + quantized_data=quant_params.quantized_data, + zero_point=quant_params.zero_point, + scale=quant_params.scale, + axis=quant_params.axis, + ) + + # Static method cannot be stored as a function without attribute access. + CONSTEXPR_FUNCS = { + "constexpr_cast": _get_constexpr_cast.__func__, + "constexpr_sparse_to_dense": _get_constexpr_sparse_to_dense.__func__, + "constexpr_lut_to_dense": _get_constexpr_lut_to_dense.__func__, + "constexpr_affine_dequantize": _get_constexpr_affine_dequantize.__func__, + } + + CONSTEXPR_OPS = [ + "constexpr_cast", + "constexpr_sparse_to_dense", + "constexpr_lut_to_dense", + "constexpr_affine_dequantize", + ] + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op", + CONSTEXPR_OPS, + ) + def test_skip_const_elimination(constexpr_op): + """ + constexpr_op + | + v + const -> linear + | + v + input --------------> add -> output + + We are testing that: + 1. constexpr_op can serve as a const input weight for linear op + 2. 
linear op shouldn't be removed by the const_elimination pass + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(4,))]) + def prog(x): + a = np.random.rand( + 2, + ) + constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2)) + linear = mb.linear(x=a, weight=constexpr) + return mb.add(x=x, y=linear) + + PASS_REGISTRY["common::const_elimination"](prog) + assert get_op_types_in_program(prog) == [constexpr_op, "linear", "add"] + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, weight_constexpr, bias_constexpr", + itertools.product( + CONSTEXPR_OPS, + [True, False], + [True, False], + ), + ) + def test_skip_fuse_matmul_weight_bias(constexpr_op, weight_constexpr, bias_constexpr): + """ + const_1 const_2 + | | + v v + input -----> matmul -----> add ---> out + + In this case, if either const_1 or const_2 is a constexpr op, they should not be fused into a single linear op + """ + + def get_matmul(x, weight_constexpr): + weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((3, 2)) + if not weight_constexpr: + weight = weight.val + return mb.matmul(x=x, y=weight) + + def get_add(x, bias_constexpr): + bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((2,)) + if not bias_constexpr: + bias = bias.val + return mb.add(x=x, y=bias) + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3))]) + def prog(x): + x = get_matmul(x, weight_constexpr) + x = get_add(x, bias_constexpr) + return x + + apply_pass_and_basic_check(prog, "common::fuse_matmul_weight_bias") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + if not weight_constexpr and not bias_constexpr: + expected_ops = ["linear"] + else: + expected_ops = [] + if weight_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("matmul") + if bias_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("add") + + assert get_op_types_in_program(prog) == expected_ops + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, op, weight_constexpr, const_constexpr", + itertools.product( + CONSTEXPR_OPS, + ["mul", "add"], + [True, False], + [True, False], + ), + ) + def test_skip_fuse_conv(constexpr_op, op, weight_constexpr, const_constexpr): + + """ + const_1 const_2 + | | + v v + input -----> conv -----> mul/add ---> out + + This pattern shouldn't be fused into a single conv layer if one of const_1 or const_2 is a constexpr op.
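+ For example, with constexpr_op="constexpr_cast", op="mul", and both inputs kept as constexpr ops, the program must stay as ["constexpr_cast", "conv", "constexpr_cast", "mul"]: folding a constexpr input into the conv would materialize the compressed weight as a dense const and defeat the compression.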
+ """ + Cin, Cout = 3, 3 + input_shape = (2, Cin, 5, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + conv_weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, Cin, 2, 2)) + if not weight_constexpr: + conv_weight = conv_weight.val + x = mb.conv(x=x, weight=conv_weight) + const = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, 1, 1)) + if not const_constexpr: + const = const.val + return getattr(mb, op)(x=x, y=const) + + apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not const_constexpr: + expected_ops = ["conv"] + else: + if weight_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("conv") + if const_constexpr: + expected_ops.append(constexpr_op) + if op != "add" or const_constexpr: + expected_ops.append(op) + + assert get_op_types_in_program(prog) == expected_ops + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, weight_constexpr, bias_constexpr", + itertools.product( + CONSTEXPR_OPS, + [True, False], + [True, False], + ), + ) + def test_skip_fuse_linear_bias(constexpr_op, weight_constexpr, bias_constexpr): + """ + const_1 const_2 + | | + v V + input -----> linear -----> add ---> out + + This pattern shouldn't be fused into a single linear layer if one of const_1 or const_2 is a constexpr op. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2,))]) + def prog(x): + weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2)) + if not weight_constexpr: + weight = weight.val + linear = mb.linear(x=x, weight=weight) + bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4,)) + if not bias_constexpr: + bias = bias.val + return mb.add(x=linear, y=bias) + + apply_pass_and_basic_check(prog, "common::fuse_linear_bias") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not bias_constexpr: + expected_ops = ["linear"] + else: + if weight_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("linear") + if bias_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("add") + + assert get_op_types_in_program(prog) == expected_ops + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, weight_constexpr, bias_constexpr", + itertools.product( + CONSTEXPR_OPS, + [True, False], + [True, False], + ), + ) + def test_skip_fuse_conv_batchnorm(constexpr_op, weight_constexpr, bias_constexpr): + """ + weight bias + | | + |_____ ____| + | | + v v + input -----> conv -----> batch_norm ---> out + + This pattern shouldn't be fused into a single conv layer if one of the weight / bias is a constexpr op. 
+ """ + Cin, Cout = 2, 3 + input_shape = (2, Cin, 5, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, Cin, 2, 2)) + if not weight_constexpr: + weight = weight.val + bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout,)) + if not bias_constexpr: + bias = bias.val + + x = mb.conv( + x=x, + weight=weight, + bias=bias, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + epsilon = 1e-2 + return mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + + apply_pass_and_basic_check(prog, "common::fuse_conv_batchnorm") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not bias_constexpr: + expected_ops = ["conv"] + else: + expected_ops = [constexpr_op] * sum([weight_constexpr, bias_constexpr]) + [ + "conv", + "batch_norm", + ] + + assert get_op_types_in_program(prog) == expected_ops + + +class TestMergeConsecutivePaddings: + def test_success_reflect(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + @pytest.mark.parametrize("swap_axes", [False, True]) + def test_success_different_rank1(self, swap_axes): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + if swap_axes: + pad1 = mb.pad(x=x1, pad=[1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="reflect") + else: + pad1 = mb.pad(x=x1, pad=[1, 1, 0, 0], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_success_constant(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="constant", constant_val=3.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"] + assert pad_ops[0].inputs["constant_val"].val == 3.0 + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_success_3_layers(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, 
pad=[0, 0, 1, 1], mode="constant", constant_val=3.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + pad3 = mb.pad(x=pad2, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + + return pad3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"] + assert pad_ops[0].inputs["constant_val"].val == 3.0 + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 10, 10)}, + ) + + def test_failure_different_mode(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_failure_different_constants(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="constant", constant_val=1.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=2.0) + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_failure_repeat_on_same_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + +class TestMergeConsecutiveTransposes: + def test_success_reduce_consecutive_transposes(self): + """ + Input: + |--> transpose_1 -> transpose_2 -> output_1 + x - + |--> transpose_3 -> transpose_4 -> transpose_5 -> output_2 + + Output: + |--> transpose_6 -> output_1 + x - + |--> transpose_7 -> output_2 + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.transpose(x=x1, perm=[3, 2, 0, 1]) + x2 = mb.transpose(x=x, perm=[3, 2, 1, 0]) + x2 = mb.transpose(x=x2, perm=[2, 3, 0, 1]) + x2 = mb.transpose(x=x2, perm=[0, 2, 1, 3]) + + return x1, x2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes") + assert get_op_types_in_program(prev_prog) == ["transpose"] * 5 + assert get_op_types_in_program(prog) == ["transpose"] * 2 + + inputs = {"x": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={
block.outputs[0].name: (4, 2, 1, 3), + block.outputs[1].name: (2, 4, 1, 3), + }, + ) + + def test_success_reduce_consecutive_transposes_with_output_constrain(self): + """ + Input: + x --> transpose_1 -> transpose_2 -> transpose_3 -> transpose_4 -> transpose_5 -> add -> output_3 + | | + v v + output_1 output_2 + + Output: + x --> transpose_1 -> transpose_6 -> transpose_7-> add -> output_1 + | | + v v + output_2 output_3 + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[3, 2, 1, 0], name="output_1") + x2 = mb.transpose(x=x1, perm=[1, 3, 2, 0]) + x2 = mb.transpose(x=x2, perm=[2, 3, 0, 1], name="output_2") + x3 = mb.transpose(x=x2, perm=[0, 2, 1, 3]) + x3 = mb.transpose(x=x3, perm=[3, 2, 1, 0]) + x3 = mb.add(x=x3, y=1., name="output_3") + return x1, x2, x3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes") + assert get_op_types_in_program(prev_prog) == ["transpose"] * 5 + ["add"] + assert get_op_types_in_program(prog) == ["transpose"] * 3 + ["add"] + + inputs = {"x": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={ + block.outputs[0].name: (4, 3, 2, 1), + block.outputs[1].name: (2, 4, 3, 1), + block.outputs[2].name: (1, 4, 3, 2), + }, + ) + + assert block.outputs[0].name == "output_1" + assert block.outputs[1].name == "output_2" + assert block.outputs[2].name == "output_3" + + def test_not_merge_transposes(self): + """ + Input: + x --> transpose_1 -> add -> transpose_2 -> output + + Output: + x --> transpose_1 -> add -> transpose_2 -> output + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[3, 2, 1, 0]) + x = mb.add(x=x, y=1.) + x = mb.transpose(x=x, perm=[1, 3, 2, 0]) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes") + assert get_op_types_in_program(prev_prog) == ["transpose", "add", "transpose"] + assert get_op_types_in_program(prog) == ["transpose", "add", "transpose"] + + inputs = {"x": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (3, 1, 2, 4),}, + ) + +class TestExpandHighRankReshapeAndTranspose: + @staticmethod + def _test_numerical(prog, input_shape, reshape_shape, perm, output_shape): + x = np.random.rand(*input_shape) + coreml_input = {"x": x} + mlmodel = ct.convert(prog, source="milinternal") + coreml_output = list(mlmodel.predict(coreml_input).values())[0] + + gt = np.reshape(x, reshape_shape) + gt = np.transpose(gt, perm) + gt = np.reshape(gt, output_shape) + np.testing.assert_allclose(gt, coreml_output, rtol=1e-03, atol=1e-05) + + def test_rank6(self): + input_shape = (1, 2, 3, 4, 5) + reshape_shape = (1, 2, 3, 2, 2, 5) + perm = (4, 5, 3, 2, 0, 1) + output_shape = (5, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = mb.reshape(x=x, shape=reshape_shape) + x = mb.transpose(x=x, perm=perm) + x = mb.reshape(x=x, shape=output_shape) + return x + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + prog._check_invalid_tensor_rank() + assert get_op_types_in_program(prog) == ["reshape", "transpose", "reshape"] + TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape) + + def test_rank10(self): + input_shape = (2, 3, 4, 5, 6) + reshape_shape = (1, 2, 1, 3, 2, 2, 1, 5, 2, 3) + perm = (0, 1, 2, 3, 4, 9, 5, 6, 7, 8) 
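+ # The rank-10 intermediate exceeds Core ML's rank-5 limit; the pass must rewrite the
+ # reshape/transpose/reshape so every intermediate tensor has rank <= 5 (checked via
+ # _check_invalid_tensor_rank below) while preserving all 2*3*4*5*6 = 720 elements.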
+ output_shape = (30, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = mb.reshape(x=x, shape=reshape_shape) + x = mb.transpose(x=x, perm=perm) + x = mb.reshape(x=x, shape=output_shape) + return x + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + prog._check_invalid_tensor_rank() + assert get_op_types_in_program(prog) == ["reshape", "transpose", "reshape"] + TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape) + + def test_rank20(self): + input_shape = (4, 6, 8, 20, 40) + reshape_shape = (1, 2, 2, 1, 2, 3, 2, 2, 2, 2, 2, 1, 1, 1, 5, 2, 2, 2, 1, 5) + perm = (19, 14, 13, 12, 0, 3, 1, 2, 10, 5, 4, 6, 15, 11, 17, 18, 7, 8, 9, 16) + output_shape = (24, 160, 40) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = mb.reshape(x=x, shape=reshape_shape) + x = mb.transpose(x=x, perm=perm) + x = mb.reshape(x=x, shape=output_shape) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + prog._check_invalid_tensor_rank() + assert get_op_types_in_program(prog) == ["reshape", "transpose"] * 16 + ["reshape"] + TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape) + + def test_negative_case(self): + input_shape = (4, 6, 8, 20, 40) + reshape_shape = (1, 2, 2, 1, 2, 3, 2, 2, 2, 2, 2, 1, 1, 1, 5, 2, 2, 2, 1, 5) + perm = (19, 14, 13, 12, 0, 3, 1, 2, 10, 5, 4, 6, 15, 11, 17, 18, 7, 8, 9, 16) + output_shape = (24, 160, 40) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x1 = mb.reshape(x=x, shape=reshape_shape) + x2 = mb.transpose(x=x1, perm=perm) + x3 = mb.reshape(x=x2, shape=output_shape) + return x, x1 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + with pytest.raises(ValueError, match="Core ML only supports tensors with rank <= 5"): + prog._check_invalid_tensor_rank() + + +class TestMergeConsecutiveRelus: + @pytest.mark.parametrize( + "relu_num", + [2, 3, 4], + ) + def test_success_reduce_consecutive_relus(self, relu_num): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + for _ in range(relu_num): + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu"] * relu_num + ["add"] + assert get_op_types_in_program(prog) == ["relu", "add"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + @pytest.mark.parametrize( + "relu_num", + [2, 3, 4], + ) + def test_keep_not_consecutive_relus(self, relu_num): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + for _ in range(relu_num): + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu", "add"] * relu_num + assert get_op_types_in_program(prog) == get_op_types_in_program(prev_prog) + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_mix_situation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + relu1 = 
mb.relu(x=x) + relu_after_add = mb.add(x=relu1, y=1.0) + relu2 = mb.relu(x=relu_after_add) + relu3 = mb.relu(x=relu2) + return relu3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu", "add", "relu", "relu"] + assert get_op_types_in_program(prog) == ["relu", "add", "relu"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_name_change_depend_on_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog_output_transpose_2(x): + transpose_1 = mb.relu(x=x, name="transpose_1") + transpose_2 = mb.relu(x=transpose_1, name="transpose_2") + transpose_3 = mb.transpose(x=transpose_2, perm=[0, 2, 1], name="transpose_3") + return transpose_2, transpose_3 + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog_output_transpose_3(x): + transpose_1 = mb.relu(x=x, name="transpose_1") + transpose_2 = mb.relu(x=transpose_1, name="transpose_2") + transpose_3 = mb.transpose(x=transpose_2, perm=[0, 2, 1], name="transpose_3") + return transpose_3 + + prev_prog_output_transpose_2, _, block = apply_pass_and_basic_check( + prog_output_transpose_2, "common::merge_consecutive_relus" + ) + assert get_op_types_in_program(prev_prog_output_transpose_2) == [ + "relu", + "relu", + "transpose", + ] + assert get_op_types_in_program(prog_output_transpose_2) == ["relu", "transpose"] + assert prog_output_transpose_2["main"].operations[0].name == "transpose_1" + # As the block's output has transpose_2, the original output name of the first operation + # is replaced. + assert prog_output_transpose_2["main"].operations[0].outputs[0].name == "transpose_2" + + prev_prog_output_transpose_3, _, block = apply_pass_and_basic_check( + prog_output_transpose_3, "common::merge_consecutive_relus" + ) + assert get_op_types_in_program(prev_prog_output_transpose_3) == [ + "relu", + "relu", + "transpose", + ] + assert get_op_types_in_program(prog_output_transpose_3) == ["relu", "transpose"] + assert prog_output_transpose_3["main"].operations[0].name == "transpose_1" + # As the block's output only has transpose_3, the entire transpose_2 gets removed. 
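+ # The surviving relu therefore keeps its original output name, "transpose_1",
+ # since no block output referenced the name "transpose_2".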
+ assert prog_output_transpose_3["main"].operations[0].outputs[0].name == "transpose_1" + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog_output_transpose_2, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog_output_transpose_3, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + +class TestMergeConsecutiveReshapes: + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_merge_consecutive_2reshapes(self, backend): + INPUT_SHAPE = (2, 3) + OUTPUT_SHAPE = (3, 2) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(-1,)) + y2 = mb.reshape(x=y1, shape=OUTPUT_SHAPE) + return y2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape"] * 2 + assert get_op_types_in_program(prog) == ["reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_merge_consecutive_4reshapes(self, backend): + INPUT_SHAPE = (2, 3, 5) + OUTPUT_SHAPE = (10, 3) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(15, 2)) + y2 = mb.reshape(x=y1, shape=(2, 5, 3)) + y3 = mb.reshape(x=y2, shape=(6, 5)) + y4 = mb.reshape(x=y3, shape=OUTPUT_SHAPE) + return y4 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape"] * 4 + assert get_op_types_in_program(prog) == ["reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_keep_separate_reshapes(self, backend): + INPUT_SHAPE = (3, 5, 7) + OUTPUT_SHAPE = (7, 3, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(21, 5)) + + # Note [elementwise op and reshape] + # In principle, elementwise ops can be swapped with the reshapes, e.g. 
+ # in -> reshape1 -> elementwise1 -> reshape2 -> elementwise2 -> reshape3 -> out + # is equivalent to + # in -> elementwise1 -> elementwise2 -> reshape1 -> reshape2 -> reshape3 -> out + # which can then be optimized to + # in -> elementwise1 -> elementwise2 -> reshape3 -> out + # + # so here we divide the reshape sequence with something non-elementwise + bias = np.random.rand(5) * 2.0 - 1.0 + y2 = mb.add(x=y1, y=bias) + + y3 = mb.reshape(x=y2, shape=OUTPUT_SHAPE) + return y3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape", "add", "reshape"] + assert get_op_types_in_program(prog) == ["reshape", "add", "reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize("backend", backends) + def test_merge_2consecutive_keep_1separate(self, backend): + INPUT_SHAPE = (5, 7, 11) + OUTPUT_SHAPE = (11, 5, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=(INPUT_SHAPE))]) + def prog(x): + # these 2 reshapes will be merged + y1 = mb.reshape(x=x, shape=(35, 11)) + y2 = mb.reshape(x=y1, shape=(55, 7)) + + # see Note [elementwise op and reshape] + bias = np.random.rand(7) * 2.0 - 1.0 + y3 = mb.sub(x=y2, y=bias) + + # this reshape is separated, so it will be kept + y4 = mb.reshape(x=y3, shape=OUTPUT_SHAPE) + return y4 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "sub", "reshape"] + assert get_op_types_in_program(prog) == ["reshape", "sub", "reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_keep_block_outputs(self, backend): + INPUT_SHAPE = (5, 6) + OUTPUT0_SHAPE = (15, 2) + OUTPUT1_SHAPE = (3, 10) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=OUTPUT0_SHAPE) + y2 = mb.reshape(x=y1, shape=OUTPUT1_SHAPE) + return y1, y2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape", "reshape"] + assert get_op_types_in_program(prog) == ["reshape", "reshape"] + + assert len(block.outputs) == 2 + expected_output_shapes = { + block.outputs[0].name: OUTPUT0_SHAPE, + block.outputs[1].name: OUTPUT1_SHAPE, + } + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes=expected_output_shapes, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_keep_nonreshape_child(self, backend): + INPUT_SHAPE = (6, 7) + OUTPUT_SHAPE = (14, 3) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(21, 2)) + y2 = mb.reshape(x=y1, shape=OUTPUT_SHAPE) + # the 1st reshape creating y1 has a non-reshape child op (matmul), + # so it will not be merged + y3 = mb.matmul(x=y1, y=np.random.rand(2, 5)) + return y2, y3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "matmul"] + assert get_op_types_in_program(prog) == ["reshape", "reshape", "matmul"] + + assert len(block.outputs) == 2 + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE},
expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + +class TestCastOptimization: + """Test the cast optimization pass.""" + + """ + Input graph: + input -----> cast(dtype="fp32") -----> square -----> cast(dtype="fp32") ---> out + + Output graph: + input -----> square -----> out + """ + + def test_remove_redundant_casts(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp32") + x = mb.square(x=x) + x = mb.cast(x=x, dtype="fp32") + return x + + assert get_op_types_in_program(prog) == ["cast", "square", "cast"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input -----> cast(dtype="fp16") -----> cast(dtype="fp32") ----> square ---> out + + Output graph: + input -----> square -----> out + """ + + def test_linear_consecutive_cast_ops_cancellation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="fp32") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == ["cast", "cast", "square"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input---->cast(dtype="int32")---->cast(dtype="fp16")--->square--->out + + Output graph: + input----->cast(dtype="fp16")----->square--->out + """ + + def test_linear_consecutive_cast_ops_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x = mb.cast(x=x, dtype="fp16") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == ["cast", "cast", "square"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square"] + assert block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input-->cast(dtype="fp16")-->cast(dtype="fp16")-->cast(dtype="int32")-->cast(dtype="int64")-->cast(dtype="fp32")-->cast(dtype="fp16")-->square->out + + Output graph: + input---->cast(dtype="fp16")----->square--->out + """ + + def test_linear_multiple_consecutive_cast_ops(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="int32") + x = mb.cast(x=x, dtype="int64") + x = mb.cast(x=x, dtype="fp32") + x = mb.cast(x=x, dtype="fp16") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "cast", + "cast", + "square", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square"] + assert 
block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + |---->cast(dtype="fp32")---->square--->out_1 + | + input---->cast(dtype="fp16")---->cast(dtype="fp32")---->relu--->out_2 + | + |---->cast(dtype="fp32")---->log--->out_3 + + Output graph: + + |---->square--->out_1 + | + input---->relu--->out_2 + | + |---->log--->out_3 + """ + + def test_same_consecutive_cancelling_casts_on_all_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.cast(x=x, dtype="fp32") + x2 = mb.cast(x=x, dtype="fp32") + x3 = mb.cast(x=x, dtype="fp32") + x4 = mb.square(x=x1) + x5 = mb.relu(x=x2) + x6 = mb.log(x=x3) + return x4, x5, x6 + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "square", + "relu", + "log", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square", "relu", "log"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + |---->cast(dtype="fp16")---->square--->out_1 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2 + | + |---->cast(dtype="fp16")---->log--->out_3 + + Output graph: + + |---->square--->out_1 + | + input---->cast(dtype="fp16")---->relu--->out_2 + | + |---->log--->out_3 + """ + + def test_consecutive_fusable_casts_on_all_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x1 = mb.cast(x=x, dtype="fp16") + x2 = mb.cast(x=x, dtype="fp16") + x3 = mb.cast(x=x, dtype="fp16") + x4 = mb.square(x=x1) + x5 = mb.relu(x=x2) + x6 = mb.log(x=x3) + return x4, x5, x6 + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "square", + "relu", + "log", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square", "relu", "log"] + assert block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + + |---->cast(dtype="fp32")---->square--->out_1 + | + |---->cast(dtype="fp16")---->square--->out_2 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_3 + | + |---->cast(dtype="fp16")---->log--->out_4 + | + |---->cast(dtype="fp32")---->log--->out_5 + + Output graph: + + |---->square--->out_1 + | + | |---->square--->out_2 + | | + input---->cast(dtype="fp16")---->relu--->out_3 + | | + | |---->log--->out_4 + | + | + |---->log--->out_5 + + """ + + def test_mixed_consecutive_casts_on_different_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x1 = mb.cast(x=x, dtype="fp32") + x2 = mb.cast(x=x, dtype="fp16") + x3 = mb.cast(x=x, dtype="fp16") + x4 = mb.cast(x=x, dtype="fp16") + x5 = mb.cast(x=x, dtype="fp32") + x6 = mb.square(x=x1) + x7 = mb.square(x=x2) + x8 = mb.relu(x=x3) 
+ x9 = mb.log(x=x4) + x10 = mb.log(x=x5) + return x6, x7, x8, x9, x10 + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "cast", + "cast", + "square", + "square", + "relu", + "log", + "log", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square", "square", "relu", "log", "log"] + assert block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + + |---->cast(dtype="fp32")---->square--->out_1 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2 + | + |---->log--->out_3 + + + Output graph: + + |---->square--->out_1 + | + | + | + input---->cast(dtype="fp16")---->relu--->out_2 + | + | + | + | + |---->cast(dtype="int32")---->abs--->out_3 + + """ + + def test_different_consecutive_casts__config_on_different_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x1 = mb.cast(x=x, dtype="fp32") + x2 = mb.cast(x=x, dtype="fp16") + x3 = mb.square(x=x1) + x4 = mb.relu(x=x2) + x5 = mb.abs(x=x) + return x3, x4, x5 + + assert get_op_types_in_program(prog) == ["cast", "cast", "cast", "square", "relu", "abs"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "cast", "square", "relu", "abs"] + + # Asserting first cast configuration + cast_1 = block.find_ops(op_type="cast")[0] + assert cast_1.dtype.val == "int32" + assert len(cast_1.outputs) == 1 + assert len(cast_1.outputs[0].child_ops) == 1 + assert cast_1.outputs[0].child_ops[0].op_type == "abs" + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + assert cast_2.dtype.val == "fp16" + assert len(cast_2.outputs) == 1 + assert len(cast_2.outputs[0].child_ops) == 1 + assert cast_2.outputs[0].child_ops[0].op_type == "relu" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + input(dtype="fp16")---->relu----->relu + | + --------| + | + V + cast(dtype="fp32")---->cast(dtype="fp16") + | + ----------------------| + | + V + cast(dtype="fp32")---->cast(dtype="fp16")---->output(dtype="fp16") + + Output graph: + input(dtype="fp16")---->relu----->relu---->output(dtype="fp16") + """ + + def test_two_casts_at_the_end(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20), dtype=types.fp16)]) + def prog(x): + x = mb.relu(x=x) + x = mb.relu(x=x) + x = mb.cast(x=x, dtype="fp32") + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="fp32") + x = mb.cast(x=x, dtype="fp16", name="original_output_name") + return x + + assert get_op_types_in_program(prog) == ["relu", "relu", "cast", "cast", "cast", "cast"] + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, prev_block, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + assert get_op_types_in_program(prog) == ["relu", "relu"] + assert prev_block.outputs[0].name == "original_output_name" + assert block.outputs[0].name == 
"original_output_name" + assert block.outputs[0].dtype == types.fp16 + + +class TestConv1dCompositionPasses: + @pytest.mark.parametrize( + "backend, has_strides, pad_type, has_pad, has_dilations, has_bias", + itertools.product( + backends, + (True, False), + ("valid", "custom", "same"), + (True, False), + (True, False), + (True, False), + ), + ) + def test_conv1d_composition( + self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias + ): + """ + Input graph: + input -> expand_dims -> conv2d -> squeeze -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 8 + C_in, C_out = 3, 4 + K = 3 + + conv_kwargs = {"weight": np.random.rand(C_out, C_in, 1, K), "pad_type": pad_type} + if has_strides: + conv_kwargs["strides"] = (2, 2) + if has_pad: + conv_kwargs["pad"] = (1, 1, 1, 1) + if has_dilations: + conv_kwargs["dilations"] = (2, 2) + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(2,)) + y_conv = mb.conv(x=y_expand, **conv_kwargs) + y_squeeze = mb.squeeze(x=y_conv, axes=(2,)) + return y_squeeze + + assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["conv"] + + # infer output shape + strides = conv_kwargs["strides"] if has_strides else (1, 1) + pad = conv_kwargs["pad"] if has_pad else (0, 0, 0, 0) + dilations = conv_kwargs["dilations"] if has_dilations else (1, 1) + L_out = None + if pad_type == "valid": + L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "custom": + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "same": + L_out = np.ceil(L / strides[-1]) + else: + raise Exception("unsupported pad type") + output_shape = (N, C_out, L_out) + + assert_model_is_valid( + prog, + {"x": (N, C_in, L)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize("backend", backends) + def test_conv1d_composotion_dynamic_weight(self, backend): + """ + Input graph: + input -> expand_dims -> conv2d -> squeeze -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 9 + C_in, C_out = 4, 3 + K = 4 + + strides = (1, 2) + pad = (0, 0, 1, 1) + # MIL convolution with dynamic weights does not support dilations != 1 + # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py + dilations = (1, 1) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(N, C_in, L)), + mb.TensorSpec(shape=(C_out, C_in, 1, K)), + ] + ) + def prog(x, weight): + y_expand = mb.expand_dims(x=x, axes=(-2,)) + y_conv = mb.conv(x=y_expand, weight=weight, **conv_kwargs) + y_squeeze = mb.squeeze(x=y_conv, axes=(-2,)) + return y_squeeze + + assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + 
prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L), "weight": (C_out, C_in, 1, K)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend, has_bias, bias_op_type", + itertools.product( + backends, + (True, False), + ("add", "sub"), + ), + ) + def test_conv1d_bias_fusion(self, backend, has_bias, bias_op_type): + """ + After recomposing the shattered conv1d, conv1d optimization passes should work + + Input graph: + input -> expand_dims -> conv2d -> squeeze -> add/sub a constant -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 8 + C_in, C_out = 3, 5 + K = 3 + + strides = (1, 2) + pad = (0, 0, 0, 1) + dilations = (1, 2) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "weight": np.random.rand(C_out, C_in, 1, K), + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + bias2 = np.random.rand(C_out, 1) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(-2,)) + y_conv = mb.conv(x=y_expand, **conv_kwargs) + y_squeeze = mb.squeeze(x=y_conv, axes=(-2,)) + y_bias2 = ( + mb.add(x=y_squeeze, y=bias2) + if bias_op_type == "add" + else mb.sub(x=y_squeeze, y=bias2) + ) + return y_bias2 + + assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze", bias_op_type] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["squeeze", "conv", bias_op_type] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["conv"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestConv1dChannellastCompositionPasses: + @pytest.mark.parametrize( + "backend, has_strides, pad_type, has_pad, has_dilations, has_bias", + itertools.product( + backends, + (True, False), + ("valid", "custom", "same"), + (True, False), + (True, False), + (True, False), + ), + ) + def test_conv1d_channellast_composition( + self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias + ): + """ + Input graph: + input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> out + + Output graph: + input -> transpose -> conv1d -> transpose -> out + """ + N, L = 2, 8 + C_in, C_out = 5, 3 + K = 3 + + conv_kwargs = { + "weight": np.random.rand(C_out, C_in, 1, K), + "pad_type": pad_type, + } + if has_strides: + conv_kwargs["strides"] = (2, 2) + if has_pad: + conv_kwargs["pad"] = (1, 1, 1, 1) + if has_dilations: + conv_kwargs["dilations"] = (2, 2) + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, L, C_in))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(1,)) + y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2)) + y_conv = mb.conv(x=y_transpose1, 
**conv_kwargs) + y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1)) + y_squeeze = mb.squeeze(x=y_transpose2, axes=(1,)) + return y_squeeze + + assert get_op_types_in_program(prog) == [ + "expand_dims", + "transpose", + "conv", + "transpose", + "squeeze", + ] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["transpose", "conv", "transpose"] + + # infer output shape + strides = conv_kwargs["strides"] if has_strides else (1, 1) + pad = conv_kwargs["pad"] if has_pad else (0, 0, 0, 0) + dilations = conv_kwargs["dilations"] if has_dilations else (1, 1) + L_out = None + if pad_type == "valid": + L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "custom": + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "same": + L_out = np.ceil(L / strides[-1]) + else: + raise ValueError("unsupported pad type") + output_shape = (N, L_out, C_out) + + assert_model_is_valid( + prog, + {"x": (N, L, C_in)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize("backend", backends) + def test_conv1d_channellast_composition_dynamic_weight(self, backend): + """ + Input graph: + input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> out + + Output graph: + input -> transpose -> conv1d -> transpose -> out + """ + N, L = 2, 9 + C_in, C_out = 4, 5 + K = 4 + + strides = (1, 2) + pad = (1, 0, 0, 1) + # MIL convolution with dynamic weights does not support dilations != 1 + # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py + dilations = (1, 1) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(N, L, C_in)), + mb.TensorSpec(shape=(C_out, C_in, 1, K)), + ] + ) + def prog(x, weight): + y_expand = mb.expand_dims(x=x, axes=(1,)) + y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2)) + y_conv = mb.conv(x=y_transpose1, weight=weight, **conv_kwargs) + y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1)) + y_squeeze = mb.squeeze(x=y_transpose2, axes=(1,)) + return y_squeeze + + assert get_op_types_in_program(prog) == [ + "expand_dims", + "transpose", + "conv", + "transpose", + "squeeze", + ] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"] + + output_shape = (N, L_out, C_out) + assert_model_is_valid( + prog, + {"x": (N, L, C_in), "weight": (C_out, C_in, 1, K)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend, has_bias, bias_op_type", + itertools.product( + backends, + (True, False), + ("add", "sub"), + ), + ) + def test_conv1d_channellast_bias_fusion(self, backend, has_bias, bias_op_type): + """ + After recomposing the shattered conv1d, conv1d
optimization passes should work + + Input graph: + input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> add/sub a constant -> out + + Output graph: + input -> transpose -> conv1d -> transpose -> out + """ + N, L = 2, 8 + C_in, C_out = 5, 4 + K = 4 + + strides = (1, 2) + pad = (0, 1, 1, 0) + dilations = (1, 2) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "weight": np.random.rand(C_out, C_in, 1, K), + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + bias2 = np.random.rand(C_out) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, L, C_in))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(-3,)) + y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2)) + y_conv = mb.conv(x=y_transpose1, **conv_kwargs) + y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1)) + y_squeeze = mb.squeeze(x=y_transpose2, axes=(-3,)) + y_bias2 = ( + mb.add(x=y_squeeze, y=bias2) + if bias_op_type == "add" + else mb.sub(x=y_squeeze, y=bias2) + ) + return y_bias2 + + assert get_op_types_in_program(prog) == [ + "expand_dims", + "transpose", + "conv", + "transpose", + "squeeze", + bias_op_type, + ] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == [ + "transpose", + "squeeze", + "conv", + "transpose", + bias_op_type, + ] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["transpose", "conv", "transpose"] + + output_shape = (N, L_out, C_out) + assert_model_is_valid( + prog, + {"x": (N, L, C_in)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestConvBatchNormFusion: + @staticmethod + def _apply_weight_transform(inputs, is_deconv, dtype=np.float32): + """ + Utility function to test the weight transform function in the conv batch_norm fusion pass. + """ + Cin, _, groups = 10, 20, 10 + input_shape = (1, Cin, 2, 2) + + @mb.program( + input_specs=[mb.TensorSpec(shape=input_shape, dtype=numpy_type_to_builtin_type(dtype))] + ) + def prog(x): + + if is_deconv: + x = mb.conv_transpose( + x=x, + weight=inputs["conv_weight"], + bias=inputs["conv_bias"], + groups=groups, + ) + else: + x = mb.conv( + x=x, + weight=inputs["conv_weight"], + bias=inputs["conv_bias"], + groups=groups, + ) + + x = mb.batch_norm( + x=x, + mean=inputs["mean"], + variance=inputs["variance"], + gamma=inputs["gamma"], + beta=inputs["beta"], + epsilon=inputs["epsilon"], + ) + return x + + apply_pass_and_basic_check(prog, "common::fuse_conv_batchnorm") + + # get the updated weight from the prog + conv_op = [] + for op in prog["main"].operations: + if op.op_type == "const": + continue + conv_op.append(op) + assert len(conv_op) == 1, "should only have one conv / conv_transpose layer." + + return conv_op[0].weight.val, conv_op[0].bias.val + + @pytest.mark.parametrize( + "conv_type", + ["conv", "conv_transpose"], + ) + def test_weight_transform_conv_identity(self, conv_type): + """ + Test the weight transform function with an identity batchnorm layer.
+ """ + # parameters for conv + is_deconv = conv_type == "conv_transpose" + conv_weight = np.arange(20).astype(np.float32) + conv_weight = ( + np.reshape(conv_weight, (10, 2, 1, 1)) + if is_deconv + else np.reshape(conv_weight, (20, 1, 1, 1)) + ) + conv_bias = np.arange(20).astype(np.float32) + + # parameters for batch_norm + gamma = np.ones(20).astype(np.float32) + beta = np.zeros(20).astype(np.float32) + mean = np.zeros(20).astype(np.float32) + variance = np.ones(20).astype(np.float32) + epsilon = 0.0 + + inputs = { + "conv_weight": conv_weight, + "conv_bias": conv_bias, + "gamma": gamma, + "beta": beta, + "mean": mean, + "variance": variance, + "epsilon": epsilon, + } + + new_conv_weight, new_conv_bias = self._apply_weight_transform(inputs, is_deconv) + + np.testing.assert_equal(new_conv_weight, conv_weight) + np.testing.assert_equal(new_conv_bias, conv_bias) + + @pytest.mark.parametrize( + "conv_type, dtype", + itertools.product( + ["conv", "conv_transpose"], + [np.float16, np.float32], + ), + ) + def test_weight_transform_conv_type(self, conv_type, dtype): + """ + The weight transform function should return an updated conv weight with correct data type + """ + # parameters for conv + is_deconv = conv_type == "conv_transpose" + conv_weight = np.arange(20).astype(dtype) + conv_weight = ( + np.reshape(conv_weight, (10, 2, 1, 1)) + if is_deconv + else np.reshape(conv_weight, (20, 1, 1, 1)) + ) + conv_bias = np.arange(20).astype(dtype) + + # parameters for batch_norm + gamma = np.ones(20).astype(dtype) + beta = np.zeros(20).astype(dtype) + mean = np.zeros(20).astype(dtype) + variance = np.ones(20).astype(dtype) + epsilon = dtype(0.1) + + inputs = { + "conv_weight": conv_weight, + "conv_bias": conv_bias, + "gamma": gamma, + "beta": beta, + "mean": mean, + "variance": variance, + "epsilon": epsilon, + } + + new_conv_weight, _ = self._apply_weight_transform(inputs, is_deconv, dtype) + + assert ( + new_conv_weight.dtype == dtype + ), "the weight transform function should retain the weight's original dtype." + + @pytest.mark.parametrize( + "rank, groups, has_bias, backend", + itertools.product([3, 4, 5], [1, 2, 10], [False, True], backends), + ) + def test_conv(self, rank, groups, has_bias, backend): + """ + Input graph: + input -----> conv -----> batch_norm ---> out + + Output graph: + input -----> conv ----> out + + Different `rank` represents different conv dimensions: rank=3 for Conv1d, rank=4 for Conv2d, rank=5 for Conv3d. 
+ """ + Cin, Cout = 10, 30 + rank_to_input_shape = {3: (2, Cin, 20), 4: (2, Cin, 20, 24), 5: (2, Cin, 20, 24, 24)} + rank_to_conv_weight_shape = { + 3: (Cout, Cin // groups, 2), + 4: (Cout, Cin // groups, 2, 3), + 5: (Cout, Cin // groups, 2, 3, 3), + } + rank_to_output_shape = {3: (2, Cout, 19), 4: (2, Cout, 19, 22), 5: (2, Cout, 19, 22, 22)} + + input_shape = rank_to_input_shape[rank] + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = np.random.rand(*rank_to_conv_weight_shape[rank]) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + epsilon = 1e-2 + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_conv_batchnorm" + ) + + assert get_op_types_in_program(prev_prog) == ["conv", "batch_norm"] + assert get_op_types_in_program(prog) == ["conv"] + + # validate graph pass + output_shape = rank_to_output_shape[rank] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "rank, groups, has_bias, backend", + itertools.product([3, 4, 5], [1, 2, 10], [False, True], backends), + ) + def test_conv_transpose(self, rank, groups, has_bias, backend): + """ + Input graph: + input -----> conv_transpose -----> batch_norm ---> out + + Output graph: + input -----> conv_transpose ----> out + """ + Cin, Cout = 10, 30 + rank_to_input_shape = {3: (2, Cin, 20), 4: (2, Cin, 20, 24), 5: (2, Cin, 20, 24, 24)} + rank_to_conv_weight_shape = { + 3: (Cin, Cout // groups, 2), + 4: (Cin, Cout // groups, 2, 3), + 5: (Cin, Cout // groups, 2, 3, 3), + } + rank_to_output_shape = {3: (2, Cout, 21), 4: (2, Cout, 21, 26), 5: (2, Cout, 21, 26, 26)} + + input_shape = rank_to_input_shape[rank] + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = np.random.rand(*rank_to_conv_weight_shape[rank]) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv_transpose( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + + epsilon = 1e-5 + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_conv_batchnorm" + ) + + assert get_op_types_in_program(prev_prog) == ["conv_transpose", "batch_norm"] + assert get_op_types_in_program(prog) == ["conv_transpose"] + + # validate graph pass + output_shape = rank_to_output_shape[rank] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestConvBiasFusion: + @staticmethod + def get_conv(x, name, Cin=3, Cout=3): + conv_weight = np.random.rand(Cout, Cin, 2, 2) + x = mb.conv(x=x, weight=conv_weight, name=name) + return x + + @staticmethod + def get_linear(x, name, linear_op, C=3): + bias = np.arange(C).astype(np.float32) + bias = np.reshape(bias, (C, 1, 1)) + x = 
getattr(mb, linear_op)(x=x, y=bias, name=name) + return x + + @pytest.mark.parametrize( + "rank, linear_op", + itertools.product([4], ["add", "sub"]), + ) + def test_conv(self, rank, linear_op): + """ + Input graph: + input -----> conv -----> add/sub ---> out + + Output graph: + If the linear op is trainable, the program is not modified. + Otherwise, conv and the linear op will be fused: + input -----> conv ----> out + """ + Cin, Cout = 3, 3 + input_shape = (2, Cin, 100, 100) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = self.get_conv(x, "conv") + x = self.get_linear(x, "linear", linear_op) + return x + + apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + assert get_op_types_in_program(prog) == ["conv"] + + """ + Input graph: + Const + | + V + input -----> convolution -----> add/sub ----> relu ---> out + + Output graph: + input -----> convolution -----> relu ----> out + """ + + @pytest.mark.parametrize( + "conv_dim, \ + flip_add_input_order, \ + add_batch_dim_to_const, \ + use_sub_instead, \ + prebuilt_bias, \ + scalar_elementwise, \ + use_conv_transpose", + itertools.product( + [2, 3], # 1D conv conversion broken even without the pass: rdar://problem/62960720 + [True, False], # flip_add_input_order + [True, False], # add_batch_dim_to_const + [True, False], # use_sub_instead + [True, False], # prebuilt_bias + [True, False], # scalar_elementwise + [True, False], # use_conv_transpose + ), + ) + def test_fuse_conv_bias( + self, + conv_dim, + flip_add_input_order, + add_batch_dim_to_const, + use_sub_instead, + prebuilt_bias, + scalar_elementwise, + use_conv_transpose, + ): + + if flip_add_input_order and use_sub_instead: + return + + if use_conv_transpose and conv_dim != 2: + return + + input_shape = None + W = None + Cout = 8 + Cin = 3 + D = 10 + const = np.random.rand(Cout) if add_batch_dim_to_const else np.random.rand(1, Cout) + const = np.expand_dims(const, axis=-1) + + if conv_dim == 1: + input_shape = (1, Cin, D) + W = np.random.rand(Cout, Cin, 1) + elif conv_dim == 2: + input_shape = (1, Cin, D, D) + W = np.random.rand(Cout, Cin, 1, 1) + const = np.expand_dims(const, axis=-1) + elif conv_dim == 3: + input_shape = (1, Cin, D, D, D) + W = np.random.rand(Cout, Cin, 1, 1, 1) + const = np.expand_dims(const, axis=-1) + const = np.expand_dims(const, axis=-1) + + if use_conv_transpose: + W = np.swapaxes(W, 0, 1) + output_shape = list(input_shape) + output_shape[1] = Cout + + if scalar_elementwise: + const = np.random.uniform(0) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + kwargs = { + "x": x, + "weight": W, + "pad_type": "valid", + "dilations": [1] * conv_dim, + "strides": [1] * conv_dim, + } + if prebuilt_bias: + kwargs["bias"] = np.random.rand(Cout) + + x = mb.conv_transpose(**kwargs) if use_conv_transpose else mb.conv(**kwargs) + + if use_sub_instead: + x = mb.sub(x=x, y=const) + else: + x = mb.add( + x=const if flip_add_input_order else x, + y=x if flip_add_input_order else const, + ) + x = mb.relu(x=x) + return x + + element_op = "sub" if use_sub_instead else "add" + conv_op = "conv" if not use_conv_transpose else "conv_transpose" + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prev_prog) == [conv_op, element_op, "relu"] + assert get_op_types_in_program(prog) == [conv_op, "relu"] + + old_bias = prev_block.find_ops(op_type=conv_op)[0].inputs.get("bias", None) + 
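+ # The fusion folds the elementwise constant into the conv bias, i.e. the fused
+ # bias should equal old_bias + squeeze(const) for "add" and
+ # old_bias - squeeze(const) for "sub"; the checks below verify exactly that.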
old_bias_val = 0 if old_bias is None else old_bias.val + assert old_bias_val is not None + assert block.find_ops(op_type=conv_op)[0].inputs["bias"] is not None + new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val + assert new_bias_val is not None + if use_sub_instead: + np.testing.assert_almost_equal(old_bias_val - np.squeeze(const), new_bias_val) + else: + np.testing.assert_almost_equal(old_bias_val + np.squeeze(const), new_bias_val) + + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: tuple(output_shape)}, + ) + + """ + Input graph: + Const + | + V + input -----> convolution -----> transpose -----> add/sub ---> out + + Output graph: + input -----> convolution -----> transpose -----> out + """ + + @pytest.mark.parametrize( + "conv_dim, has_bias, is_sub, is_conv_first_input, is_bias_scalar, is_deconv, is_all_1s", + itertools.product( + [1, 2, 3], # conv_dim + [True, False], # has_bias + [True, False], # is_sub + [True, False], # is_conv_first_input + [True, False], # is_bias_scalar + [True, False], # is_deconv + [True, False], # is_all_1s + ), + ) + def test_fuse_conv_bias_transpose_pattern( + self, + conv_dim, + has_bias, + is_sub, + is_conv_first_input, + is_bias_scalar, + is_deconv, + is_all_1s, + ): + if is_all_1s and is_bias_scalar: + return + + # construct the conv weight/bias + input_shape = None + Cout = 8 + Cin = 3 + D = 10 + conv_weight = None + conv_bias = ( + np.arange(Cout).astype(np.float32) if has_bias else np.zeros(Cout).astype(np.float32) + ) + rank = conv_dim + 2 + + if conv_dim == 1: + input_shape = (1, Cin, D) + conv_weight = np.random.rand(Cout, Cin, 1) + elif conv_dim == 2: + input_shape = (1, Cin, D, D) + conv_weight = np.random.rand(Cout, Cin, 1, 1) + elif conv_dim == 3: + input_shape = (1, Cin, D, D, D) + conv_weight = np.random.rand(Cout, Cin, 1, 1, 1) + + if is_deconv: + conv_weight = np.swapaxes(conv_weight, 0, 1) + + output_shape = list(input_shape) + output_shape[1] = Cout + output_shape = np.array(output_shape) + + # generate the perm for the transpose op + perm = np.arange(rank) + np.random.shuffle(perm) + output_shape = output_shape[perm] + cout_index = np.where(perm == 1)[0][0] + + # generate the const bias, and reshape it to a random broadcastable shape + bias = np.arange(Cout).astype(np.float32) + bias_shape = [1] * rank + bias_shape[cout_index] = Cout + if cout_index != 0: + crop_index = np.random.randint(low=0, high=cout_index + 1) + bias_shape = bias_shape[crop_index:] + bias = np.reshape(bias, bias_shape) + + # for the scalar case, randomly generate a number + if is_bias_scalar: + bias = np.random.uniform(0) + + # for the all 1s case, randomly generate a number and reshape it to (1, 1, ..., 1) + if is_all_1s: + bias = np.array([np.random.uniform(0)]) + bias_rank = np.random.randint(low=1, high=rank + 1) + bias_shape = [1] * bias_rank + bias = np.reshape(bias, bias_shape) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv or conv_transpose + kwargs = { + "x": x, + "weight": conv_weight, + "pad_type": "valid", + "dilations": [1] * conv_dim, + "strides": [1] * conv_dim, + } + if has_bias: + kwargs["bias"] = conv_bias + x = mb.conv_transpose(**kwargs) if is_deconv else mb.conv(**kwargs) + + # transpose + x = mb.transpose(x=x, perm=perm) + + # elementwise op + element_args = {"x": x, "y": bias} if is_conv_first_input else {"x": bias, "y": x} + element_op = mb.sub if is_sub else mb.add + x = element_op(**element_args) + return x + + element_op = "sub" if
is_sub else "add" + conv_op = "conv" if not is_deconv else "conv_transpose" + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prev_prog) == [conv_op, "transpose", element_op] + assert get_op_types_in_program(prog) == [conv_op, "transpose"] + + # get the value of new weight/bias + new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val + assert new_bias_val is not None + + new_weight_val = block.find_ops(op_type=conv_op)[0].inputs["weight"].val + assert new_weight_val is not None + + # compare the weight + if is_sub and not is_conv_first_input: + np.testing.assert_almost_equal(new_weight_val, -conv_weight) + else: + np.testing.assert_almost_equal(new_weight_val, conv_weight) + + # compare the bias + if is_sub: + if is_conv_first_input: + bias = -bias + else: + conv_bias = -conv_bias + expected_conv_bias_val = conv_bias + np.squeeze(bias) + np.testing.assert_almost_equal(expected_conv_bias_val, new_bias_val) + + # run the model + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: tuple(output_shape)}, + ) + + +class TestConvScaleFusion: + @staticmethod + def _apply_weight_transform(inputs, is_deconv, is_real_div, is_conv_first_input, const_type): + """ + Utility function to test the weight transform function in the conv scale fusion pass. + """ + Cin, _, groups = 10, 20, 10 + input_shape = (1, Cin, 2, 2) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # create conv or deconv op + if is_deconv: + conv = mb.conv_transpose( + x=x, + weight=inputs["conv_weight"], + bias=inputs["conv_bias"], + groups=groups, + ) + else: + conv = mb.conv( + x=x, + weight=inputs["conv_weight"], + bias=inputs["conv_bias"], + groups=groups, + ) + + # create const op based on different mode + scale = inputs["scale"] + + if const_type == "python_scale": + scale = mb.const(val=scale) + elif const_type == "numpy_scale": + if type(scale) == int: + np_value = np.int32(scale) + elif type(scale) == float: + np_value = np.float32(scale) + scale = mb.const(val=np_value) + elif const_type == "numpy_0d_array": + scale = mb.const(val=np.array(scale)) + elif const_type == "numpy_1d_array": + scale = mb.const(val=np.array([scale])) + else: + scale = mb.const(val=scale) + + # do the scale operation + if is_real_div: + x = mb.real_div( + x=conv, + y=scale, + ) + else: + if is_conv_first_input: + x = mb.mul( + x=conv, + y=scale, + ) + else: + x = mb.mul( + x=scale, + y=conv, + ) + + return x + + apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + + # get the updated weight from the prog + conv_op = [] + for op in prog["main"].operations: + if op.op_type == "const": + continue + conv_op.append(op) + assert len(conv_op) == 1, "should only have one conv / conv_transpose layer."
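+ # hand the fused weight/bias back to the caller, which checks the arithmetic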
+ + return conv_op[0].weight.val, conv_op[0].bias.val + + @pytest.mark.parametrize( + "conv_type, is_real_div, is_conv_first_input, const_type", + itertools.product( + ["conv", "conv_transpose"], + [True, False], + [True, False], + [ + "python_scale", + "numpy_scale", + "numpy_0d_array", + "numpy_1d_array", + "numpy_3d_array", + "numpy_4d_array", + ], + ), + ) + def test_weight_transform_conv(self, conv_type, is_real_div, is_conv_first_input, const_type): + """ + Test the weight transform function in the conv scale fusion pass + """ + # parameters for conv + is_deconv = conv_type == "conv_transpose" + conv_weight = np.arange(20).astype(np.float32) + conv_weight = ( + np.reshape(conv_weight, (10, 2, 1, 1)) + if is_deconv + else np.reshape(conv_weight, (20, 1, 1, 1)) + ) + conv_bias = np.arange(20).astype(np.float32) + + if const_type == "numpy_3d_array": + scale = np.reshape(np.arange(20).astype(np.float32), (20, 1, 1)) + elif const_type == "numpy_4d_array": + scale = np.reshape(np.arange(20).astype(np.float32), (1, 20, 1, 1)) + else: + scale = 12.7 + + inputs = { + "conv_weight": conv_weight, + "conv_bias": conv_bias, + "scale": scale, + } + + new_conv_weight, new_conv_bias = self._apply_weight_transform( + inputs, is_deconv, is_real_div, is_conv_first_input, const_type + ) + + if is_real_div: + scale = 1.0 / scale + + if const_type != "numpy_3d_array" and const_type != "numpy_4d_array": + expected_bias = conv_bias * scale + expected_weight = conv_weight * scale + else: + scale = np.reshape(scale, (20,)) + expected_bias = conv_bias * scale + if is_deconv: + scale = np.reshape(scale, (20, 1, 1)) + expected_weight = np.reshape(np.arange(20), (20, 1, 1)) + expected_weight = expected_weight * scale + expected_weight = np.reshape(expected_weight, (10, 2, 1, 1)).astype(np.float32) + else: + scale = np.reshape(scale, (20, 1, 1, 1)) + expected_weight = conv_weight * scale + + np.testing.assert_almost_equal(new_conv_weight, expected_weight) + np.testing.assert_almost_equal(new_conv_bias, expected_bias) + + assert ( + new_conv_weight.dtype == conv_weight.dtype + ), "weight data type should not be changed after conv_scale_fusion pass." + assert ( + new_conv_bias.dtype == conv_weight.dtype + ), "bias data type should be the same as the weight for conv layer."
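+ # A summary of the arithmetic verified above: with scale s, "mul" rewrites
+ # conv(x, W, b) * s as conv(x, W * s, b * s), and "real_div" is the same with
+ # s replaced by 1 / s. The broadcast shape used for s differs between conv and
+ # conv_transpose because the output-channel axis of the weight differs
+ # (axis 0 for conv, axis 1 for conv_transpose).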
+ + @pytest.mark.parametrize( + "rank, groups, has_bias, scale_op, scale_type, backend", + itertools.product( + [3, 4], [1, 10], [False, True], ["mul", "real_div"], ["scalar", "vector"], backends + ), + ) + def test_conv(self, rank, groups, has_bias, scale_op, scale_type, backend): + """ + Input graph: + input -----> conv -----> mul/real_div ---> out + + Output graph: + input -----> conv ----> out + """ + Cin, Cout = 10, 30 + input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = ( + np.random.rand(Cout, Cin // groups, 2) + if rank == 3 + else np.random.rand(Cout, Cin // groups, 2, 3) + ) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + if scale_type == "scalar": + scale = np.array([2.3]) + else: + scale = np.arange(Cout).astype(np.float32) + scale = np.reshape(scale, (1, Cout, 1) if rank == 3 else (Cout, 1, 1)) + + # scale layer + if scale_op == "mul": + x = mb.mul(x=x, y=scale) + elif scale_op == "real_div": + x = mb.real_div(x=x, y=scale) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + + assert get_op_types_in_program(prev_prog) == ["conv", scale_op] + assert get_op_types_in_program(prog) == ["conv"] + + # validate graph pass + output_shape = (2, Cout, 19) if rank == 3 else (2, Cout, 19, 22) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "rank, groups, has_bias, scale_op, scale_type, backend", + itertools.product( + [3, 4], [1, 10], [False, True], ["mul", "real_div"], ["scalar", "vector"], backends + ), + ) + def test_conv_transpose(self, rank, groups, has_bias, scale_op, scale_type, backend): + """ + Input graph: + input -----> conv_transpose -----> mul/real_div ---> out + + Output graph: + input -----> conv_transpose ----> out + """ + Cin, Cout = 10, 30 + input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = ( + np.random.rand(Cin, Cout // groups, 2) + if rank == 3 + else np.random.rand(Cin, Cout // groups, 2, 3) + ) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv_transpose( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + if scale_type == "scalar": + scale = np.array([2.3]) + else: + scale = np.arange(Cout).astype(np.float32) + scale = np.reshape(scale, (Cout, 1) if rank == 3 else (1, Cout, 1, 1)) + + # scale layer + if scale_op == "mul": + x = mb.mul(x=x, y=scale) + elif scale_op == "real_div": + x = mb.real_div(x=x, y=scale) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + + assert get_op_types_in_program(prev_prog) == ["conv_transpose", scale_op] + assert get_op_types_in_program(prog) == ["conv_transpose"] + + # validate graph pass + output_shape = (2, Cout, 21) if rank == 3 else (2, Cout, 21, 26) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestFusePadConv(unittest.TestCase): + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + + Output graph: + input -----> transpose -----> pad ----> conv -----> transpose ----> out + """ 
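+ # For this swap to be valid, the pad amounts must be re-expressed in the
+ # transposed layout: the per-axis (before, after) pad pairs are permuted along
+ # with the transpose's perm, e.g. pad=[0, 0, 1, 1, 1, 1, 0, 0] on an NHWC
+ # tensor becomes pad=[0, 0, 0, 0, 1, 1, 1, 1] once moved after perm=[0, 3, 1, 2].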
+ + def test_simple_direct_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + x = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), ["pad", "transpose", "conv", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["transpose", "pad", "conv", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 20, 24)}, + ) + + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + | + | + --------> transpose -----> conv -----> transpose ---> out + + Output graph: + input ---------> transpose -----> pad -----> conv -----> transpose ---> out + | + | + ------> transpose -----> pad -----> conv -----> transpose ---> out + + """ + + def test_pad_transposed_forked_conv(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + pad = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + y = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + y = mb.conv(x=y, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + y = mb.transpose(x=y, perm=[0, 2, 3, 1]) + return x, y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["pad", "transpose", "conv", "transpose", "transpose", "conv", "transpose"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "pad", "conv", "transpose", "transpose", "pad", "conv", "transpose"], + ) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={ + block.outputs[0].name: (1, 16, 20, 24), + block.outputs[1].name: (1, 16, 20, 24), + }, + ) + + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + | + | + ---------> out + + Output graph: + No change. 
+ """ + + def test_pad_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + pad = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x, pad + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), ["pad", "transpose", "conv", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["pad", "transpose", "conv", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={ + block.outputs[0].name: (1, 16, 20, 24), + block.outputs[1].name: (1, 18, 22, 24), + }, + ) + + +class TestConcatToPixelShuffle(unittest.TestCase): + def test_success(self): + """ + Input graph: + input1(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----> concat(axis=3, interleave=True) ---> out(1, 2, 6, 8) + ^ ^ + | | + input2(1, 2, 3, 4) ------------------- | + | + input3(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----------------------| + ^ + | + input4(1, 2, 3, 4) ------------------| + + Output graph: + input1(1, 2, 3, 4) -----> concat(axis=1) ---> pixel_shuffle(upsample_factor=2) ----> out(1, 2, 6, 8) + ^ + input2(1, 2, 3, 4) ----------| + | + input3(1, 2, 3, 4) ----------| + | + input4(1, 2, 3, 4) ----------| + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "pixel_shuffle"]) + + inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 8)}, + ) + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. + return + + input_dict = dict() + input_dict["x1"] = np.ones(inputs["x1"]) + input_dict["x2"] = np.ones(inputs["x2"]) * 2 + input_dict["x3"] = np.ones(inputs["x3"]) * 3 + input_dict["x4"] = np.ones(inputs["x4"]) * 4 + + output_name = block.outputs[0].name + + ab = np.reshape( + np.stack((input_dict["x1"], input_dict["x2"]), axis=3), newshape=[1, 2, 6, 4] + ) + cd = np.reshape( + np.stack((input_dict["x3"], input_dict["x4"]), axis=3), newshape=[1, 2, 6, 4] + ) + old_prediction = np.reshape(np.stack((ab, cd), axis=4), newshape=[1, 2, 6, 8]) + + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_nested(self): + """ + Two nested blocks that will each be transformed. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4, x5, x6, x7, x8): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + ef = mb.concat(values=[x5, x6], axis=2, interleave=True) + gh = mb.concat(values=[x7, x8], axis=2, interleave=True) + y = mb.concat(values=[ef, gh], axis=3, interleave=True) + + z = mb.concat(values=[x, y], axis=1) + + return z + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), + ["concat", "concat", "concat", "concat", "concat", "concat", "concat"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["concat", "pixel_shuffle", "concat", "pixel_shuffle", "concat"], + ) + + inputs = { + "x1": (1, 2, 3, 4), + "x2": (1, 2, 3, 4), + "x3": (1, 2, 3, 4), + "x4": (1, 2, 3, 4), + "x5": (1, 2, 3, 4), + "x6": (1, 2, 3, 4), + "x7": (1, 2, 3, 4), + "x8": (1, 2, 3, 4), + } + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 4, 6, 8)}, + ) + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + output_name = block.outputs[0].name + + ab = np.reshape( + np.stack((input_dict["x1"], input_dict["x2"]), axis=3), newshape=[1, 2, 6, 4] + ) + cd = np.reshape( + np.stack((input_dict["x3"], input_dict["x4"]), axis=3), newshape=[1, 2, 6, 4] + ) + x = np.reshape(np.stack((ab, cd), axis=4), newshape=[1, 2, 6, 8]) + + ef = np.reshape( + np.stack((input_dict["x5"], input_dict["x6"]), axis=3), newshape=[1, 2, 6, 4] + ) + gh = np.reshape( + np.stack((input_dict["x7"], input_dict["x8"]), axis=3), newshape=[1, 2, 6, 4] + ) + y = np.reshape(np.stack((ef, gh), axis=4), newshape=[1, 2, 6, 8]) + + old_prediction = np.concatenate((x, y), axis=1) + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if _IS_MACOS: + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose( + old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05 + ) + + def test_failure_0(self): + """ + The h_concat has three inputs, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2, x3], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4, x1], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_1(self): + """ + The first concat is on the wrong axis, so the pattern won't match. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=3, interleave=True) + cd = mb.concat(values=[x3, x4], axis=3, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_2(self): + """ + The last concat is on the wrong axis, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=2, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_3(self): + """ + The first concat is not interleaved, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=False) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_4(self): + """ + The second concat is not interleaved, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=False) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_5(self): + """ + The last concat is not interleaved, so the pattern won't match. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=False) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_6(self): + """ + The inputs are the wrong rank, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_7(self): + """ + Extra input to the w_concats means the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 8, 4)), + ] + ) + def prog(x1, x2, x3, x4, x5): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd, x5], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + +class TestConcatInterleave: + def test_concat_interleave_fusion_pass(self): + """ + Given: + %3 = concat(%1.a, %1.b, axis=-3, interleave=False) #shape = (B, n*C, H, W) + %4 = reshape(%3) #shape = (B, n, C, H, W) + %5 = transpose(%4, perm=[0, 2, 1, 3, 4]) # shape = (B, C, n, H, W) + %6 = reshape(%5) # shape = (B, C*n, H, W) + + Result: + %6 = concat(%1.a, %1.b, axis=-3, interleave=True) + """ + B, C, H, W = 1, 10, 20, 20 + + @mb.program( + input_specs=[mb.TensorSpec(shape=(B, C, H, W)), mb.TensorSpec(shape=(B, C, H, W))] + ) + def prog(x, y): + z = mb.concat(values=[x, y], axis=1) + z = mb.reshape(x=z, shape=(B, 2, C, H, W)) + z = mb.transpose(x=z, perm=[0, 2, 1, 3, 4]) + z = mb.reshape(x=z, shape=(B, -1, H, W)) + return z + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::detect_concat_interleave" + ) + assert get_op_types_in_program(prev_prog) == ["concat", "reshape", "transpose", "reshape"] + assert get_op_types_in_program(prog) == ["concat"] + concat_op = prog.find_ops(op_type="concat", exactly_one=True)[0] + assert concat_op.interleave.val + assert_model_is_valid( + prog, + {"x": (B, C, H, W), "y": (B, C, H, W)}, + expected_output_shapes={block.outputs[0].name: (B, 2 * C, H, 
W)}, + ) + + +class TestFuseOnehotMatmulToGather: + @pytest.mark.parametrize("rank", [1, 2, 3, 4]) + def test_fuse_onehot_matmul_to_gather(self, rank): + """ + Input: + %2 = one_hot(%1, on_value=1, off_value=0, axis=-1) + %3 = const() # rank 2 + %4 = matmul(%2, %3) + + Output: + %4 = gather(%3, %2, axis=0) + """ + rank4_shape = (10, 3, 6, 7) + input_shape = rank4_shape[-rank:] + vocab_size = 15 + embedding_size = 12 + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape, dtype=types.int32)]) + def prog(x): + x = mb.one_hot( + indices=x, on_value=1.0, off_value=0.0, axis=-1, one_hot_vector_size=vocab_size + ) + x = mb.matmul(x=x, y=np.random.rand(vocab_size, embedding_size)) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_onehot_matmul_to_gather" + ) + assert get_op_types_in_program(prev_prog) == ["one_hot", "matmul"] + assert get_op_types_in_program(prog) == ["gather"] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: input_shape + (embedding_size,)}, + ) + + +class TestReplaceStackReshape(unittest.TestCase): + def test_with_interleave(self): + """ + input1(1, 5, 3, 4) -----> stack(axis=2) -----> reshape(shape=(1, 10, 3, 4)) ---> out(1, 10, 3, 4) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + input -----> concat ----> out + + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + x = mb.stack(values=[x1, x2], axis=2) + x = mb.reshape(x=x, shape=[1, 10, 3, 4]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["concat"]) + + inputs = {"x1": (1, 5, 3, 4), "x2": (1, 5, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 10, 3, 4)}, + ) + + concat_ops = [op for op in block.operations if op.op_type == "concat"] + concat_op = concat_ops[0] + assert concat_op.interleave.val == True + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. 
+ return + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + old_prediction = np.reshape( + np.stack([input_dict["x1"], input_dict["x2"]], axis=2), newshape=[1, 10, 3, 4] + ) + + prediction = mlmodel.predict(input_dict) + + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_without_interleave(self): + """ + Input graph: + input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(1, 10, 3, 4)) ---> out(1, 10, 3, 4) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + input -----> concat ----> out + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + x = mb.stack(values=[x1, x2], axis=1) + x = mb.reshape(x=x, shape=[1, 10, 3, 4]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["concat"]) + + inputs = {"x1": (1, 5, 3, 4), "x2": (1, 5, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 10, 3, 4)}, + ) + + concat_ops = [op for op in block.operations if op.op_type == "concat"] + concat_op = concat_ops[0] + assert concat_op.interleave.val == False + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. + return + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + old_prediction = np.reshape( + np.stack([input_dict["x1"], input_dict["x2"]], axis=1), newshape=[1, 10, 3, 4] + ) + + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_multiple(self): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.reshape(x=a, shape=[1, 4, 3, 4]) + + b = mb.stack(values=[x3, x4], axis=1) + b = mb.reshape(x=b, shape=[1, 4, 3, 4]) + + c = mb.stack(values=[a, b], axis=2) + c = mb.reshape(x=c, shape=[1, 4, 6, 4]) + + return c + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), + ["stack", "reshape", "stack", "reshape", "stack", "reshape"], + ) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 4, 6, 4)}, + ) + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. 
+ return + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + branch_1 = np.reshape( + np.stack([input_dict["x1"], input_dict["x2"]], axis=1), newshape=[1, 4, 3, 4] + ) + branch_2 = np.reshape( + np.stack([input_dict["x3"], input_dict["x4"]], axis=1), newshape=[1, 4, 3, 4] + ) + old_prediction = np.reshape(np.stack([branch_1, branch_2], axis=2), newshape=[1, 4, 6, 4]) + + prediction = mlmodel.predict(input_dict) + + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_negative_1(self): + """ + Input graph: + input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 5, 6, 4)) ---> out(1, 5, 6, 4) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + Unchanged -- this graph is not equivalent to a concat. + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.reshape(x=a, shape=[-1, 5, 6, 4]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"]) + + def test_negative_2(self): + """ + Input graph: + input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 5, 12, 2)) ---> out(1, 5, 12, 2) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + Unchanged -- this graph is not equivalent to a concat. + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.reshape(x=a, shape=[-1, 5, 12, 2]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"]) + + def test_negative_3(self): + """ + Input graph: + input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 2, 5, 4, 3)) ---> out(1, 2, 5, 4, 3) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + Unchanged -- this graph is not equivalent to a concat. + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.reshape(x=a, shape=[-1, 2, 5, 4, 3]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"]) + + def test_negative_4(self): + """ + More than two inputs to the stack op -- can't be transformed.
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 5, 3, 4)), + mb.TensorSpec(shape=(1, 5, 3, 4)), + mb.TensorSpec(shape=(1, 5, 3, 4)), + ] + ) + def prog(x1, x2, x3): + a = mb.stack(values=[x1, x2, x3], axis=1) + a = mb.reshape(x=a, shape=[-1, 15, 4, 3]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"]) + + def test_negative_5(self): + """ + The stack and reshape are not adjacent, so the graph is not transformed. + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.relu(x=a) + a = mb.reshape(x=a, shape=[-1, 10, 4, 3]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "relu", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "relu", "reshape"]) + + def test_negative_6(self): + """ + The stack op's output is used elsewhere in the graph, so it can't be removed + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + b = mb.reshape(x=a, shape=[-1, 10, 4, 3]) + c = mb.relu(x=a) + c = mb.reshape(x=c, shape=[-1, 10, 4, 3]) + d = mb.add(x=b, y=c) + return d + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual( + get_op_types_in_program(prev_prog), ["stack", "reshape", "relu", "reshape", "add"] + ) + self.assertEqual( + get_op_types_in_program(prog), ["stack", "reshape", "relu", "reshape", "add"] + ) + + def test_negative_7(self): + """ + The stack op is not followed by any other ops. 
+ """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack"]) + self.assertEqual(get_op_types_in_program(prog), ["stack"]) + + +class TestUseReflectionPadding: + def test_success_w_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left, x1, right], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 10)}, + ) + + def test_success_w_axis_multiple(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + right1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left0, left1, x1, right0, right1], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_success_h_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 2, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + x = mb.concat(values=[left, x1, right], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 8)}, + ) + + def test_failure_wrong_concat_order(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 2, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + # Concat is not in correct order + x = mb.concat(values=[left, 
right, x1], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 8)}, + ) + + def test_failure_wrong_concat_order_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + right1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + # concat args are out of order + x = mb.concat(values=[left0, left1, x1, right1, right0], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + assert get_op_types_in_program(prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_failure_wrong_slice_size(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + # slice is too big + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 3, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + x = mb.concat(values=[left, x1, right], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 9, 8)}, + ) + + def test_failure_not_all_same_input(self): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8)), mb.TensorSpec(shape=(1, 2, 6, 8))] + ) + def prog(x1, x2): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + # one of the slices consumes a different input from the others + right1 = mb.slice_by_index( + x=x2, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left0, left1, x1, right0, right1], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] 
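+        # The pass must not fire here: reflection padding can only be inferred
+        # when every slice reads the same source tensor, but right1 above
+        # slices x2, so the op list is expected to be unchanged.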
+ assert get_op_types_in_program(prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + + inputs = {"x1": (1, 2, 6, 8), "x2": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_failure_slice_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left, x1, right], axis=3) + + # slice is an output + return x, right + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={ + block.outputs[0].name: (1, 2, 6, 10), + block.outputs[1].name: (1, 2, 6, 1), + }, + ) + + def test_concat_input_only(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x): + x = mb.concat(values=[x, x, x], axis=0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prog) == ["concat"] + + inputs = {"x": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (3, 2, 6, 8)}, + ) + + +class TestDivideToMultiply: + def test_divide_to_multiply(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + div_val = np.random.rand(2, 4).astype(np.float32) + div_const = mb.const(val=div_val) + + div_val_1 = np.random.rand(2, 4).astype(np.float32) + div_const_1 = mb.const(val=div_val_1) + + real_div = mb.real_div(x=x, y=div_const) + + return mb.real_div(x=real_div, y=div_const_1) + + assert_op_count_match(prog, expect=2, op="real_div") + assert_op_count_match(prog, expect=0, op="mul") + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::divide_to_multiply"](prog) + assert_same_output_names(prev_prog, prog) + assert_op_count_match(prog, expect=0, op="real_div") + assert_op_count_match(prog, expect=2, op="mul") + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (2, 4)}) + + +class TestFuseElementwiseToBatchNorm: + """ + Input graph: + Const Const + | | + V V + input -----> transpose -----> mul ----> add ---> out + + Output graph: + input -----> transpose -----> batchnorm ----> out + """ + + @pytest.mark.parametrize( + "flip_mul_input_order, flip_add_input_order, rank_3_const_input", + itertools.product([False, True], [False, True], [False, True]), + ) + def test_mul_add_fusion_to_batchnorm( + self, flip_mul_input_order, flip_add_input_order, rank_3_const_input + ): + + C = 3 + gamma = np.random.rand(1, C, 1, 1) + beta = np.random.rand(1, C, 1, 1) + if rank_3_const_input: + gamma = np.squeeze(gamma, axis=0) + beta = np.squeeze(beta, axis=0) + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 10, 10, C))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + if flip_mul_input_order: + x = mb.mul(x=gamma, y=x) + else: + x = mb.mul(x=x, y=gamma) + if flip_add_input_order: + x = mb.add(x=beta, y=x) + else: + x = mb.add(x=x, y=beta) + return x + + prev_prog, prev_block, block = 
apply_pass_and_basic_check( + prog, "common::fuse_elementwise_to_batchnorm" + ) + assert get_op_types_in_program(prev_prog) == ["transpose", "mul", "add"] + assert get_op_types_in_program(prog) == ["transpose", "batch_norm"] + assert_model_is_valid( + prog, + {"x": (1, 10, 10, C)}, + expected_output_shapes={block.outputs[0].name: (1, C, 10, 10)}, + ) + + +class TestRank0ExpandDimsSwap: + """ + Input graph: + 2.0 + | + v + input --> slice_by_index --> sub --> expand_dims --> output + + Output graph: + [2.0] + | + v + input --> slice_by_index --> expand_dims --> sub --> output + """ + + @pytest.mark.skipif( + ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+" + ) + @pytest.mark.parametrize( + "reverse_order, elem_op", + itertools.product( + [True, False], + ["add", "sub", "mul", "real_div", "floor_div"], + ), + ) + def test(self, reverse_order, elem_op): + x_shape = [ + 1, + ] + + @mb.program(input_specs=[mb.TensorSpec(shape=x_shape)]) + def program(x): + x = mb.slice_by_index(x=x, begin=[0], end=[1], squeeze_mask=[True]) + func = getattr(mb, elem_op) + + if reverse_order: + x = func(x=2.0, y=x) + else: + x = func(x=x, y=2.0) + + expand = mb.expand_dims(x=x, axes=[0]) + other_1 = mb.add(x=x, y=[1.0, 2.0, 3.0]) + other_2 = mb.sub(x=x, y=[1.0, 2.0, 3.0]) + return expand, other_1, other_2 + + prev_prog, prev_block, block = apply_pass_and_basic_check( + program, "common::rank0_expand_dims_swap" + ) + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + elem_op, + "expand_dims", + "add", + "sub", + ] + assert get_op_types_in_program(program) == [ + "slice_by_index", + "expand_dims", + "expand_dims", + elem_op, + "squeeze", + "add", + "sub", + ] + assert_model_is_valid( + program=program, + inputs={"x": x_shape}, + expected_output_shapes={ + block.outputs[0].name: tuple(x_shape), + block.outputs[1].name: (3,), + block.outputs[2].name: (3,), + }, + ) + + +class TestImageInputPreprocess(unittest.TestCase): + """ + Input graph: + input (format=NHWC) ------> transpose(axis=[0, 3, 1, 2]) ---------> add ----> relu ---> out + | ^ + | | + ---> relu ---> transpose(axis=[0, 3, 1, 2]) --- + + Intermediate graph: + input (format=NCHW) -----> transpose(axis=[0, 2, 3, 1]) ----> transpose(axis=[0, 3, 1, 2]) ---------> add ----> relu ---> out + | ^ + | | + ---> relu ---> transpose(axis=[0, 3, 1, 2]) --- + + + Output graph: + input (format=NCHW) -----> relu -----> add -----> relu -----> out + | ^ + | | + ------------------- + """ + + def test_fusion_with_image_intermediate_graph(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.relu(x=x) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x1, y=x3) + return mb.relu(x=x4) + + prog.main_input_types = [ct.ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)] + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::image_input_preprocess" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "relu", "transpose", "add", "relu"] + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "transpose", "relu", "transpose", "add", "relu"], + ) + + def test_fusion_with_image_full(self): + # Avoid circular import + from coremltools import convert + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.relu(x=x) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x1, 
y=x3) + return mb.relu(x=x4) + + mlmodel = convert( + prog, + inputs=[ct.ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)], + source="milinternal", + convert_to="neuralnetwork", + ) + assert mlmodel is not None + assert len(mlmodel.get_spec().neuralNetwork.layers) == 3 + + +class TestSanitizeInputOutputNames: + def test_nn_backend_style_sanitization(self): + """ + Test that intermediate var names are unchanged, and + only model input and output names are modified, i.e. + sanitized (adhering to the format [a-zA-Z_][a-zA-Z0-9_]*) + for the NN backend. + """ + + prog = Program() + func_inputs = {"x/0": mb.placeholder(shape=[2, 3]), "y": mb.placeholder(shape=[2, 3])} + with Function(func_inputs) as ssa_fun: + x, y = ssa_fun.inputs["x/0"], ssa_fun.inputs["y"] + x = mb.relu(x=x, name="relu/1") + z = mb.add(x=x, y=y, name="out/1") + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::sanitize_input_output_names", skip_output_name_check=True + ) + + relu_op = prog.find_ops(op_type="relu", exactly_one=True)[0] + assert relu_op.inputs["x"].name == "x_0" # input name: sanitized + assert relu_op.outputs[0].name == "relu/1" # intermediate name: unchanged + assert block.outputs[0].name == "out_1" # output name: sanitized + + # convert prev_prog to NN backend + mlmodel = ct.convert(prev_prog) + spec = mlmodel._spec + assert spec.description.input[0].name == "x_0" + assert spec.description.output[0].name == "out_1" + relu_layer = spec.neuralNetwork.layers[0] + assert relu_layer.output[0] == "relu/1" + + +class TestUpdateOutputDtypes: + def test_single_output(self): + """ + Given: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %abs: (1, 20, int32)(Tensor) = abs(x=%input, name="abs") + %output_square: (1, 20, int32)(Tensor) = square(x=%input, name="output_square") + } -> (%output_square) + } + prog.main_output_types = [ct.TensorType(dtype=np.float16)] + + Result: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %abs: (1, 20, int32)(Tensor) = abs(x=%input, name="abs") + %output_square_type_int32: (1, 20, int32)(Tensor) = square(x=%input, name="output_square") + %output_square: (1, 20, fp16)(Tensor) = cast(x=%output_square_type_int32, dtype="fp16", name="cast_0") + } -> (%output_square) + } + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 20), dtype=types.int32)]) + def prog(input): + x = mb.abs(x=input, name="abs") + x = mb.square(x=input, name="output_square") + return x + + prog.set_main_output_types([ct.TensorType(dtype=np.float16)]) + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::update_output_dtypes" + ) + assert get_op_types_in_program(prev_prog) == ["abs", "square"] + assert prev_block.outputs[0].dtype == types.int32 + assert get_op_types_in_program(prog) == ["abs", "square", "cast"] + assert block.outputs[0].dtype == types.fp16 + assert block.outputs[0].name == "output_square" + + def test_multiple_outputs(self): + """ + Given: + ----- + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %split_0: (1, 10, int32)(Tensor), %split_1: (1, 10, int32)(Tensor) = split(x=%input, num_splits=2, axis=1, name="split") + } -> (%split_0, %split_1) + } + prog.main_output_types = [ct.TensorType(), ct.TensorType(dtype=np.float16)] + + Result: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %split_0: (1, 10, int32)(Tensor), %split_1_type_int32: (1, 10, int32)(Tensor) = split(x=%input, num_splits=2, axis=1, name="split") + 
%split_1: (1, 10, fp16)(Tensor) = cast(x=%split_1_type_int32, dtype="fp16", name="cast_0") + } -> (%split_0, %split_1) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 20), dtype=types.int32)]) + def prog(input): + x1, x2 = mb.split(x=input, num_splits=2, axis=1, name="split") + return x1, x2 + + prog.set_main_output_types([ct.TensorType(), ct.TensorType(dtype=np.float16)]) + _, _, block = apply_pass_and_basic_check(prog, "common::update_output_dtypes") + assert get_op_types_in_program(prog) == ["split", "cast"] + assert block.outputs[1].dtype == types.fp16 + assert block.outputs[1].name == "split_1" + + +class TestFuseLayerNormOrInstanceNorm: + @pytest.mark.parametrize("axes_size", [1, 2, 3]) + def test_layer_norm(self, axes_size): + """ + Detect layer norm pattern, found in the TF bert model. + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + where mean and variance are computed along axes [-1] or [-1,-2] and so on + and gamma and beta are constants with rank equal to the length of the axes parameter. + """ + shape = (3, 5, 6) + rank = len(shape) + axes = list(range(rank - axes_size, rank)) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + x1 = mb.reduce_mean(x=x, axes=axes, keep_dims=True) + x2 = mb.sub(x=x, y=x1) + x2 = mb.square(x=x2) + x2 = mb.reduce_mean(x=x2, axes=axes, keep_dims=True) + x2 = mb.add(x=x2, y=1e-5) + x2 = mb.rsqrt(x=x2) + x3 = mb.mul(x=np.random.rand(*shape[-len(axes) :]), y=x2) + x4 = mb.mul(x=x3, y=x1) + x5 = mb.mul(x=x, y=x3) + x4 = mb.sub(x=np.random.rand(*shape[-len(axes) :]), y=x4) + y = mb.add(x=x4, y=x5) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "sub", + "add", + ] + assert get_op_types_in_program(prog) == ["layer_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_1(self): + """ + Detect instance norm pattern + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + where input is rank 4, (N,C,H,W), axis=[2, 3], along which reduction happens, + and gamma and beta are of shape (1,C,1,1) + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + x1 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + x2 = mb.sub(x=x, y=x1) + x2 = mb.square(x=x2) + x2 = mb.reduce_mean(x=x2, axes=[2, 3], keep_dims=True) + x2 = mb.add(x=x2, y=1e-5) + x2 = mb.rsqrt(x=x2) + x3 = mb.mul(x=np.random.rand(1, shape[1], 1, 1), y=x2) + x4 = mb.mul(x=x3, y=x1) + x5 = mb.mul(x=x, y=x3) + x4 = mb.sub(x=np.random.rand(1, shape[1], 1, 1), y=x4) + y = mb.add(x=x4, y=x5) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "sub", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_1_rank_1_gamma_beta(self): + """ + Detect instance norm pattern + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * 
rsqrt(variance + eps)])
+
+        where the input is rank 4 with channel-last layout (N,H,W,C), reduction happens
+        along axes [1, 2], and gamma and beta are of shape (C,)
+        """
+        shape = (3, 5, 6, 7)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
+            x2 = mb.sub(x=x, y=x1)
+            x2 = mb.square(x=x2)
+            x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
+            x2 = mb.add(x=x2, y=1e-5)
+            x2 = mb.rsqrt(x=x2)
+            x3 = mb.mul(x=np.random.rand(shape[3]), y=x2)
+            x4 = mb.mul(x=x3, y=x1)
+            x5 = mb.mul(x=x, y=x3)
+            x4 = mb.sub(x=np.random.rand(shape[3]), y=x4)
+            y = mb.add(x=x4, y=x5)
+            return y
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "reduce_mean",
+            "sub",
+            "square",
+            "reduce_mean",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+        ]
+        assert get_op_types_in_program(prog) == ["transpose", "instance_norm", "transpose"]
+        assert_model_is_valid(
+            prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
+        )
+
+    def test_instance_norm_pattern_1_with_channel_last_data_format(self):
+        """
+        Detect instance norm pattern with channel last data format
+        x = transpose(x) # channel first to channel last, NCHW -> NHWC
+        x = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
+        x = transpose(x) # channel last to channel first, NHWC -> NCHW
+
+        The input is rank 4 (N, C, H, W) and the input for the fused "instance_norm" op is
+        rank 4 (N, H, W, C), and axis=[1, 2] or [-3, -2], along which reduction happens.
+
+        This is common in TensorFlow models when the data format is channel last.
+        PyMIL inserts transposes around the "conv" layer to make it channel first.
+        The "fuse_layernorm_or_instancenorm" pass is expected to fuse this pattern as well.
+        """
+        shape = (1, 3, 5, 5)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+            x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
+            x2 = mb.sub(x=x, y=x1)
+            x2 = mb.square(x=x2)
+            x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
+            x2 = mb.add(x=x2, y=1e-5)
+            x2 = mb.rsqrt(x=x2)
+            x3 = mb.mul(x=np.random.rand(1, 1, 1, shape[1]), y=x2)
+            x4 = mb.mul(x=x3, y=x1)
+            x5 = mb.mul(x=x, y=x3)
+            x4 = mb.sub(x=np.random.rand(1, 1, 1, shape[1]), y=x4)
+            x6 = mb.add(x=x4, y=x5)
+            y = mb.transpose(x=x6, perm=[0, 3, 1, 2])
+            return y
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "transpose",
+            "reduce_mean",
+            "sub",
+            "square",
+            "reduce_mean",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+            "transpose",
+        ]
+        assert get_op_types_in_program(prog) == [
+            "transpose",
+            "transpose",
+            "instance_norm",
+            "transpose",
+            "transpose",
+        ]
+        assert_model_is_valid(
+            prog,
+            {"x": shape},
+            expected_output_shapes={block.outputs[0].name: shape},
+        )
+        # the reduce_transposes pass should remove the extra transposes
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+        assert get_op_types_in_program(prog) == ["instance_norm"]
+        assert_model_is_valid(
+            prog,
+            {"x": shape},
+            expected_output_shapes={block.outputs[0].name: shape},
+        )
+
+    def test_instance_norm_pattern_2(self):
+        """
+        Detect instance norm pattern 2 and fusion.
+ + |----> sub0 ----| const (0.5) + | ^ | | + | | V V + x ---> mean0 square --> mean1 --> add_eps ---> pow const_gamma const_beta + | | | | | + | V V V V + |----> sub1 --------------------------------> real_div --> mul_gamma --> add_beta --> ... + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + mean0 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + sub0 = mb.sub(x=x, y=mean0) + sub1 = mb.sub(x=x, y=mean0) + square = mb.square(x=sub0) + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True) + add_eps = mb.add(x=mean1, y=1e-5) # epsilon + pow = mb.pow(x=add_eps, y=0.5) + div = mb.real_div(x=sub1, y=pow) + mul_gamma = mb.mul(x=np.random.rand(1, shape[1], 1, 1), y=div) # + add_beta = mb.add(x=np.random.rand(1, shape[1], 1, 1), y=mul_gamma) + return add_beta + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "sub", + "square", + "reduce_mean", + "add", + "pow", + "real_div", + "mul", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_3(self): + """ + Detect and fuse instance norm pattern 3 (pattern in TensorFlow-Addons). + + |-------------------------------------------------| + | | + | V + x --> mean square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----| | | + | V V + |--------------------------------------------> mul1 -------------> add --> ... + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + mean0 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + sub = mb.sub(x=x, y=mean0) + square = mb.square(x=sub) + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True) + add_eps = mb.add(x=mean1, y=1e-5) # epsilon + rsqrt = mb.rsqrt(x=add_eps) + mul1 = mb.mul(x=rsqrt, y=x) + mul2 = mb.mul(x=mean0, y=rsqrt) + mul_sub = mb.mul(x=mul2, y=-1.0) + add = mb.add(x=mul1, y=mul_sub) + return add + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_4(self): + """ + Detect and fuse instance norm pattern 4. + + |-----------| + | V + |------> mul_square1 -----> sum1 -----> mul_mean1 + | | + | V + x --> sum --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |----------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] 
+        |                            |                                ^
+        |                            |--------------------------------|
+        """
+        shape = (3, 5, 6, 7)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            mul_square1 = mb.mul(x=x, y=x)
+            sum = mb.reduce_sum(x=x, axes=[2, 3], keep_dims=True)
+            mul_mean = mb.mul(x=sum, y=3.3333334e-05)  # dummy value here
+            mul_square = mb.mul(x=mul_mean, y=mul_mean)
+            sum1 = mb.reduce_sum(x=mul_square1, axes=[2, 3], keep_dims=True)
+            mul_mean1 = mb.mul(x=sum1, y=8.333333e-06)  # dummy value here
+            sub_variance = mb.sub(x=mul_mean1, y=mul_square)
+            add_eps = mb.add(x=sub_variance, y=1e-5)  # epsilon
+            rsqrt = mb.rsqrt(x=add_eps)
+            mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, shape[1], 1, 1))
+            mul1 = mb.mul(x=mul_gamma, y=x)
+            mul2 = mb.mul(x=mul_mean, y=mul_gamma)
+            sub_beta = mb.sub(x=np.random.rand(1, shape[1], 1, 1), y=mul2)
+            add = mb.add(x=mul1, y=sub_beta)
+            return add
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "mul",
+            "reduce_sum",
+            "mul",
+            "mul",
+            "reduce_sum",
+            "mul",
+            "sub",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+        ]
+        assert get_op_types_in_program(prog) == ["instance_norm"]
+        assert_model_is_valid(
+            prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
+        )
+
+
+class TestFuseLinearBias:
+    @staticmethod
+    def _apply_transform(inputs, func, is_first_input, has_bias):
+        """
+        Utility function to test the weight/bias transform function in the linear bias fusion pass.
+        """
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(3, 4))])
+        def prog(x):
+
+            if has_bias:
+                linear = mb.linear(
+                    x=x,
+                    weight=inputs["linear_weight"],
+                    bias=inputs["linear_bias"],
+                )
+            else:
+                linear = mb.linear(
+                    x=x,
+                    weight=inputs["linear_weight"],
+                )
+
+            if is_first_input:
+                kwargs = {
+                    "x": linear,
+                    "y": inputs["bias"],
+                }
+            else:
+                kwargs = {
+                    "x": inputs["bias"],
+                    "y": linear,
+                }
+
+            x = func(**kwargs)
+            return x
+
+        apply_pass_and_basic_check(
+            prog,
+            "common::fuse_linear_bias",
+        )
+
+        # get the updated weight from the prog
+        linear_op = []
+        for op in prog["main"].operations:
+            if op.op_type == "const":
+                continue
+            linear_op.append(op)
+        assert len(linear_op) == 1, "should only have one linear layer."
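+
+        # Return the fused parameters so callers can verify the folding algebra
+        # (cf. the expected values in test_transform_linear below):
+        #   add:                       W' = W,  b' = b_linear + bias
+        #   sub, linear as 1st input:  W' = W,  b' = b_linear - bias
+        #   sub, linear as 2nd input:  W' = -W, b' = bias - b_linear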
+ + return linear_op[0].weight.val, linear_op[0].bias.val + + @pytest.mark.parametrize( + "op_type, is_first_input, has_bias, broadcast", + itertools.product( + ["add", "sub"], + [True, False], + [True, False], + [True, False], + ), + ) + def test_transform_linear(self, op_type, is_first_input, has_bias, broadcast): + """ + Test the weight / bias transform function in the linear bias fusion pass + """ + weight = np.reshape(np.arange(8), (2, 4)).astype(np.float32) + linear_bias = ( + np.array([1, 2]).astype(np.float32) if has_bias else np.array([0, 0]).astype(np.float32) + ) + bias = np.array([3, 4]).astype(np.float32) + if broadcast: + bias = np.reshape(bias, (1, 2)) + + inputs = { + "linear_weight": weight, + "linear_bias": linear_bias, + "bias": bias, + } + + if op_type == "add": + func = mb.add + elif op_type == "sub": + func = mb.sub + + new_weight, new_bias = self._apply_transform( + inputs, + func, + is_first_input, + has_bias, + ) + if broadcast: + bias = np.reshape(bias, (2,)) + + if op_type == "sub" and not is_first_input: + expected_weight = -weight + else: + expected_weight = weight + + if op_type == "sub": + if is_first_input: + expected_bias = linear_bias - bias + else: + expected_bias = bias - linear_bias + else: + expected_bias = linear_bias + bias + + np.testing.assert_almost_equal(new_weight, expected_weight) + np.testing.assert_almost_equal(new_bias, expected_bias) + + @pytest.mark.parametrize( + "rank, op_type, is_first_input, broadcast, backend", + itertools.product([1, 2, 3], ["add", "sub"], [True, False], [True, False], backends), + ) + def test_linear_bias_fusion(self, rank, op_type, is_first_input, broadcast, backend): + """ + Input graph: + Const + | + V + input -----> linear -----> add/sub ---> out + + Output graph: + input -----> linear ----> out + """ + input_shape = [1, 2, 3] + input_shape = input_shape[-rank:] + input_shape = tuple(input_shape) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + linear_weight = np.reshape(np.arange(6), (2, 3)).astype(np.float32) + linear_bias = np.array([1.0, 2.0]) + bias = np.array([3.0, 4.0]) + if broadcast: + if rank >= 2: + bias = np.reshape(bias, (1, 2)) + + x = mb.linear( + x=x, + weight=linear_weight, + bias=linear_bias, + ) + + func = mb.add if op_type == "add" else mb.sub + if is_first_input: + kwargs = { + "x": x, + "y": bias, + } + else: + kwargs = { + "x": bias, + "y": x, + } + x = func(**kwargs) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_linear_bias") + + assert get_op_types_in_program(prev_prog) == ["linear", op_type] + assert get_op_types_in_program(prog) == ["linear"] + + # validate graph pass + output_shape = [1, 2, 2] + output_shape = tuple(output_shape[-rank:]) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestFuseMatmulWeightBias: + def test_fuse_matmul_weight_bias(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + weights_val = np.random.rand(2, 4).T.astype(np.float32) + weights = mb.const(val=weights_val) + bias_val = np.random.rand(2).astype(np.float32) + bias = mb.const(val=bias_val) + + matmul = mb.matmul(x=x, y=weights) + return mb.add(x=matmul, y=bias) + + assert_op_count_match(prog, expect=1, op="matmul") + assert_op_count_match(prog, expect=0, op="linear") + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::fuse_matmul_weight_bias"](prog) + assert_same_output_names(prev_prog, 
prog)
+        assert_op_count_match(prog, expect=0, op="matmul")
+        assert_op_count_match(prog, expect=1, op="linear")
+
+        if _VALIDATE_MODEL:
+            assert_model_is_valid(prog, {"x": (2, 4)})
+
+
+class TestCompressionGraphPass:
+    """
+    Most of the numerical tests are already covered in coremltools.tests.ml_program.test_compression_utils.
+    This test checks the basic behavior of the graph pass classes.
+    """
+
+    @staticmethod
+    def _get_conv_program():
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 30, 10, 10))], opset_version=ct.target.iOS16
+        )
+        def prog(x):
+            conv_weight = np.random.rand(90, 30, 2, 2).astype(np.float32)
+            x = mb.conv(x=x, weight=conv_weight)
+            return x
+
+        return prog
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_affine_quantizer(self, fake_compression):
+        quantizer = quantization.WeightAffineQuantizer(
+            fake_compression=fake_compression, op_selector=lambda const: True
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_affine_dequantize", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_weight_sparsifier(self, fake_compression):
+        quantizer = quantization.WeightSparsifier(
+            fake_compression=fake_compression,
+            op_selector=lambda const: True,
+            mode="percentile_based",
+            target_percentile=0.75,
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_sparse_to_dense", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_weight_palettization(self, fake_compression):
+        quantizer = quantization.WeightPalettizer(
+            fake_compression=fake_compression,
+            op_selector=lambda const: True,
+            mode="uniform",
+            nbits=4,
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_lut_to_dense", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "axis, mode, source_dtype, target_dtype, data_range",
+        itertools.product(
+            [0, 1, 2, 3, -1],
+            ["linear", "linear_symmetric"],
+            [np.float16, np.float32],
+            [types.uint8, types.int8],
+            [
+                [-1., 1.],
+                [-3., -1.],
+                [1., 3.],
+                # Test corner case of same values
+                [0., 0.],
+                [1., 1.],
+                [-1., -1.],
+            ]
+        ),
+    )
+    def test_affine_quantizer_compression(self, axis, mode, source_dtype, target_dtype, data_range):
+        input_shape = (10, 20, 30, 40)
+        low, high = data_range
+        val = np.random.uniform(low, high, input_shape).astype(source_dtype)
+
+        params = quantization.WeightAffineQuantizer.compress(val, axis, mode, target_dtype)
+        decompressed_val = quantization.WeightAffineQuantizer.decompress(params)
+
+        np.testing.assert_allclose(val, decompressed_val, rtol=1e-02, atol=1e-02)
+
+    @pytest.mark.parametrize(
+        "mode, nbits, shape",
+        itertools.product(
+            ["KMEANS", "UNIFORM", "UNIQUE"],
+            [1, 2, 4, 6, 8],
+            [
+                (1,),
+                (1, 1),
+                (1, 10),
+                (2, 20),
+                (3, 7, 9),
+                (17, 17, 17),
+            ]
+        ),
+    )
+    def test_palettizer_compression(self, mode, nbits, shape):
+        val_size = np.prod(shape)
+        max_val = 2 ** nbits
+        val = np.arange(max_val).tolist()
+        val = np.array(val * (val_size // max_val + 1))[:val_size].astype(np.float32)
+        params = quantization.WeightPalettizer.compress(val, mode=mode, nbits=nbits)
+        decompressed_val = quantization.WeightPalettizer.decompress(params)
+
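+        # `val` tiles the integers 0 .. 2**nbits - 1 across the tensor, so it has at
+        # most 2**nbits distinct values -- exactly what an nbits-wide look-up table holds.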
+
+        # For
+        # 1. UNIQUE / KMEANS mode
+        # 2. UNIFORM mode with the data range <= tensor size
+        # we can perfectly reconstruct the original value
+        if (mode in ["UNIQUE", "KMEANS"]) or (mode == "UNIFORM" and max_val <= val_size):
+            np.testing.assert_allclose(val, decompressed_val, rtol=1e-02, atol=1e-02)
+
+class TestFP16CastTransform(unittest.TestCase):
+    """
+    Input graph:
+        input -----> square -----> out
+
+    Output graph:
+        input -----> cast(dtype="fp16") -----> square -----> cast(dtype="fp32") ---> out
+    """
+
+    def test_single_input_to_single_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        self.assertEqual(get_op_types_in_program(prog), ["square"])
+
+        apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast"])
+
+        # Asserting first cast configuration
+        cast_1 = block.find_ops(op_type="cast")[0]
+        self.assertEqual(cast_1.dtype.val, "fp16")
+        self.assertEqual(len(cast_1.outputs), 1)
+        self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
+        self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")
+
+        # Asserting second cast configuration
+        cast_2 = block.find_ops(op_type="cast")[1]
+        self.assertEqual(cast_2.dtype.val, "fp32")
+        self.assertEqual(len(cast_2.outputs), 1)
+        self.assertEqual(len(cast_2.outputs[0].child_ops), 0)
+
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 20)},
+            expected_output_shapes={block.outputs[0].name: (10, 20)},
+        )
+
+    """
+    Input graph:
+        input -----> div -----> out
+                      ^
+        const(eps) ---|
+
+    Output graph:
+        input --------> cast(dtype="fp16") -----> div -----> cast(dtype="fp32") ---> out
+                                                   ^
+        const(eps) ---> cast(dtype="fp16") --------|
+    """
+
+    def test_divide_by_zero_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            eps = mb.const(val=1e-10)
+            x = mb.real_div(x=x, y=eps)
+            return x
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+
+        mlmodel = ct.convert(prog, source="milinternal", compute_units=ct.ComputeUnit.CPU_ONLY)
+        input_dict = {"x": np.random.rand(10, 20)}
+
+        if _IS_MACOS:
+            prediction = mlmodel.predict(input_dict)
+            assert not np.isnan(prediction["real_div_0"]).any()
+            assert np.isfinite(prediction["real_div_0"]).all()
+
+    """
+    Input graph:
+        input1 ----->|
+                     concat -----> out
+        input2 ----->|
+
+    Output graph:
+        input1 -----> cast(dtype="fp16") ----->|
+                                               concat -----> cast(dtype="fp32") ---> out
+        input2 -----> cast(dtype="fp16") ----->|
+
+    """
+
+    def test_multiple_inputs_to_single_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20)), mb.TensorSpec(shape=(10, 20))])
+        def prog(x, y):
+            x = mb.concat(values=(x, y), axis=0)
+            return x
+
+        self.assertEqual(get_op_types_in_program(prog), ["concat"])
+
+        apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        self.assertEqual(get_op_types_in_program(prog), ["cast", "cast", "concat", "cast"])
+
+        # Asserting first cast configuration
+        cast_1 = block.find_ops(op_type="cast")[0]
+        self.assertEqual(cast_1.dtype.val, "fp16")
+        self.assertEqual(len(cast_1.outputs), 1)
+        self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
+
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "concat") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp16") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 1) + self.assertEqual(cast_2.outputs[0].child_ops[0].op_type, "concat") + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20), "y": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (20, 20)}, + ) + + """ + Input graph: + |-----> output_1 + input -----> split + |-----> output_2 + + Output graph: + + |-----> cast(dtype="fp32") ---> output_1 + input -----> cast(dtype="fp16") -----> split + |-----> cast(dtype="fp32") ---> output_2 + + """ + + def test_multiple_outputs_from_single_operation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.split(x=x, axis=0, num_splits=2) + return x + + self.assertEqual(get_op_types_in_program(prog), ["split"]) + + apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + self.assertEqual(get_op_types_in_program(prog), ["cast", "split", "cast", "cast"]) + + # Asserting first cast configuration + cast_1 = block.find_ops(op_type="cast")[0] + self.assertEqual(cast_1.dtype.val, "fp16") + self.assertEqual(len(cast_1.outputs), 1) + self.assertEqual(len(cast_1.outputs[0].child_ops), 1) + self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "split") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp32") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 0) + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (5, 20), block.outputs[1].name: (5, 20)}, + ) + + """ + Input graph: + + |----> square ---> output_1 + input| + |----> relu ---> output_2 + + Output graph: + + |---->square-----> cast(dtype="fp32") ---> output_1 + input -----> cast(dtype="fp16") + |----> relu -----> cast(dtype="fp32") ---> output_2 + + """ + + def test_single_input_to_multiple_operations(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + y = mb.square(x=x) + z = mb.relu(x=x) + return y, z + + self.assertEqual(get_op_types_in_program(prog), ["square", "relu"]) + + apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast", "relu", "cast"]) + + # Asserting first cast configuration + cast_1 = block.find_ops(op_type="cast")[0] + self.assertEqual(cast_1.dtype.val, "fp16") + self.assertEqual(len(cast_1.outputs), 1) + self.assertEqual(len(cast_1.outputs[0].child_ops), 2) + self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square") + 
self.assertEqual(cast_1.outputs[0].child_ops[1].op_type, "relu") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp32") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 0) + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + }, + ) + + def test_duplicate_output_vars(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2))]) + def prog(x): + relu1 = mb.relu(x=x) + return relu1, relu1 + + _, _, block = apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + self.assertEqual(get_op_types_in_program(prog), ["cast", "relu", "cast"]) + + assert_model_is_valid( + prog, + {"x": (1, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 2), block.outputs[1].name: (1, 2)}, + backend=("mlprogram", "fp16"), + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py new file mode 100644 index 00000000..c42a5b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py @@ -0,0 +1,1967 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol +from coremltools.converters.mil.mil.passes.defs.optimize_repeat_ops import TransformAxisUpdateOps +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + get_op_types_in_program, +) + +np.random.seed(1984) + + +class TransposeOptimizationPass(unittest.TestCase): + """ + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ---> out + + Output graph: + input -----> identity -----> out + """ + + def test_simple_consecutive_ops_fusion_direct_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.transpose(x=x, perm=[1, 0]) + x = mb.transpose(x=x, perm=[1, 0]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["identity"]) + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ----> relu ---> out + + Output graph: + input -----> relu -----> out + """ + + def test_simple_consecutive_ops_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.transpose(x=x, perm=[1, 0]) + x = mb.transpose(x=x, 
perm=[1, 0]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "transpose", "relu"]) + self.assertEqual(get_op_types_in_program(prog), ["relu"]) + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->log--->transpose(axis=[0,2,3,1])--->relu--->out + + Output graph: + input----->relu----->log----->relu--->out + """ + + def test_linear_graph_two_op_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.log(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "log", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "log", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->identity--->transpose(axis=[0,2,3,1])--->relu--->out + + Output graph: + input----->relu----->identity----->relu--->out + """ + + def test_linear_graph_two_op_fusion_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.identity(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "identity", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "identity", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input(shape=1,2,3,4)---->transpose(axis=[0,3,1,2])---->relu---->log--->transpose(axis=[0,2,3,1])--->relu--->out1(shape=1,2,3,4) + | + v + out2(shape=1,4,2,3) + + Output graph: + input(shape=1,2,3,4)---->relu---->log--->relu--->out1(shape=1,2,3,4) + | + |----->transpose(axis=[0,3,1,2])----->out2(shape=1,4,2,3) + """ + + def test_fusion_with_output_edge_inbetween(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x1 = mb.relu(x=x) + x2 = mb.log(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 2, 3, 1]) + x4 = mb.relu(x=x3) + return x4, x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "log", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "log", "relu", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={ + block.outputs[0].name: (1, 2, 3, 4), + block.outputs[1].name: (1, 4, 2, 3), + }, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->transpose(axis=[0,2,3,1])--->out + + Output graph: + input----->relu----->out + """ + + def test_linear_graph_two_op_fusion_with_last_op_removal(self): + 
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "relu", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input(shape=10,2,3)--->transpose(axis=[0,2,1])----->relu---->transpose(axis=[0,2,1])---->out1 + | + | + --->relu----->log---->transpose(axis=[0,2,1])---->out2 + + Output graph: + input(shape=10,2,3)----->relu---->out1 + | + | + --->relu----->log---->out2 + """ + + def test_multiple_fusions(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 1]) + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + y1 = mb.transpose(x=x1, perm=[0, 2, 1]) + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 1]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "relu", "transpose", "log", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "log"]) + + assert prev_block.inputs["x"] == prev_block.find_ops(op_type="transpose")[0].inputs["x"] + assert block.find_ops(op_type="log")[0].outputs[0] in block.outputs + assert_model_is_valid( + prog, + {"x": (10, 2, 3)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3), + block.outputs[1].name: (10, 2, 3), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,3,1])----->relu---->pool----->out1 + | + | + --->relu----->log---->transpose(axis=[0,3,1,2])---->out2 + + + Output graph: + input(shape=10,2,3,5)----->relu---->transpose(axis=[0,2,3,1])---->pool----->out1 + | + | + --->relu----->log---->out2 + """ + + def test_partial_fusion_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + y1 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "relu", "avg_pool", "log", "transpose"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "transpose", "avg_pool", "log"], + ) + + assert prev_block.inputs["x"] == prev_block.find_ops(op_type="transpose")[0].inputs["x"] + assert block.find_ops(op_type="log")[0].outputs[0] == block.outputs[1] + assert ( + block.find_ops(op_type="transpose")[0].outputs[0] + == block.find_ops(op_type="avg_pool")[0].inputs["x"] + ) + assert list(block.find_ops(op_type="transpose")[0].perm.val) == [0, 2, 3, 1] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 3, 5, 2), + block.outputs[1].name: (10, 2, 3, 5), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,1,3])----->relu---->transpose(axis=[0,2,1,3])---->out1 + | + | + --->pool--->log---->transpose(axis=[0,2,1,3])---->out2 + + Output graph: + 
input(shape=10,2,3,5)----->relu---->out1
+                |
+                |
+                --->transpose(axis=[0,2,1,3])---->pool----->log---->transpose(axis=[0,2,1,3])---->out2
+    """
+
+    def test_partial_fusion_1(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))])
+        def prog(x):
+            x = mb.transpose(x=x, perm=[0, 2, 1, 3])
+            x1 = mb.relu(x=x)
+            x2 = mb.avg_pool(x=x, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+            y1 = mb.transpose(x=x1, perm=[0, 2, 1, 3])
+            x3 = mb.log(x=x2)
+            y2 = mb.transpose(x=x3, perm=[0, 2, 1, 3])
+            return y1, y2
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+        self.assertEqual(
+            get_op_types_in_program(prev_prog),
+            ["transpose", "relu", "avg_pool", "transpose", "log", "transpose"],
+        )
+        self.assertEqual(
+            get_op_types_in_program(prog),
+            ["relu", "transpose", "avg_pool", "log", "transpose"],
+        )
+
+        assert block.inputs["x"] == block.find_ops(op_type="relu")[0].inputs["x"]
+        assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0]
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 2, 3, 5)},
+            expected_output_shapes={
+                block.outputs[0].name: (10, 2, 3, 5),
+                block.outputs[1].name: (10, 2, 3, 5),
+            },
+        )
+
+    """
+    Input graph:
+
+                          |-------> transpose(axis=[0,2,1,3]) ---->out1(shape=10,2,3,5)
+                          |
+    input(shape=10,2,3,5)-->relu-->transpose(axis=[0,2,1,3])--->relu--->transpose(axis=[0,2,1,3]) ---->out2(shape=10,2,3,5)
+                                                                  |
+                                                                  |----->pool--------------->out3(shape=10,3,2,5)
+                                                                  |
+                                                                  |----->pool--------------->out4(shape=10,3,2,5)
+
+
+    Output graph:
+
+              |---->out1(shape=10,2,3,5)
+              |
+    input---->relu---------->relu------->out2(shape=10,2,3,5)
+                              |
+                              |----->transpose(axis=[0,2,1,3])--->pool---->out3(shape=10,3,2,5)
+                              |
+                              |----->transpose(axis=[0,2,1,3])---->pool--->out4(shape=10,3,2,5)
+    """
+
+    def test_partial_fusion_2(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))])
+        def prog(x):
+            x = mb.relu(x=x)
+            x = mb.transpose(x=x, perm=[0, 2, 1, 3])
+            y1 = mb.transpose(x=x, perm=[0, 2, 1, 3])
+            x1 = mb.relu(x=x)
+            y2 = mb.transpose(x=x1, perm=[0, 2, 1, 3])
+            y3 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+            y4 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+            return y1, y2, y3, y4
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+        self.assertEqual(
+            get_op_types_in_program(prev_prog),
+            [
+                "relu",
+                "transpose",
+                "transpose",
+                "relu",
+                "transpose",
+                "avg_pool",
+                "avg_pool",
+            ],
+        )
+        self.assertEqual(
+            get_op_types_in_program(prog),
+            ["relu", "relu", "transpose", "avg_pool", "transpose", "avg_pool"],
+        )
+
+        assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0]
+        assert block.outputs[1] == block.find_ops(op_type="relu")[1].outputs[0]
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 2, 3, 5)},
+            expected_output_shapes={
+                # Two consecutive relus are merged, so the first two outputs have the same name. See
+                # `test_name_change_depend_on_output` in TestMergeConsecutiveRelus.
+                block.outputs[1].name: (10, 2, 3, 5),
+                block.outputs[2].name: (10, 3, 2, 5),
+                block.outputs[3].name: (10, 3, 2, 5),
+            },
+            # rdar://100243127 ([PyTorch] Duplicate Output Tensor Doesn't work for neuralnetwork).
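+            # (The duplicate-name outputs produced by the merged relus do not work on the
+            # neuralnetwork backend, hence the explicit mlprogram/fp16 backend below.)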
+ backend=("mlprogram", "fp16"), + ) + + """ + Input graph: + + input(shape=10,2,3,5)-->relu--->transpose(axis=[0,2,1,3])----->transpose(axis=[0,2,1,3])---->out1(shape=10,2,3,5) + | + ---->relu------>out2(shape=10,3,2,5) + + Output graph: + + input(shape=10,2,3,5)-->relu---->out1(shape=10,2,3,5) + | + ---->relu--->transpose(axis=[0,2,1,3])------>out2(shape=10,3,2,5) + """ + + def test_partial_fusion_3(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x2 = mb.relu(x=x) + return x1, x2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["relu", "transpose", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "transpose"]) + + assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + block.outputs[1].name: (10, 3, 2, 5), + }, + ) + + """ + Input graph: + + input(shape=10,2,3,5)-->relu--->transpose(axis=[0,2,1,3])----->transpose(axis=[0,2,1,3])---->out1(shape=10,2,3,5) + | + ------>out2(shape=10,3,2,5) + + Output graph: + same as input graph as one of the optimizing transpose is connected to model output + """ + + def test_partial_fusion_4(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + out2 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + out1 = mb.transpose(x=out2, perm=[0, 2, 1, 3]) + return out1, out2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["relu", "transpose", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["relu", "transpose", "transpose"]) + + assert block.outputs[1] == block.find_ops(op_type="transpose")[0].outputs[0] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + block.outputs[1].name: (10, 3, 2, 5), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)-->relu-->transpose(axis=[0,2,1,3])--->relu--->transpose(axis=[0,2,1,3]) ---->out1(shape=10,2,3,5) + | + |--->relu-->pool--------------->out2(shape=10,3,2,5) + | + |----->pool--------------->out3(shape=10.3.2.5) + + + Output graph: + same as the input graph as materialization ops are greater than cancel ops + """ + + def test_no_fusion_more_materialization_ops(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.relu(x=x) + y2 = mb.transpose(x=x1, perm=[0, 2, 1, 3]) + x2 = mb.relu(x=x1) + y3 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + y4 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return y2, y3, y4 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["relu", "transpose", "relu", "transpose", "relu", "avg_pool", "avg_pool"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "transpose", "relu", "transpose", "relu", "avg_pool", "avg_pool"], + ) + + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + 
block.outputs[1].name: (10, 3, 2, 5), + block.outputs[2].name: (10, 3, 2, 5), + }, + ) + + """ + Input graph: + input(shape=10,2,3)--->transpose(axis=[0,2,1])----->relu---->transpose(axis=[0,2,1])---->out1 + | + | + --->reduce(axis=2)----->log---->transpose(axis=[0,2,1])---->out2 + + Output graph: + input(shape=10,2,3)----->relu---->out1 + | + | + --->reduce(axis=1)----->log---->out2 + """ + + def test_fusion_with_axis_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 1]) + x1 = mb.relu(x=x) + x2 = mb.reduce_mean(x=x, axes=[2], keep_dims=True) + y1 = mb.transpose(x=x1, perm=[0, 2, 1]) + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 1]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "reduce_mean", "transpose", "log", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "reduce_mean", "log"]) + + assert list(block.find_ops(op_type="reduce_mean")[0].inputs["axes"].val) == [1] + assert_model_is_valid( + prog, + {"x": (10, 2, 3)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3), + block.outputs[1].name: (10, 1, 3), + }, + ) + + """ + Input graph: + input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])--- + | + | + --->pad(pad=[0,0,0,0,1,2,3,4]) + | + |-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6) + + Output graph: + same as input graph, as transpose cannot be pushed through the pad op since "reflect" mode is only supported + along the last two axis + """ + + def test_fusion_with_pad_reflective_op_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect") + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1]) + return y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["transpose", "pad", "log", "transpose"]) + + assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [ + 0, + 0, + 0, + 0, + 1, + 2, + 3, + 4, + ] + assert_model_is_valid( + prog, + {"x": (11, 2, 3, 6)}, + expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)}, + ) + + """ + Input graph: + input(shape=11,2,3,6)--->transpose(axis=[0,1,3,2])--- + | + | + --->pad(pad=[0,0,0,0,1,2,3,4]) + | + |-->log--->transpose(axis=[0,1,3,2])-->out1(shape=11,2,10,9) + + Output graph: + input(shape=11,2,3,6)--->pad(pad=[0,0,0,0,3,4,1,2])-->log-->out1(shape=11,2,10,9) + """ + + def test_fusion_with_pad_reflective_op_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 1, 3, 2]) + x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect") + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 1, 3, 2]) + return y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["pad", "log"]) + + assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [ + 0, + 0, + 0, + 0, + 3, + 4, + 1, + 2, + ] + assert_model_is_valid( + prog, + 
{"x": (11, 2, 3, 6)}, + expected_output_shapes={block.outputs[0].name: (11, 2, 10, 9)}, + ) + + """ + Input graph: + input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])--- + | + | + --->pad(pad=[0,0,0,0,1,2,3,4]) + | + |-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6) + + Output graph: + input(shape=11,2,3,6)--->pad(pad=[0,0,1,2,3,4,0,0])-->log-->out1(shape=11,5,10,6) + """ + + def test_fusion_with_pad_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="constant", constant_val=3.0) + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1]) + return y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["pad", "log"]) + + assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [ + 0, + 0, + 1, + 2, + 3, + 4, + 0, + 0, + ] + assert_model_is_valid( + prog, + {"x": (11, 2, 3, 6)}, + expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)}, + ) + + """ + Input graph: + const(shape=2) + | + V + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + + Output graph: + const(shape=1,2,1,1) + | + V + input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5) + """ + + def test_fusion_with_add_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.add(x=x, y=np.array([10.0, 100.0])) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + const(scalar) + | + V + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + + Output graph: + const(scalar) + | + V + input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5) + """ + + def test_fusion_with_add_scalar_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.add(x=5.0, y=x) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + | ^ + | | + |---->relu---->transpose(axis=[0,2,3,1]) + + Output graph: + input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5) + | ^ + | | + |------>relu + """ + + def test_fusion_with_add_broadcastable_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x) + x2 = mb.transpose(x=x2, perm=[0, 
2, 3, 1]) + x3 = mb.add(x=x1, y=x2) + y = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "transpose", "add", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "add"]) + + assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert ( + block.find_ops(op_type="add")[0].inputs["y"] + == block.find_ops(op_type="relu")[0].outputs[0] + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + | ^ + | | + |----------------------->transpose(axis=[0,2,3,1]) + + Output graph: + input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5) + | ^ + | | + |--------- + """ + + def test_fusion_with_add_broadcastable_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x3 = mb.add(x=x1, y=x2) + y = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "add", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["y"] == block.inputs["x"] + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])--->relu------------ + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + """ + + def test_concat_pattern_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return x4 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + pool--->out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->transpose(axis=[0,2,3,1])---->pool--->out2(shape=1,5,5,2) + """ + + def 
test_concat_pattern_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + x5 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "transpose", + "relu", + "relu", + "concat", + "transpose", + "avg_pool", + ], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "concat", "transpose", "avg_pool"], + ) + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 5), + block.outputs[1].name: (1, 5, 5, 2), + }, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + relu--->out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->relu---->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2) + """ + + def test_concat_pattern_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + x5 = mb.relu(x=x2) + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "concat", "relu", "transpose"], + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 5), + block.outputs[1].name: (1, 5, 5, 2), + }, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2) + """ + + def test_concat_pattern_3(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return x4, x2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat", "transpose"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 
4, 5, 5), + block.outputs[1].name: (1, 5, 5, 2), + }, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + transpose(axis=[0,3,1,2]) -----> out2(shape=1,2,5,5) + + Output graph: + input(shape=1,2,5,5)---> relu---->concat(axis=1)----->out1(shape=1,4,5,5) + | ^ + | | + |------------------->relu-------->out2(shape=1,2,5,5) + """ + + def test_concat_pattern_4(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + x5 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "transpose", + "relu", + "relu", + "concat", + "transpose", + "transpose", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 5), + block.outputs[1].name: (1, 2, 5, 5), + }, + ) + + """ + Input graph: + constant(shape=[30,10,5]) + | + V + input(shape=10,20,30)--->transpose(axis=[2,0,1])--->concat(axis=2)----->transpose(axis=[1,2,0])----->out1(shape=10,25,30) + + Output graph: + constant(shape=[10,5,30]) + | + V + input(shape=10,20,30)--->concat(axis=1)----->out1(shape=10,25,30) + """ + + def test_concat_pattern_5(self): + const = np.random.rand(30, 10, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[2, 0, 1]) + c = mb.const(val=const) + x2 = mb.concat(values=[x1, c], axis=2) + x3 = mb.transpose(x=x2, perm=[1, 2, 0]) + return x3 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "concat", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["concat"]) + + assert_model_is_valid( + prog, + {"x": (10, 20, 30)}, + expected_output_shapes={block.outputs[0].name: (10, 25, 30)}, + ) + + """ + Input graph: + input2(shape=30,10,20)-----| + | + input(shape=10,20,30)--->transpose(axis=[2,0,1])----->relu-----|----->concat(axis=2)------>out1(shape=90,10,20) + | | + |-->relu-----| + | + |-->relu---->transpose(axis=[1,2,0])---->out2(shape=10,20,30) + | + |-->relu---->transpose(axis=[1,2,0])---->out3(shape=10,20,30) + | + |-->relu---->transpose(axis=[1,2,0])---->out4(shape=10,20,30) + + Output graph: + + input2(shape=30,10,20)-----| + | + input(shape=10,20,30)----->relu--->transpose(axis=[2,0,1])-----|----->concat(axis=2)------>out1(shape=90,10,20) + | | + |-->relu--->transpose(axis=[2,0,1])-----| + | + |-->relu---->out2(shape=10,20,30) + | + |-->relu---->out3(shape=10,20,30) + | + |-->relu---->out4(shape=10,20,30) + + Output graph: + """ + + def test_concat_pattern_6(self): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(10, 20, 30)), + mb.TensorSpec(shape=(30, 10, 20)), + ] + ) + def prog(x, y): + x1 = mb.transpose(x=x, perm=[2, 0, 1]) + r1 = mb.relu(x=x1) + r2 = mb.relu(x=x1) + r3 = mb.relu(x=x1) + r4 = mb.relu(x=x1) + r5 = mb.relu(x=x1) + + x2 = mb.concat(values=[r1, r2, y], axis=0) + x3 = 
mb.transpose(x=r3, perm=[1, 2, 0]) + x4 = mb.transpose(x=r4, perm=[1, 2, 0]) + x5 = mb.transpose(x=r5, perm=[1, 2, 0]) + return x2, x3, x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "relu", + "relu", + "relu", + "relu", + "concat", + "transpose", + "transpose", + "transpose", + ], + ) + self.assertEqual( + get_op_types_in_program(prog), + [ + "relu", + "relu", + "relu", + "relu", + "relu", + "transpose", + "transpose", + "concat", + ], + ) + + assert_model_is_valid( + prog, + {"x": (10, 20, 30), "y": (30, 10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (90, 10, 20), + block.outputs[1].name: (10, 20, 30), + block.outputs[2].name: (10, 20, 30), + block.outputs[3].name: (10, 20, 30), + }, + ) + + """ + Input graph: + input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->split(axis=1, num_splits=2)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,3) + | + v + transpose(axis[0,3,2,1])-------------------------->out2(shape=1,4,5,3) + + Output graph: + input(shape=1,4,5,6)------> relu ---->split(axis=3)--->out1(shape=1,4,5,3) + | + v + out2(shape=1,4,5,3) + """ + + def test_split_nd_pattern_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 2, 1]) + x1 = mb.relu(x=x1) + x2, x3 = mb.split(x=x1, axis=1, num_splits=2) + x4 = mb.transpose(x=x2, perm=[0, 3, 2, 1]) + x5 = mb.transpose(x=x3, perm=[0, 3, 2, 1]) + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "split", "transpose", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "split"]) + + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 6)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 3), + block.outputs[1].name: (1, 4, 5, 3), + }, + ) + + self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3) + + """ + Input graph: + input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->splitd(axis=1, num_splits=6)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,3) + | + v + transpose(axis[0,3,2,1])-------------------------------------->out2(shape=1,4,5,3) + + Output graph: + input(shape=1,4,5,6)------>relu---->split(axis=3)--->out1(shape=1,4,5,3) + | + v + out2(shape=1,4,5,3) + """ + + def test_split_nd_pattern_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 2, 1]) + x1 = mb.relu(x=x1) + x2, x3, x4, x5, x6, x7 = mb.split(x=x1, axis=1, num_splits=6) + x2 = mb.transpose(x=x2, perm=[0, 3, 2, 1]) + x3 = mb.transpose(x=x3, perm=[0, 3, 2, 1]) + x4 = mb.transpose(x=x4, perm=[0, 3, 2, 1]) + x5 = mb.transpose(x=x5, perm=[0, 3, 2, 1]) + x6 = mb.transpose(x=x6, perm=[0, 3, 2, 1]) + x7 = mb.transpose(x=x7, perm=[0, 3, 2, 1]) + return x2, x3, x4, x5, x6, x7 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "split", + "transpose", + "transpose", + "transpose", + "transpose", + "transpose", + "transpose", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "split"]) + + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 6)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 1), + 
block.outputs[1].name: (1, 4, 5, 1), + block.outputs[2].name: (1, 4, 5, 1), + block.outputs[3].name: (1, 4, 5, 1), + block.outputs[4].name: (1, 4, 5, 1), + block.outputs[5].name: (1, 4, 5, 1), + }, + ) + + self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3) + + """ + Input graph: + input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])---> split(axis=1, num_splits=2) ----> concat(axis=1) ----->transpose(axis=[0,3,2,1]) ----->out1(shape=1,4,5,6) + | ^ + v | + relu() ---------------------- + + Output graph: + input(shape=1,4,5,6)------>split(axis=3)--->concat(axis=3) -------> out1(shape=1,4,5,6) + | ^ + v | + relu() -------------- + """ + + def test_split_nd_pattern_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 2, 1]) + x2, x3 = mb.split(x=x1, axis=1, num_splits=2) + x4 = mb.relu(x=x2) + x5 = mb.concat(values=[x4, x3], axis=1) + x6 = mb.transpose(x=x5, perm=[0, 3, 2, 1]) + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "split", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["split", "relu", "concat"]) + + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 5, 6)}, + ) + + self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3) + + """ + Input graph: + input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2]) + | + ---->relu-------------->transpose(axis=[0,2,3,1]) + | | + | V + | relu + | | + | V + | transpose(axis=[0,3,1,2]) + | | + | V + ----------------> add --------> relu---->pool---->out(shape=1,3,5,5) + + + Output graph: + + + input(shape=1,5,5,3)---->relu------------------------> relu + | | + | V + ----------------> add + | + V + relu + | + V + transpose(axis=[0,3,1,2])-->pool---->out(shape=1,3,5,5) + + """ + + def test_skip_connection_pattern_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x, y=x3) + x5 = mb.relu(x=x4) + x6 = mb.avg_pool(x=x5, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + "relu", + "transpose", + "add", + "relu", + "avg_pool", + ], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "add", "relu", "transpose", "avg_pool"], + ) + assert_model_is_valid( + prog, + {"x": (1, 5, 5, 3)}, + expected_output_shapes={block.outputs[0].name: (1, 3, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2]) + | + ---->relu-------------->transpose(axis=[0,2,3,1]) + | | + | V + | relu + | | + | V + | transpose(axis=[0,3,1,2]) + | | + | V + ----------------> add -->transpose(axis=[0,2,3,1]) + | + V + relu---->pool---->out(shape=1,5,5,3) + + + Output graph: + + + input(shape=1,5,5,3)---->relu------------------------> relu + | | + | V + ----------------> add + | + V + relu + | + V + pool---->out(shape=1,5,5,3) + + """ + + def test_skip_connection_pattern_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 
2]) + x = mb.relu(x=x) + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x, y=x3) + x4 = mb.transpose(x=x4, perm=[0, 2, 3, 1]) + x5 = mb.relu(x=x4) + x6 = mb.avg_pool(x=x5, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + "relu", + "transpose", + "add", + "transpose", + "relu", + "avg_pool", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "add", "relu", "avg_pool"]) + assert_model_is_valid( + prog, + {"x": (1, 5, 5, 3)}, + expected_output_shapes={block.outputs[0].name: (1, 5, 5, 3)}, + ) + + """ + Input graph: + input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1) + | | + | V + | transpose(axis=[1,0]) + | | + | V + -------------------------------------------->add------->out(shape=5,2) + + Output graph: + input(shape=2,5)--->reduce(axis=1)---->add---->transpose(axis=[1,0])--->out(shape=5,2) + | ^ + | | + ------------------------ + """ + + def test_residual_with_unmaterialized_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + t1 = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.reduce_mean(x=t1, axes=[1], keep_dims=True) + t2 = mb.transpose(x=x2, perm=[1, 0]) + return mb.add(x=x1, y=t2) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "reduce_mean", "transpose", "add"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reduce_mean", "add", "transpose"]) + + assert_model_is_valid( + prog, {"x": (2, 5)}, expected_output_shapes={block.outputs[0].name: (5, 2)} + ) + + """ + Input graph: + input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1) + | | + | V + | transpose(axis=[1,0]) + | | + | V + -------------------------------------------->add------->out1(shape=5,2) + | + V + relu------->out2(shape=5,2) + + Output graph: + input(shape=2,5)--->reduce(axis=1)----> add ----->transpose(axis=[1,0])----->out1(shape=5,2) + | | + | V + ---------------------> relu----->transpose(axis=[1,0])----->out2(shape=5,2) + """ + + def test_residual_with_unmaterialized_multiple_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + t1 = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.reduce_mean(x=t1, axes=[1], keep_dims=True) + t2 = mb.transpose(x=x2, perm=[1, 0]) + out1 = mb.add(x=x1, y=t2) + out2 = mb.relu(x=out1) + return out1, out2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "reduce_mean", "transpose", "add", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), ["reduce_mean", "add", "relu", "transpose", "transpose"] + ) + + assert_model_is_valid( + prog, + {"x": (2, 5)}, + expected_output_shapes={block.outputs[0].name: (5, 2), block.outputs[1].name: (5, 2)}, + ) + + """ + Input graph: + input(shape=2,5)---->transpose(axis=[1,0])------>relu----->transpose(axis=[1,0])------>out2(shape=2,5) + | + ------->out1(shape=5,2) + + Output graph: + input(shape=2,5)---->relu-----> out2(shape=2,5) + | + V + transpose(axis=[1,0]) 
-----> out1(shape=5,2) + """ + + def test_materialized_output_reuse(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + y1 = mb.relu(x=x1) + y2 = mb.transpose(x=y1, perm=[1, 0]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "transpose"]) + + assert_model_is_valid( + prog, + {"x": (2, 5)}, + expected_output_shapes={block.outputs[0].name: (5, 2), block.outputs[1].name: (2, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])------->add------------>transpose(axis=[0,3,1,2])--->out1(shape=1,2,5,5) + | ^ | + | | | + ---->relu ----->transpose(axis=[0,3,1,2])--->out2(shape=1,2,5,5) + + Output graph: + input(shape=1,2,5,5)----->add------->out1(shape=1,2,5,5) + | ^ | + | | | + |------>relu ------identity(renaming)---->out2(shape=1,2,5,5) + """ + + def test_fusion_with_double_outputs(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.add(x=x1, y=x2) + y1 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + y2 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "add", "transpose", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "add", "identity"]) + + assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert ( + block.find_ops(op_type="add")[0].inputs["y"] + == block.find_ops(op_type="relu")[0].outputs[0] + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + def test_pass_through_broadcasted_binary_op(self): + """ + Input graph: + const (shape=(1,1,1,3)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + const (shape=(1,1,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=np.array(np.ones(shape=(1, 1, 1, 3)))) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_constant_input(self): + """ + Input graph: + const (shape=(4,3)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + const (shape=(1,4,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=np.array(np.ones(shape=(4, 3)))) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = 
mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_non_constant_input1(self): + """ + Input graph: + input (shape=(3,)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,)) + | + reshape (shape=(1,1,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2)), mb.TensorSpec(shape=(3,))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 1, 3, 1) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2), "y": (3,)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_non_constant_input2(self): + """ + Input graph: + input (shape=(3,1,2)) + | + input (shape=(5,3,4,2)) --> transpose (shape=(4,3,5,2)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,1,2)) + | + reshape (shape=(1,3,1,2)) + | + input (shape=(5,3,4,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(5, 3, 4, 2)), mb.TensorSpec(shape=(3, 1, 2))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 3, 1, 2) + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 1, 2)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input3(self): + """ + Input graph: + input (shape=(3,1,2)) + | + input (shape=(s,3,4,2)) --> transpose (shape=(4,3,s,2)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,1,2)) + | + reshape (shape=(1,3,1,2)) + | + input (shape=(s,3,4,2)) --> add --> relu + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(get_new_symbol(), 3, 4, 2)), + mb.TensorSpec(shape=(3, 1, 2)), + ] + ) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + pass_name = "common::reduce_transposes" + PASS_REGISTRY[pass_name](prog) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 3, 1, 2) + block = 
prog.functions["main"] + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 1, 2)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input4(self): + """ + Input graph: + input (shape=(3,s,2)) + | + input (shape=(1,3,4,2)) --> transpose (shape=(4,3,1,2)) --> add --> transpose --> relu + + Output graph: same as input graph since the non-transpose input of the add op has symbolic shape + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 3, 4, 2)), + mb.TensorSpec(shape=(3, get_new_symbol(), 2)), + ] + ) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + pass_name = "common::reduce_transposes" + PASS_REGISTRY[pass_name](prog) + self.assertEqual(get_op_types_in_program(prog), ["transpose", "add", "transpose", "relu"]) + block = prog.functions["main"] + assert_model_is_valid( + prog, + {"x": (1, 3, 4, 2), "y": (3, 10, 2)}, + expected_output_shapes={block.outputs[0].name: (10, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input5(self): + """ + Input graph: + input (shape=(3,4)) + | + input (shape=(5,3,4,2)) --> transpose (shape=(5,2,3,4)) --> add --> transpose --> relu + + Output graph: same as input graph since transpose compliment for 2nd input of add cannot be represented + as a static reshape + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(5, 3, 4, 2)), mb.TensorSpec(shape=(3, 4))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "add", "transpose", "relu"], + ) + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 4)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_input_duplicate_output(self): + """ + Input graph: + input -----> out (consist of duplicated input) + + Output graph: + input -----> out (consist of duplicated input) + + Notice that a temp identity sink is added for all outputs, so the block before going through the pass is: + function[CoreML3](%x: (2, 2, 1, 1, fp32)(Tensor)) { + block0() { + %identity_0: (2, 2, 1, 1, fp32)(Tensor) = identity(x=%x, name="identity_0") + } -> (%identity_0, %identity_0) + } + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 2, 1, 1))]) + def prog(x): + return x, x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + + self.assertEqual(get_op_types_in_program(prev_prog), []) + self.assertEqual(get_op_types_in_program(prog), []) + assert_model_is_valid( + prog, + {"x": (2, 2, 1, 1)}, + backend=("mlprogram", "fp16"), + expected_output_shapes={ + block.outputs[0].name: (2, 2, 1, 1), + block.outputs[1].name: (2, 2, 1, 1), + }, + ) + + +class TestTransposePassUtilityMethods: + @staticmethod + @pytest.mark.parametrize("rank", [1, 2, 3, 4, 5]) + def test_transpose_compliment_method(rank): + x = np.random.rand(*np.random.randint(low=1, high=15, size=rank)) + perm = np.random.permutation(rank) + reverse_perm = TransformAxisUpdateOps._find_transpose_compliment(perm) + x_transpose = np.transpose(x, perm) + x_transpose_transpose = np.transpose(x_transpose, reverse_perm) + 
np.testing.assert_equal(x, x_transpose_transpose) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py new file mode 100644 index 00000000..39e67cf3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py @@ -0,0 +1,274 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +import sympy as _sm + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as _target +from coremltools.converters.mil.input_types import InputType +from coremltools.converters.mil.mil.var import ListVar +from coremltools.converters.mil.mil.ops.helper import _get_version_of_op + +from . import types +from .block import Function +from .types.symbolic import k_num_internal_syms, k_used_symbols +from .var import Var + + +class Program: + def __init__(self): + self.main_input_types = [] + self.main_output_types = None + self.functions = {} + self.parameters = {} + self.skip_all_passes = False + + def _get_max_opset_version_and_op(self): + max_opset_version = _target.iOS13 + op_with_max_opset_version = None + def update_max_opset_version_block(block): + nonlocal max_opset_version + nonlocal op_with_max_opset_version + for op in list(block.operations): + for b in op.blocks: + update_max_opset_version_block(b) + if not hasattr(op, "_op_variants") or not isinstance(op._op_variants, dict): + continue + if op.opset_version > max_opset_version: + max_opset_version = op.opset_version + op_with_max_opset_version = op + for func in self.functions.values(): + update_max_opset_version_block(func) + return max_opset_version, op_with_max_opset_version + + def _check_ops_version_compatibility(self, max_opset_version): + def check_version_compatibility_block(block): + for op in list(block.operations): + for b in op.blocks: + check_version_compatibility_block(b) + if not hasattr(op, "_op_variants") or not isinstance(op._op_variants, dict): + continue + expected_op_cls = _get_version_of_op(op._op_variants, max_opset_version) + if type(op) is not expected_op_cls: + msg = ( + "Op {} with an out of date version {!s} is detected. Please use @mb.program(input_specs=..., " + "opset_version={!s})" + ).format(op.op_type, op.opset_version, max_opset_version) + raise ValueError(msg) + for func in self.functions.values(): + check_version_compatibility_block(func) + + def _check_or_set_functions_opset_version(self, max_opset_version): + funcs = list(self.functions.values()) + for func in funcs: + if func.opset_version is None: + func.opset_version = max_opset_version + else: + if func.opset_version < max_opset_version: + msg = "function should have at least opset_version {!s}. Got {!s}".format(max_opset_version, func.opset_version) + raise ValueError(msg) + for func in funcs: + if func.opset_version != funcs[0].opset_version: + msg = "all functions must have the same opset_version. 
Got {!s} and {!s}.".format(func.opset_version, funcs[0].opset_version) + raise ValueError(msg) + + def _check_program_opset_version(self): + max_opset_version, _ = self._get_max_opset_version_and_op() + self._check_ops_version_compatibility(max_opset_version) + self._check_or_set_functions_opset_version(max_opset_version) + + def _check_invalid_tensor_rank(self): + ''' + Early error out for tensor with rank >= 6 + ''' + def _check_invalid_tensor_rank_block(block): + for op in block.operations: + for b in op.blocks: + _check_invalid_tensor_rank_block(b) + for o in op.outputs: + if not isinstance(o, ListVar) and (o.rank < 0 or o.rank >= 6): + raise ValueError( + f'Core ML only supports tensors with rank <= 5. Layer "{op.name}", ' + f'with type "{op.op_type}", outputs a rank {o.rank} tensor. ' + ) + for f in self.functions.values(): + _check_invalid_tensor_rank_block(f) + + def add_function(self, name, ssa_func): + if not isinstance(ssa_func, Function): + raise ValueError("Only Function can be added to Program.") + self.functions[name] = ssa_func + self._check_program_opset_version() + + def add_parameters(self, name, ssa_val): + raise NotImplementedError() + + def set_main_input_types(self, inputs): + if not isinstance(inputs, tuple): + raise ValueError("main inputs should be tuple of TensorType or ImageType") + elif not all([isinstance(inp, InputType) for inp in inputs]): + raise ValueError("main inputs should be tuple of InputSpec") + self.main_input_types = inputs + + def set_main_output_types(self, outputs=None): + if outputs is not None: + if not (isinstance(outputs, list) and all([isinstance(out, InputType) for out in outputs])): + raise TypeError("main outputs should be a list of type ct.TensorType or ct.ImageType") + self.main_output_types = outputs + + + def find_ops(self, prefix=None, op_type=None, exactly_one=False): + """ + Return list of ops with name matching `prefix` if specified, and + op_type, if specified. At least one of {prefix, op_type} must be + specified. + + If `exactly_one` == True, raise ValueError if we find <1 or >1 ops satisfying + the criteria. + + prefix: str + + Return list[Operation]. Empty list if no op satisfies. + """ + found_ops = [] + for f_name, f in self.functions.items(): + found_ops.extend(f.find_ops(prefix=prefix, op_type=op_type)) + if exactly_one and len(found_ops) != 1: + msg = "Found matching ops not exactly one. Found ops: {}" + raise ValueError(msg.format(found_ops)) + return found_ops + + def validate(self): + for f in self.functions.values(): + f.validate() + + def __getitem__(self, func_name): + if func_name not in self.functions: + msg = "Function {} not found in among functions {}." + raise KeyError(msg.format(func_name, self.functions.keys())) + return self.functions[func_name] + + def __repr__(self): + return self.__str__() + + def __str__(self): + s = "" + for f_name, f in self.functions.items(): + s += f.to_str(f_name) + return s + + +class Placeholder: + counter = 0 + + def __init__(self, sym_shape, dtype=None, name=None, allow_rank0_input=False): + """ + sym_shape: () or [] for scalar. list, tuple, np.ndarray for tensor. May + contain Symbol as symbolic shape (but not string). + + dtype: types.float or other scalar builtin types. + allow_rank0_input: A flag that allows the rank 0 placeholder. 
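+        Illustrative example (assuming the default fp32 dtype): Placeholder((1, 3))
+        describes a rank-2 tensor input, while Placeholder((), allow_rank0_input=True)
+        describes a scalar.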
+
+        """
+        if not isinstance(sym_shape, (list, tuple, _np.ndarray)):
+            raise ValueError("Illegal shape for Placeholder: {}".format(sym_shape))
+
+        if len(sym_shape) == 0:
+            if not allow_rank0_input:
+                raise ValueError('Rank-0 (input {}) is unsupported'.format(name))
+            else:
+                logger.warning(
+                    'Rank-0 (input {}) is unsupported in Core ML. You might run '
+                    'into errors while running this model.'.format(name)
+                )
+
+        for i, d in enumerate(sym_shape):
+            if not isinstance(d, (_np.generic, int, Symbol)):
+                msg = 'Placeholder dim {} in {} is not an integer or symbol'
+                raise ValueError(msg.format(i, sym_shape))
+        self.sym_shape = sym_shape
+        self.dtype = dtype
+        if self.dtype is None:
+            self.dtype = types.float
+        sym_type = self.type_inference()
+
+        # Globally unique var name for placeholders
+        if name is None:
+            name = 'placeholder_' + str(self.__class__.counter)
+            self.__class__.counter += 1
+
+        # List of output vars (consistent w/ other ops)
+        self.outputs = [Var(name, sym_type)]
+
+    def set_name(self, name):
+        self.name = name
+        self.outputs[0].name = name
+
+    def type_inference(self):
+        if len(self.sym_shape) == 0:
+            return self.dtype
+        return types.tensor(self.dtype, self.sym_shape)
+
+    def __str__(self):
+        return str(self.outputs[0])
+
+
+def get_new_variadic_symbol():
+    global k_num_internal_syms
+    s = Symbol("*is" + str(k_num_internal_syms))
+    k_num_internal_syms += 1
+    return s
+
+
+def get_new_symbol(name=None):
+    """
+    Returns a new symbol, optionally named.
+
+    name: str (optional)
+        Optional name that provides more readability. If the specified name is
+        not available, an extra integer will be appended.
+    """
+    global k_used_symbols
+    global k_num_internal_syms
+
+    if name is not None:
+        s = Symbol(name)
+        if s in k_used_symbols:
+            new_name = name + str(k_num_internal_syms)
+            msg = 'Symbol name "{}" already occupied. Renaming to {}'
+            logger.warning(msg.format(name, new_name))
+            s = Symbol(new_name)
+    else:
+        s = Symbol("is" + str(k_num_internal_syms))
+        k_num_internal_syms += 1
+    return s
+
+def get_existing_symbol(name):
+    global k_used_symbols
+    if name not in k_used_symbols:
+        msg = 'Symbol name {} does not exist'
+        raise ValueError(msg.format(name))
+    return k_used_symbols[name]
+
+
+class Symbol(_sm.Symbol):
+    def __init__(self, sym_name):
+        """
+        Essentially a sympy.Symbol representing an i32 value in a shape.
+
+        sym_name: str. If the first character is *, this symbol represents a
+        variadic rank. Otherwise the symbol name should start with an
+        alphabetic character. `sym_name` must be unique if specified, or it
+        will be auto-generated (to a non-variadic symbol). Furthermore,
+        sym_name may not start with 'is' (reserved for internal symbols).
+        """
+        if not (sym_name[0].isalpha() or sym_name[0] == "*"):
+            msg = "Symbol name must start with a letter or *. Got {}"
+            raise ValueError(msg.format(sym_name))
+        global k_used_symbols
+        if sym_name in k_used_symbols:
+            msg = "Symbol `{}` is used already."
+            raise ValueError(msg.format(sym_name))
+        k_used_symbols[sym_name] = self
+        self.name = sym_name
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py
new file mode 100644
index 00000000..af4a4e02
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py new file mode 100644 index 00000000..a4ccfe27 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py @@ -0,0 +1,495 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.tests.test_passes import TestSkipConstexprOps +from coremltools.converters.mil.testing_utils import ( + assert_same_output_names, + assert_same_output_shapes, + get_op_types_in_program, +) + +""" +Test manipulating variable and operations in the Block. + +In the test, we are actually testing Function, which is a child class of +Block. Technically Function should not inherit from Block, which is a +debt to be resolved in the future. + +Function has some different behaviors from Block that are irrelevant to +the core API being tested here. +""" + + +def test_empty_block(): + """ + Test an empty program + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + return x0 + + block = prog.functions["main"] + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_add_op(): + """ + Test add statement to an empty program, also change the output + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + return x0 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + x0 = block.inputs["x0"] + with block: + x1 = mb.log(x=x0) + block.set_outputs([x1]) + print("after:\n{}".format(prog)) + assert block.inputs["x0"] == block.find_ops(op_type="log")[0].inputs["x"] + assert len(block.operations) == 2 # const op for epsilon + log + assert block.operations[1].op_type == "log" + assert block.outputs[0] == x1 + + +def test_remove_op(): + """ + Test remove all ops and return empty program + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.log(x=x0) + return x1 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + assert len(block.operations) == 2 + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="log") + block.set_outputs([x0]) + block.remove_ops(ops) + print("after:\n{}".format(prog)) + assert len(block.operations) == 1 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_op2(): + """ + Test remove ops with multiple identical inputs + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + return x1 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="add") + block.set_outputs([x0]) + block.remove_ops(ops) + print("after:\n{}".format(prog)) + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + 
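+    # Both inputs of the removed add were the same var (x0), so remove_ops must
+    # drop both uses at once; the block then passes x0 straight through to the output.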
assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_duplicate_ops(): + """Test remove duplicated ops.""" + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + return x1 + + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="add") + duplicate_ops = ops + ops + block.set_outputs([x0]) + block.remove_ops(duplicate_ops) + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_duplicate_ops_not_affect_others(): + """ + Test remove duplicated ops doesn't affect other ops. We add another `add` op here, but keep + the input to remove_ops only restricted to the first `add` op. This test is for checking that + the second add op doesn't get removed. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + x2 = mb.add(x=x0, y=x0) + return x1, x2 + + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = [block.find_ops(op_type="add")[0]] + block.set_outputs([x0]) + block.remove_ops(ops) + # Deleting one add operation should not affect the other one. + assert len(block.operations) == 1 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + + +def test_remove_ops_fail_for_block_output(): + """Block's output cannot be removed.""" + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + x2 = mb.add(x=x0, y=x0) + return x1, x2 + + block = prog.functions["main"] + ops = block.find_ops(op_type="add") + expected_err_str = "cannot delete op add_.* with output 0: add_.* that's block block.*'s output" + with pytest.raises(ValueError, match=expected_err_str): + block.remove_ops(ops) + assert len(block.operations) == 2 + assert len(block.inputs) == 1 + assert len(block.outputs) == 2 + + +def test_op_removal_and_insertion(): + """ + Remove a transpose pair and materialize one transpose before another op + Given: + %x1 = transpose(%x) + %x2 = relu(%x1) + %out1 = avg_pool(%x2) + %x3 = transpose(%x2) + %out2 = log(%x3) + + After removing both transposes: + %x2 = relu(%x) + %out1 = avg_pool(%x2) + %out2 = log(%x2) + + After inserting a transpose: + %x2 = relu(%x) + %x4 = transpose(%x2) + %out1 = avg_pool(%x4) + %out2 = log(%x2) + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + out1 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + out2 = mb.log(x=x3) + return out1, out2 + + prev_prog = copy.deepcopy(prog) + + print("before:\n{}".format(prog)) + assert get_op_types_in_program(prog) == [ + "transpose", + "relu", + "avg_pool", + "transpose", + "log", + ] + block = prog.functions["main"] + + def remove_transpose(block): + op = block.find_ops(op_type="transpose")[0] + block.replace_uses_of_var_after_op( + anchor_op=op.inputs["x"].op, + old_var=op.outputs[0], + new_var=op.inputs["x"], + no_check_var_types=True, + ) + block.remove_ops([op]) + + with block: + # remove 1st transpose + remove_transpose(block) + assert get_op_types_in_program(prog) == ["relu", "avg_pool", "transpose", "log"] + + # remove 2nd transpose + remove_transpose(block) + assert get_op_types_in_program(prog) == ["relu", "avg_pool", "log"] + + print("after transpose ops removal:\n{}".format(prog)) + + # insert transpose before pool + pool_op = 
block.find_ops(op_type="avg_pool")[0]
+    with block:
+        y = mb.transpose(x=pool_op.inputs["x"], perm=[0, 2, 3, 1], before_op=pool_op)
+
+        block.replace_uses_of_var_after_op(
+            anchor_op=y.op,
+            end_op=pool_op,
+            old_var=pool_op.inputs["x"],
+            new_var=y,
+            no_check_var_types=True,
+        )
+
+    print("after transpose insertion:\n{}".format(prog))
+    assert get_op_types_in_program(prog) == ["relu", "transpose", "avg_pool", "log"]
+
+    for op in block.operations:
+        op.type_value_inference(overwrite_output=True)
+
+    assert_same_output_names(prev_prog, prog)
+    assert_same_output_shapes(prev_prog, prog)
+
+
+def test_replace_nonreplaceable_vars():
+    """
+    The conversion should error out if an invalid replacement is invoked on nonreplaceable vars
+    """
+    constexpr_op = "constexpr_sparse_to_dense"
+    @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2))])
+    def prog(x):
+        constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2))
+        return mb.add(x=x, y=constexpr)
+
+    block = prog.functions["main"]
+    constexpr_op = block.find_ops(op_type=constexpr_op)[0]
+
+    with block:
+        const = mb.const(val=np.random.rand(4, 2), before_op=constexpr_op)
+        expected_err_str = "might potentially be removed during the replacement of those vars."
+        with pytest.raises(ValueError, match=expected_err_str):
+            block.replace_uses_of_var_after_op(
+                anchor_op=constexpr_op,
+                old_var=constexpr_op.outputs[0],
+                new_var=const
+            )
+
+
+def test_replace_nonreplaceable_vars_force():
+    """
+    The conversion should not error out if replace_uses_of_var_after_op is executed with
+    force_replace=True. We also test that the new nonreplaceable_vars_upstream is propagated
+    after the code exits the `with block` context.
+    """
+    constexpr_op = "constexpr_sparse_to_dense"
+    @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2))])
+    def prog(x):
+        constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2))
+        return mb.add(x=x, y=constexpr)
+
+    block = prog.functions["main"]
+    constexpr_op = block.find_ops(op_type=constexpr_op)[0]
+    add_op = block.find_ops(op_type="add")[0]
+
+    assert len(add_op.outputs[0].nonreplaceable_vars_upstream) == 1
+
+    with block:
+        const = mb.const(val=np.random.rand(4, 2), before_op=constexpr_op)
+        block.replace_uses_of_var_after_op(
+            anchor_op=constexpr_op,
+            old_var=constexpr_op.outputs[0],
+            new_var=const,
+            force_replace=True,
+        )
+        block.remove_ops([constexpr_op])
+
+    assert len(add_op.outputs[0].nonreplaceable_vars_upstream) == 0
+
+
+def test_simple_substitution():
+    """
+    Replace log(x+y) with log(x*y)
+    """
+
+    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))])
+    def prog(x0, y0):
+        x1 = mb.add(x=x0, y=y0)
+        z = mb.log(x=x1)
+        return z
+
+    print("before:\n{}".format(prog))
+    block = prog.functions["main"]
+    assert len(block.find_ops(op_type="log")) == 1
+    assert len(block.find_ops(op_type="add")) == 1
+    assert len(block.find_ops(op_type="mul")) == 0
+
+    add = block.find_ops(op_type="add")[0]
+
+    x0 = add.inputs["x"]
+    y0 = add.inputs["y"]
+    x1 = add.outputs[0]
+
+    with block:
+        # It's important to add 'mul' before 'add' (it would be even better to add it
+        # immediately after 'add', but we don't have the API for that), because we
+        # need to replace any op affected by 'add' with 'mul'.
+        x2 = mb.mul(x=x0, y=y0, before_op=add)
+
+    assert len(block.find_ops(op_type="mul")) == 1
+    assert len(block.find_ops(op_type="add")) == 1
+    assert len(block.find_ops(op_type="log")) == 1
+
+    # It's important to set anchor_op='mul' because new_var is only visible
+    # after 'mul'.
+    block.replace_uses_of_var_after_op(anchor_op=x2.op, old_var=x1, new_var=x2)
+    block.remove_ops([add])
+
+    print("after:\n{}".format(prog))
+    assert len(block.find_ops(op_type="add")) == 0
+    assert len(block.find_ops(op_type="mul")) == 1
+    assert len(block.find_ops(op_type="log")) == 1
+
+
+def test_substitute_nested_op():
+    """
+    Replace a conditional op that contains a nested block
+    """
+
+    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))])
+    def prog(x0, y0):
+        pred = mb.less(x=x0, y=y0)
+        z = mb.cond(
+            pred=pred, _true_fn=lambda: mb.abs(x=x0), _false_fn=lambda: mb.abs(x=y0)
+        )
+        z1 = mb.log(x=z)
+        return z1
+
+    print("before:\n{}".format(prog))
+    block = prog.functions["main"]
+    assert len(block.find_ops(op_type="less")) == 1
+    assert len(block.find_ops(op_type="abs")) == 2
+    assert len(block.find_ops(op_type="cond")) == 1
+    assert len(block.find_ops(op_type="log")) == 1
+
+    cond = block.find_ops(op_type="cond")[0]
+    x0 = block.inputs["x0"]
+    z = cond.outputs[0]
+    block.replace_uses_of_var_after_op(anchor_op=None, old_var=z, new_var=x0)
+
+    # removing cond will also remove the abs ops within its block
+    block.remove_ops([cond])
+
+    print("after:\n{}".format(prog))
+    assert len(block.find_ops(op_type="less")) == 1
+    assert len(block.find_ops(op_type="log")) == 1
+    assert len(block.find_ops(op_type="cond")) == 0
+    assert len(block.find_ops(op_type="abs")) == 0
+
+
+def test_simple_transpose_squash():
+    """
+    Test that consecutive transposes which cancel out can be eliminated
+    """
+
+    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
+    def prog(x0):
+        x1 = mb.transpose(x=x0, perm=[1, 0])
+        x2 = mb.transpose(x=x1, perm=[1, 0])
+        x3 = mb.log(x=x2)
+        x4 = mb.transpose(x=x3, perm=[1, 0])
+        x5 = mb.transpose(x=x4, perm=[1, 0])
+        x6 = mb.transpose(x=x5, perm=[1, 0])
+        x7 = mb.transpose(x=x6, perm=[1, 0])
+        return x7
+
+    print("before:\n{}".format(prog))
+    block = prog.functions["main"]
+    assert len(block.find_ops(op_type="transpose")) == 6
+
+    def can_squash(trans1, trans2):
+        return (
+            len(trans1.outputs) == 1
+            and len(trans2.outputs) == 1
+            and all(trans1.perm.val == trans2.perm.val)
+        )
+
+    # Find all candidate pairs of transposes.
+    # We ignore all const ops (transpose_perm_%x) and collect pairs of transpose
+    # ops as candidates. This won't generalize to more complicated programs with
+    # other shape-invariant ops in between.
+    candidates = []
+    non_const_ops = [op for op in block.operations if op.op_type != "const"]
+    for i in range(len(non_const_ops) - 1):
+        op = non_const_ops[i]
+        if len(candidates) and op == candidates[-1][1]:
+            # op is already a squash candidate
+            continue
+        next_op = non_const_ops[i + 1]
+        if (
+            op.op_type == "transpose"
+            and next_op.op_type == "transpose"
+            and can_squash(op, next_op)
+        ):
+            candidates.append((op, next_op))
+
+    # Remove each candidate pair
+    for (trans1, trans2) in candidates:
+        before = trans1.inputs["x"]
+        after = trans2.outputs[0]
+        block.replace_uses_of_var_after_op(
+            anchor_op=trans2, old_var=after, new_var=before
+        )
+        block.remove_ops([trans1, trans2])
+
+    print("after:\n{}".format(prog))
+    assert len(block.find_ops(op_type="transpose")) == 0
+
+
+def test_duplicate_outputs_add_consuming_block_once():
+    """The same consuming block should only be added once."""
+    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))])
+    def prog(x0, y0):
+        x1 = mb.add(x=x0, y=y0)
+        return x1, x1, x1
+
+    block = prog.functions["main"]
+    assert len(block.outputs[0].consuming_blocks) == 1
+    assert len(block.outputs[1].consuming_blocks) == 1
+    assert len(block.outputs[2].consuming_blocks) == 1
+
+
+def test_duplicate_outputs_substitution():
+    """Replaces a var that appears more than once in the outputs."""
+    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))])
+    def prog(x0, y0):
+        x1 = mb.add(x=x0, y=y0)
+        z = mb.log(x=x1)
+        return x1, x1, z
+
+    block = prog.functions["main"]
+    add = block.find_ops(op_type="add")[0]
+    x0 = add.inputs["x"]
+    y0 = add.inputs["y"]
+    x1 = add.outputs[0]
+
+    with block:
+        x2 = mb.mul(x=x0, y=y0, before_op=add, name="new_output")
+
+    block.replace_uses_of_var_after_op(anchor_op=x2.op, old_var=x1, new_var=x2)
+    block.remove_ops([add])
+    assert block.outputs[0].op.name == "new_output"
+    assert block.outputs[1].op.name == "new_output"
+    assert len(block.outputs[0].consuming_blocks) == 1
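The block-manipulation tests above all lean on the same three-step surgery recipe. As a quick orientation, here is a minimal sketch (an editorial aside, not part of the patch, assuming only the in-tree `Builder` API these tests already exercise) that swaps an `add` for a `mul`:

```python
from coremltools.converters.mil.mil import Builder as mb

# Three-step recipe: (1) create the replacement with `before_op` so the new
# var is visible early enough, (2) redirect all readers with
# `replace_uses_of_var_after_op`, (3) delete the now-dead op with `remove_ops`.
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))])
def prog(x0, y0):
    x1 = mb.add(x=x0, y=y0)
    return mb.log(x=x1)

block = prog.functions["main"]
add = block.find_ops(op_type="add")[0]
with block:
    mul = mb.mul(x=add.inputs["x"], y=add.inputs["y"], before_op=add)
block.replace_uses_of_var_after_op(anchor_op=mul.op, old_var=add.outputs[0], new_var=mul)
block.remove_ops([add])
assert [op.op_type for op in block.operations] == ["mul", "log"]
```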
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py
new file mode 100644
index 00000000..b6601ccf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py
@@ -0,0 +1,302 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import os
+import tempfile
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+from coremltools.converters.mil import Builder as mb
+from coremltools.converters.mil.debugging_utils import extract_submodel
+from coremltools.converters.mil.mil import get_new_symbol
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.testing_utils import get_op_types_in_program
+
+def get_simple_program():
+    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)),])
+    def prog(x):
+        x = mb.add(x=x, y=1.2, name="add")
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+        x = mb.square(x=x, name="output_0")
+        x = mb.tanh(x=x, name="output_1")
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+        return x
+
+    return prog
+
+def compute_ground_truth_answer(x):
+    x = x + 1.2
+    x = np.transpose(x, axes=[0, 2, 3, 1])
+    square = x * x
+    tanh = np.tanh(square)
+    return {"output_0": square, "output_1": tanh}
+
+class TestExtractSubModel:
+
+    def test_extract_submodel_error_handling(self):
+        prog = get_simple_program()
+        mlmodel = ct.convert(prog, convert_to="neuralnetwork")
+
+        invalid_outputs = set()
+        with pytest.raises(ValueError, match="outputs must be of type list/tuple. Got <class 'set'>"):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        invalid_outputs = ["output_1", 1]
+        with pytest.raises(ValueError, match="outputs must be a list of str. Got element 1 with type <class 'int'>."):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        invalid_outputs = ["output_1", "output_1"]
+        with pytest.raises(ValueError, match="outputs must be a list of unique elements. 'output_1' occurs 2 times"):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        invalid_outputs = ["error"]
+        with pytest.raises(ValueError, match="outputs \['error'\] not found in the function."):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        model_dir = tempfile.TemporaryDirectory()
+        mlmodel_path = os.path.join(model_dir.name, "model.mlmodel")
+        mlmodel.save(mlmodel_path)
+        mlmodel = ct.models.MLModel(mlmodel_path)
+        with pytest.raises(ValueError, match="NeuralNetwork model loaded from the disk is not supported by the extract_submodel util"):
+            extract_submodel(mlmodel, outputs=["output_0", "output_1"])
+
+    def test_extract_submodel_symbolic_input(self):
+        """
+        Input graph:
+        x -> sin ---> sub -> output_1
+              |
+              v
+             mul -> tan -> output_2
+
+        If x has a symbolic shape, then the subgraph mul -> tan should also have a symbolic shape
+        """
+        @mb.program(input_specs=[mb.TensorSpec(shape=(1, get_new_symbol()))])
+        def prog(x):
+            sin = mb.sin(x=x, name="sin")
+            sub = mb.sub(x=sin, y=1.5, name="sub")
+            mul = mb.mul(x=sin, y=1.2, name="mul")
+            tan = mb.tan(x=mul, name="tan")
+            return sub, tan
+        model = ct.convert(prog, convert_to="neuralnetwork")
+        submodel = extract_submodel(model, outputs=["tan"], inputs=["mul"])
+        func = submodel._mil_program.functions["main"]
+
+        input = list(func.inputs.values())[0]
+        assert input.shape[0] == 1
+        assert is_symbolic(input.shape[1])
+
+        output = func.outputs[0]
+        assert output.shape[0] == 1
+        assert is_symbolic(output.shape[1])
+
+    def test_extract_submodel_complex(self):
+        """
+        Input graph:
+        x -> sin ------> sub -> output_1
+              |    |
+              v    v
+        y -> add -> mul -> tan -> realdiv -> output_2
+        """
+        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2))])
+        def prog(x, y):
+            sin = mb.sin(x=x, name="sin")
+            add = mb.add(x=sin, y=y, name="add")
+            sub = mb.sub(x=sin, y=1.5, name="sub")
+            mul = mb.mul(x=sin, y=add, name="mul")
+            tan = mb.tan(x=mul, name="tan")
+            realdiv = mb.real_div(x=tan, y=4.7, name="realdiv")
+            return sub, realdiv
+        model = ct.convert(prog, convert_to="neuralnetwork")
+
+        """
+        Case 1:
+            inputs = None
+            outputs = [sin, mul]
+
+        Output graph:
+        x -> sin ------> output_1
+              |    |
+              v    v
+        y -> add -> mul -> output_2
+        """
+        submodel = extract_submodel(model, outputs=["sin", "mul"])
+        assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"]
+
+        """
+        Case 2:
+            inputs = None
+            outputs = [sin, add]
+
+        Output graph:
+        x -> sin -> output_1
+              |
+              v
+        y -> add -> output_2
+        """
+        submodel = extract_submodel(model, outputs=["sin", "add"])
+        assert get_op_types_in_program(submodel._mil_program) == ["sin", "add"]
+
+        """
+        Case 3:
+            inputs = None
+            outputs = [mul]
+
+        Output graph:
+        x -> sin -----
+              |    |
+              v    v
+        y -> add -> mul -> output_1
+        """
+        submodel = extract_submodel(model, outputs=["mul"])
+        assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"]
+
+        """
+        Case 4:
+            inputs = None
+            outputs = [sin, sub]
+
+        Output graph:
+        x -> sin -> sub -> output_2
+              |
+              v
+           output_1
+        y
+        """
+        submodel = extract_submodel(model, outputs=["sin", "sub"])
+        print(submodel._mil_program)
+        assert get_op_types_in_program(submodel._mil_program) == ["sin", "sub"]
+
+        """
+        Case 5:
+            inputs = [x, y]
+            outputs = [mul]
+
+        Output graph:
+        x -> sin -----
+              |    |
+              v    v
+        y -> add -> mul -> output_1
+        """
+        submodel = extract_submodel(model, outputs=["mul"], inputs=["x", "y"])
+        assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"]
+
+        """
+        Case 6:
+            inputs = [mul]
+            outputs = [tan]
+
+        mul -> tan -> output_1
+        """
+        submodel = extract_submodel(model, outputs=["tan"], inputs=["mul"])
+        assert get_op_types_in_program(submodel._mil_program) == ["tan"]
+
+        """
+        Case 7:
+            inputs = [sin, add]
+            outputs = [sub, mul]
+
+        sin ------> sub -> output_1
+          |
+          v
+        add -> mul -> output_2
+        """
+        submodel = extract_submodel(model, outputs=["sub", "mul"], inputs=["sin", "add"])
+        assert get_op_types_in_program(submodel._mil_program) == ["sub", "mul"]
+
+        """
+        Case 8 (Negative):
+            inputs = [sin]
+            outputs = [mul]
+
+        mul not reachable merely through sin
+        """
+        with pytest.raises(ValueError, match="output mul not reachable from inputs"):
+            submodel = extract_submodel(model, outputs=["mul"], inputs=["sin"])
+
+        """
+        Case 9 (Negative):
+            inputs = [mul]
+            outputs = [sin]
+
+        sin not reachable merely through mul
+        """
+        with pytest.raises(ValueError, match="output sin not reachable from inputs"):
+            submodel = extract_submodel(model, outputs=["sin"], inputs=["mul"])
+
+    @pytest.mark.parametrize(
+        "compute_unit",
+        [
+            ct.ComputeUnit.ALL,
+            ct.ComputeUnit.CPU_ONLY,
+        ],
+    )
+    def test_extract_submodel_neuralnetwork(self, compute_unit):
+        prog = get_simple_program()
+        model = ct.convert(prog, convert_to="neuralnetwork", compute_units=compute_unit)
+        submodel = extract_submodel(model, outputs=["output_0", "output_1"])
+
+        # check that the submodel retains the same backend
+        assert submodel.get_spec().WhichOneof("Type") == "neuralNetwork"
+
+        # check that the submodel retains the same compute unit
+        assert submodel.compute_unit == compute_unit
+
+        # check the subgraph
+        assert get_op_types_in_program(submodel._mil_program) == ["add", "transpose", "square", "tanh"]
+
+        # check the numerical outputs
+        coreml_in = np.random.rand(1, 2, 3, 4)
+        coreml_out = submodel.predict({"x": coreml_in})
+        gt = compute_ground_truth_answer(coreml_in)
+        assert len(coreml_out) == len(gt)
+        for k, v in gt.items():
+            np.testing.assert_allclose(v, coreml_out[k], atol=0.2)
+
+    @pytest.mark.parametrize(
+        "compute_unit, store_to_disk",
+        itertools.product(
+            [
+                ct.ComputeUnit.ALL,
+                ct.ComputeUnit.CPU_ONLY,
+            ],
+            [True, False],
+        )
+    )
+    def test_extract_submodel_mlprogram(self, compute_unit, store_to_disk):
+        prog = get_simple_program()
+        model = ct.convert(
+            prog,
+            convert_to="mlprogram",
+            compute_units=compute_unit,
+            compute_precision=ct.precision.FLOAT32
+        )
+
+        if store_to_disk:
+            model_dir = tempfile.TemporaryDirectory()
+            mlmodel_path = os.path.join(model_dir.name, "model.mlpackage")
+            model.save(mlmodel_path)
+            model = ct.models.MLModel(mlmodel_path, compute_units=compute_unit)
+
+        submodel = extract_submodel(model, outputs=["output_0", "output_1"])
+
+        # check that the submodel retains the same backend
+        assert submodel.get_spec().WhichOneof("Type") == "mlProgram"
+
+        # check that the submodel retains the same compute unit
+        assert submodel.compute_unit == compute_unit
+
+        # check the subgraph
+        assert get_op_types_in_program(submodel._mil_program) == ["add", "transpose", "square", "tanh"]
+
+        # check the numerical outputs
+        coreml_in = np.random.rand(1, 2, 3, 4)
+        coreml_out = submodel.predict({"x": coreml_in})
+        gt = compute_ground_truth_answer(coreml_in)
+        assert len(coreml_out) == len(gt)
+        for k, v in gt.items():
+            np.testing.assert_allclose(v, coreml_out[k], atol=0.2)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py
new file mode
100644 index 00000000..1a7e8e1f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py @@ -0,0 +1,347 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +import coremltools as ct +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types + +np.random.seed(0) + +def test_single_layer_example(): + batch_size, input_dim, output_dim = 2, 4, 2 + + @mb.program( + input_specs=[mb.TensorSpec(shape=(batch_size, input_dim)),] + ) + def prog(x): + # Weight + W_val = ( + np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) + .reshape(input_dim, output_dim) + .T.astype(np.float32) + ) + W = mb.const(val=W_val, name="const_W") + + # bias + b_val = np.array([-0.5, 0.5]).astype(np.float32) + b = mb.const(val=b_val, name="const_b") + + return mb.linear(x=x, weight=W, bias=b, name="lin") + + logger.info("prog:\n" + str(prog)) + + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + + feed_dict = { + "x": np.random.rand(batch_size, input_dim).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 1 + + +def test_conv_example(): + batch, C_in, C_out, H, W = 2, 2, 3, 7, 10 + kH, kW = 3, 5 + img_shape, seq_shape = (batch, C_in, H, W), (batch, C_in, H) + + @mb.program( + input_specs=[mb.TensorSpec(shape=img_shape), mb.TensorSpec(shape=seq_shape),] + ) + def prog(img, seq): + ## 2D convolution + # Weight + W_2d = np.random.rand(C_out, C_in, kH, kW).astype(np.float32) + W_2d = mb.const(val=W_2d, name="const_W") + + # Test 1: provide only required arguments. + conv1 = mb.conv(x=img, weight=W_2d, pad_type="valid") + logger.info("conv1 shape: {}".format(conv1.shape)) + + # Test 2: stride > 1 + conv2 = mb.conv(x=img, weight=W_2d, pad_type="valid", strides=[2, 3]) + logger.info("conv2 shape: {}".format(conv2.shape)) + + # Test 3: same padding + conv3 = mb.conv(x=img, weight=W_2d, pad_type="same", strides=[2, 3]) + logger.info("conv3 shape: {}".format(conv3.shape)) + + # Test max_pool + pool1 = mb.max_pool( + x=img, kernel_sizes=[kH, kW], pad_type="valid", strides=[2, 3] + ) + logger.info("pool1 shape: {}".format(pool1.shape)) + + # Test max_pool + pool2 = mb.max_pool( + x=img, kernel_sizes=[kH, kW], pad_type="same", strides=[2, 3] + ) + logger.info("pool2 shape: {}".format(pool2.shape)) + + ## 1D convolution + W_1d = np.random.rand(C_out, C_in, kH).astype(np.float32) + W_1d = mb.const(val=W_1d, name="const_W_1d") + logger.info("W_1d val: {}".format(W_1d.val)) + + # Test 4: provide only required arguments for 1D. 
+ conv4 = mb.conv(x=seq, weight=W_1d, pad_type="valid") + + logger.info("conv4 shape: {}".format(conv4.shape)) + + return conv1, conv2, conv3, pool1, pool2, conv4 + + # rdar://105988903 ([Infra] re-enable the test_conv_example unit test on M1 with compute_units=ALL) + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork", compute_units=ct.ComputeUnit.CPU_ONLY) + + feed_dict = { + "img": np.random.rand(*img_shape).astype(np.float32), + "seq": np.random.rand(*seq_shape).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 6 + + +def test_while_example(): + def body(a, b): + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + logger.info("prog:\n" + str(prog)) + + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + + feed_dict = { + "a": np.random.rand(1, 2).astype(np.float32), + "b": np.random.rand(1, 2).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 2 + +def test_reserved_node_names(): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + return mb.square(x=x, name="tensor") + + mlmodel = ct.convert(prog, source="milinternal", convert_to="mlprogram") + + feed_dict = { + "x": np.random.rand(10, 20).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 1 + +def get_simple_topk_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + return prog + +def get_simple_pixel_unshuffle_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + return prog + +def get_simple_topk_pixel_unshuffle_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + return prog + +def get_simple_nested_block_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + def true_fn(): + topk, _ = mb.topk(x=x, k=1, axis=-1, ascending=True) + return mb.add(x=topk, y=1.) + + def false_fn(): + topk, _ = mb.topk(x=x, k=1, axis=-1, ascending=True) + return mb.add(x=topk, y=2.) 
+
+        shape = mb.shape(x=x)
+        rank = mb.shape(x=shape)
+        pred = mb.squeeze(x=rank)
+        return mb.cond(pred=mb.cast(x=pred, dtype="bool"), _true_fn=true_fn, _false_fn=false_fn)
+    return prog
+
+class TestMLProgramVersionHandling:
+
+    @staticmethod
+    def test_multi_versions_op_selection():
+        '''
+        The builder should pick up the right version of an op based on opset_version
+        '''
+        # pick up the oldest version (iOS13) topk by default
+        prog = get_simple_topk_program()
+        main_func = prog.functions["main"]
+        topk_op = main_func.find_ops(op_type="topk")[0]
+        assert topk_op.opset_version == ct.target.iOS13
+
+        # pick up iOS13 version topk
+        prog = get_simple_topk_program(opset_version=ct.target.iOS15)
+        main_func = prog.functions["main"]
+        topk_op = main_func.find_ops(op_type="topk")[0]
+        assert topk_op.opset_version == ct.target.iOS13
+
+        # pick up iOS16 version topk
+        prog = get_simple_topk_program(opset_version=ct.target.iOS16)
+        main_func = prog.functions["main"]
+        topk_op = main_func.find_ops(op_type="topk")[0]
+        assert topk_op.opset_version == ct.target.iOS16
+
+    @staticmethod
+    def test_pymil_front_end_conversion():
+        prog = get_simple_topk_pixel_unshuffle_program(opset_version=ct.target.iOS16)
+        mlmodel = ct.convert(prog, minimum_deployment_target=ct.target.iOS16)
+
+    @staticmethod
+    def test_nested_block_opset_version_selection():
+        # pick up the oldest version (iOS13) topk by default
+        prog = get_simple_nested_block_program()
+        main_func = prog.functions["main"]
+        topk_ops = main_func.find_ops(op_type="topk")
+        assert all([topk.opset_version == ct.target.iOS13 for topk in topk_ops])
+
+        # pick up iOS16 version topk
+        prog = get_simple_nested_block_program(opset_version=ct.target.iOS16)
+        main_func = prog.functions["main"]
+        topk_ops = main_func.find_ops(op_type="topk")
+        assert all([topk.opset_version == ct.target.iOS16 for topk in topk_ops])
+
+    @staticmethod
+    def test_pymil_opset_version_inference():
+        '''
+        A program containing pixel_unshuffle should be inferred as an iOS16 version program
+        '''
+        prog = get_simple_pixel_unshuffle_program()
+        assert prog.functions["main"].opset_version == ct.target.iOS16
+
+        expected_err_str = (
+            "Please update the minimum_deployment_target to coremltools.target.iOS16, "
+            "since op pixel_unshuffle is only available in opset coremltools.target.iOS16 or newer."
+        )
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, convert_to="mlprogram")
+
+    @staticmethod
+    def test_pymil_front_end_conversion_early_error_out():
+        prog = get_simple_topk_pixel_unshuffle_program(opset_version=ct.target.iOS16)
+        expected_err_str = (
+            "Please update the minimum_deployment_target to coremltools.target.iOS16, "
+            "since op pixel_unshuffle is only available in opset coremltools.target.iOS16 or newer."
+        )
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, minimum_deployment_target=ct.target.iOS15)
+
+    @staticmethod
+    def test_unsupported_op_early_error_out():
+        '''
+        We should error out at the point when the builder tries to add an op which is
+        only supported in a newer spec version
+        '''
+        expected_err_str = (
+            "No available version for pixel_unshuffle in the coremltools.target.iOS15 opset. "
" + "Please update the minimum_deployment_target to at least coremltools.target.iOS16" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS15) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + + @staticmethod + def test_bulid_non_compatible_program_early_error_out(): + ''' + `mb.program` API should detect potential non compatible ops in the program, and error out early + In this example, `pixel_unshuffle` is an iO16 op, and `topk` has iOS13 and iOS16 version. + If the builder version is not set, it is picking up the iOS13 version of topk, which would + potentially create an invalid program. + In this case, `mb.program` should error out, and tell the user to set `opset_version=target.iOS16` + ''' + expected_err_str = ( + "Op topk with an out of date version coremltools.target.iOS13 is detected. Please use @mb.program\(input_specs=..., opset_version=coremltools.target.iOS16\)" + ) + with pytest.raises(ValueError, match=expected_err_str): + get_simple_topk_pixel_unshuffle_program() + + @staticmethod + def test_type_domain_validation(): + ''' + The builder should error out early when detecting the input type violation against the defined type_domain + ''' + expected_err_str = ( + "In op, of type rsqrt, named rsqrt_0, the named input `epsilon` must have the same data type as the named input `x`. However, epsilon has dtype int32 whereas x has dtype fp32" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(2,), dtype=types.fp32)]) + def prog(x): + res = mb.rsqrt(x=x, epsilon=1) + return res + + @staticmethod + def test_rank6_tensor_early_error_out(): + ''' + The builder should error out early when detecting a rank 6 (or higher) tensor which cannot be eliminated by graph passes + ''' + expected_err_str = ( + "Core ML only supports tensors with rank <= 5. Layer \"reshape_0\", with type \"reshape\", outputs a rank 6 tensor" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(x): + res = mb.reshape(x=x, shape=(1, 1, 1, 1, 1, 1), name="reshape_0") + return res + ct.convert(prog, source="milinternal") + + @staticmethod + def test_rank5_list_early_error_out(): + ''' + The builder should error out early when detecting a list of rank 5 (or higher) tensors is created + ''' + expected_err_str = ( + "Core ML only supports list of elements with rank <= 4. Layer \"list_0\", with type \"make_list\", outputs a list of rank 5 tensors." + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(x): + ls = mb.make_list( + init_length=1, + dtype="fp32", + elem_shape=(1, 1, 1, 1, 1), + dynamic_length=True, + name="list_0", + ) + return ls + + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py new file mode 100644 index 00000000..134f64cb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py @@ -0,0 +1,27 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import pytest + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import type_mapping + + +class TestTypeMapping: + def test_promote_dtypes_basic(self): + assert type_mapping.promote_dtypes([types.int32, types.int32]) == types.int32 + assert type_mapping.promote_dtypes([types.int32, types.int64, types.int16]) == types.int64 + assert type_mapping.promote_dtypes([types.fp16, types.fp32, types.fp64]) == types.fp64 + assert type_mapping.promote_dtypes([types.fp16, types.int32, types.int64]) == types.fp16 + + @pytest.mark.parametrize( + "input_size", + [10, 10000], + ) + def test_promote_dtypes_different_input_sizes(self, input_size): + assert ( + type_mapping.promote_dtypes([types.int32, types.int64, types.int16] * input_size) + == types.int64 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py new file mode 100644 index 00000000..6cdc9fc3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, apply_delayed_types, class_annotate, delay_type +from .get_type_info import get_type_info +from .global_methods import global_remap +from .type_bool import bool, is_bool +from .type_complex import complex, complex64, complex128, is_complex +from .type_dict import dict, empty_dict +from .type_double import double, float, fp16, fp32, fp64, is_float +from .type_globals_pseudo_type import globals_pseudo_type +from .type_int import (int8, int16, int32, int64, is_int, uint, uint8, + uint16, uint32, uint64) +from .type_list import empty_list, is_list, list +from .type_mapping import (builtin_to_proto_types, builtin_to_string, + is_builtin, is_dict, is_primitive, is_scalar, + is_str, is_subtype, is_tensor, is_tuple, + np_dtype_to_py_type, nptype_from_builtin, + numpy_type_to_builtin_type, + numpy_val_to_builtin_val, promote_dtypes, + promote_types, proto_to_builtin_types, + string_to_builtin, type_to_builtin_type) +from .type_str import str +from .type_tensor import (is_compatible_type, is_tensor_and_is_compatible, + is_tensor_and_is_compatible_general_shape, tensor, + tensor_has_complete_shape) +from .type_tuple import tuple +from .type_unknown import unknown +from .type_void import void + +apply_delayed_types() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py new file mode 100644 index 00000000..0ccd104c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py @@ -0,0 +1,115 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +class delay_type_cls: + def __getattr__(self, t): + return t + + +# this delay type thingee is useful for class annotations. 
+# for instance: the following code is invalid because when the annotate
+# function is invoked, the "double" class does not yet exist
+#
+# class double:
+#     @annotate(double, other=double)
+#     def __add__(self, other):
+#
+# So it is necessary to add one level of laziness and delay the type
+#
+# class double:
+#     @annotate(delay_type.double, other=delay_type.double)
+#     def __add__(self, other):
+#
+# This basically replaces the annotation with the string "double", which we will
+# then replace with the actual type later
+#
+delay_type = delay_type_cls()
+
+annotated_function_list = []
+annotated_class_list = {}
+
+
+class _invalid_placeholder_type:
+    pass
+
+
+def annotate(return_type=_invalid_placeholder_type, **kwargs):
+    """
+    A decorator that informs the compyler about the return type of a function
+    and provides a collection of hints for other variable names. These can include
+    - captured variables
+    - function arguments
+    - other variables within the function
+
+    Ex:
+
+    @annotate(compyler.double, a=compyler.double, b=compyler.double)
+    def add(a, b):
+
+    In certain cases, when the class members are annotated, this does not work.
+    For instance, this fails because the annotate decorator is called before
+    the class double is fully defined.
+
+    class double:
+        @annotate(double, other=double)
+        def __add__(self, other):
+
+    So it is necessary to add one level of laziness and delay the type:
+
+    @class_annotate()
+    class double:
+        @annotate(delay_type.double, other=delay_type.double)
+        def __add__(self, other):
+
+    After which apply_delayed_types() must be called to fill in the delayed
+    types.
+    """
+    global annotated_function_list
+
+    def decorator(func):
+        global annotated_function_list
+        func.type_annotations = kwargs
+        if return_type is not _invalid_placeholder_type:
+            func.return_type = return_type
+        annotated_function_list += [func]
+        return func
+
+    return decorator
+
+
+def class_annotate():
+    """
+    Registers a class to be used by delay_type. See annotate()
+    """
+    global annotated_class_list
+
+    def decorator(cls):
+        global annotated_class_list
+        annotated_class_list[cls.__name__] = cls
+        return cls
+
+    return decorator
+
+
+def apply_delayed_types(
+    type_map=annotated_class_list, fnlist=annotated_function_list
+):  # pylint: disable=dangerous-default-value
+    """
+    Apply all delayed types. See annotate()
+    """
+    # pylint: disable=no-member
+    # type name is a dict from str to type
+    for func in fnlist:
+        if (
+            hasattr(func, "return_type")
+            and isinstance(func.return_type, str)
+            and func.return_type in type_map
+        ):
+            func.return_type = type_map[func.return_type]
+        if hasattr(func, "type_annotations"):
+            for key in func.type_annotations:
+                if func.type_annotations[key] in type_map:
+                    func.type_annotations[key] = type_map[func.type_annotations[key]]
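The delayed-typing machinery above is easiest to see end to end. A small sketch using only the module's own API; the `point` class is hypothetical, purely for illustration:

```python
from coremltools.converters.mil.mil.types.annotate import (
    annotate, apply_delayed_types, class_annotate, delay_type,
)

# `delay_type.point` is just the string "point"; apply_delayed_types() later
# swaps every such string for the real class registered by @class_annotate().
@class_annotate()
class point:
    @annotate(delay_type.point, other=delay_type.point)
    def shifted(self, other):
        return point()

apply_delayed_types()
assert point.shifted.return_type is point
assert point.shifted.type_annotations["other"] is point
```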
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py
new file mode 100644
index 00000000..74d47139
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .type_spec import FunctionType, Type
+from .type_void import void
+
+
+def get_python_method_type(py_function):
+    # given a python class method, parse the annotations to figure out the type
+    function_inputs = []
+    function_output = get_type_info(void)
+    annotations = {}
+    if hasattr(py_function, "type_annotations"):
+        annotations = {
+            k: get_type_info(v) for k, v in py_function.type_annotations.items()
+        }
+    if hasattr(py_function, "return_type"):
+        function_output = get_type_info(py_function.return_type)
+    try:
+        if hasattr(py_function, "__func__"):
+            argcount = py_function.__func__.__code__.co_argcount
+            argnames = py_function.__func__.__code__.co_varnames[:argcount]
+        else:
+            argcount = py_function.__code__.co_argcount
+            argnames = py_function.__code__.co_varnames[:argcount]
+    except AttributeError:
+        raise TypeError(
+            "Unable to derive type information from method %s. "
+            "You might have a misspecified type. Ex: use compyler.int and not int"
+            % py_function
+        )
+
+    for arg in argnames:
+        if arg in annotations:
+            function_inputs.append(annotations[arg])
+        elif arg != "self":
+            raise TypeError(
+                "Function "
+                + str(py_function)
+                + " has insufficient annotations: "
+                + arg
+                + " needs a type"
+            )
+    typeinfo = FunctionType(function_inputs, function_output, py_function)
+    return typeinfo
+
+
+def get_type_info(t):
+    if hasattr(t, "__type_info__"):
+        ret = t.__type_info__()
+        assert ret.python_class is not None
+        return ret
+    elif isinstance(t, type):
+        return Type(t.__name__, python_class=t)
+    elif hasattr(t, "__call__"):
+        return get_python_method_type(t)
+    raise TypeError("Unsupported type %s" % t)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py
new file mode 100644
index 00000000..b739beed
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+"""
+This defines a list of all the "global methods" like len, and type-cast
+operators like int, list, double, etc.
+
+The difficulty with some of these methods is that they don't have fixed types.
+For instance, len(x) allows x to be a list or a dictionary.
+
+However, we don't support function overloading based on types, and we don't
+intend to. (It is complicated: it requires the parser to be far more intelligent
+and do good type inference, and it would require either supporting overloading
+outright or doing name mangling.)
+
+The final quirk is that we probably should not call these functions "len"
+or "int" because that will conflict with the existing python methods.
+
+So what we will simply do is rewrite them to names like __len__, __str__,
+__int__, and __double__.
+"""
+
+global_remap = {
+    "len": "__len__",
+    "str": "__str__",
+    "int": "__int__",
+    "double": "__double__",
+    "float": "__double__",
+    "bool": "__bool__",
+    "log": "__log__",
+    "exp": "__exp__",
+    "max": "__max__",
+    "min": "__min__",
+}
+
+global_invremap = {
+    "__len__": "len",
+    "__str__": "str",
+    "__int__": "int",
+    "__double__": "float",
+    "__bool__": "bool",
+    "__log__": "math.log",
+    "__exp__": "math.exp",
+    "__max__": "max",
+    "__min__": "min",
+}
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py
new file mode 100644
index 00000000..6222383a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+import sympy as sm
+
+k_used_symbols = {}
+k_num_internal_syms = 0
+
+
+def is_compatible_symbolic_vector(val_a, val_b):
+    """
+    Compare two vectors and check whether they are compatible.
+    ([is0, 4], [9, 4]) and ([is0, 1], [is1, is2]) are two compatible examples.
+    """
+    val_a = tuple(val_a)
+    val_b = tuple(val_b)
+
+    if len(val_a) != len(val_b):
+        return False
+
+    for a, b in zip(val_a, val_b):
+        if not is_symbolic(a) and not is_symbolic(b):
+            if a != b:
+                return False
+    return True
+
+
+def is_symbolic(val):
+    return issubclass(type(val), sm.Basic)  # pylint: disable=consider-using-ternary
+
+
+def is_variadic(val):
+    return (
+        issubclass(type(val), sm.Symbol) and val.name[0] == "*"
+    )  # pylint: disable=consider-using-ternary
+
+
+def num_symbolic(val):
+    """
+    Return the number of symbols in val
+    """
+    if is_symbolic(val):
+        return 1
+    elif isinstance(val, np.ndarray) and np.issctype(val.dtype):
+        return 0
+    elif hasattr(val, "__iter__"):
+        return sum(any_symbolic(i) for i in val)
+    return 0
+
+
+def any_symbolic(val):
+    if is_symbolic(val):
+        return True
+    if isinstance(val, np.ndarray) and val.ndim == 0:
+        return is_symbolic(val[()])
+    elif isinstance(val, np.ndarray) and np.issctype(val.dtype):
+        return False
+    elif isinstance(val, str):  # string is iterable
+        return False
+    elif hasattr(val, "__iter__"):
+        return any(any_symbolic(i) for i in val)
+    return False
+
+
+def any_variadic(val):
+    if is_variadic(val):
+        return True
+    elif isinstance(val, np.ndarray) and np.issctype(val.dtype):
+        return False
+    elif isinstance(val, str):  # string is iterable
+        return False
+    elif hasattr(val, "__iter__"):
+        return any(any_variadic(i) for i in val)
+    return False
+
+
+def isscalar(val):
+    return np.isscalar(val) or issubclass(type(val), sm.Basic)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py
new file mode 100644
index 00000000..9c74bd3d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
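The helpers in `symbolic.py` treat any `sympy` expression as an unknown dimension. A brief sketch of the compatibility rule its docstring describes (an editorial aside, not part of the patch):

```python
import numpy as np
import sympy as sm

from coremltools.converters.mil.mil.types.symbolic import (
    any_symbolic, is_compatible_symbolic_vector, is_symbolic,
)

# Two shapes are compatible when every pair of *concrete* entries agrees;
# a symbolic entry matches anything.
is0 = sm.Symbol("is0")
assert is_symbolic(is0) and not is_symbolic(4)
assert any_symbolic((is0, 4)) and not any_symbolic(np.zeros((2, 3)))
assert is_compatible_symbolic_vector((is0, 4), (9, 4))      # symbol vs 9: ok, 4 == 4
assert not is_compatible_symbolic_vector((is0, 4), (9, 5))  # 4 != 5
```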
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, class_annotate, delay_type +from .type_spec import Type + + +@class_annotate() +class bool: + def __init__(self, v=False): + self.val = v + + @classmethod + def __type_info__(cls): + return Type("bool", python_class=cls) + + @annotate(delay_type.bool, other=delay_type.bool) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type.bool) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __not__(self, other): + return bool(not other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val + + @annotate(delay_type.int) + def __int__(self): + return int(self) + + @annotate(delay_type.double) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + +def is_bool(t): + return t is bool or isinstance(t, bool) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py new file mode 100644 index 00000000..ed92614d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools import _logger as logger + +from .annotate import annotate, class_annotate, delay_type +from .type_bool import bool +from .type_spec import Type + + +def make_complex(width): + delay_type_complex = getattr(delay_type, "complex" + str(width)) + + @class_annotate() + class complex: + _width = width + + def __init__(self, v=0 + 0j): + self._val: np.complexfloating = ( + np.complex64(v) if width == 64 else np.complex128(v) + ) + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + from .type_mapping import ( + builtin_to_string, + nptype_from_builtin, + numpy_type_to_builtin_type, + ) + + if not isinstance(v, np.generic): + + if isinstance(v, np.ndarray) and v.ndim == 0: + # Rank zero tensor case. Use as a scalar. + self._val = v.item() + else: + raise ValueError( + f"Types should have zero-rank ndarray input, got {v} instead." 
+ ) + + elif isinstance(v, np.complexfloating): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth(): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or " + "loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type("complex" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @annotate(delay_type_complex, other=delay_type_complex) + def __add__(self, other): + assert isinstance(other, complex) + return complex(self.val + other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __sub__(self, other): + assert isinstance(other, complex) + return complex(self.val - other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __mul__(self, other): + assert isinstance(other, complex) + return complex(self.val * other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __div__(self, other): + assert isinstance(other, complex) + return complex(self.val / other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __mod__(self, other): + raise ValueError("Can't mod complex numbers.") + + @annotate(delay_type.bool, other=delay_type_complex) + def __lt__(self, other): + return bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val + + @annotate(delay_type.int) + def __int__(self): + logger.warning( + "ComplexWarning: Casting complex to real discards the imaginary part." + ) + return int(np.real(self.val)) + + @annotate(delay_type_complex) + def __complex__(self): + return complex(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type_complex) + def __log__(self): + # The `math.log` doesn't support complex numbers yet. + return np.log(self.val) + + @annotate(delay_type_complex) + def __exp__(self): + return np.exp(self.val) + + @annotate(delay_type_complex) + def __neg__(self): + return complex(-self.val) + + complex.__name__ = "complex%d" % complex.get_bitwidth() + return complex + + +# We keep consistent with PyTorch and Tensorflow: +# - complex64 consists of a fp32 real and a fp32 imag. +# - complex128 consists of a fp64 real and a fp64 imag. 
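A short sketch of what the `make_complex` factory produces once the concrete widths are bound just below (an editorial aside; it relies only on the definitions in this file and the `types` re-exports shown earlier):

```python
from coremltools.converters.mil.mil import types

# Two concrete widths, plus the `types.complex` alias for the 64-bit variant.
assert types.complex64.get_bitwidth() == 64
assert types.complex128.get_bitwidth() == 128
assert types.complex is types.complex64
assert types.is_complex(types.complex128) and not types.is_complex(types.fp32)
```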
+complex64 = make_complex(64)
+complex128 = make_complex(128)
+complex = complex64
+
+
+def is_complex(t):
+    complex_types_set = (complex64, complex128)
+    return (t in complex_types_set) or isinstance(t, complex_types_set)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py
new file mode 100644
index 00000000..bf711211
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import type_bool, type_int
+from .annotate import annotate
+from .get_type_info import get_type_info
+from .type_spec import Type
+from .type_void import void
+
+
+def memoize(f):
+    memo = {}
+
+    def helper(x, y):
+        if (x, y) not in memo:
+            memo[(x, y)] = f(x, y)
+        return memo[(x, y)]
+
+    return helper
+
+
+class empty_dict:
+    @classmethod
+    def __type_info__(cls):
+        return Type("empty_dict", python_class=cls)
+
+
+@memoize
+def dict(keytype, valuetype):
+    class dict:
+        T = [keytype, valuetype]
+
+        def __init__(self):
+            self.val = {}
+
+        @classmethod
+        def __type_info__(cls):
+            return Type("dict", [get_type_info(keytype), get_type_info(valuetype)], cls)
+
+        @annotate(T[1], key=T[0])
+        def __getitem__(self, key):
+            assert isinstance(key, self.T[0])
+            return self.val[key]
+
+        @annotate(void, key=T[0], newval=T[1])
+        def __setitem__(self, key, newval):
+            assert isinstance(key, self.T[0])
+            assert isinstance(newval, self.T[1])
+            self.val[key] = newval
+
+        @annotate(type_int.int64)
+        def __len__(self):
+            return type_int.int64(len(self.val))
+
+        @annotate(type_bool.bool, key=T[0])
+        def __contains__(self, key):
+            # membership is checked against the dict's keys
+            return key in self.val
+
+    dict.__template_name__ = "dict[" + keytype.__name__ + "," + valuetype.__name__ + "]"
+    return dict
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py
new file mode 100644
index 00000000..6c3024df
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py
@@ -0,0 +1,162 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import math
+
+import numpy as np
+
+from coremltools import _logger as logger
+
+from .annotate import annotate, class_annotate, delay_type
+from .type_bool import bool
+from .type_spec import Type
+
+
+def make_float(width):
+    delay_type_float = getattr(delay_type, "fp" + str(width))
+
+    @class_annotate()
+    class double:
+        _width = width
+
+        def __init__(self, v=0.0):
+            self._val = v
+
+        @property
+        def val(self):
+            return self._val
+
+        @val.setter
+        def val(self, v):
+            from .type_mapping import (builtin_to_string, nptype_from_builtin,
+                                       numpy_type_to_builtin_type)
+
+            if not isinstance(v, np.generic):
+
+                if isinstance(v, np.ndarray) and v.ndim == 0:
+                    # Rank zero tensor case. Use as a scalar.
+                    self._val = v.item()
+                else:
+                    raise ValueError(
+                        f"Types should have zero-rank ndarray input, got {v} instead."
+ ) + + elif isinstance(v, np.floating): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth(): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type("fp" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @annotate(delay_type_float, other=delay_type_float) + def __add__(self, other): + assert isinstance(other, double) + return double(self.val + other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __sub__(self, other): + assert isinstance(other, double) + return double(self.val - other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __mul__(self, other): + assert isinstance(other, double) + return double(self.val * other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __div__(self, other): + assert isinstance(other, double) + return double(self.val / other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __mod__(self, other): + assert isinstance(other, double) + return double(self.val % other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __lt__(self, other): + return bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val != 0 + + @annotate(delay_type.int) + def __int__(self): + return int(self) + + @annotate(delay_type_float) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type_float) + def __log__(self): + return math.log(self.val) + + @annotate(delay_type_float) + def __exp__(self): + return math.exp(self.val) + + @annotate(delay_type_float) + def __neg__(self): + return double(-self.val) + + double.__name__ = "fp%d" % double.get_bitwidth() + return double + + +fp16 = make_float(16) +fp32 = make_float(32) +fp64 = make_float(64) +float = fp32 +double = fp64 + + +def is_float(t): + return any(t is i or isinstance(t, i) for i in [fp16, fp32, fp64]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py new file mode 100644 index 00000000..b849ba95 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, Apple Inc. 
All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class globals_pseudo_type: + @classmethod + def __type_info__(cls): + return Type("globals", python_class=cls) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py new file mode 100644 index 00000000..132ee9f1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py @@ -0,0 +1,177 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger + +from .annotate import annotate, class_annotate, delay_type +from .type_bool import bool +from .type_spec import Type + + +def make_int(width, unsigned): + delay_type_int = getattr(delay_type, unsigned + "int" + str(width)) + + @class_annotate() + class int: + _width = width + _unsigned = unsigned + + @annotate(v=delay_type_int) + def __init__(self, v=0): + self._val = v + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + from .type_mapping import (builtin_to_string, nptype_from_builtin, + numpy_type_to_builtin_type) + + if not isinstance(v, (np.generic, sm.Basic)): + raise ValueError( + "types should have value of numpy type or Symbols, got {} instead".format( + type(v) + ) + ) + + if isinstance(v, sm.Basic): + self._val = v + elif isinstance(v, np.integer): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth() and ( + v >= 0 or v < 0 and not self.is_unsigned() + ): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might overflow or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type(cls._unsigned + "int" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @classmethod + def is_unsigned(cls): + return cls._unsigned == "u" + + @annotate(delay_type_int, other=delay_type_int) + def __add__(self, other): + assert isinstance(other, int) + return int(self.val + other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __sub__(self, other): + assert isinstance(other, int) + return int(self.val - other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __mul__(self, other): + assert isinstance(other, int) + return int(self.val * other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __div__(self, other): + assert isinstance(other, int) + return int(self.val // other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __mod__(self, other): + assert isinstance(other, int) + return int(self.val % other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __lt__(self, other): + return 
bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val != 0 + + @annotate(delay_type_int) + def __int__(self): + return int(self) + + @annotate(delay_type.double) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type.double) + def __log__(self): + return math.log(self.val) + + @annotate(delay_type.double) + def __exp__(self): + return math.exp(self.val) + + @annotate(delay_type_int) + def __neg__(self): + return int(-self.val) + + return int + + +int8 = make_int(8, "") +int16 = make_int(16, "") +int32 = make_int(32, "") +int64 = make_int(64, "") + +uint8 = make_int(8, "u") +uint16 = make_int(16, "u") +uint32 = make_int(32, "u") +uint64 = make_int(64, "u") +uint = uint64 + + +def is_int(t): + return any( + t is i or isinstance(t, i) + for i in [int8, int16, int32, int64, uint8, uint16, uint32, uint64] + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py new file mode 100644 index 00000000..5d3134c2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py @@ -0,0 +1,69 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . 
import type_int
+from .annotate import annotate
+from .get_type_info import get_type_info
+from .type_spec import Type
+from .type_void import void
+
+
+def memoize(f):
+    memo = {}
+
+    def helper(x, init_length=None, dynamic_length=True):
+        # The memo key must include all arguments. Keying on `x` alone would
+        # never hit the cache and would conflate list types that differ only
+        # in init_length or dynamic_length.
+        key = (x, init_length, dynamic_length)
+        if key not in memo:
+            memo[key] = f(x, init_length, dynamic_length)
+        return memo[key]
+
+    return helper
+
+
+class empty_list:
+    @classmethod
+    def __type_info__(cls):
+        return Type("empty_list", python_class=cls)
+
+
+@memoize
+def list(arg, init_length=None, dynamic_length=True):
+    class list:
+        T = [arg, init_length, dynamic_length]
+
+        def __init__(self):
+            self.val = []
+
+        @classmethod
+        def __type_info__(cls):
+            return Type("list", [get_type_info(arg)], python_class=cls)
+
+        @annotate(void, other=T[0])
+        def append(self, other):
+            assert isinstance(other, self.T[0])
+            self.val.append(other)
+
+        @annotate(T[0], index=type_int.int64)
+        def __getitem__(self, index):
+            assert isinstance(index, type_int.int64)
+            return self.val[index.val]
+
+        @annotate(void, index=type_int.int64, newval=T[0])
+        def __setitem__(self, index, newval):
+            assert isinstance(index, type_int.int64)
+            assert isinstance(newval, self.T[0])
+            self.val[index.val] = newval
+
+        @annotate(type_int.int64)
+        def __len__(self):
+            return type_int.int64(len(self.val)) if self.T[1] is None else self.T[1]
+
+    list.__template_name__ = "list[" + arg.__name__ + "]"
+    return list
+
+
+def is_list(t):
+    if t is None:
+        return False
+    return get_type_info(t).name == "list"
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py
new file mode 100644
index 00000000..9dbdcd9c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py
@@ -0,0 +1,449 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
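+#
+# Editor's note: a minimal usage sketch of the mappings defined in this file.
+# It assumes these helpers are re-exported as
+# coremltools.converters.mil.mil.types (as the rest of this patch does);
+# treat it as an illustration, not part of the original file:
+#
+#     import numpy as np
+#     from coremltools.converters.mil.mil import types
+#
+#     assert types.builtin_to_string(types.int32) == "int32"
+#     assert types.string_to_builtin("fp16") is types.fp16
+#     assert types.nptype_from_builtin(types.fp32) == np.float32
+#     assert types.numpy_type_to_builtin_type(np.dtype(np.int8)) is types.int8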
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as _np +import numpy as np +import sympy as sm + +import coremltools.proto.MIL_pb2 as _mil_pm + +from .get_type_info import get_type_info +from .type_bool import bool as types_bool +from .type_bool import is_bool +from .type_complex import complex64 as types_complex64 +from .type_complex import complex128 as types_complex128 +from .type_complex import is_complex +from .type_double import fp16 as types_fp16 +from .type_double import fp32 as types_fp32 +from .type_double import fp64 as types_fp64 +from .type_double import is_float +from .type_int import int8 as types_int8 +from .type_int import int16 as types_int16 +from .type_int import int32 as types_int32 +from .type_int import int64 as types_int64 +from .type_int import is_int +from .type_int import uint8 as types_uint8 +from .type_int import uint16 as types_uint16 +from .type_int import uint32 as types_uint32 +from .type_int import uint64 as types_uint64 +from .type_list import is_list +from .type_str import str as types_str +from .type_unknown import unknown + +_types_TO_NPTYPES = { + types_bool: np.bool_, + types_int8: np.int8, + types_int16: np.int16, + types_int32: np.int32, + types_int64: np.int64, + types_uint8: np.uint8, + types_uint16: np.uint16, + types_uint32: np.uint32, + types_uint64: np.uint64, + types_fp16: np.float16, + types_fp32: np.float32, + types_fp64: np.float64, + types_complex64: np.complex64, + types_complex128: np.complex128, + types_str: np.str_, +} + +_types_TO_STRINGS = { + types_bool: "bool", + types_int8: "int8", + types_int16: "int16", + types_int32: "int32", + types_int64: "int64", + types_uint8: "uint8", + types_uint16: "uint16", + types_uint32: "uint32", + types_uint64: "uint64", + types_fp16: "fp16", + types_fp32: "fp32", + types_fp64: "fp64", + types_complex64: "complex64", + types_complex128: "complex128", + types_str: "string", +} + +builtin_to_proto_types = { + # bool: + types_bool: _mil_pm.BOOL, + + # fp + types_fp16: _mil_pm.FLOAT16, + types_fp32: _mil_pm.FLOAT32, + types_fp64: _mil_pm.FLOAT64, + + # int + types_uint8: _mil_pm.UINT8, + types_int8: _mil_pm.INT8, + + types_uint16: _mil_pm.UINT16, + types_int16: _mil_pm.INT16, + + types_uint32: _mil_pm.UINT32, + types_int32: _mil_pm.INT32, + + types_uint64: _mil_pm.UINT64, + types_int64: _mil_pm.INT64, + + # str + types_str: _mil_pm.STRING, +} + +proto_to_builtin_types = {v: k for k, v in builtin_to_proto_types.items()} + + +def np_dtype_to_py_type(np_dtype): + # Can't use dict, as hash(np.int32) != hash(val.dtype) + if np_dtype in [np.int32, np.int64]: + return int + if np_dtype in [bool, np.bool_]: + return bool + if np_dtype in [np.float32, np.float64]: + return float + if np_dtype in [np.complex64, np.complex128]: + return complex + raise NotImplementedError('{} is not supported'.format(np_dtype)) + + +_STRINGS_TO_types = {v: k for k, v in _types_TO_STRINGS.items()} + + +def string_to_builtin(s): + """ + Given a str, return its corresponding builtin type. + """ + return _STRINGS_TO_types.get(s, None) + + +def builtin_to_string(builtin_type): + """ + Given a builtin type, return its corresponding string representation. + """ + return _types_TO_STRINGS.get(builtin_type, None) + + +def nptype_from_builtin(btype): + """ + Given a builtin type, return its corresponding Numpy dtype. 
+ """ + return _types_TO_NPTYPES.get(btype, None) + + +def promote_types(dtype1, dtype2): + """ + Get the smallest type to which the given scalar types can be cast. + + Args: + dtype1 (builtin): + dtype2 (builtin): + + Returns: + A builtin datatype or None. + + Examples: + >>> promote_types(int32, int64) + builtin('int64') + + >>> promote_types(fp16, fp32) + builtin('fp32') + + >>> promote_types(fp16, int32) + builtin('fp16') + """ + nptype1 = nptype_from_builtin(dtype1) + nptype2 = nptype_from_builtin(dtype2) + # Circumvent the undesirable np type promotion: + # >> np.promote_types(np.float32, np.int32) + # dtype('float64') + if np.issubdtype(nptype1, np.floating) and np.issubdtype(nptype2, np.signedinteger): + nppromoted = nptype1 + elif np.issubdtype(nptype2, np.floating) and np.issubdtype( + nptype1, np.signedinteger + ): + nppromoted = nptype2 + else: + nppromoted = np.promote_types(nptype1, nptype2) + return numpy_type_to_builtin_type(nppromoted) + + +def promote_dtypes(dtypes): + """ + Get the smallest promoted dtype, to which all scalar dtypes (provided through dtypes list argument) can be casted. + Args: + List [dtype (builtin)] + Returns: + A builtin datatype or None. + + Examples: + >>> promote_dtypes([int32, int64, int16]) + builtin('int64') + + >>> promote_dtypes([fp16, fp32, fp64]) + builtin('fp64') + + >>> promote_dtypes([fp16, int32, int64]) + builtin('fp16') + + """ + if not isinstance(dtypes, (list, tuple)) or len(dtypes) < 1: + raise ValueError("dtypes needs to be a list/tuple of at least 1 element") + + # Deduplicate inputs to avoid redundant calculations. + # Without dedup, too large input will cause maximum recursion depth exceeded error. + dtypes = list(set(dtypes)) + + if len(dtypes) == 1: + return dtypes[0] + + return promote_types(dtypes[0], promote_dtypes(dtypes[1:])) + + +def is_primitive(btype): + """ + Is the indicated builtin type a primitive? + """ + return ( + btype is types_bool + or btype is types_str + or is_float(btype) + or is_int(btype) + or is_complex(btype) + ) + + +def is_scalar(btype): + """ + Is the given builtin type a scalar integer, float, boolean or string? + """ + return ( + is_bool(btype) + or is_int(btype) + or is_float(btype) + or is_str(btype) + or is_complex(btype) + ) + + +def is_tensor(tensor_type): + if tensor_type is None: + return False + try: + type_info = get_type_info(tensor_type).name + except TypeError: + return False + return type_info == "tensor" + + +def is_str(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "str" + + +def is_tuple(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "tuple" + + +def is_dict(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "dict" + + +def is_builtin(t): + return is_scalar(t) or is_tensor(t) or is_str(t) or is_tuple(t) + + +# Converts a numpy type to its types equivalent. +# Supports both dtypes and numpy primitive types. +def numpy_type_to_builtin_type(nptype): + # If this is a data type object, use the corresponding scalar data type. + if np.issubclass_(type(nptype), np.dtype): + nptype = nptype.type + + if np.issubclass_(nptype, (bool, np.bool_)): + # numpy as 2 bool types it looks like. what is the difference? 
+ return types_bool + # Because np.uint is a subclass of int, + # we need to first check for np.uint before + # checking for int + elif np.issubclass_(nptype, np.uint8): + return types_uint8 + elif np.issubclass_(nptype, np.int8): + return types_int8 + elif np.issubclass_(nptype, np.uint16): + return types_uint16 + elif np.issubclass_(nptype, np.int16): + return types_int16 + elif np.issubclass_(nptype, np.uint32): + return types_uint32 + elif np.issubclass_(nptype, np.int32): + return types_int32 + elif np.issubclass_(nptype, np.uint64): + return types_uint64 + elif np.issubclass_(nptype, np.int64): + return types_int64 + elif np.issubclass_(nptype, int) or nptype == int: + # Catch all int + return types_int32 + elif np.issubclass_(nptype, np.object_): + # symbolic shape is considered int32 + return types_int32 + elif np.issubclass_(nptype, np.float16): + return types_fp16 + elif ( + np.issubclass_(nptype, (np.float32, np.single)) or nptype == float + ): + return types_fp32 + elif np.issubclass_(nptype, (np.float64, np.double)): + return types_fp64 + elif np.issubclass_(nptype, np.complex64): + return types_complex64 + elif np.issubclass_(nptype, (np.complex128, complex)): + return types_complex128 + elif np.issubclass_(nptype, (str, np.string_, np.str_)): + return types_str + else: + raise TypeError(f"Unsupported numpy type: {nptype}.") + + +# Tries to get the equivalent builtin type of a +# numpy or python type. +def type_to_builtin_type(type): + # Infer from numpy type if it is one + if type.__module__ == np.__name__: + return numpy_type_to_builtin_type(type) + + # Otherwise, try to infer from a few generic python types + if np.issubclass_(type, bool): + return types_bool + elif np.issubclass_(type, int): + return types_int32 + elif np.issubclass_(type, str): + return types_str + elif np.issubclass_(type, float): + return types_fp32 + elif np.issubclass_(type, complex): + return types_complex64 + else: + raise TypeError("Could not determine builtin type for " + str(type)) + + +def numpy_val_to_builtin_val(npval): + if np.isscalar(npval): + ret_type = type_to_builtin_type(type(npval)) + ret = ret_type() + ret.val = npval + return ret, ret_type + else: + builtintype = numpy_type_to_builtin_type(npval.dtype) + from . import tensor as types_tensor + + ret_type = types_tensor(builtintype, npval.shape) + ret = ret_type() + ret.val = npval + return ret, ret_type + + +def is_subtype_tensor(type1, type2): + # requires primitive types match + if type1.get_primitive() != type2.get_primitive(): + return False + + shape1 = type1.get_shape() + shape2 = type2.get_shape() + # Same rank + if len(shape1) != len(shape2): + return False + + for d1, d2 in zip(shape1, shape2): + if d1 == d2: + continue + + # tensor with shape (3, s0) is not a subtype of tensor with shape (3, + # 1), but is a subtype of tensor with shape (3, s1) + d1_is_symbolic = issubclass(type(d1), sm.Basic) + d2_is_symbolic = issubclass(type(d2), sm.Basic) + if d1_is_symbolic and d2_is_symbolic: + continue + if d1_is_symbolic and not d2_is_symbolic: + return False + if not d1_is_symbolic and not d2_is_symbolic and d1 != d2: + return False + return True + + +def is_subtype(type1, type2): + """ + Return True if type1 is a subtype of type2. False otherwise. + """ + if type2 == unknown: + return True # any class is a subclass of unknown (None) type. 
+ if is_list(type2): + return is_list(type1) and is_subtype(type1.T[0], type2.T[0]) + if is_tensor(type1) and is_tensor(type2): + return is_subtype_tensor(type1, type2) + return type1 == type2 + + +def np_val_to_py_type(val): + """Convert numpy val to python primitive equivalent. Ex: + + Given: val = np.array([True, False]) + Returns: [True, False] + + Given: val = np.array(32, dtype=np.int32) + Returns 32 + """ + if not isinstance(val, (_np.ndarray, _np.generic)): + return val + + if val.dtype in [_np.float16, _np.uint8, _np.int8, _np.uint32]: + return val.tobytes() + else: + # val is np.ndarray or np.generic + is_np_scalar = isinstance(val, _np.generic) or val.shape == () + py_type = np_dtype_to_py_type(val.dtype) + return py_type(val) if is_np_scalar else tuple(py_type(v) for v in val.flatten()) + + +def infer_complex_dtype(real_dtype, imag_dtype): + """Infers the complex dtype from real and imaginary part's dtypes.""" + promoted_dtype = promote_types(real_dtype, imag_dtype) + if promoted_dtype == types_fp32: + return types_complex64 + elif promoted_dtype == types_fp64: + return types_complex128 + else: + raise ValueError( + f"Unsupported real/imag dtype ({real_dtype}/{imag_dtype}) to construct a " + f"complex dtype." + ) + + +def infer_fp_dtype_from_complex(complex_dtype): + """Infers the fp dtype of real and imaginary part from the complex dtype.""" + if complex_dtype == types_complex64: + return types_fp32 + elif complex_dtype == types_complex128: + return types_fp64 + else: + raise ValueError(f"Unsupported complex dtype ({complex_dtype}).") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py new file mode 100644 index 00000000..ef46bd89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py @@ -0,0 +1,89 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +class Type: + """ + - Type.name : A string with the name of the object + - Type.tparam : For classes with template parameters, (list, dict), this + contains a list of Type objects of the template parameters + - Type.python_class : The original python class implementing this type. 
+ Two Type objects compare equal + only on name and tparam and not python_class + """ + + __slots__ = ["name", "tparam", "python_class"] + + def __init__(self, name, tparam=None, python_class=None): + if tparam is None: + tparam = [] + assert isinstance(name, str) + assert isinstance(tparam, list) + self.name = name + self.tparam = tparam + self.python_class = python_class + + def __hash__(self): + return hash((self.name, tuple(self.tparam))) + + def __eq__(self, other): + return self.name == other.name and self.tparam == other.tparam + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + ret = self.name + if len(self.tparam) > 0: + ret += "[" + ",".join(repr(x) for x in self.tparam) + "]" + return ret + + def __str__(self): + return self.__repr__() + + def sexp(self): + if len(self.tparam) == 0: + return self.name + else: + ret = [self.name] + ret.append([a.sexp() if hasattr(a, "sexp") else a for a in self.tparam]) + return ret + + +class FunctionType: + """ + - FunctionType.inputs : A list of Type objects defining the types of the input + - FunctionType.output: A Type object defining the type of the output + - FunctionType.python_function : The original python function implementing + this type. Two FunctionType objects compare + equal only on inputs and output and not + python_function + """ + + __slots__ = ["inputs", "output", "python_function"] + + def __init__(self, inputs, output, python_function=None): + assert isinstance(inputs, list) + assert isinstance(output, (FunctionType, Type)) + self.inputs = inputs + self.output = output + self.python_function = python_function + + def __hash__(self): + return hash((tuple(self.inputs), self.output)) + + def __eq__(self, other): + return self.inputs == other.inputs and self.output == other.output + + def __repr__(self): + return "(" + ",".join(repr(x) for x in self.inputs) + ")->" + repr(self.output) + + def __str__(self): + return self.__repr__() + + def return_sexp(self): + return self.output.sexp() + + def inputs_sexp(self): + return [i.sexp() for i in self.inputs] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py new file mode 100644 index 00000000..98ddc177 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, class_annotate, delay_type +from .type_spec import Type + + +@class_annotate() +class str: + def __init__(self, v=""): + self.val = v + + @classmethod + def __type_info__(cls): + return Type("str", python_class=cls) + + @annotate(delay_type.str, other=delay_type.str) + def __add__(self, other): + assert isinstance(other, str) + return str(self.val + other.val) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py new file mode 100644 index 00000000..a56cf3cc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py @@ -0,0 +1,233 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger + +from .get_type_info import get_type_info +from .type_mapping import (builtin_to_string, is_subtype, is_tensor, + nptype_from_builtin, numpy_type_to_builtin_type, + promote_types) +from .type_spec import Type + + +def memoize(f): + memo = {} + + def helper(x, y): + y = tuple(y) + if (x, y,) not in memo: + memo[(x, y,)] = f(x, y,) + return memo[(x, y,)] + + return helper + + +def canonical_shape(shape): + """ Return shape as tuple of int or Symbol. + + This utility function ensures the shape tuple + using a single integer type (to its best effort). + + Args: + shape: tuple(int|long|np.int*|Symbol|SymbolExpr...) + """ + + def try_cast(x): + try: + # In python2.7, long and int are different types. + # If we cast a long int whose value is out of the range of int, + # the result is still long, avoiding overflow: + # + # `type(2<<64) == long # true` + # `type(int(2<<64)) == long # true` + x = int(x) + except TypeError: + # ignore symbolic value (sm.Symbol or sm.Expr) + pass + return x + + return tuple(try_cast(x) for x in shape) + + +@memoize +def tensor(primitive, shape): + shape = canonical_shape(shape) + + class tensor: + T = [primitive, shape] + + def __init__(self): + self._val = [] + + @classmethod + def __type_info__(cls): + return Type( + "tensor", list(shape) + [get_type_info(primitive)], python_class=cls + ) + + @classmethod + def get_primitive(cls): + return primitive + + @classmethod + def get_shape(cls): + return shape + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + if not isinstance(v, np.ndarray): + raise ValueError( + "tensor should have value of type ndarray, got {} instead".format( + type(v) + ) + ) + + v_type = numpy_type_to_builtin_type(v.dtype) + promoted_type = promote_types(v_type, primitive) + if v_type == primitive or v.dtype == np.dtype("O"): + # np.array of symbolic has object type. Don't cast type. + self._val = v + elif promoted_type == primitive: + self._val = v.astype(nptype_from_builtin(primitive)) + else: + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(primitive) + ) + ) + self._val = v.astype(nptype_from_builtin(primitive)) + + tensor.__template_name__ = ( + "tensor[" + primitive.__name__ + "," + ",".join(str(s) for s in shape) + "]" + ) + tensor.__name__ = ( + "tensor[" + ",".join(str(s) for s in shape) + "," + primitive.__name__ + "]" + ) + return tensor + + +def tensor_has_complete_shape(tensor_type): + if not is_tensor(tensor_type): + return True + s = tensor_type.get_shape() + if -1 in s: + return False + elif len(s) == 0: + return False + else: + return True + +def is_tensor_and_is_compatible(tensor_type1, tensor_type2, allow_promotion=False): + """ + Try to find a tensor type compatible with both input types. + + Compatible means that the tensors have the same rank and matching or unspecified + dimensions. For example, (10, -1) is compatible with (-1, 20) with the compatible + shape (10, 20). + + Args: + tensor_type1 (types.tensor) + tensor_type2 (types.tensor) + allow_promotion (bool): If True, allow primitive types to be promoted. + + Returns: + A pair of (bool, type). 
If the given types are not tensor types with
+    (1) compatible shapes and (2) either identical primitive types or
+    allow_promotion=True, the return value is (False, None). Otherwise,
+    returns True and the compatible tensor type. Note that the returned
+    type may not be the same as either input. For example,
+
+    is_tensor_and_is_compatible(
+        tensor[fp32,[10,-1]],
+        tensor[fp32,[-1,20]]) --> tensor[fp32, [10,20]]
+    """
+
+    if not is_tensor(tensor_type1) or not is_tensor(tensor_type2):
+        return False, None
+    shape1 = tensor_type1.get_shape()
+    shape2 = tensor_type2.get_shape()
+
+    primitive_type = tensor_type1.get_primitive()
+    if primitive_type != tensor_type2.get_primitive():
+        promoted_type = promote_types(primitive_type, tensor_type2.get_primitive())
+        if allow_promotion:
+            primitive_type = promoted_type
+        else:
+            return False, promoted_type
+
+    if len(shape1) == 0:
+        return True, tensor_type2
+    if len(shape2) == 0:
+        return True, tensor_type1
+
+    if len(shape1) != len(shape2):
+        return False, None
+
+    most_specific_shape = []
+    for i in range(len(shape1)):
+        if shape1[i] == -1 or issubclass(type(shape1[i]), sm.Basic):
+            most_specific_shape.append(shape2[i])
+        elif shape2[i] == -1 or issubclass(type(shape2[i]), sm.Basic):
+            most_specific_shape.append(shape1[i])
+        elif shape1[i] == shape2[i]:
+            most_specific_shape.append(shape1[i])
+        elif shape1[i] != shape2[i]:
+            return False, None
+
+    return True, tensor(primitive_type, most_specific_shape)
+
+
+def is_tensor_and_is_compatible_general_shape(tensor_type1, tensor_type2):
+    # Returns a pair of (bool, type).
+    # If both inputs are tensors with compatible shapes, the first return
+    # value is True and the second is the most general version of the tensor
+    # type. Note that the result may not be either input type. For example,
+    #
+    # is_tensor_and_is_compatible(tensor[fp32,[10,-1]], tensor[fp32,[-1,20]])
+    # will return True, tensor[fp32, [-1,-1]]
+
+    if not is_tensor(tensor_type1) or not is_tensor(tensor_type2):
+        return False, None
+    shape1 = tensor_type1.get_shape()
+    shape2 = tensor_type2.get_shape()
+
+    if tensor_type1.get_primitive() != tensor_type2.get_primitive():
+        return False, None
+
+    if len(shape1) == 0:
+        return True, tensor_type2
+    if len(shape2) == 0:
+        return True, tensor_type1
+
+    if len(shape1) != len(shape2):
+        return False, None
+
+    most_general_shape = []
+    for i in range(len(shape1)):
+        if shape1[i] == -1 or issubclass(type(shape1[i]), sm.Basic):
+            most_general_shape.append(shape1[i])
+        elif shape2[i] == -1 or issubclass(type(shape2[i]), sm.Basic):
+            most_general_shape.append(shape2[i])
+        elif shape1[i] == shape2[i]:
+            most_general_shape.append(shape1[i])
+        elif shape1[i] != shape2[i]:
+            return False, None
+
+    return True, tensor(tensor_type1.get_primitive(), most_general_shape)
+
+
+def is_compatible_type(type1, type2):
+    """
+    Return True if type1 and type2 are compatible, False otherwise.
+    """
+    if not is_subtype(type1, type2):
+        is_comp, _ = is_tensor_and_is_compatible(type1, type2)
+        return is_comp
+    return True
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py
new file mode 100644
index 00000000..a2a8fe7d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
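+#
+# Editor's note: a small sketch of the memoized `tuple` factory defined
+# below, assuming re-export through coremltools.converters.mil.mil.types;
+# an illustration only:
+#
+#     from coremltools.converters.mil.mil import types
+#
+#     tt = types.tuple((types.int32, types.fp32))
+#     assert types.is_tuple(tt)
+#     assert tt.T == (types.int32, types.fp32)
+#     # The factory is memoized, so lists and tuples of the same element
+#     # types resolve to the same class.
+#     assert tt is types.tuple([types.int32, types.fp32])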
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import type_int, type_unknown +from .annotate import annotate +from .get_type_info import get_type_info +from .type_spec import Type + +_global_tuple = tuple + + +def memoize(f): + memo = {} + + def helper(x): + x = _global_tuple(x) + if x not in memo: + memo[x] = f(x) + return memo[x] + + return helper + + +class empty_list: + @classmethod + def __type_info__(cls): + return Type("empty_list", python_class=cls) + + +@memoize +def tuple(args): + args = _global_tuple(i if i is not None else type_unknown.unknown for i in args) + + class tuple: + T = args + + def __init__(self): + self.val = [arg() for arg in args] + + @classmethod + def __type_info__(cls): + return Type("tuple", [get_type_info(arg) for arg in args], python_class=cls) + + @annotate(type_int.int64) + def __len__(self): + return len(args) + + tuple.__template_name__ = ( + "tuple[" + ",".join([get_type_info(arg).name for arg in args]) + "]" + ) + return tuple diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py new file mode 100644 index 00000000..af940229 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class unknown: + """ + unknown is basically Any type. + """ + + @classmethod + def __type_info__(cls): + return Type("unknown", python_class=cls) + + def __init__(self, val=None): + self.val = val diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py new file mode 100644 index 00000000..7abb9008 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class void: + @classmethod + def __type_info__(cls): + return Type("void", python_class=cls) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py new file mode 100644 index 00000000..8af32bad --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py @@ -0,0 +1,397 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from typing import Optional + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import builtin_to_string +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +class Var: + """ + Var represents the outputs of an Operation. 
Most Vars are derived from an + Operation (including const), and all Vars must have `sym_type`. + + Example Usage: + + from coremltools.converters.mil.mil import ( + Builder as mb, + Function, + types + ) + + func_inputs = {"a": mb.placeholder(shape=(1,2)), + "b": mb.placeholder(shape=(1,2)) } + with Function(func_inputs) as ssa_func: + a, b = ssa_func.inputs["a"], ssa_func.inputs["b"] + res = mb.add(x=a, y=b) # res is Var + assert types.is_tensor(res.sym_type) + assert res.rank == 2 + assert res.dtype == types.float # since a, b are by default float + + # value is not available at compile time in this case. If + # materializable, res.val would be a numpy / primitive value + assert res.val is None + + + Comment: Except InternalVar and Vars created in while_loop and by + placeholder, all Var should only be constructed by Operation to represent + outputs. + + Comment: Var hides the details of sym_type vs sym_val vs materialized + value, which was represented by 2 objects prior to refactoring. + + + # Properties: + + name: (str) + name in MIL proto NamedValueType. Name is assigned by the parent + Operation. + + sym_type [_sym_type]: (builtin type class) + All Var must have a (possibly symbolic) type, usually derived from + type inference of upstream ops or from default values in _Input. + + sym_val [_sym_val]: (builtin type instance) + Possibly symbolic value. + + val [_sym_val]: (np.ndarray or python primitive scalar) + Numpy (scalar / tensor) value. `val` is not None iff `sym_val` is + not None and does not contain symbols. Read-only. + + op [_op]: (Operation) + The Operation this Var is derived from. May not be None except + for InternalVar. Read-only. + + op_output_idx: (int) + Idx of the output from Operation corresponding to _Input. May be + None. + + child_ops [_child_ops]: list[Operation] + Ops that take this Var as an input. + + nonreplaceable_vars_upstream: set[Var] + Set that consists of nonreplaceable vars upstream + """ + + __slots__ = [ + "name", + "_sym_type", + "_sym_val", + "_op", + "op_output_idx", + "_child_ops", + "consuming_blocks", + "_nonreplaceable_vars_upstream", + ] + + def __init__( + self, + name, + sym_type, + sym_val=None, + op=None, + op_output_idx=None, + ): + """ + sym_type (builtin type) + sym_val (builtin value) + op (Operation) + op_output_idx (int) + """ + self.name = name + self._sym_type = sym_type + self._sym_val = sym_val + self._op = op + self.op_output_idx = op_output_idx + # An op can appear twice if it consumes a var twice (e.g., + # add(%1, %1), while_loop(loop_vars=(%1, %1)). + self._child_ops = list() + + # A variable may not be consumed by any op (i.e. len(self._child_ops) + # == 0) but is still used as block output. A var can be output of + # multiple blocks (e.g., both current block and nested blocks) + self.consuming_blocks = list() + + # replaceability + self._nonreplaceable_vars_upstream = set() + self._set_nonreplaceable_vars_upstream() + + @property + def nonreplaceable_vars_upstream(self): + return self._nonreplaceable_vars_upstream + + @nonreplaceable_vars_upstream.setter + def nonreplaceable_vars_upstream(self, val): + assert isinstance(val, set) + self._nonreplaceable_vars_upstream = val + + @staticmethod + def _is_nonreplaceable_var(var): + op = var.op + if op is None: + return False + return op.op_type.startswith("constexpr_") + + def _set_nonreplaceable_vars_upstream(self): + """ + A utility function to set the value of the "nonreplaceable_vars_upstream" property. 
+ If self is a non-replaceable var, then "nonreplaceable_vars_upstream" is a single element set, containing self. + Otherwise, it is a union of the "nonreplaceable_vars_upstream" sets of all the input vars of its parent ops. + """ + op = self.op + if op is None: + return + if Var._is_nonreplaceable_var(self): + self.nonreplaceable_vars_upstream = set([self]) + else: + flattened_inputs = op.get_flattened_inputs() + inputs_nonreplaceable_vars_upstream = [p.nonreplaceable_vars_upstream for p in flattened_inputs] + if len(inputs_nonreplaceable_vars_upstream) > 0: + self.nonreplaceable_vars_upstream = set.union(*inputs_nonreplaceable_vars_upstream) + + def _reset_nonreplaceable_vars_upstream(self): + self.nonreplaceable_vars_upstream = set() + + def can_be_replaced_by_var(self, new_var): + """ + A var can be replaced by a new var only if the new var's nonreplaceable_vars_upstream is the super set of the old one + """ + return self.nonreplaceable_vars_upstream.issubset(new_var.nonreplaceable_vars_upstream) + + def can_be_folded_to_const(self) -> bool: + """ + When translating frontend ops to PyMIL ops, some vars could be directly folded into a const. + For example, in PyTorch's `to()` op, the input could be converted by `cast` op, or directly + be folded to const. + + We only fold the var to a const when its value is known AND it doesn't have any + non-replaceable vars in the upstream. + """ + return self.val is not None and not self.nonreplaceable_vars_upstream + + @property + def sym_type(self): + return self._sym_type + + @property + def shape(self): + if types.is_tensor(self._sym_type): + return self._sym_type.get_shape() + return tuple() + + @property + def rank(self): + return len(self.shape) + + @property + def dtype(self): + if types.is_tensor(self._sym_type): + return self._sym_type.get_primitive() + return self._sym_type + + @property + def sym_val(self): + if self._sym_val is None: + return None + return self._sym_val.val + + @property + def val(self): + if self._sym_val is None or any_symbolic(self._sym_val.val): + return None + return self._sym_val.val + + @property + def op(self): + return self._op + + @property + def child_ops(self): + return self._child_ops + + def add_child_op(self, new_op): + self._child_ops.append(new_op) + + def remove_child_op(self, target_op, no_check=False): + if target_op not in self._child_ops: + if no_check: + return # no-op + msg = "Op {} does not takes Var {} as input" + raise ValueError(msg.format(target_op.name, self.name)) + self._child_ops.remove(target_op) + + def shape_str(self): + annotation = "" + if self.val is not None: + annotation = "*" + elif self.sym_val is not None: + annotation = "^" + shape_str = str(self.shape)[:-1] # trim the ")" + if self.rank > 1: + shape_str += ", " + if types.builtin_to_string(self.dtype) is None: + shape_str += ")" + annotation + else: + shape_str += types.builtin_to_string(self.dtype) + ")" + annotation + return shape_str + + def type_str(self): + is_tensor = types.is_tensor(self.sym_type) + is_list = types.is_list(self.sym_type) + if is_tensor: + type_string = "(Tensor)" + elif is_list: + type_string = "(List)" + else: + type_string = "(Scalar)" + return type_string + + def set_name(self, name): + self.name = name + + def is_tensor_or_scalar_of(self, dtype: str): + return (types.is_tensor(self.sym_type) or types.is_scalar(self.sym_type)) and builtin_to_string(self.dtype) == dtype + + def __str__(self): + return "%" + self.name + ": " + self.shape_str() + self.type_str() + + +class ListVar(Var): + __slots__ = 
["_elem_type", "init_length", "dynamic_length"] + + def __init__( + self, name, elem_type=None, init_length=None, dynamic_length=True, sym_val=None, **kwargs + ): + """ + elem_type (builtin.tensor) + + init_length (int): initial length + + dynamic_length (bool): True to allow list to grow. False uses + init_length as the fixed size (init_length is runtime length). + + sym_val: value of the list, if available + """ + super().__init__( + name=name, + sym_type=types.list(elem_type, init_length, dynamic_length), + sym_val=sym_val, + **kwargs + ) + self._elem_type = elem_type + self.init_length = init_length + self.dynamic_length = dynamic_length + + @property + def shape(self): + raise ValueError("shape not applicable to ListVar '{}'.".format(self.name)) + + @property + def rank(self): + raise ValueError("rank not applicable to ListVar '{}'".format(self.name)) + + @property + def dtype(self): + raise ValueError("dtype not applicable to ListVar '{}'".format(self.name)) + + @property + def elem_type(self): + return self._elem_type + + @property + def elem_shape(self): + if self._elem_type == types.unknown: + return None + elif types.is_tensor(self._elem_type): + return self._elem_type.get_shape() + return () + + def shape_str(self): + length = "?" + if not self.dynamic_length: + length = str(self.init_length) + if self._elem_type == types.unknown: + return "List[{}, unknown]".format(length) + if self._elem_type == types.str: + return "List[{}, str]".format(length) + elif self._elem_type == types.int64: + return "List[{}, int]".format(length) + else: + elem_shape = self._elem_type.get_shape() + elem_dtype = self._elem_type.get_primitive() + shape_str = str(elem_shape)[:-1] # trim the ")" + if len(elem_shape) > 1: + shape_str += ", " + shape_str += types.builtin_to_string(elem_dtype) + ")" + return "List[{}, {}]".format(length, shape_str) + + +class InternalVar(Var): + """ + Internal Var (with '__' prefix and won't appear in SSA) will ALWAYS have + `sym_val == builtin.unknown`. InternalVar are constructed by builder only. + + Comment: Internal Var can be used to represent diverse types such as enum + type `DataType.FLOAT32`. + """ + + def __init__(self, val, name=None): + super().__init__( + name=name, sym_type=types.unknown, sym_val=types.unknown(val) + ) + + +class ComplexVar(Var): + """Var to handle complex data.""" + + __slots__ = ["_real", "_imag"] + + def __init__( + self, + name, + sym_type, + sym_val=None, + op=None, + op_output_idx=None, + real: Optional[Var] = None, + imag: Optional[Var] = None, + ): + super().__init__( + name=name, + sym_type=sym_type, + sym_val=sym_val, + op=op, + op_output_idx=op_output_idx, + ) + + # Handle complex data types. + self._real: Optional[Var] = real + self._imag: Optional[Var] = imag + + @property + def real(self): + return self._real + + @property + def imag(self): + return self._imag + + @real.setter + def real(self, real): + if not types.is_complex(self.dtype): + raise ValueError( + f"Only complex number can set `real`. This var is {self.dtype}." + ) + self._real = real + + @imag.setter + def imag(self, imag): + if not types.is_complex(self.dtype): + raise ValueError( + f"Only complex number can set `imag`. This var is {self.dtype}." 
+ ) + self._imag = imag diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py new file mode 100644 index 00000000..4471f61b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py @@ -0,0 +1,206 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..var import Var + + +def _get_input_vars(op, only_nonconst_vars=False): + """ + Return type : List[Var] + """ + input_vars = [] + for name, val in op.inputs.items(): + if isinstance(val, Var): + if only_nonconst_vars: + if val.op and val.op.op_type == "const": + continue + input_vars.append(val) + elif isinstance(val, (list, tuple)): + for var in val: + if not isinstance(var, Var): + msg = "unrecognized input type of op='{}', input='{}'" + raise ValueError(msg.format(op.name, name)) + if only_nonconst_vars: + if var.op and var.op.op_type == "const": + continue + input_vars.append(var) + else: + msg = "unrecognized input type of op='{}', input='{}'" + raise ValueError(msg.format(op.name, name)) + return input_vars + + +class DotVisitor: + """ + Generates a dot description of a ssa block + """ + + def __init__(self, annotation=True): + self.result = [] + self.visited_memo = {} + self.highlights = {} + self.alternate_labeller = lambda o: o.op_type + ": " + o.name + self.annotation = annotation + + def labeller(self, labeller): + self.alternate_labeller = labeller + return self + + def highlight_nodes(self, nodeset, color="yellow"): + for i in nodeset: + self.highlights[i] = color + return self + + def visit(self, block, op, nodename_prefix=""): + """ + Append edges connecting parents of op to the op + """ + + if op in self.visited_memo: + return self + + label = self.alternate_labeller(op) + self.visited_memo[op] = 1 + + if op.name in self.highlights and op.name not in [ + o.name for o in block.outputs + ]: + self.result.append( + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[op.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + for input_var in _get_input_vars(op, only_nonconst_vars=True): + if input_var.op is not None: + input_name = "op: " + input_var.op.name + else: + input_name = input_var.name + + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + ) + self.result.append(edge) + if input_var.op is not None: + self.visit(block, input_var.op, nodename_prefix) + else: + 
self.visit_input_var(input_var, nodename_prefix) + + return self + + def visit_input_var(self, var, nodename_prefix=""): + label = "input: " + var.name + + if var.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[var.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + def visit_output_vars(self, block, var, nodename_prefix=""): + + label = "output: " + var.name + if var.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[var.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + parent_op = var.op + edge = ( + '"' + + nodename_prefix + + "op: " + + parent_op.name + + '"' + + " -> " + + '"' + + nodename_prefix + + var.name + + '"' + ) + self.result.append(edge) + self.visit(block, parent_op, nodename_prefix=nodename_prefix) + + def visit_all(self, block, nodename_prefix=""): + for out_var in block.outputs: + self.visit_output_vars(block, out_var, nodename_prefix=nodename_prefix) + for op in block.operations: + if op.op_type != "const": + self.visit(block, op, nodename_prefix=nodename_prefix) + return self + + def get_result(self, graphtype="digraph", graph_name="g"): + return ( + graphtype + + " " + + graph_name + + " {\n\t" + + "\n\t".join(str(i) for i in self.result) + + ';\n\tlabel="' + + graph_name[8:] + + '";\n\tfontsize=96;\n}' + ) + + def __str__(self): + return self.get_result() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py new file mode 100644 index 00000000..9920b758 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py @@ -0,0 +1,146 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
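+#
+# Editor's note: the DotVisitor in dot_visitor.py above emits Graphviz dot
+# text for a MIL block. A hedged sketch, assuming `prog` is a MIL Program
+# (e.g. from ct.convert(..., convert_to="milinternal")); illustration only:
+#
+#     from coremltools.converters.mil.mil.visitors.dot_visitor import DotVisitor
+#
+#     block = prog.functions["main"]
+#     dot_text = DotVisitor().visit_all(block).get_result()
+#     print(dot_text)  # render offline, e.g. with the `dot` CLI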
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +import PIL.Image +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND + +if _HAS_TORCH: + import torch + torch.manual_seed(10) + + class TestConvModule(torch.nn.Module): + def __init__(self, in_channels=3, out_channels=10, kernel_size=3): + super(TestConvModule, self).__init__() + self.conv = torch.nn.Conv2d(in_channels, out_channels, + kernel_size) + + def forward(self, x): + return self.conv(x) + + +def _numpy_array_to_pil_image(x): + """ + convert x of shape (1, 3, H, W) to PIL image + """ + assert len(x.shape) == 4 + assert list(x.shape[:2]) == [1, 3] + x = x[0, :, :, :] # (3, H, W) + x = _np.transpose(x, [1, 2, 0]) # (H, W, 3) + x = x.astype(_np.uint8) + return PIL.Image.fromarray(x) + + +def _compute_snr(arr1, arr2): + arr1 = arr1.flatten() + arr2 = arr2.flatten() + noise = arr1 - arr2 + noise_var = _np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = _np.sum(arr2 ** 2) / len(arr2) + max_signal_energy = _np.amax(arr2 ** 2) + snr = 10 * _np.log10(signal_energy / noise_var) + psnr = 10 * _np.log10(max_signal_energy / noise_var) + return snr, psnr + +def _assert_torch_coreml_output_shapes(coreml_model, spec, torch_model, torch_example_input, is_image_input=False): + torch_out = torch_model(torch_example_input) + input_name = spec.description.input[0].name + output_name = spec.description.output[0].name + input_dict = {} + if is_image_input: + input_dict[input_name] = _numpy_array_to_pil_image(torch_example_input.numpy()) + else: + input_dict[input_name] = torch_example_input.numpy() + coreml_out = coreml_model.predict(input_dict)[output_name] + assert torch_out.shape == coreml_out.shape + snr, psnr = _compute_snr(torch_out.cpu().detach().numpy(), coreml_out) + _np.testing.assert_array_less(20, snr) + _np.testing.assert_array_less(30, psnr) + + +@pytest.mark.skipif(not _HAS_TORCH or not ct.utils._is_macos(), reason=MSG_TORCH_NOT_FOUND) +class TestFlexibleInputShapes: + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_multiarray_input_rangedim(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 100 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45), ct.RangeDim(25, 100, default=45))) + model = ct.convert(traced_model, + inputs=[ct.TensorType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert list(spec.description.input[0].type.multiArrayType.shape) == [1, 3, 45, 45] + assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].lowerBound == 25 + assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].upperBound == 100 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input) + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_multiarray_input_enumerated(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 100 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.EnumeratedShapes(shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]], + default=[1, 3, 67, 67]) + model = 
ct.convert(traced_model, + inputs=[ct.TensorType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert list(spec.description.input[0].type.multiArrayType.shape) == [1, 3, 67, 67] + assert list(spec.description.input[0].type.multiArrayType.enumeratedShapes.shapes[0].shape) == [1, 3, 67, 67] + assert len(spec.description.input[0].type.multiArrayType.enumeratedShapes.shapes) == 3 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input) + + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="Image input with RangeDim works correctly on macOS12+") + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_image_input_rangedim(self, convert_to): + example_input = torch.rand(1, 3, 50, 50) * 255 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45), ct.RangeDim(25, 100, default=45))) + model = ct.convert(traced_model, + inputs=[ct.ImageType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert spec.description.input[0].type.imageType.width == 45 + assert spec.description.input[0].type.imageType.height == 45 + assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.lowerBound == 25 + assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.upperBound == 100 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input, is_image_input=True) + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_image_input_enumerated(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 255 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.EnumeratedShapes(shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]], + default=[1, 3, 67, 67]) + model = ct.convert(traced_model, + inputs=[ct.ImageType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert spec.description.input[0].type.imageType.width == 67 + assert spec.description.input[0].type.imageType.height == 67 + assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 3 + assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].width == 25 + assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].height == 25 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input, is_image_input=True) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py new file mode 100644 index 00000000..00c07487 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py @@ -0,0 +1,54 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
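+#
+# Editor's note: this module configures test backends and compute units from
+# environment variables. A hypothetical configuration, set before pytest
+# imports this module; illustration only:
+#
+#     import os
+#     os.environ["PYMIL_TEST_TARGETS"] = "mlprogram,neuralnetwork"
+#     os.environ["COMPUTE_UNITS"] = "cpu_only"  # matched case-insensitively
+#     os.environ["INCLUDE_MIL_FP32_UNIT_TESTS"] = "1"  # also test mlprogram fp32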
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import os + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import (_HAS_TF_1, _HAS_TF_2, _HAS_TORCH) + + +# Setting up backend / precision +backends = [] +if 'PYMIL_TEST_TARGETS' in os.environ: + targets = os.environ['PYMIL_TEST_TARGETS'].split(',') + for i in range(len(targets)): + targets[i] = targets[i].strip() + + if 'mlprogram' in targets: + backends.append(('mlprogram', 'fp16')) + if os.getenv('INCLUDE_MIL_FP32_UNIT_TESTS') == '1': + backends.append(('mlprogram', 'fp32')) + if 'neuralnetwork' in targets: + backends.append(('neuralnetwork', 'fp32')) + + if not backends: + raise ValueError("PYMIL_TEST_TARGETS can be set to one or more of: neuralnetwork, mlprogram") +else: + backends = [('mlprogram', "fp16"), ('neuralnetwork', "fp32")] + if os.getenv('INCLUDE_MIL_FP32_UNIT_TESTS') == '1': + backends.append(('mlprogram', 'fp32')) + +# Setting up compute unit +compute_units = [] +if 'COMPUTE_UNITS' in os.environ: + for i, cur_str_val in enumerate(os.environ['COMPUTE_UNITS'].split(',')): + cur_str_val = cur_str_val.strip().upper() + if cur_str_val not in ct.ComputeUnit.__members__: + raise ValueError("Compute unit \"{}\" not supported in coremltools.".format(cur_str_val)) + compute_units.append(ct.ComputeUnit[cur_str_val]) +else: + compute_units = [ct.ComputeUnit.CPU_ONLY] + +np.random.seed(1984) + +if _HAS_TF_1: + tf = pytest.importorskip("tensorflow") + tf.compat.v1.set_random_seed(1234) + +if _HAS_TF_2: + tf = pytest.importorskip("tensorflow") + tf.random.set_seed(1234) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py new file mode 100644 index 00000000..781d2fb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py @@ -0,0 +1,545 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import os +import re +from functools import partial +from pathlib import Path + +import numpy as np +from PIL import Image + +import coremltools as ct +import coremltools.models.utils as coremltoolsutils +from coremltools._deps import _IS_MACOS +from coremltools.converters.mil.mil import Function, Program +from coremltools.converters.mil.mil.passes.defs.quantization import AbstractQuantizationPass +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.proto import FeatureTypes_pb2 as ft + +np.random.seed(10) + +DTYPE_TO_FEATURE_TYPE_MAP = {"int32": ft.ArrayFeatureType.INT32, + "fp32": ft.ArrayFeatureType.FLOAT32, + "fp16": ft.ArrayFeatureType.FLOAT16, + } + +einsum_equations = [ + # hardcoded cases + "abcd,adce->abce", + "abc,cbd->abd", + "bnqd,bnkd->bnqk", + "abc,cd->abd", + "abc,cde->abde", + "btnh,bfnh->bnft", + "bnft,btnh->bfnh", + "abcd,cde->abe", + "a b c d , a d c e -> a b c e", + # with-diagonal generic cases + "jiii,ijjk->jk", + "iji,ji->j", + "jii,ijk->jk", + "ijij,iij->ij", + # no-diagonal generic cases + "i,j->ij", # outer product + "a,a->a", # batched outer product + "ija,la->ijal", # batched outer product + "i,i->", # inner product + "ia,ia->a", # batched inner product + "ai,ia->a", # batched inner product + "abi,abi->ab", # batched inner product + "iab,iab->ab", # batched inner product + "abi,bai->ba", # batched inner product + "ij,j->i", # matrix-vector multiplication + "i,ij->j", # vector-matrix multiplication + "ai,ija->aj", # batched vector-matrix multiplication + "aibj,bi->jba", # batched matrix-vector multiplication + "ij,jk->ik", # matrix multiplication + "aij,ajk->iak", # batched matrix multiplication + "abij,abjk->abik", # batched matrix multiplication + "aijb,bajk->abik", # batched matrix multiplication + "ij,ij->", # double-inner product + "ij,ji->", # double-inner product + "aij,aij->a", # batched double-inner product + "ija,ija->a", # batched double-inner product + "ija,jia->a", # batched double-inner product + "aijb,ajbi->ab", # batched double-inner product + "aibj,cdij->cadb", # batched double-inner product + "ijk,lmj->iklm", # 3rd-order tensor contraction + "ijak,akl->aijl", # batched 3rd-order tensor and matrix contraction + # Generic with sum + "ij,j->ij", + "ij,kjl->j", + "iijj,j->j", +] + +def _serialize_current_pytest(mlmodel): + class_name = os.environ.get('PYTEST_CURRENT_TEST').split("::")[1].strip() + test_name = "::".join(os.environ.get('PYTEST_CURRENT_TEST').split("::")[2:]).split("(call)")[0].strip() + mlpackage_path = "/tmp/pytest_failures/{}/{}/model.mlpackage".format(class_name, test_name) + Path(mlpackage_path).mkdir(parents=True, exist_ok=True) + mlmodel.save(mlpackage_path) + +def assert_op_count_match(program, expect, op=None, verbose=False): + """ + Assert number of ops match expected number. If op is not specified, + Count total number of ops and match with expect. + """ + if verbose: + print(program) + + count = 0 + for _, func in program.functions.items(): + for o in func.operations: + if not op: + count += 1 + elif o.op_type.lower() == op.lower(): + count += 1 + np.testing.assert_equal(count, expect) + + +def assert_model_is_valid( + program, inputs, backend=("neuralnetwork", "fp32"), verbose=True, expected_output_shapes=None +): + """ + Assert Core ML model is valid. + + Inputs: + + - input: str -> shape tuple. 
All program input names need to appear in str. + shape tuple can only contain positive integers. + """ + # Avoid circular import + from coremltools.converters.mil.testing_reqs import ct + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + mlmodel = ct_convert(program, source="milinternal", convert_to=backend, + compute_units=ct.ComputeUnit.CPU_ONLY) + assert mlmodel is not None + + if verbose: + from coremltools.models.neural_network.printer import print_network_spec + print_network_spec(mlmodel.get_spec(), style="coding") + + if _IS_MACOS and (not mlmodel.is_package or coremltoolsutils._macos_version() >= (12, 0)): + prediction = mlmodel.predict(input_dict) + assert prediction is not None + if expected_output_shapes is not None: + for out_name, out_shape in expected_output_shapes.items(): + assert out_name in prediction + assert out_shape == prediction[out_name].shape, \ + "{} != {}".format(out_shape, prediction[out_name].shape) + + +def assert_same_output_names(prog1, prog2, func_name="main"): + prog1_outputs = [o.name for o in prog1[func_name].outputs] + prog2_outputs = [o.name for o in prog2[func_name].outputs] + assert prog1_outputs == prog2_outputs + + +def assert_same_output_shapes(prog1, prog2, func_name="main"): + prog1_output_shapes = [o.shape for o in prog1[func_name].outputs] + prog2_output_shapes = [o.shape for o in prog2[func_name].outputs] + assert prog1_output_shapes == prog2_output_shapes + +def get_op_names_in_program(prog, func_name="main", skip_const_ops=True): + """ + Return the operations names in prog[func_name], + in the same order as they are stored (topological) + """ + op_names_in_program = [] + for op in prog[func_name].operations: + if skip_const_ops: + if op.op_type == "const": + continue + op_names_in_program.append(op.name) + return op_names_in_program + +def get_op_types_in_program(prog, func_name="main", skip_const_ops=True): + """ + Return the operation types in prog[func_name], + in the same order as they are stored (topological) + """ + op_types_in_program = [] + for op in prog[func_name].operations: + if skip_const_ops: + if op.op_type == "const": + continue + op_types_in_program.append(op.op_type) + return op_types_in_program + + +def random_gen( + shape, + rand_min=0.0, + rand_max=1.0, + eps_from_int=0.0, + allow_duplicate=True, + dtype=np.float32, +): + """ + This helper function generates a random array of shape `shape` + The range of generated numbers will be between (rand_min, rand_max]. + The value of generated numbers will be at least `eps_from_int` apart from integers. + If allow_duplicate is set to false, it is guaranteed that value generated are all different. + Default data type is np.float32. 
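+
+    Illustrative example (shapes and bounds assumed for demonstration):
+
+        x = random_gen((2, 3), rand_min=-1.0, rand_max=1.0, eps_from_int=0.05)
+        assert x.shape == (2, 3)
+        assert ((x > -1.0) & (x < 1.0)).all()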
+ """ + elem = np.prod(shape).astype(np.int32) + ret = [] + for _ in range(elem): + while True: + r = dtype((rand_max - rand_min) * np.random.random() + rand_min) + if not allow_duplicate and r in ret: + continue + if np.issubdtype(dtype, np.integer) or np.fabs(np.round(r) - r) > eps_from_int: + ret.append(r) + break + ret = np.array(ret).reshape(shape) + return ret.astype(dtype) + + +def ssa_fn(func): + """ + Deprecated: use @mb.program() + """ + + def wrapper(*args, **kwargs): + prog = Program() + with Function({}) as ssa_func: + func(*args, **kwargs) + + return wrapper + + +def to_tuple(v): + if not isinstance(v, (list, tuple)): + return tuple([v]) + return tuple(v) + + +def run_core_ml_predict(mlmodel, input_key_values): + for k, v in input_key_values.items(): + if isinstance(v, Image.Image): + continue + elif not np.isscalar(v) and not v.shape == (): + input_key_values[k] = v.astype(np.float32) + else: + input_key_values[k] = np.array([v], dtype=np.float32) + return mlmodel.predict(input_key_values) + +def _get_coreml_out_from_dict(out_dict, out_name): + if out_name in out_dict: + return out_dict[out_name] + elif re.sub("[^a-zA-Z0-9_]", "_", out_name) in out_dict: + return out_dict[re.sub("[^a-zA-Z0-9_]", "_", out_name)] + else: + raise KeyError("{} output not found in Core ML outputs".format(out_name)) + +def compare_backend( + mlmodel, + input_key_values, + expected_outputs, + dtype = "fp32", + atol=1e-04, + rtol=1e-05, + also_compare_shapes=True, +): + """ + Inputs: + - mlmodel: MLModel. + + - input_key_values: str -> np.array. Keys must match those in + input_placeholders. + + - expected_outputs: dict[str, np.array]. Required iff + frontend_only is False + """ + if _IS_MACOS and (not mlmodel.is_package or coremltoolsutils._macos_version() >= (12, 0)): + + if dtype not in ["fp32", "fp16"]: + raise ValueError("Unsupported dtype config") + + pred = run_core_ml_predict(mlmodel, input_key_values) + if also_compare_shapes: + compare_shapes( + mlmodel, + input_key_values, + expected_outputs, + pred=pred, + ) + if mlmodel.compute_unit != ct.ComputeUnit.CPU_ONLY or (dtype == "fp16"): + atol = max(atol * 100.0, 5e-1) + rtol = max(rtol * 100.0, 5e-2) + for o, expected in expected_outputs.items(): + coreml_out = _get_coreml_out_from_dict(pred, o) + + if isinstance(coreml_out, np.ndarray): + np.testing.assert_allclose(coreml_out, expected, atol=atol, rtol=rtol) + elif isinstance(coreml_out, dict): + for k, v in coreml_out.items(): + assert k in expected + assert expected[k] == v + else: + assert coreml_out == expected + + return pred + return None + + +def compare_shapes( + mlmodel, input_key_values, expected_outputs, pred=None +): + """ + Inputs: + - mlmodel: MLModel. + + - input_key_values: str -> np.array or PIL.Image. Keys must match those in + input_placeholders. + + - expected_outputs: dict[str, np.array]. + + - pred: Prediction to use, if it has already been computed. + """ + + if _IS_MACOS: + if not pred: + pred = run_core_ml_predict(mlmodel, input_key_values) + for o, expected in expected_outputs.items(): + coreml_out = _get_coreml_out_from_dict(pred, o) + + # output is dictionary (for classifier) + if isinstance(coreml_out, dict) and isinstance(expected, dict): + assert len(coreml_out) == len(expected) + continue + + # output is numpy objects + np_types = (np.generic, np.ndarray) + if isinstance(coreml_out, np_types) and isinstance(expected, np_types): + msg = "Output: {}. 
expected shape {} != actual shape {}".format( + o, expected.shape, coreml_out.shape + ) + # Core ML does not support scalar as output + # remove this special case when support is added + if expected.shape == () and coreml_out.shape == (1,): + continue + assert coreml_out.shape == expected.shape, msg + continue + + # output is other types (for classifier) + assert type(coreml_out) == type(expected) + +def ct_convert( + program, + source="auto", + inputs=None, + outputs=None, + classifier_config=None, + minimum_deployment_target=None, + convert_to=None, + compute_precision=None, + skip_model_load=False, + converter=ct.convert, + **kwargs, +): + + """ + Overloaded ct.convert function with the only difference being in the argument `convert_to` + which in this overloaded call accepts a tuple of (target, dtype). + Ex: ("neuralnetwork", "fp32"), ("mlprogram", "fp16") + """ + + if isinstance(converter, partial): + raise ValueError("Partial function is not supported for function-parameter 'converter' since its keywords arguments could get overriden.") + + target, dtype = convert_to + + if dtype not in ["fp32", "fp16"]: + raise ValueError("Unsupported dtype config") + + compute_precision = ct.precision.FLOAT16 if dtype == "fp16" else ct.precision.FLOAT32 + if target == "neuralnetwork": + compute_precision = None + + mlmodel = converter( + program, + source=source, + inputs=inputs, + outputs=outputs, + classifier_config=classifier_config, + minimum_deployment_target=minimum_deployment_target, + convert_to=target, + compute_precision=compute_precision, + skip_model_load=skip_model_load, + **kwargs + ) + + if os.environ.get("DEBUG_SAVE_MLMODEL", "0") == "1": + from coremltools.converters.mil.testing_utils import _serialize_current_pytest + _serialize_current_pytest(mlmodel) + + return mlmodel + +def get_core_ml_prediction( + build, input_placeholders, input_values, compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=("neuralnetwork", "fp32")): + """ + Return predictions of the given model. 
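+
+    Illustrative usage; `mb` is assumed to be the MIL builder
+    (`from coremltools.converters.mil.mil import Builder as mb`) and the
+    placeholder names are hypothetical:
+
+        input_placeholders = {"x": mb.placeholder(shape=(1, 2))}
+        input_values = {"x": np.random.rand(1, 2)}
+
+        def build(x):
+            return mb.relu(x=x)
+
+        prediction = get_core_ml_prediction(
+            build, input_placeholders, input_values, backend=("neuralnetwork", "fp32")
+        )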
+ """ + program = Program() + with Function(input_placeholders) as ssa_func: + output_vars = build(**ssa_func.inputs) + if isinstance(output_vars, tuple): + output_vars = list(output_vars) + elif not isinstance(output_vars, list): + output_vars = [output_vars] + ssa_func.set_outputs(output_vars) + program.add_function("main", ssa_func) + + mlmodel = ct_convert( + program, + source="milinternal", + convert_to=backend, + compute_units=compute_unit + ) + return mlmodel.predict(input_values) + + +def apply_pass_and_basic_check(prog, pass_name, skip_output_name_check=False): + """ + Apply pass to the program + """ + prev_prog = copy.deepcopy(prog) + graph_pass = pass_name if isinstance(pass_name, AbstractQuantizationPass) else PASS_REGISTRY[pass_name] + graph_pass(prog) + block = prog.functions["main"] + prev_block = prev_prog.functions["main"] + if not skip_output_name_check: + assert_same_output_names(prev_prog, prog) + assert_same_output_shapes(prev_prog, prog) + return prev_prog, prev_block, block + + +def assert_prog_input_type(prog, expected_dtype_str, expected_name=None, index=0): + block = prog.functions["main"] + if expected_name is None: + input_var = list(block.inputs.values())[index] + assert input_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + else: + for input_var in block.inputs.values(): + if input_var.name == expected_name: + assert input_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + +def assert_spec_input_type(spec, expected_feature_type, expected_name=None, index=0): + if expected_name is None: + assert spec.description.input[index].type.multiArrayType.dataType == expected_feature_type + else: + for input in spec.description.input: + if input.name == expected_name: + assert input.type.multiArrayType.dataType == expected_feature_type + +def assert_input_dtype(mlmodel, expected_type_str, expected_name=None, index=0): + assert_prog_input_type(mlmodel._mil_program, expected_type_str, + expected_name=expected_name, index=index) + assert_spec_input_type(mlmodel._spec, DTYPE_TO_FEATURE_TYPE_MAP[expected_type_str], + expected_name=expected_name, index=index) + +def assert_spec_output_type(spec, expected_feature_type, expected_name=None, index=0): + assert spec.description.output[index].type.multiArrayType.dataType == expected_feature_type + if expected_name is not None: + assert spec.description.output[index].name == expected_name + +def assert_prog_output_type(prog, expected_dtype_str, expected_name=None, index=0): + block = prog.functions["main"] + output_var = block.outputs[index] + assert output_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + if expected_name is not None: + assert output_var.name == expected_name + +def assert_output_dtype(mlmodel, expected_type_str, expected_name=None, index=0): + assert_prog_output_type(mlmodel._mil_program, expected_type_str, + expected_name=expected_name, index=index) + assert_spec_output_type(mlmodel._spec, DTYPE_TO_FEATURE_TYPE_MAP[expected_type_str], + expected_name=expected_name, index=index) + +def random_gen_input_feature_type(input_desc): + if input_desc.type.WhichOneof("Type") == "multiArrayType": + shape = [s for s in input_desc.type.multiArrayType.shape] + if input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.FLOAT32: + dtype = np.float32 + elif input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.INT32: + dtype = np.int32 + elif input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.FLOAT16: + dtype = np.float16 + elif input_desc.type.multiArrayType.dataType == 
ft.ArrayFeatureType.FLOAT64:
+            dtype = np.float64
+        else:
+            raise ValueError("unsupported type")
+        return np.random.rand(*shape).astype(dtype)
+    elif input_desc.type.WhichOneof("Type") == "imageType":
+        if input_desc.type.imageType.colorSpace in (ft.ImageFeatureType.BGR, ft.ImageFeatureType.RGB):
+            shape = [3, input_desc.type.imageType.height, input_desc.type.imageType.width]
+            x = np.random.randint(low=0, high=256, size=shape)
+            return Image.fromarray(np.transpose(x, [1, 2, 0]).astype(np.uint8))
+        elif input_desc.type.imageType.colorSpace == ft.ImageFeatureType.GRAYSCALE:
+            shape = [input_desc.type.imageType.height, input_desc.type.imageType.width]
+            x = np.random.randint(low=0, high=256, size=shape)
+            return Image.fromarray(x.astype(np.uint8), 'L')
+        elif input_desc.type.imageType.colorSpace == ft.ImageFeatureType.GRAYSCALE_FLOAT16:
+            shape = (input_desc.type.imageType.height, input_desc.type.imageType.width)
+            x = np.random.rand(*shape)
+            return Image.fromarray(x.astype(np.float32), 'F')
+        else:
+            raise ValueError("unrecognized image type")
+    else:
+        raise ValueError("unsupported type")
+
+def gen_input_shapes_einsum(equation, dynamic):
+    equation = equation.replace(" ", "")
+    left = equation.split("->")[0]
+    a_desc, b_desc = left.split(",")
+    converter_shapes = {}
+    shapes = {}
+    cur_default_shape = 2
+    for symbol in a_desc + b_desc:
+        if symbol not in shapes:
+            shapes[symbol] = cur_default_shape
+            if dynamic:
+                converter_shapes[symbol] = ct.RangeDim(default=cur_default_shape)
+            else:
+                converter_shapes[symbol] = cur_default_shape
+            cur_default_shape += 1
+    a_shape = [shapes[symbol] for symbol in a_desc]
+    b_shape = [shapes[symbol] for symbol in b_desc]
+    a_converter_shape = [converter_shapes[symbol] for symbol in a_desc]
+    b_converter_shape = [converter_shapes[symbol] for symbol in b_desc]
+    return ([a_shape, b_shape],
+            [ct.TensorType(shape=a_converter_shape, dtype=np.float32),
+             ct.TensorType(shape=b_converter_shape, dtype=np.float32)])
+
+def verify_prediction(mlmodel, multiarray_type=None):
+    spec = mlmodel._spec
+    input_dict = {}
+    for input_desc in spec.description.input:
+        input_dict[input_desc.name] = random_gen_input_feature_type(input_desc)
+        if multiarray_type is not None:
+            input_dict[input_desc.name] = input_dict[input_desc.name].astype(multiarray_type)
+    mlmodel.predict(input_dict)
+
+def assert_spec_input_image_type(spec, expected_feature_type):
+    assert spec.description.input[0].type.imageType.colorSpace == expected_feature_type
+
+def assert_spec_output_image_type(spec, expected_feature_type):
+    assert spec.description.output[0].type.imageType.colorSpace == expected_feature_type
+
+def assert_cast_ops_count(mlmodel, expected_count):
+    block = mlmodel._mil_program.functions["main"]
+    assert len(block.find_ops(op_type="cast")) == expected_count
+
+def assert_ops_in_mil_program(mlmodel, expected_op_list):
+    assert expected_op_list == get_op_types_in_program(mlmodel._mil_program)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py
new file mode 100644
index 00000000..433c9b78
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + from sklearn.svm import LinearSVC as _LinearSVC + + sklearn_class = _LinearSVC + from . import _sklearn_util + +from . import _logistic_regression + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a LinearSVC model to the protobuf spec. + Parameters + ---------- + model: LinearSVC + A trained LinearSVC model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _LinearSVC) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + + return _MLModel(_logistic_regression._convert(model, feature_names, target)) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return _logistic_regression.get_output_classes(model) + + +def get_input_dimension(model): + return _logistic_regression.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py new file mode 100644 index 00000000..d64719a5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py @@ -0,0 +1,53 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + import sklearn + from sklearn.svm import LinearSVR as _LinearSVR + + from . import _sklearn_util + + sklearn_class = sklearn.svm.LinearSVR + +from . import _linear_regression + +model_type = "regressor" + + +def convert(model, features, target): + """Convert a LinearSVR model to the protobuf spec. + Parameters + ---------- + model: LinearSVR + A trained LinearSVR model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Check the scikit learn model + _sklearn_util.check_expected_type(model, _LinearSVR) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + + return _MLModel(_linear_regression._convert(model, features, target)) + + +def get_input_dimension(model): + return _linear_regression.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py new file mode 100644 index 00000000..53414882 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py @@ -0,0 +1,68 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from . import _SVC as _SVC + +if _HAS_SKLEARN: + from sklearn.svm import NuSVC as _NuSVC + + from . import _sklearn_util + from ._sklearn_util import check_fitted + + sklearn_class = _NuSVC + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a Nu-Support Vector Classification (NuSVC) model to the protobuf spec. + Parameters + ---------- + model: NuSVC + A trained NuSVC encoder model. + + feature_names: [str], optional (default=None) + Name of the input columns. + + target: str, optional (default=None) + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _NuSVC) + return _SVC.convert(model, feature_names, target) + + +def supports_output_scores(model): + return _SVC.supports_output_scores(model) + + +def get_output_classes(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return _SVC.get_output_classes(model) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return _SVC.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py new file mode 100644 index 00000000..65e3d868 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py @@ -0,0 +1,54 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from . import _SVR as _SVR + +if _HAS_SKLEARN: + from sklearn.svm import NuSVR as _NuSVR + + from . import _sklearn_util + from ._sklearn_util import check_fitted + + sklearn_class = _NuSVR + +model_type = "regressor" + + +def convert(model, feature_names, target): + """Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec. + Parameters + ---------- + model: NuSVR + A trained NuSVR encoder model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _NuSVR) + return _SVR.convert(model, feature_names, target) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+    )
+
+    check_fitted(model, lambda m: hasattr(m, "support_vectors_"))
+    return _SVR.get_input_dimension(model)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py
new file mode 100644
index 00000000..fa8a3fad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ... import SPECIFICATION_VERSION as _SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import set_classifier_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+    from sklearn.svm import SVC as _SVC
+
+    from ._sklearn_util import check_fitted
+
+    sklearn_class = _SVC
+
+model_type = "classifier"
+
+from ._svm_common import _set_kernel
+
+
+def _generate_base_svm_classifier_spec(model):
+    """
+    Takes an SVM classifier and produces a starting spec using the parts that
+    are shared between all SVMs.
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    check_fitted(model, lambda m: hasattr(m, "support_vectors_"))
+
+    spec = _Model_pb2.Model()
+    spec.specificationVersion = _SPECIFICATION_VERSION
+    svm = spec.supportVectorClassifier
+
+    _set_kernel(model, svm)
+
+    for cur_rho in model.intercept_:
+        if len(model.classes_) == 2:
+            # scikit-learn does not negate the intercept for binary classification
+            svm.rho.append(cur_rho)
+        else:
+            svm.rho.append(-cur_rho)
+
+    for i in range(len(model._dual_coef_)):
+        svm.coefficients.add()
+        for cur_alpha in model._dual_coef_[i]:
+            svm.coefficients[i].alpha.append(cur_alpha)
+
+    for cur_src_vector in model.support_vectors_:
+        cur_dest_vector = svm.denseSupportVectors.vectors.add()
+        for i in cur_src_vector:
+            cur_dest_vector.values.append(i)
+    return spec
+
+
+def convert(model, feature_names, target):
+    """Convert a Support Vector Classification (SVC) model to the protobuf spec.
+    Parameters
+    ----------
+    model: SVC
+        A trained SVC model.
+
+    feature_names: [str], optional (default=None)
+        Name of the input columns.
+
+    target: str, optional (default=None)
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+    spec = _generate_base_svm_classifier_spec(model)
+    spec = set_classifier_interface_params(
+        spec,
+        feature_names,
+        model.classes_,
+        "supportVectorClassifier",
+        output_features=target,
+    )
+
+    svm = spec.supportVectorClassifier
+    for i in model.n_support_:
+        svm.numberOfSupportVectorsPerClass.append(int(i))
+
+    if len(model.probA_) != 0 and len(model.classes_) == 2:
+        print(
+            "[WARNING] scikit-learn normalizes pairwise probabilities even for "
+            "binary classification. This can cause differences in predicted "
+            "probabilities, usually less than 0.5%."
+        )
+
+    # probA_ and probB_ are only populated when the model was fit with
+    # probability=True; otherwise they are empty arrays.
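+    # For example (hypothetical values): SVC(probability=True).fit(X, y)
+    # populates probA_/probB_, so the converted Core ML model can emit class
+    # scores; with probability=False both attributes are empty and only the
+    # predicted label is meaningful.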
+ if len(model.probA_) != 0: + for i in model.probA_: + svm.probA.append(i) + + for i in model.probB_: + svm.probB.append(i) + + return _MLModel(spec) + + +def supports_output_scores(model): + return len(model.probA_) != 0 + + +def get_output_classes(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return list(model.classes_) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return len(model.support_vectors_[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py new file mode 100644 index 00000000..6e66a337 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py @@ -0,0 +1,81 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ... import SPECIFICATION_VERSION +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ...models._interface_management import set_regressor_interface_params +from ...proto import Model_pb2 as _Model_pb2 + +if _HAS_SKLEARN: + from sklearn.svm import SVR as _SVR + + from ._sklearn_util import check_fitted + + sklearn_class = _SVR + +model_type = "regressor" + +from ._svm_common import _set_kernel + + +def _generate_base_svm_regression_spec(model): + """ + Takes an SVM regression model produces a starting spec using the parts. + that are shared between all SVMs. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + svm = spec.supportVectorRegressor + + _set_kernel(model, svm) + + svm.rho = -model.intercept_[0] + for i in range(len(model._dual_coef_)): + for cur_alpha in model._dual_coef_[i]: + svm.coefficients.alpha.append(cur_alpha) + + for cur_src_vector in model.support_vectors_: + cur_dest_vector = svm.denseSupportVectors.vectors.add() + for i in cur_src_vector: + cur_dest_vector.values.append(i) + return spec + + +def convert(model, features, target): + """Convert a Support Vector Regressor (SVR) model to the protobuf spec. + Parameters + ---------- + model: SVR + A trained SVR encoder model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + spec = _generate_base_svm_regression_spec(model) + spec = set_regressor_interface_params(spec, features, target) + return _MLModel(spec) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+ ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return len(model.support_vectors_[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py new file mode 100644 index 00000000..77268c40 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +# A single function to manage the importing. + +from ._converter import convert diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py new file mode 100644 index 00000000..6dd251b2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py @@ -0,0 +1,161 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import __version__ as ct_version +from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION + +""" +Defines the primary function for converting scikit-learn models. +""" + + +def convert(sk_obj, input_features=None, output_feature_names=None): + """ + Convert scikit-learn pipeline, classifier, or regressor to Core ML format. + + Parameters + ---------- + sk_obj: model | [model] of scikit-learn format. + Scikit learn model(s) to convert to a Core ML format. + + The input model may be a single scikit learn model, a scikit learn + pipeline model, or a list of scikit learn models. + + Currently supported scikit learn models are: + + - Linear and Logistic Regression + - LinearSVC and LinearSVR + - Ridge Regression + - SVC and SVR + - NuSVC and NuSVR + - Gradient Boosting Classifier and Regressor + - Decision Tree Classifier and Regressor + - Random Forest Classifier and Regressor + - Normalizer + - Imputer + - Standard Scaler + - DictVectorizer + - One Hot Encoder + - KNeighborsClassifier + + The input model, or the last model in a pipeline or list of models, + determines whether this is exposed as a Transformer, Regressor, + or Classifier. + + Note that there may not be a one-to-one correspondence between scikit + learn models and the Core ML models chosen to represent them. For + example, many scikit learn models are embedded in a pipeline to handle + processing of input features. + + + input_features: str | dict | list + + Optional name(s) that can be given to the inputs of the scikit-learn + model. Defaults to ``"input"``. + + Input features can be specified in a number of forms. + + - Single string: In this case, the input is assumed to be a single + array, with the number of dimensions set using ``num_dimensions``. + + - List of strings: In this case, the overall input dimensions to the + scikit-learn model are assumed to be the length of the list. If + neighboring names are identical, they are assumed to be an input + array of that length. For example: + + ``["a", "b", "c"]`` + + resolves to: + + ``[("a", Double), ("b", Double), ("c", Double)]``. + + In addition: + + ``["a", "a", "b"]`` + + resolves to: + + ``[("a", Array(2)), ("b", Double)]``. 
+ + - Dictionary: Where the keys are the names and the indices or ranges of + feature indices. + + In this case, the Dictionary is presented as a mapping from keys to indices or + ranges of contiguous indices. For example: + + ``{"a" : 0, "b" : [2,3], "c" : 1}`` + + resolves to: + + ``[("a", Double), ("c", Double), ("b", Array(2))]``. + + Note that the ordering is determined by the indices. + + - List of tuples of the form ``(name, datatype)``, in which ``name`` is the + name of the exposed feature, and ``datatype`` is an instance of + ``String``, ``Double``, ``Int64``, ``Array``, or ``Dictionary``. + + output_feature_names: string or list of strings + Optional name(s) that can be given to the inputs of the scikit-learn + model. + + The ``output_feature_names`` is interpreted according to the model type: + + - If the scikit-learn model is a transformer, it is the name of the + array feature output by the final sequence of the transformer + (defaults to ``"output"``). + - If it is a classifier, it should be a 2-tuple of names giving the top + class prediction and the array of scores for each class (defaults to + ``"classLabel"`` and ``"classScores"``). + - If it is a regressor, it should give the name of the prediction value + (defaults to ``"prediction"``). + + Returns + ------- + model:MLModel + Returns an MLModel instance representing a Core ML model. + + Examples + -------- + .. sourcecode:: python + + >>> from sklearn.linear_model import LinearRegression + >>> import pandas as pd + + # Load data + >>> data = pd.read_csv('houses.csv') + + # Train a model + >>> model = LinearRegression() + >>> model.fit(data[["bedroom", "bath", "size"]], data["price"]) + + # Convert and save the scikit-learn model + >>> import coremltools + >>> coreml_model = coremltools.converters.sklearn.convert(model, + ["bedroom", "bath", "size"], + "price") + >>> coreml_model.save('HousePricer.mlmodel') + """ + + # This function is just a thin wrapper around the internal converter so + # that sklearn isn't actually imported unless this function is called + from ...models import MLModel + # NOTE: Providing user-defined class labels will be enabled when + # several issues with the ordering of the classes are worked out. For now, + # to use custom class labels, directly import the internal function below. + from ._converter_internal import _convert_sklearn_model + + spec = _convert_sklearn_model( + sk_obj, input_features, output_feature_names, class_labels=None + ) + + model = MLModel(spec) + from sklearn import __version__ as sklearn_version + + model.user_defined_metadata[_METADATA_VERSION] = ct_version + model.user_defined_metadata[_METADATA_SOURCE] = "scikit-learn=={0}".format( + sklearn_version + ) + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py new file mode 100644 index 00000000..2c2a9e54 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py @@ -0,0 +1,350 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +The primary file for converting Scikit-learn models. 
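+
+At a high level, _convert_sklearn_model below resolves the named input
+features, prepends a feature vectorizer when several scalar inputs must be
+packed into a single array, converts each scikit-learn step through its
+registered converter module, and wraps the resulting specs in a Pipeline,
+PipelineRegressor, or PipelineClassifier container.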
+ + +""" + +from ..._deps import _HAS_SKLEARN +from ...models import _feature_management as _fm +from ...models import datatypes +from ...models.feature_vectorizer import create_feature_vectorizer +from ...models.pipeline import Pipeline, PipelineClassifier, PipelineRegressor + +if _HAS_SKLEARN: + from sklearn.pipeline import Pipeline as sk_Pipeline + +from collections import namedtuple as _namedtuple + + +from . import (_SVC, _SVR, _decision_tree_classifier, _decision_tree_regressor, + _dict_vectorizer, _gradient_boosting_classifier, + _gradient_boosting_regressor, _imputer, _k_neighbors_classifier, + _linear_regression, _LinearSVC, _LinearSVR, + _logistic_regression, _normalizer, _NuSVC, _NuSVR, + _one_hot_encoder, _random_forest_classifier, + _random_forest_regressor, _standard_scaler, _ridge_regression) + +_PIPELINE_INTERNAL_FEATURE_NAME = "__feature_vector__" + +_converter_module_list = [ + _dict_vectorizer, + _one_hot_encoder, + _normalizer, + _standard_scaler, + _imputer, + _NuSVC, + _NuSVR, + _SVC, + _SVR, + _linear_regression, + _LinearSVC, + _LinearSVR, + _logistic_regression, + _random_forest_classifier, + _random_forest_regressor, + _decision_tree_classifier, + _decision_tree_regressor, + _gradient_boosting_classifier, + _gradient_boosting_regressor, + _k_neighbors_classifier, + _ridge_regression +] + + +def _test_module(m): + assert m.model_type in ["transformer", "regressor", "classifier"], m.__name__ + if m.model_type == "transformer": + assert hasattr(m, "update_dimension"), m.__name__ + if m.model_type == "classifier": + assert hasattr(m, "supports_output_scores"), m.__name__ + assert hasattr(m, "get_output_classes"), m.__name__ + assert hasattr(m, "sklearn_class"), m.__name__ + assert hasattr(m, "get_input_dimension"), m.__name__ + + return True + + +assert all(_test_module(m) for m in _converter_module_list) + +_converter_lookup = dict( + (md.sklearn_class, i) for i, md in enumerate(_converter_module_list) +) +_converter_functions = [md.convert for md in _converter_module_list] + + +def _get_converter_module(sk_obj): + """ + Returns the module holding the conversion functions for a + particular model). + """ + try: + cv_idx = _converter_lookup[sk_obj.__class__] + except KeyError: + raise ValueError( + "Transformer '%s' not supported; supported transformers are %s." + % (repr(sk_obj), ",".join(k.__name__ for k in _converter_module_list)) + ) + + return _converter_module_list[cv_idx] + + +def _is_sklearn_model(sk_obj): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + from sklearn.pipeline import Pipeline as sk_Pipeline + + return isinstance(sk_obj, sk_Pipeline) or sk_obj.__class__ in _converter_lookup + + +def _convert_sklearn_model( + input_sk_obj, input_features=None, output_feature_names=None, class_labels=None +): + """ + Converts a generic sklearn pipeline, transformer, classifier, or regressor + into an coreML specification. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + from sklearn.pipeline import Pipeline as sk_Pipeline + + if input_features is None: + input_features = "input" + + if isinstance(input_sk_obj, sk_Pipeline): + sk_obj_list = input_sk_obj.steps + else: + sk_obj_list = [("SKObj", input_sk_obj)] + + if len(sk_obj_list) == 0: + raise ValueError("No SKLearn transformers supplied.") + + # Put the transformers into a pipeline list to hold them so that they can + # later be added to a pipeline object. 
(Hold off adding them to the + # pipeline now in case it's a single model at the end, in which case it + # gets returned as is.) + # + # Each member of the pipeline list is a tuple of the proto spec for that + # model, the input features, and the output features. + pipeline_list = [] + + # These help us keep track of what's going on a bit easier. + Input = _namedtuple("InputTransformer", ["name", "sk_obj", "module"]) + Output = _namedtuple( + "CoreMLTransformer", ["spec", "input_features", "output_features"] + ) + + # Get a more information rich representation of the list for convenience. + # obj_list is a list of tuples of (name, sk_obj, and the converter module for + # that step in the list. + obj_list = [ + Input(sk_obj_name, sk_obj, _get_converter_module(sk_obj)) + for sk_obj_name, sk_obj in sk_obj_list + ] + + # Various preprocessing steps. + + # If the first component of the object list is the sklearn dict vectorizer, + # which is unique in that it accepts a list of dictionaries, then we can + # get the feature type mapping from that. This then may require the addition + # of several OHE steps, so those need to be processed in the first stage. + if isinstance(obj_list[0].sk_obj, _dict_vectorizer.sklearn_class): + + dv_obj = obj_list[0].sk_obj + output_dim = len(_dict_vectorizer.get_input_feature_names(dv_obj)) + + if not isinstance(input_features, str): + raise TypeError( + "If the first transformer in a pipeline is a " + "DictVectorizer, then the input feature must be the name " + "of the input dictionary." + ) + + input_features = [(input_features, datatypes.Dictionary(str))] + + if len(obj_list) > 1: + output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME + + else: + if output_feature_names is None: + output_feature_name = "transformed_features" + + elif isinstance(output_feature_names, str): + output_feature_name = output_feature_names + + else: + raise TypeError( + "For a transformer pipeline, the " + "output_features needs to be None or a string " + "for the predicted value." + ) + + output_features = [(output_feature_name, datatypes.Array(output_dim))] + + spec = _dict_vectorizer.convert(dv_obj, input_features, output_features)._spec + pipeline_list.append(Output(spec, input_features, output_features)) + + # Set up the environment for the rest of the pipeline + current_input_features = output_features + current_num_dimensions = output_dim + + # In the corner case that it's only the dict vectorizer here, just return + # and exit with that at this point. + if len(obj_list) == 1: + return spec + else: + del obj_list[0] + + else: + + # First, we need to resolve the input feature types as the sklearn pipeline + # expects just an array as input, but what we want to expose to the coreML + # user is an interface with named variables. This resolution has to handle + # a number of cases. + + # Can we get the number of features from the model? If so, pass that + # information into the feature resolution function. If we can't, then this + # function should return None. + first_sk_obj = obj_list[0].sk_obj + num_dimensions = _get_converter_module(first_sk_obj).get_input_dimension( + first_sk_obj + ) + # Resolve the input features. + features = _fm.process_or_validate_features(input_features, num_dimensions) + current_num_dimensions = _fm.dimension_of_array_features(features) + + # Add in a feature vectorizer that consolodates all of the feature inputs + # into the form expected by scipy's pipelines. 
Essentially this is a + # translation layer between the coreML form with named arguments and the + # scikit learn variable form. + if len(features) == 1 and isinstance(features[0][1], datatypes.Array): + current_input_features = features + else: + spec, _output_dimension = create_feature_vectorizer( + features, _PIPELINE_INTERNAL_FEATURE_NAME + ) + + assert _output_dimension == current_num_dimensions + ft_out_features = [ + ( + _PIPELINE_INTERNAL_FEATURE_NAME, + datatypes.Array(current_num_dimensions), + ) + ] + pipeline_list.append(Output(spec, features, ft_out_features)) + current_input_features = ft_out_features + + # Now, validate the sequence of transformers to make sure we have something + # that can work with all of this. + for i, (_, _, m) in enumerate(obj_list[:-1]): + if m.model_type != "transformer": + raise ValueError( + "Only a sequence of transformer classes followed by a " + "single transformer, regressor, or classifier is currently supported. " + "(object in position %d interpreted as %s)" % (i, m.model_type) + ) + + overall_mode = obj_list[-1].module.model_type + assert overall_mode in ("transformer", "regressor", "classifier") + + # Now, go through each transformer in the sequence of transformers and add + # it to the pipeline. + for _, sk_obj, sk_m in obj_list[:-1]: + next_dimension = sk_m.update_dimension(sk_obj, current_num_dimensions) + + output_features = [ + (_PIPELINE_INTERNAL_FEATURE_NAME, datatypes.Array(next_dimension)) + ] + spec = sk_m.convert(sk_obj, current_input_features, output_features)._spec + + pipeline_list.append(Output(spec, current_input_features, output_features)) + + current_input_features = output_features + current_num_dimensions = next_dimension + + # Now, handle the final transformer. This is where we need to have different + # behavior depending on whether it's a classifier, transformer, or regressor. + _, last_sk_obj, last_sk_m = obj_list[-1] + + if overall_mode == "classifier": + supports_output_scores = last_sk_m.supports_output_scores(last_sk_obj) + _internal_output_classes = list(last_sk_m.get_output_classes(last_sk_obj)) + + if class_labels is None: + class_labels = _internal_output_classes + + output_features = _fm.process_or_validate_classifier_output_features( + output_feature_names, class_labels, supports_output_scores + ) + + elif overall_mode == "regressor": + if output_feature_names is None: + output_features = [("prediction", datatypes.Double())] + elif isinstance(output_feature_names, str): + output_features = [(output_feature_names, datatypes.Double())] + else: + raise TypeError( + "For a regressor object or regressor pipeline, the " + "output_features needs to be None or a string for the predicted value." + ) + + else: # transformer + final_output_dimension = last_sk_m.update_dimension( + last_sk_obj, current_num_dimensions + ) + + if output_feature_names is None: + output_features = [ + ("transformed_features", datatypes.Array(final_output_dimension)) + ] + + elif isinstance(output_feature_names, str): + output_features = [ + (output_feature_names, datatypes.Array(final_output_dimension)) + ] + + else: + raise TypeError( + "For a transformer object or transformer pipeline, the " + "output_features needs to be None or a string for the " + "name of the transformed value." + ) + + last_spec = last_sk_m.convert( + last_sk_obj, current_input_features, output_features + )._spec + + pipeline_list.append(Output(last_spec, current_input_features, output_features)) + + # Now, create the pipeline and return the spec for it. 
+ + # If it's just one element, we can return it. + if len(pipeline_list) == 1: + return pipeline_list[0].spec + + original_input_features = pipeline_list[0].input_features + + if overall_mode == "regressor": + pipeline = PipelineRegressor(original_input_features, output_features) + + elif overall_mode == "classifier": + pipeline = PipelineClassifier( + original_input_features, class_labels, output_features + ) + + else: + pipeline = Pipeline(original_input_features, output_features) + + # Okay, now we can build the pipeline spec. + for spec, input_features, output_features in pipeline_list: + pipeline.add_model(spec) + + return pipeline.spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py new file mode 100644 index 00000000..e22d6298 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py @@ -0,0 +1,68 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble + +if _HAS_SKLEARN: + import sklearn.tree as _tree + + from . import _sklearn_util + +model_type = "classifier" +sklearn_class = _tree.DecisionTreeClassifier + + +def convert(model, input_name, output_features): + """Convert a decision tree model to protobuf format. + + Parameters + ---------- + decision_tree : DecisionTreeClassifier + A trained scikit-learn tree model. + + input_name: str + Name of the input columns. + + output_name: str + Name of the output columns. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _tree.DecisionTreeClassifier) + _sklearn_util.check_fitted( + model, lambda m: hasattr(m, "tree_") and model.tree_ is not None + ) + + return _MLModel( + convert_tree_ensemble( + model, + input_name, + output_features, + mode="classifier", + class_labels=model.classes_, + ) + ) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return list(model.classes_) + + +def get_input_dimension(model): + return model.n_features_ diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py new file mode 100644 index 00000000..15dfb967 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py @@ -0,0 +1,51 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble + +if _HAS_SKLEARN: + import sklearn.tree as _tree + + from . 
import _sklearn_util
+
+model_type = "regressor"
+sklearn_class = _tree.DecisionTreeRegressor
+
+
+def convert(model, feature_names, target):
+    """Convert a decision tree model to protobuf format.
+
+    Parameters
+    ----------
+    model : DecisionTreeRegressor
+        A trained scikit-learn tree model.
+
+    feature_names: [str]
+        Name of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_expected_type(model, _tree.DecisionTreeRegressor)
+    _sklearn_util.check_fitted(
+        model, lambda m: hasattr(m, "tree_") and model.tree_ is not None
+    )
+    return _MLModel(_convert_tree_ensemble(model, feature_names, target))
+
+
+def get_input_dimension(model):
+    return model.n_features_
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py
new file mode 100644
index 00000000..ac6cbf2f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._feature_management import process_or_validate_features
+from ...models._interface_management import set_transform_interface_params
+from ...models.feature_vectorizer import create_feature_vectorizer
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+    from sklearn.feature_extraction import DictVectorizer
+
+    sklearn_class = DictVectorizer
+
+from ...models import datatypes
+from ...models.pipeline import Pipeline
+
+model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+    """Convert a DictVectorizer model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: DictVectorizer
+        A fitted DictVectorizer model.
+
+    input_features: str
+        Name of the input column.
+
+    output_features: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+
+    _INTERMEDIATE_FEATURE_NAME = "__sparse_vector_features__"
+
+    n_dimensions = len(model.feature_names_)
+    input_features = process_or_validate_features(input_features)
+
+    # Ensure that the output_features are also solid.
+    output_features = process_or_validate_features(output_features, n_dimensions)
+
+    # The DictVectorizer in the framework outputs a sparse dictionary
+    # of index to value due to other considerations, but we are expecting
+    # the output of this to be a dense feature vector. To make that happen,
+    # put a feature_vectorizer immediately after the dict vectorizer.
+    pline = Pipeline(input_features, output_features)
+
+    # Set the basic model parameters of the dict vectorizer component.
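+    # For example (hypothetical input): a vectorizer fit on dicts such as
+    # {"a": 1.0, "b": 2.0} has feature_names_ == ["a", "b"], which fills
+    # stringToIndex below; integer keys would fill int64ToIndex instead.
+    # The two key types cannot be mixed.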
+    dv_spec = _Model_pb2.Model()
+    dv_spec.specificationVersion = SPECIFICATION_VERSION
+
+    # Set up the dict vectorizer parameters
+    tr_spec = dv_spec.dictVectorizer
+    is_str = None
+    for feature_name in model.feature_names_:
+        if isinstance(feature_name, str):
+            if is_str is False:
+                raise ValueError("Mapping of DictVectorizer mixes int and str types.")
+
+            tr_spec.stringToIndex.vector.append(feature_name)
+            is_str = True
+
+        if isinstance(feature_name, int):
+            if is_str is True:
+                raise ValueError("Mapping of DictVectorizer mixes int and str types.")
+
+            tr_spec.int64ToIndex.vector.append(feature_name)
+            is_str = False
+
+    intermediate_features = [
+        (_INTERMEDIATE_FEATURE_NAME, datatypes.Dictionary(key_type=int))
+    ]
+
+    # Set the interface for the dict vectorizer with the input and the
+    # intermediate output
+    set_transform_interface_params(dv_spec, input_features, intermediate_features)
+
+    pline.add_model(dv_spec)
+
+    # Follow the dict vectorizer by a feature_vectorizer to change the sparse
+    # output layer into a dense vector as expected.
+    fvec, _num_out_dim = create_feature_vectorizer(
+        intermediate_features,
+        output_features[0][0],
+        {"__sparse_vector_features__": n_dimensions},
+    )
+
+    pline.add_model(fvec)
+
+    return _MLModel(pline.spec)
+
+
+def update_dimension(m, current_num_dimensions):
+    return len(m.feature_names_)
+
+
+def get_input_dimension(m):
+    return None
+
+
+def get_input_feature_names(m):
+    return m.feature_names_
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py
new file mode 100644
index 00000000..df790aee
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension
+
+if _HAS_SKLEARN:
+    import sklearn.ensemble as _ensemble
+
+    from . import _sklearn_util
+
+    sklearn_class = _ensemble.GradientBoostingClassifier
+
+model_type = "classifier"
+
+
+def convert(model, feature_names, target):
+    """Convert a boosted tree model to protobuf format.
+
+    Parameters
+    ----------
+    model : GradientBoostingClassifier
+        A trained scikit-learn gradient boosting model.
+
+    feature_names: [str]
+        Name of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+ ) + + _sklearn_util.check_expected_type(model, _ensemble.GradientBoostingClassifier) + + def is_gbr_model(m): + if len(m.estimators_) == 0: + return False + if hasattr(m, "estimators_") and m.estimators_ is not None: + for t in m.estimators_.flatten(): + if not hasattr(t, "tree_") or t.tree_ is None: + return False + return True + else: + return False + + _sklearn_util.check_fitted(model, is_gbr_model) + post_evaluation_transform = None + if model.n_classes_ == 2: + post_evaluation_transform = "Regression_Logistic" + else: + post_evaluation_transform = "Classification_SoftMax" + # Here we enumerate known methods GradientBoostingClassifier use for initializing the raw predictions. + # Alternatively we can enumerate known estimators/strategies combinations. + # This covers more combinations with less hacks + base_prediction = None + dummy_x = np.zeros((1, model.n_features_)) + for base_init_func in ('_init_decision_function', '_raw_predict_init'): + if not hasattr(model, base_init_func): + continue + raw_predictions = getattr(model, base_init_func)(dummy_x)[0, :] + if '_init_decision_function' == base_init_func and model.n_classes_ > 2: + # fix initial default prediction for multiclass classification + # https://github.com/scikit-learn/scikit-learn/pull/12983 + raw_predictions = np.log(raw_predictions) + base_prediction = list(raw_predictions) + break + if base_prediction is None: + raise ValueError("We don't support your classifier: cannot initialize base_prediction. " + "Please file a bug report.") + + return _MLModel( + _convert_tree_ensemble( + model, + feature_names, + target, + mode="classifier", + base_prediction=base_prediction, + class_labels=model.classes_, + post_evaluation_transform=post_evaluation_transform, + ) + ) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return list(model.classes_) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py new file mode 100644 index 00000000..f1494d7e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py @@ -0,0 +1,74 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from distutils.version import StrictVersion + +from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension + +if _HAS_SKLEARN: + import sklearn.ensemble as _ensemble + + from . import _sklearn_util + + sklearn_class = _ensemble.GradientBoostingRegressor + +model_type = "regressor" + + +def convert(model, input_features, output_features): + """Convert a boosted tree model to protobuf format. + + Parameters + ---------- + decision_tree : GradientBoostingRegressor + A trained scikit-learn tree model. + + input_feature: [str] + Name of the input columns. + + output_features: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+ )
+
+ _sklearn_util.check_expected_type(model, _ensemble.GradientBoostingRegressor)
+
+ def is_gbr_model(m):
+ if not hasattr(m, "estimators_") or m.estimators_ is None:
+ return False
+ if len(m.estimators_) == 0:
+ return False
+ for t in m.estimators_.flatten():
+ if not hasattr(t, "tree_") or t.tree_ is None:
+ return False
+ return True
+
+ _sklearn_util.check_fitted(model, is_gbr_model)
+
+ if model.loss == "huber":
+ base_prediction = model.init_.quantile
+ else:
+ # scikit-learn >= 0.22 replaced the init estimator's "mean" with the "constant_" attribute
+ if _SKLEARN_VERSION < StrictVersion("0.22"):
+ base_prediction = model.init_.mean
+ else:
+ base_prediction = model.init_.constant_
+
+ return _MLModel(
+ _convert_tree_ensemble(
+ model, input_features, output_features, base_prediction=base_prediction
+ )
+ )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py
new file mode 100644
index 00000000..fc4d5866
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from distutils.version import StrictVersion
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from ...models import MLModel as _MLModel
+from ...models import datatypes
+from ...models._interface_management import set_transform_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+from . import _sklearn_util
+
+if _HAS_SKLEARN:
+ import sklearn
+
+ try:
+ # scikit-learn >= 0.21
+ from sklearn.impute import SimpleImputer as Imputer
+
+ sklearn_class = sklearn.impute.SimpleImputer
+ except ImportError:
+ # scikit-learn < 0.21
+ from sklearn.preprocessing import Imputer
+
+ sklearn_class = sklearn.preprocessing.Imputer
+
+ model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+ """Convert an Imputer model to the protobuf spec.
+
+ Parameters
+ ----------
+ model: Imputer
+ A fitted Imputer model (SimpleImputer on scikit-learn >= 0.21).
+
+ input_features: str
+ Name of the input column.
+
+ output_features: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ # Set the interface params.
+ spec = _Model_pb2.Model()
+ spec.specificationVersion = SPECIFICATION_VERSION
+
+ assert len(input_features) == 1
+ assert isinstance(input_features[0][1], datatypes.Array)
+
+ # feature name in and out are the same here
+ spec = set_transform_interface_params(spec, input_features, output_features)
+
+ # Test the scikit-learn model
+ _sklearn_util.check_expected_type(model, Imputer)
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "statistics_"))
+
+ # The `axis` attribute only exists on the legacy Imputer (scikit-learn < 0.22);
+ # SimpleImputer imputes only along columns, as desired here.
+ if _SKLEARN_VERSION < StrictVersion("0.22"):
+ if model.axis != 0:
+ raise ValueError("Imputation is only supported along axis = 0.")
+
+ # The imputer in our framework only works on single columns, so
+ # we need to translate that over.
The easiest way to do that is to + # put it in a nested pipeline with a feature extractor and a + + tr_spec = spec.imputer + + for v in model.statistics_: + tr_spec.imputedDoubleArray.vector.append(v) + + try: + tr_spec.replaceDoubleValue = float(model.missing_values) + except ValueError: + raise ValueError( + "Only scalar values or NAN as missing_values " "in _imputer are supported." + ) + + return _MLModel(spec) + + +def update_dimension(model, input_dimension): + """ + Given a model that takes an array of dimension input_dimension, returns + the output dimension. + """ + + # This doesn't expand anything. + return input_dimension + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "statistics_")) + return len(model.statistics_) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py new file mode 100644 index 00000000..09df92b6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py @@ -0,0 +1,291 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import coremltools +from coremltools.proto import FeatureTypes_pb2 + +from ..._deps import _HAS_SCIPY, _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + import sklearn.neighbors as _neighbors + + from . import _sklearn_util + +if _HAS_SCIPY: + import scipy as sp + +import numpy as np + +model_type = "classifier" +sklearn_class = _neighbors.KNeighborsClassifier + + +def convert(model, input_name, output_name): + """Convert a scikit KNeighborsClassifier to protobuf format. + + Parameters + ---------- + model : KNeighborsClassifier + A trained scikit-learn KNeighborsClassifier model. + + input_name: str + Name of the input column. + + output_name: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, sklearn_class) + + _check_fitted(model) + _check_algorithm(model) + _check_weighting_scheme(model) + _check_distance_metric(model) + + return _MLModel(_convert_k_neighbors_classifier(model, input_name, output_name)) + + +def supports_output_scores(model): + """KNeighborsClassifier models do not support output scores.""" + return False + + +def get_output_classes(model): + """Get the candidate classes for the model.""" + _check_fitted(model) + return list(model.classes_) + + +def _convert_k_neighbors_classifier(model, input_name, output_name): + """Convert the scikit KNeighborsClassifier to CoreML. 
Assumes initial validation of the scikit model has been done.""" + + spec = coremltools.proto.Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = model.n_neighbors + spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue = 1 + spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue = _number_of_samples( + model, spec + ) # is there a better heuristic to use here? + + number_of_dimensions = 0 + if _is_algorithm_brute(model): + number_of_dimensions = model._fit_X.shape[1] + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.linearIndex.MergeFromString( + b"" + ) + elif _is_algorithm_kd_tree(model): + npdata = np.asarray(model._tree.data) + number_of_dimensions = get_input_dimension(model) + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = ( + model.leaf_size + ) + else: + raise TypeError( + "KNeighbors algorithm not supported for CoreML conversion: {}".format( + model.algorithm + ) + ) + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions = ( + number_of_dimensions + ) + + # Make sure the distance function is set + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.squaredEuclideanDistance.MergeFromString( + b"" + ) + + input_features = spec.description.input.add() + input_features.name = input_name[0][0] + input_features.type.multiArrayType.shape.extend([number_of_dimensions]) + input_features.type.multiArrayType.dataType = ( + FeatureTypes_pb2.ArrayFeatureType.FLOAT32 + ) + + output_label = spec.description.output.add() + output_label.name = output_name[0][0] + + # predictedFeatureName is required since KNN is a classifier and it should be same as outputName. + spec.description.predictedFeatureName = output_label.name + + # Need to confirm if scikit only accepts integer labels + output_label.type.int64Type.MergeFromString(b"") + spec.kNearestNeighborsClassifier.uniformWeighting.MergeFromString(b"") + + _extract_training_data(model, spec) + + return spec + + +def _number_of_samples(model, spec): + """Get the number of samples the model is fitted to.""" + + if _is_algorithm_brute(model): + return model._fit_X.shape[0] + elif _is_algorithm_kd_tree(model): + return len(np.asarray(model._tree.data)) + return 0 + + +def _extract_training_data(model, spec): + """Extract the training data from the scikit model and add it to the CoreML spec""" + + if _is_algorithm_brute(model): + X = model._fit_X + if _is_valid_sparse_format(X): + X = _unpack_sparse(X) + + for sample in X: + coreml_sample = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add() + ) + for feature in sample: + coreml_sample.vector.append(feature) + + elif _is_algorithm_kd_tree(model): + # sklearn guarantees that tree data is not stored in a sparse format + npdata = np.asarray(model._tree.data) + for sample in npdata: + coreml_sample = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add() + ) + for feature in sample: + coreml_sample.vector.append(feature) + + for label in model._y: + spec.kNearestNeighborsClassifier.int64ClassLabels.vector.append(label) + + +def get_input_dimension(model): + """Get the input dimension for the model""" + _check_fitted(model) + number_of_dimensions = 0 + if _is_algorithm_brute(model): + number_of_dimensions = model._fit_X.shape[1] + elif _is_algorithm_kd_tree(model): + npdata = np.asarray(model._tree.data) + number_of_dimensions = len(npdata[0]) + else: + raise TypeError( + 
"KNeighbors algorithm not supported for CoreML conversion: {}".format( + model.algorithm + ) + ) + return number_of_dimensions + + +def _check_fitted(model): + """Simple wrapper to check if the KNeighborsClassifier has been fitted.""" + return _sklearn_util.check_fitted( + model, lambda m: hasattr(m, "_fit_method") or hasattr(m, "_fit_X") + ) + + +def _check_algorithm(model): + """Ensure the kNeighbors algorithm for the given scikit model is a supported type""" + is_valid = False + print_name = "" + if model.algorithm == "brute" or model.algorithm == "kd_tree": + is_valid = True + print_name = model.algorithm + elif model.algorithm == "auto" and model._fit_method == "kd_tree": + is_valid = True + print_name = "kd_tree" + elif model.algorithm == "auto" and model._fit_method == "brute": + is_valid = True + print_name = "brute" + if not is_valid: + raise TypeError( + "KNeighbors algorithm not supported for CoreML conversion: {}".format( + print_name + ) + ) + + +def _check_weighting_scheme(model): + """Simple wrapper to ensure the weighting scheme is valid for CoreML conversion""" + is_valid = False + if model.weights == "uniform": + is_valid = True + + # Other cases CoreML doesn't support include weighting by distance or a user-provided 'callable' object. + + if not is_valid: + print_name = "" + if _is_printable(model.weights): + print_name = model.weights + else: + print_name = getattr(model.weights, "__name__", repr(model.weights)) + raise TypeError( + "KNeighbors weight function not supported for CoreML conversion: {}".format( + print_name + ) + ) + + +def _check_distance_metric(model): + """Simple wrapper to ensure the distance metric is valid for CoreML conversion""" + is_valid = False + if model.metric == "euclidean": + is_valid = True + elif model.metric == "minkowski" and model.p == 2: + is_valid = True + + # There are a number of other distance metrics supported by scikit that CoreML doesn't currently support. 
+
+ if not is_valid:
+ print_name = ""
+ if _is_printable(model.metric):
+ print_name = model.metric
+ else:
+ print_name = getattr(model.metric, "__name__", repr(model.metric))
+ raise TypeError(
+ "KNeighbors distance metric not supported for CoreML conversion: {}".format(
+ print_name
+ )
+ )
+
+
+def _is_algorithm_brute(model):
+ """Checks if the algorithm for the scikit model is set to 'brute'."""
+ return model.algorithm == "brute" or (
+ model.algorithm == "auto" and model._fit_method == "brute"
+ )
+
+
+def _is_algorithm_kd_tree(model):
+ """Checks if the algorithm for the scikit model is set to 'kd_tree'."""
+ return model.algorithm == "kd_tree" or (
+ model.algorithm == "auto" and model._fit_method == "kd_tree"
+ )
+
+
+def _is_printable(obj):
+ """Check if the object is a valid text type."""
+ return isinstance(obj, str)
+
+
+def _is_valid_sparse_format(obj):
+ """Check if the object is in CSR sparse format (the only valid type for KNeighborsClassifier)"""
+ if not _HAS_SCIPY:
+ return False
+ return isinstance(obj, sp.sparse.csr_matrix)
+
+
+def _unpack_sparse(obj):
+ """Unpack the sparse matrix into a dense array that we can iterate over for insertion into a CoreML model."""
+ if not _HAS_SCIPY or not sp.sparse.issparse(obj):
+ raise TypeError("Object {} is not a scipy sparse matrix type".format(type(obj)))
+ return obj.toarray()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py
new file mode 100644
index 00000000..46f4b6da
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as _np
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import set_regressor_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+ import sklearn
+ from sklearn.linear_model import LinearRegression
+
+ from . import _sklearn_util
+
+ model_type = "regressor"
+ sklearn_class = sklearn.linear_model.LinearRegression
+
+
+def convert(model, features, target):
+ """Convert a linear regression model to the protobuf spec.
+ Parameters
+ ----------
+ model: LinearRegression
+ A trained LinearRegression model.
+
+ features: [str]
+ Names of the input columns.
+
+ target: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ # Check the scikit-learn model
+ _sklearn_util.check_expected_type(model, LinearRegression)
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+
+ return _MLModel(_convert(model, features, target))
+
+
+def _convert(model, features, target):
+ # Set the model class (regressor)
+ spec = _Model_pb2.Model()
+ spec.specificationVersion = SPECIFICATION_VERSION
+ spec = set_regressor_interface_params(spec, features, target)
+
+ # Add parameters for the linear regression.
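+ # The Core ML GLM regressor computes y = weights . x + offset, so the
+ # fitted intercept_ maps to `offset` and coef_ maps to a `weights` row.
+ # A minimal sketch of that correspondence, with made-up training data:
+ #
+ # from sklearn.linear_model import LinearRegression
+ # import numpy as np
+ # m = LinearRegression().fit(np.array([[0.0], [1.0]]), np.array([1.0, 3.0]))
+ # # m.intercept_ == 1.0 -> lr.offset[0]
+ # # m.coef_ == [2.0] -> lr.weights[0].value[0]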
+ lr = spec.glmRegressor
+
+ if isinstance(model.intercept_, _np.ndarray):
+ assert len(model.intercept_) == 1
+ lr.offset.append(model.intercept_[0])
+ else:
+ lr.offset.append(model.intercept_)
+
+ weights = lr.weights.add()
+ for i in model.coef_:
+ weights.value.append(i)
+ return spec
+
+
+def get_input_dimension(model):
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+ return model.coef_.size
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py
new file mode 100644
index 00000000..f1dc91f6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from collections.abc import Iterable
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+
+if _HAS_SKLEARN:
+ from sklearn.linear_model import LogisticRegression
+
+ from . import _sklearn_util
+
+ sklearn_class = LogisticRegression
+
+from ... import SPECIFICATION_VERSION
+from ...models._interface_management import set_classifier_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+model_type = "classifier"
+
+
+def convert(model, feature_names, target):
+ """Convert a Logistic Regression model to the protobuf spec.
+ Parameters
+ ----------
+ model: LogisticRegression
+ A trained LogisticRegression model.
+
+ feature_names: [str], optional (default=None)
+ Names of the input columns.
+
+ target: str, optional (default=None)
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ _sklearn_util.check_expected_type(model, LogisticRegression)
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+
+ return _MLModel(_convert(model, feature_names, target))
+
+
+def _convert(model, feature_names, target):
+ spec = _Model_pb2.Model()
+ spec.specificationVersion = SPECIFICATION_VERSION
+
+ set_classifier_interface_params(
+ spec, feature_names, model.classes_, "glmClassifier", output_features=target
+ )
+
+ glmClassifier = spec.glmClassifier
+
+ if model.multi_class == "ovr":
+ glmClassifier.classEncoding = glmClassifier.OneVsRest
+ else:
+ raise ValueError(
+ 'Currently "One Vs Rest" is the only supported multiclass option.'
+ )
+
+ glmClassifier.postEvaluationTransform = glmClassifier.Logit
+
+ if isinstance(model.intercept_, Iterable):
+ for val in model.intercept_:
+ glmClassifier.offset.append(val)
+ else:
+ for _ in model.coef_:
+ glmClassifier.offset.append(model.intercept_)
+
+ for cur_in_row in model.coef_:
+ cur_out_row = glmClassifier.weights.add()
+ for val in cur_in_row:
+ cur_out_row.value.append(val)
+
+ return spec
+
+
+def supports_output_scores(model):
+ return True
+
+
+def get_output_classes(model):
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+ return list(model.classes_)
+
+
+def get_input_dimension(model):
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+ return len(model.coef_[0])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py
new file mode 100644
index 00000000..3bfcb61b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import \
+ set_transform_interface_params as _set_transform_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+from ...proto.Normalizer_pb2 import Normalizer as _proto__normalizer
+
+if _HAS_SKLEARN:
+ from sklearn.preprocessing import Normalizer
+
+ from . import _sklearn_util
+
+ sklearn_class = Normalizer
+
+model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+ """Convert a normalizer model to the protobuf spec.
+
+ Parameters
+ ----------
+ model: Normalizer
+ A fitted Normalizer model.
+
+ input_features: str
+ Name of the input column.
+
+ output_features: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ # Test the scikit-learn model
+ _sklearn_util.check_expected_type(model, Normalizer)
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "norm"))
+
+ # Set the interface params.
+ spec = _Model_pb2.Model()
+ spec.specificationVersion = SPECIFICATION_VERSION
+ spec = _set_transform_interface_params(spec, input_features, output_features)
+
+ # Set the normalizer parameters
+ _normalizer_spec = spec.normalizer
+ if model.norm == "l1":
+ _normalizer_spec.normType = _proto__normalizer.L1
+ elif model.norm == "l2":
+ _normalizer_spec.normType = _proto__normalizer.L2
+ elif model.norm == "max":
+ _normalizer_spec.normType = _proto__normalizer.LMax
+ return _MLModel(spec)
+
+
+def update_dimension(model, input_dimension):
+ """
+ Given a model that takes an array of dimension input_dimension, returns
+ the output dimension.
+ """
+
+ # No change
+ return input_dimension
+
+
+def get_input_dimension(model):
+ # Cannot determine this now.
+ return None
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py
new file mode 100644
index 00000000..f12a6619
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ...
import SPECIFICATION_VERSION +from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from ...models import MLModel as _MLModel +from ...models import datatypes +from ...models._interface_management import set_transform_interface_params +from ...models.array_feature_extractor import create_array_feature_extractor +from ...models.feature_vectorizer import create_feature_vectorizer +from ...models.pipeline import Pipeline +from ...proto import Model_pb2 as _Model_pb2 +from ...proto import OneHotEncoder_pb2 as _OHE_pb2 +from . import _sklearn_util + +if _HAS_SKLEARN: + from distutils.version import StrictVersion + + from sklearn.preprocessing import OneHotEncoder + + sklearn_class = OneHotEncoder + +# model type determines the behavior of this module. +model_type = "transformer" + + +def convert(model, input_features, output_features): + """Convert a one-hot-encoder model to the protobuf spec. + + Parameters + ---------- + model: OneHotEncoder + A trained one-hot encoder model. + + input_features: str, optional + Name of the input column. + + output_features: str, optional + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Make sure the model is fitted. + _sklearn_util.check_expected_type(model, OneHotEncoder) + if _SKLEARN_VERSION >= StrictVersion("0.22"): + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_")) + else: + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_")) + + input_dimension = get_input_dimension(model) + + if input_dimension is not None: + # Make sure that our starting dimensions are correctly managed. + assert len(input_features) == 1 + assert input_features[0][1] == datatypes.Array(input_dimension) + + input_dimension = input_features[0][1].num_elements + + expected_output_dimension = update_dimension(model, input_dimension) + assert output_features[0][1] == datatypes.Array(expected_output_dimension) + + if _SKLEARN_VERSION >= StrictVersion("0.22"): + model.categorical_features = "all" + model.active_features_ = range(expected_output_dimension) + model.feature_indices_ = [0] + t = 0 + for i in model._n_features_outs: + t = t + i + model.feature_indices_.append(t) + + # Create a pipeline that can do all of the subsequent feature extraction. + feature_vectorizer_input_features = [] + feature_vectorizer_size_map = {} + + if model.categorical_features == "all": + _categorical_features = set(range(input_dimension)) + _cat_feature_idx_mapping = dict((i, i) for i in range(input_dimension)) + else: + _categorical_features = set(model.categorical_features) + _cat_feature_idx_mapping = dict( + (_idx, i) for i, _idx in enumerate(sorted(model.categorical_features)) + ) + + pline = Pipeline(input_features, output_features) + + # Track the overall packing index, which determines the output ordering. + pack_idx = 0 + + # First, go through all the columns that are encoded. The sklearn OHE puts + # all of these first, regardless of their original ordering. 
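+ # Worked example (assumed inputs, for illustration only): with
+ # categorical_features=[0, 2] on a three-column input, sklearn packs the
+ # transformed output as [ohe(col 0), ohe(col 2), col 1] -- the encoded
+ # columns first, then the pass-through columns in their original relative
+ # order. The pipeline assembled below reproduces exactly that packing.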
+ for idx in range(input_dimension): + f_name = "__OHE_%d__" % pack_idx + + if idx in _categorical_features: + + # This input column is one hot encoded + feature_extractor_spec = create_array_feature_extractor( + input_features, f_name, idx, output_type="Int64" + ) + + pline.add_model(feature_extractor_spec) + + _cat_feature_idx = _cat_feature_idx_mapping[idx] + + ohe_input_features = [(f_name, datatypes.Int64())] + ohe_output_features = [(f_name, datatypes.Dictionary("Int64"))] + + # Create a one hot encoder per column + o_spec = _Model_pb2.Model() + o_spec.specificationVersion = SPECIFICATION_VERSION + o_spec = set_transform_interface_params( + o_spec, ohe_input_features, ohe_output_features + ) + + ohe_spec = o_spec.oneHotEncoder + ohe_spec.outputSparse = True + + if model.handle_unknown == "error": + ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value( + "ErrorOnUnknown" + ) + else: + ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value( + "IgnoreUnknown" + ) + + # Need to do a quick search to find the part of the active_features_ mask + # that represents the categorical variables in our part. Could do this + # with binary search, but we probably don't need speed so much here. + def bs_find(a, i): + lb, k = 0, len(a) + while k > 0: + _idx = lb + (k // 2) + if a[_idx] < i: + lb = _idx + 1 + k -= 1 + k = k // 2 + + return lb + + # Here are the indices we are looking for + f_idx_bottom = model.feature_indices_[_cat_feature_idx] + f_idx_top = model.feature_indices_[_cat_feature_idx + 1] + + # Now find where in the active features list we should look. + cat_feat_idx_bottom = bs_find(model.active_features_, f_idx_bottom) + cat_feat_idx_top = bs_find(model.active_features_, f_idx_top) + n_cat_values = cat_feat_idx_top - cat_feat_idx_bottom + + for i in range(cat_feat_idx_bottom, cat_feat_idx_top): + # The actual categorical value is stored as an offset in the active_features list. + cat_idx = model.active_features_[i] - f_idx_bottom + ohe_spec.int64Categories.vector.append(cat_idx) + + # Add the ohe to the pipeline + pline.add_model(o_spec) + + # Add the result to the feature_vectorizer at the end. + feature_vectorizer_input_features.append( + (f_name, datatypes.Dictionary("Int64")) + ) + feature_vectorizer_size_map[f_name] = n_cat_values + + pack_idx += 1 + + # Now go through all the columns that are not encoded as the sklearn OHE puts + # these after the encoded ones. For speed, we can put these all in a single + # ArrayFeatureExtractor + # + pass_through_features = [ + idx for idx in range(input_dimension) if idx not in _categorical_features + ] + + if pass_through_features: + f_name = "__OHE_pass_through__" + + # This input column is not one hot encoded + feature_extractor_spec = create_array_feature_extractor( + input_features, f_name, pass_through_features + ) + + pline.add_model(feature_extractor_spec) + feature_vectorizer_input_features.append( + (f_name, datatypes.Array(len(pass_through_features))) + ) + + # Finally, add the feature vectorizer to the pipeline. 
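+ # The feature vectorizer concatenates the per-column outputs gathered in
+ # feature_vectorizer_input_features (sparse Int64 dictionaries for encoded
+ # columns, one dense array for the pass-through columns) into a single
+ # dense vector. A rough sketch of the bookkeeping, with invented sizes:
+ #
+ # sizes = {"__OHE_0__": 3, "__OHE_pass_through__": 2}
+ # assert sum(sizes.values()) == 5 # must equal the declared output dimension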
+ output_feature_name = output_features[0][0]
+ output_feature_dimension = output_features[0][1].num_elements
+
+ fvec, _num_out_dim = create_feature_vectorizer(
+ feature_vectorizer_input_features,
+ output_feature_name,
+ feature_vectorizer_size_map,
+ )
+
+ # Make sure that the feature vectorizer output matches the expected output dimension.
+ assert _num_out_dim == output_feature_dimension
+
+ pline.add_model(fvec)
+
+ return _MLModel(pline.spec)
+
+
+def update_dimension(model, input_dimension):
+ """
+ Given a model that takes an array of dimension input_dimension, returns
+ the output dimension.
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ if _SKLEARN_VERSION >= StrictVersion("0.22"):
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_"))
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_"))
+ return sum(model._n_features_outs)
+ else:
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_"))
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_"))
+
+ if model.categorical_features == "all":
+ return len(model.active_features_)
+ else:
+ out_dimension = len(model.active_features_) + (
+ input_dimension - len(model.n_values_)
+ )
+
+ return out_dimension
+
+
+def get_input_dimension(model):
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ if _SKLEARN_VERSION >= StrictVersion("0.22"):
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_"))
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_"))
+ return model.n_features_in_
+ else:
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_"))
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_"))
+
+ if model.categorical_features == "all":
+ return len(model.feature_indices_) - 1
+ else:
+ # This can't actually be determined from the model as indices after the
+ # rest of the categorical values don't seem to be tracked
+ return None
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py
new file mode 100644
index 00000000..905f2d12
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension
+
+if _HAS_SKLEARN:
+ import sklearn.ensemble as _ensemble
+
+ from . import _sklearn_util
+
+ sklearn_class = _ensemble.RandomForestClassifier
+
+model_type = "classifier"
+
+
+def convert(model, feature_names, target):
+ """Convert a random forest classification model to protobuf format.
+
+ Parameters
+ ----------
+ model : RandomForestClassifier
+ A trained scikit-learn random forest model.
+
+ feature_names: [str]
+ Names of the input columns.
+
+ target: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ _sklearn_util.check_expected_type(model, _ensemble.RandomForestClassifier)
+
+ def is_rf_model(m):
+ if not hasattr(m, "estimators_") or m.estimators_ is None:
+ return False
+ if len(m.estimators_) == 0:
+ return False
+ for t in m.estimators_:
+ if not hasattr(t, "tree_") or t.tree_ is None:
+ return False
+ return True
+
+ _sklearn_util.check_fitted(model, is_rf_model)
+ return _MLModel(
+ _convert_tree_ensemble(
+ model, feature_names, target, mode="classifier", class_labels=model.classes_
+ )
+ )
+
+
+def supports_output_scores(model):
+ return True
+
+
+def get_output_classes(model):
+ return list(model.classes_)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py
new file mode 100644
index 00000000..df61a135
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension
+
+if _HAS_SKLEARN:
+ import sklearn.ensemble as _ensemble
+
+ from . import _sklearn_util
+
+ sklearn_class = _ensemble.RandomForestRegressor
+
+model_type = "regressor"
+
+
+def convert(model, feature_names, target):
+ """Convert a random forest regression model to protobuf format.
+
+ Parameters
+ ----------
+ model : RandomForestRegressor
+ A trained scikit-learn random forest model.
+
+ feature_names: [str]
+ Names of the input columns.
+
+ target: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ _sklearn_util.check_expected_type(model, _ensemble.RandomForestRegressor)
+
+ def is_rf_model(m):
+ if not hasattr(m, "estimators_") or m.estimators_ is None:
+ return False
+ if len(m.estimators_) == 0:
+ return False
+ for t in m.estimators_:
+ if not hasattr(t, "tree_") or t.tree_ is None:
+ return False
+ return True
+
+ _sklearn_util.check_fitted(model, is_rf_model)
+ return _MLModel(_convert_tree_ensemble(model, feature_names, target))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py
new file mode 100644
index 00000000..84208c6c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+
+if _HAS_SKLEARN:
+ import sklearn
+ from sklearn.linear_model import Ridge as _Ridge
+
+ from . 
import _sklearn_util
+
+ sklearn_class = sklearn.linear_model.Ridge
+
+from . import _linear_regression
+
+model_type = "regressor"
+
+
+def convert(model, features, target):
+ """Convert a Ridge Regression model to the protobuf spec.
+ Parameters
+ ----------
+ model: Ridge
+ A trained Ridge Regression model.
+
+ features: [str]
+ Names of the input columns.
+
+ target: str
+ Name of the output column.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_SKLEARN):
+ raise RuntimeError(
+ "scikit-learn not found. scikit-learn conversion API is disabled."
+ )
+
+ # Check the scikit-learn model
+ _sklearn_util.check_expected_type(model, _Ridge)
+ _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+
+ return _MLModel(_linear_regression._convert(model, features, target))
+
+
+def get_input_dimension(model):
+ return _linear_regression.get_input_dimension(model)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py
new file mode 100644
index 00000000..e313de1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def check_fitted(model, func):
+ """Check if a model is fitted. Raise error if not.
+
+ Parameters
+ ----------
+ model: model
+ Any scikit-learn model
+
+ func: function
+ Function that returns True if the model is fitted.
+ """
+ if not func(model):
+ raise TypeError("Expected a 'fitted' model for conversion")
+
+
+def check_expected_type(model, expected_type):
+ """Check if a model is of the right type. Raise error if not.
+
+ Parameters
+ ----------
+ model: model
+ Any scikit-learn model
+
+ expected_type: Type
+ Expected type of the scikit-learn model.
+ """
+ if model.__class__.__name__ != expected_type.__name__:
+ raise TypeError(
+ "Expected model of type '%s' (got %s)"
+ % (expected_type.__name__, model.__class__.__name__)
+ )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py
new file mode 100644
index 00000000..92e96a9a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import \
+ set_transform_interface_params as _set_transform_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+ from sklearn.preprocessing import StandardScaler
+
+ from . import _sklearn_util
+
+ sklearn_class = StandardScaler
+
+model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+ """Convert a StandardScaler model to the protobuf spec.
+
+ Parameters
+ ----------
+ model: StandardScaler
+ A trained StandardScaler model.
+ + input_features: str + Name of the input column. + + output_features: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Test the scikit-learn model + _sklearn_util.check_expected_type(model, StandardScaler) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + + # Set the interface params. + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + spec = _set_transform_interface_params(spec, input_features, output_features) + + # Set the parameters + tr_spec = spec.scaler + for x in model.mean_: + tr_spec.shiftValue.append(-x) + + for x in model.scale_: + tr_spec.scaleValue.append(1.0 / x) + + return _MLModel(spec) + + +def update_dimension(model, input_dimension): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + # Nothing to do for this model + return input_dimension + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + return len(model.mean_) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py new file mode 100644 index 00000000..77289a49 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Common stuff for SVMs +""" + + +def _set_kernel(model, spec): + """ + Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. + """ + + def gamma_value(model): + return model._gamma + + result = None + if model.kernel == "linear": + spec.kernel.linearKernel.MergeFromString( + b"" + ) # hack to set kernel to an empty type + elif model.kernel == "rbf": + spec.kernel.rbfKernel.gamma = gamma_value(model) + elif model.kernel == "poly": + spec.kernel.polyKernel.gamma = gamma_value(model) + spec.kernel.polyKernel.c = model.coef0 + spec.kernel.polyKernel.degree = model.degree + elif model.kernel == "sigmoid": + spec.kernel.sigmoidKernel.gamma = gamma_value(model) + spec.kernel.sigmoidKernel.c = model.coef0 + else: + raise ValueError( + "Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid." + ) + return result diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py new file mode 100644 index 00000000..a65e61bd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py @@ -0,0 +1,263 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models._feature_management import process_or_validate_features +from ...models.tree_ensemble import (TreeEnsembleClassifier, + TreeEnsembleRegressor) + +if _HAS_SKLEARN: + from sklearn.tree import _tree + + + +def _get_value(scikit_value, mode="regressor", scaling=1.0, n_classes=2, tree_index=0): + """ Get the right value from the scikit-tree + """ + # Regression + if mode == "regressor": + return scikit_value[0] * scaling + + # Binary classification + if n_classes == 2: + # Decision tree + if len(scikit_value[0]) != 1: + value = scikit_value[0][1] * scaling / scikit_value[0].sum() + # boosted tree + else: + value = scikit_value[0][0] * scaling + if value == 0.5: + value = value - 1e-7 + + # Multiclass classification + else: + # Decision tree + if len(scikit_value[0]) != 1: + value = scikit_value[0] / scikit_value[0].sum() + # boosted tree + else: + value = {tree_index: scikit_value[0] * scaling} + return value + + +def _recurse( + coreml_tree, + scikit_tree, + tree_id, + node_id, + scaling=1.0, + mode="regressor", + n_classes=2, + tree_index=0, +): + """Traverse through the tree and append to the tree spec. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + ## Recursion should not be called on the leaf node. + if node_id == _tree.TREE_LEAF: + raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) + + # Add a branch node to the tree + if scikit_tree.children_left[node_id] != _tree.TREE_LEAF: + branch_mode = "BranchOnValueLessThanEqual" + feature_index = scikit_tree.feature[node_id] + feature_value = scikit_tree.threshold[node_id] + left_child_id = scikit_tree.children_left[node_id] + right_child_id = scikit_tree.children_right[node_id] + + # Add a branch node + coreml_tree.add_branch_node( + tree_id, + node_id, + feature_index, + feature_value, + branch_mode, + left_child_id, + right_child_id, + ) + + # Now recurse + _recurse( + coreml_tree, + scikit_tree, + tree_id, + left_child_id, + scaling, + mode, + n_classes, + tree_index, + ) + _recurse( + coreml_tree, + scikit_tree, + tree_id, + right_child_id, + scaling, + mode, + n_classes, + tree_index, + ) + + # Add a leaf node to the tree + else: + # Get the scikit-learn value + if scikit_tree.n_outputs != 1: + raise ValueError("Expected only 1 output in the scikit-learn tree.") + value = _get_value( + scikit_tree.value[node_id], mode, scaling, n_classes, tree_index + ) + coreml_tree.add_leaf_node(tree_id, node_id, value) + + +def get_input_dimension(model): + if hasattr(model, "n_features_"): + return model.n_features_ + + elif hasattr(model, "n_estimators"): + if model.n_estimators == 0: + raise ValueError("model not trained.") + + try: + return model.estimators_[0, 0].n_features_ + except IndexError: + raise ValueError("Model not trained or invalid model.") + else: + raise ValueError("Unable to obtain input dimension from model.") + + +def convert_tree_ensemble( + model, + input_features, + output_features=("predicted_class", float), + mode="regressor", + base_prediction=None, + class_labels=None, + post_evaluation_transform=None, +): + """ + Convert a generic tree regressor model to the protobuf spec. + + This currently supports: + * Decision tree regression + * Gradient boosted tree regression + * Random forest regression + * Decision tree classifier. 
+ * Gradient boosted tree classifier.
+ * Random forest classifier.
+
+ Parameters
+ ----------
+ model: [DecisionTreeRegressor | GradientBoostingRegressor | RandomForestRegressor]
+ A scikit-learn tree model.
+
+ input_features : list of strings, optional (default=None)
+ Names of each of the input features.
+
+ output_features: tuple, optional
+ Name and type of the output column.
+
+ base_prediction: double
+ Base prediction value.
+
+ mode: str in ['regressor', 'classifier']
+ Mode of the tree model.
+
+ class_labels: list[int]
+ List of classes.
+
+ post_evaluation_transform: str
+ Post-evaluation transform to apply (e.g. 'Regression_Logistic').
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+
+ num_dimensions = get_input_dimension(model)
+ features = process_or_validate_features(input_features, num_dimensions)
+
+ n_classes = None
+ if mode == "classifier":
+ n_classes = model.n_classes_
+ if class_labels is None:
+ class_labels = range(n_classes)
+ else:
+ if len(class_labels) != n_classes:
+ raise ValueError(
+ "Number of classes in model (%d) does not match "
+ "length of supplied class list (%d)."
+ % (n_classes, len(class_labels))
+ )
+
+ coreml_tree = TreeEnsembleClassifier(
+ input_features, class_labels, output_features
+ )
+ if post_evaluation_transform is not None:
+ coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
+
+ # Base prediction not provided
+ if base_prediction is None:
+ if n_classes == 2:
+ base_prediction = [0.0]
+ else:
+ base_prediction = [0.0 for c in range(n_classes)]
+ coreml_tree.set_default_prediction_value(base_prediction)
+ else:
+ if base_prediction is None:
+ base_prediction = 0.0
+ coreml_tree = TreeEnsembleRegressor(input_features, output_features)
+ coreml_tree.set_default_prediction_value(base_prediction)
+
+ # Single tree
+ if hasattr(model, "tree_"):
+ _recurse(
+ coreml_tree,
+ model.tree_,
+ tree_id=0,
+ node_id=0,
+ mode=mode,
+ n_classes=n_classes,
+ )
+
+ # Multiple trees
+ elif hasattr(model, "estimators_"):
+ is_ensembling_in_separate_trees = False
+ if type(model.estimators_) != list:
+ is_ensembling_in_separate_trees = (
+ len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
+ )
+ estimators = model.estimators_.flatten()
+ else:
+ estimators = model.estimators_
+
+ scaling = (
+ model.learning_rate
+ if hasattr(model, "learning_rate")
+ else 1.0 / len(estimators)
+ )
+ for tree_id, base_model in enumerate(estimators):
+ if is_ensembling_in_separate_trees:
+ tree_index = tree_id % n_classes
+ else:
+ tree_index = 0
+ _recurse(
+ coreml_tree,
+ base_model.tree_,
+ tree_id,
+ node_id=0,
+ scaling=scaling,
+ mode=mode,
+ n_classes=n_classes,
+ tree_index=tree_index,
+ )
+ else:
+ raise TypeError("Unknown scikit-learn tree model type.")
+
+ return coreml_tree.spec
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py
new file mode 100644
index 00000000..a619ce8f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ._tree import convert diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py new file mode 100644 index 00000000..46615fbf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py @@ -0,0 +1,93 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import __version__ as ct_version +from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION + +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble + + +def convert( + model, + feature_names=None, + target="target", + force_32bit_float=True, + mode="regressor", + class_labels=None, + n_classes=None, +): + """ + Convert a trained XGBoost model to Core ML format. + + Parameters + ---------- + decision_tree : Booster + A trained XGboost tree model. + + feature_names: [str] | str + Names of input features that will be exposed in the Core ML model + interface. + + Can be set to one of the following: + + - ``None`` for using the feature names from the model. + - List of names of the input features that should be exposed in the + interface to the Core ML model. These input features are in the same + order as the XGboost model. + + target: str + Name of the output feature name exposed to the Core ML model. + + force_32bit_float: bool + If ``True``, then the resulting CoreML model will use 32 bit floats internally. + + mode: str in ['regressor', 'classifier'] + Mode of the tree model. + + class_labels: list[int] or None + List of classes. When set to None, the class labels are just the range from + 0 to ``n_classes - 1``. + + n_classes: int or None + Number of classes in classification. When set to ``None``, the number of + classes is expected from the model or ``class_labels`` should be provided. + + Returns + ------- + model:MLModel + Returns an MLModel instance representing a Core ML model. + + Examples + -------- + .. sourcecode:: python + + # Convert it with default input and output names + >>> import coremltools + >>> coreml_model = coremltools.converters.xgboost.convert(model) + + # Saving the Core ML model to a file. + >>> coreml_model.save('my_model.mlmodel') + """ + model = _MLModel( + _convert_tree_ensemble( + model, + feature_names, + target, + force_32bit_float=force_32bit_float, + mode=mode, + class_labels=class_labels, + n_classes=n_classes, + ) + ) + + from xgboost import __version__ as xgboost_version + + model.user_defined_metadata[_METADATA_VERSION] = ct_version + model.user_defined_metadata[_METADATA_SOURCE] = "xgboost=={0}".format( + xgboost_version + ) + + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py new file mode 100644 index 00000000..cf3fd9a6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py @@ -0,0 +1,280 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +from ..._deps import _HAS_XGBOOST +from ...models.tree_ensemble import TreeEnsembleClassifier +from ...models.tree_ensemble import \ + TreeEnsembleRegressor as _TreeEnsembleRegressor + +if _HAS_XGBOOST: + import xgboost as _xgboost + + +def recurse_json( + mlkit_tree, + xgb_tree_json, + tree_id, + node_id, + feature_map, + force_32bit_float, + mode="regressor", + tree_index=0, + n_classes=2, +): + """Traverse through the tree and append to the tree spec. + """ + relative_hit_rate = None + + try: + relative_hit_rate = xgb_tree_json["cover"] + except KeyError: + pass + + # Fill node attributes + if "leaf" not in xgb_tree_json: + branch_mode = "BranchOnValueLessThan" + split_name = xgb_tree_json["split"] + feature_index = split_name if not feature_map else feature_map[split_name] + + # xgboost internally uses float32, but the parsing from json pulls it out + # as a 64bit double. To trigger the internal float32 detection in the + # tree ensemble compiler, we need to explicitly cast it to a float 32 + # value, then back to the 64 bit float that protobuf expects. This is + # controlled with the force_32bit_float flag. + feature_value = xgb_tree_json["split_condition"] + + if force_32bit_float: + feature_value = float(_np.float32(feature_value)) + + true_child_id = xgb_tree_json["yes"] + false_child_id = xgb_tree_json["no"] + + # Get the missing value behavior correct + missing_value_tracks_true_child = False + + try: + if xgb_tree_json["missing"] == true_child_id: + missing_value_tracks_true_child = True + except KeyError: + pass + + mlkit_tree.add_branch_node( + tree_id, + node_id, + feature_index, + feature_value, + branch_mode, + true_child_id, + false_child_id, + relative_hit_rate=relative_hit_rate, + missing_value_tracks_true_child=missing_value_tracks_true_child, + ) + + else: + value = xgb_tree_json["leaf"] + if force_32bit_float: + value = float(_np.float32(value)) + + if mode == "classifier" and n_classes > 2: + value = {tree_index: value} + + mlkit_tree.add_leaf_node( + tree_id, node_id, value, relative_hit_rate=relative_hit_rate + ) + + # Now recurse + if "children" in xgb_tree_json: + for child in xgb_tree_json["children"]: + recurse_json( + mlkit_tree, + child, + tree_id, + child["nodeid"], + feature_map, + force_32bit_float, + mode=mode, + tree_index=tree_index, + n_classes=n_classes, + ) + + +def convert_tree_ensemble( + model, + feature_names, + target, + force_32bit_float, + mode="regressor", + class_labels=None, + n_classes=None, +): + """Convert a generic tree model to the protobuf spec. + + This currently supports: + * Decision tree regression + + Parameters + ---------- + model: str | Booster + Path on disk where the XGboost JSON representation of the model is or + a handle to the XGboost model. + + feature_names : list of strings or None + Names of each of the features. When set to None, the feature names are + extracted from the model. + + target: str, + Name of the output column. + + force_32bit_float: bool + If True, then the resulting CoreML model will use 32 bit floats internally. + + mode: str in ['regressor', 'classifier'] + Mode of the tree model. + + class_labels: list[int] or None + List of classes. When set to None, the class labels are just the range from + 0 to n_classes - 1. + + n_classes: int or None + Number of classes in classification. 
When set to None, the number of
+ classes is expected from the model or class_labels should be provided.
+
+ Returns
+ -------
+ model_spec: An object of type Model_pb.
+ Protobuf representation of the model
+ """
+ if not (_HAS_XGBOOST):
+ raise RuntimeError("xgboost not found. xgboost conversion API is disabled.")
+ accepted_modes = ["regressor", "classifier"]
+ if mode not in accepted_modes:
+ raise ValueError("mode should be in %s" % accepted_modes)
+ import json
+ import os
+
+ feature_map = None
+ if isinstance(
+ model, (_xgboost.core.Booster, _xgboost.XGBRegressor, _xgboost.XGBClassifier)
+ ):
+
+ # Testing a few corner cases that we don't support
+ if isinstance(model, _xgboost.XGBRegressor):
+ if mode == "classifier":
+ raise ValueError("mode is classifier but provided a regressor")
+ try:
+ objective = model.get_xgb_params()["objective"]
+ except Exception:
+ objective = None
+ if objective in ["reg:gamma", "reg:tweedie"]:
+ raise ValueError(
+ "Regression objective '%s' not supported for export." % objective
+ )
+
+ if isinstance(model, _xgboost.XGBClassifier):
+ if mode == "regressor":
+ raise ValueError("mode is regressor but provided a classifier")
+ n_classes = model.n_classes_
+ if class_labels is not None:
+ if len(class_labels) != n_classes:
+ raise ValueError(
+ "Number of classes in model (%d) does not match "
+ "length of supplied class list (%d)."
+ % (n_classes, len(class_labels))
+ )
+ else:
+ class_labels = list(range(n_classes))
+
+ # Now use the booster API.
+ if isinstance(model, (_xgboost.XGBRegressor, _xgboost.XGBClassifier)):
+ # Name change in 0.7
+ if hasattr(model, "get_booster"):
+ model = model.get_booster()
+ else:
+ model = model.booster()
+
+ # XGBoost models sometimes carry feature names and sometimes do not.
+ if (feature_names is None) and (model.feature_names is None):
+ raise ValueError(
+ "The XGBoost model does not have feature names. They must be provided in convert method."
+ )
+ if feature_names is None:
+ feature_names = model.feature_names
+
+ xgb_model_str = model.get_dump(with_stats=True, dump_format="json")
+
+ if model.feature_names:
+ feature_map = {f: i for i, f in enumerate(model.feature_names)}
+
+ # Path on the file system where the XGBoost model exists.
+ elif isinstance(model, str):
+ if not os.path.exists(model):
+ raise TypeError("Invalid path %s." % model)
+ with open(model) as f:
+ xgb_model_str = json.load(f)
+
+ if feature_names is None:
+ raise ValueError(
+ "feature names must be provided in convert method if the model is a path on file system."
+ )
+ else:
+ feature_map = {f: i for i, f in enumerate(feature_names)}
+
+ else:
+ raise TypeError("Unexpected type. 
Expecting XGBoost model.") + + if mode == "classifier": + if n_classes is None and class_labels is None: + raise ValueError( + "You must provide class_labels or n_classes when not providing the XGBClassifier" + ) + elif n_classes is None: + n_classes = len(class_labels) + elif class_labels is None: + class_labels = range(n_classes) + if n_classes == 2: + # if we have only 2 classes we only have one sequence of estimators + base_prediction = [0.0] + else: + base_prediction = [0.0 for c in range(n_classes)] + # target here is the equivalent of output_features in scikit learn + mlkit_tree = TreeEnsembleClassifier(feature_names, class_labels, target) + mlkit_tree.set_default_prediction_value(base_prediction) + if n_classes == 2: + mlkit_tree.set_post_evaluation_transform("Regression_Logistic") + else: + mlkit_tree.set_post_evaluation_transform("Classification_SoftMax") + else: + mlkit_tree = _TreeEnsembleRegressor(feature_names, target) + mlkit_tree.set_default_prediction_value(0.5) + + for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str): + if mode == "classifier" and n_classes > 2: + tree_index = xgb_tree_id % n_classes + else: + tree_index = 0 + + try: + # this means that the xgb_tree_str is a json dump and needs to be loaded + xgb_tree_json = json.loads(xgb_tree_str) + except: + # this means that the xgb_tree_str is loaded from a path in file system already and does not need to be reloaded + xgb_tree_json = xgb_tree_str + + recurse_json( + mlkit_tree, + xgb_tree_json, + xgb_tree_id, + node_id=0, + feature_map=feature_map, + force_32bit_float=force_32bit_float, + mode=mode, + tree_index=tree_index, + n_classes=n_classes, + ) + + return mlkit_tree.spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py new file mode 100644 index 00000000..526e1e5d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import datatypes + +from . import _feature_management + +from . import nearest_neighbors +from . import pipeline +from . import tree_ensemble +from . import feature_vectorizer + +from . import _interface_management + +from .model import MLModel +from .model import ( + _MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, + _MLMODEL_QUANTIZED, + _VALID_MLMODEL_PRECISION_TYPES, + _SUPPORTED_QUANTIZATION_MODES, + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _LUT_BASED_QUANTIZATION, + _QUANTIZATION_MODE_DEQUANTIZE, + _METADATA_VERSION, + _METADATA_SOURCE, +) + +from . import neural_network +from . import ml_program diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py new file mode 100644 index 00000000..5eb9d43d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py @@ -0,0 +1,37 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import functools +import warnings + + +def deprecated(obj=None, suffix=""): + """ + Decorator to mark a function or a class as deprecated + """ + + def decorator_deprecation_warning(obj): + @functools.wraps(obj) + def wrapped(*args, **kwargs): + if isinstance(obj, type): + msg = ( + 'Class "%s" is deprecated and will be removed in 6.0.' + % obj.__name__ + ) + else: + msg = ( + 'Function "%s" is deprecated and will be removed in 6.0.' + % obj.__name__ + ) + if suffix: + msg += "; %s" % suffix + warnings.warn(msg, category=FutureWarning) + return obj(*args, **kwargs) + + return wrapped + + if obj is None: + return decorator_deprecation_warning + + return decorator_deprecation_warning(obj) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py new file mode 100644 index 00000000..9e8a7c89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py @@ -0,0 +1,354 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import operator as op +from collections import defaultdict +from copy import copy +from functools import reduce + +import numpy as _np + +from . import datatypes + + +def process_or_validate_classifier_output_features( + output_features, class_labels, supports_class_scores=True +): + """ + Given a list of class labels and a list of output_features, validate the + list and return a valid version of output_features with all the correct + data type information included. + """ + + def raise_error(msg): + + raise ValueError("Classifier error: %s" % msg) + + class_labels = list(class_labels) + + # First, we need to determine the type of the classes. + _int_types = (int, bool, _np.bool_, _np.int32, _np.int64) + + if all(isinstance(cl, _int_types) for cl in class_labels): + output_class_type = datatypes.Int64() + + elif all(isinstance(cl, str) for cl in class_labels): + output_class_type = datatypes.String() + + else: + raise ValueError("Class labels must be all of type int or all of type string.") + + if output_features is None: + + out = [("classLabel", output_class_type)] + + if supports_class_scores: + out += [("classProbability", datatypes.Dictionary(output_class_type))] + + elif isinstance(output_features, str): + + out = [(output_features, output_class_type)] + + if supports_class_scores: + out += [("classProbability", datatypes.Dictionary(output_class_type))] + + elif ( + isinstance(output_features, (list, tuple)) + and all(isinstance(fn, str) for fn in output_features) + and len(output_features) == 2 + ): + + if supports_class_scores: + out = [ + (output_features[0], output_class_type), + (output_features[1], datatypes.Dictionary(output_class_type)), + ] + else: + raise ValueError( + "Classifier model (as trained) does not support output scores for classes." 
+ ) + + elif is_valid_feature_list(output_features): + + output_features = [ + (k, datatypes._normalize_datatype(dt)) for k, dt in output_features + ] + + if len(output_features) == 1 or not supports_class_scores: + if not output_features[0][1] == output_class_type: + raise ValueError( + "Type of output class feature does not match type of class labels." + ) + + else: + # Make sure the first two output features specified give the output + # class field and the output class scores dictionary field + if isinstance(output_features[0][1], datatypes.Dictionary) and isinstance( + output_features[1][1], output_class_type + ): + output_features[0], output_features[1] = ( + output_features[1], + output_features[0], + ) + + if not isinstance(output_features[1][1], datatypes.Dictionary): + raise_error("Output features class scores should be dictionary type.") + + if output_features[1][1].key_type != output_class_type: + raise_error( + "Class scores dictionary key type does not match type of class labels." + ) + + if output_features[0][1] != output_class_type: + raise_error( + "Specified type of output class does not match type of class labels." + ) + + # NOTE: We are intentionally allowing the case where additional fields are allowed + # beyond the original two features. + + out = output_features + + else: + raise_error("Form of output features not recognized") + + return out + + +def is_valid_feature_list(features): + # Just test all the ways this could be + return ( + type(features) is list + and len(features) >= 1 + and all(type(t) is tuple and len(t) == 2 for t in features) + and all(isinstance(n, str) for n, td in features) + and all(datatypes._is_valid_datatype(td) for n, td in features) + ) + + +def dimension_of_array_features(features): + if not is_valid_feature_list(features): + raise ValueError("Expected feature list in valid form.") + + dim = 0 + for n, td in features: + if isinstance(td, (datatypes.Int64, datatypes.Double)): + dim += 1 + elif isinstance(td, datatypes.Array): + dim += reduce(op.mul, td.dimensions, 1) + else: + raise ValueError( + "Unable to determine number of dimensions from feature list." + ) + + return dim + + +def process_or_validate_features(features, num_dimensions=None, feature_type_map={}): + """ + Puts features into a standard form from a number of different possible forms. + + The standard form is a list of 2-tuples of (name, datatype) pairs. The name + is a string and the datatype is an object as defined in the _datatype module. + + The possible input forms are as follows: + + * A list of strings. in this case, the overall dimension is assumed to be + the length of the list. If neighboring names are identical, they are + assumed to be an input array of that length. For example: + + ["a", "b", "c"] + + resolves to + + [("a", Double), ("b", Double), ("c", Double)]. + + And: + + ["a", "a", "b"] + + resolves to + + [("a", Array(2)), ("b", Double)]. + + * A dictionary of keys to indices or ranges of feature indices. + + In this case, it's presented as a mapping from keys to indices or + ranges of contiguous indices. For example, + + {"a" : 0, "b" : [2,3], "c" : 1} + + Resolves to + + [("a", Double), ("c", Double), ("b", Array(2))]. + + Note that the ordering is determined by the indices. + + * A single string. In this case, the input is assumed to be a single array, + with the number of dimensions set using num_dimensions. + + + Notes: + + If the features variable is in the standard form, it is simply checked and + returned. 
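+
+    A minimal illustrative sketch of the list-of-strings form described above
+    (the import path is the private module itself, shown only for illustration):
+
+        >>> from coremltools.models._feature_management import (
+        ...     process_or_validate_features)
+        >>> process_or_validate_features(["a", "a", "b"])
+        [('a', Array({2})), ('b', Double)]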
+ + If num_dimensions is given, it is used to check against the existing features, + or fill in missing information in the case when features is a single string. + """ + + original_features = copy(features) + + if num_dimensions is not None and not isinstance(num_dimensions, int): + raise TypeError( + "num_dimensions must be None, an integer or a long, not '%s'" + % str(type(num_dimensions)) + ) + + def raise_type_error(additional_msg): + raise TypeError( + "Error processing feature list: %s\nfeatures = %s" + % (additional_msg, str(original_features)) + ) + + if type(features) is dict and is_valid_feature_list(features.items()): + features = features.items() + + # First, see if the features are already in the correct form. If they are, + # then we + if is_valid_feature_list(features): + if num_dimensions is not None: + try: + feature_dims = dimension_of_array_features(features) + except ValueError: + feature_dims = None + + if feature_dims is not None and feature_dims != num_dimensions: + raise_type_error("Dimension mismatch.") + + # We may need to translate some parts of this back to the actual + # datatype class -- e.g. translate str to datatypes.String(). + return [(k, datatypes._normalize_datatype(dt)) for k, dt in features] + + if isinstance(features, str): + if num_dimensions is None: + raise_type_error( + "If a single feature name is given, then " + "num_dimensions must be provided." + ) + features = {features: range(num_dimensions)} + + if isinstance(features, (list, tuple, _np.ndarray)): + # Change this into a dictionary + + mapping = defaultdict(lambda: []) + + for i, k in enumerate(features): + if not isinstance(k, str): + raise_type_error( + "List of feature names must either be a list of strings, or a list of (name, datatypes.Array instance) tuples." + ) + + if num_dimensions is not None and len(features) != num_dimensions: + raise_type_error( + ("List of feature names has wrong length; " "%d required, %d provided.") + % (num_dimensions, len(features)) + ) + + for i, k in enumerate(features): + mapping[k].append(i) + + # Replace the features + features = mapping + + if not isinstance(features, dict): + raise_type_error( + "features must be either a list of feature names " + "or a dictionary of feature names to ranges." + ) + + # We'll be invasive here so make a copy. + features = copy(features) + + for k, v in list(features.items()): + + if not isinstance(k, str): + raise_type_error("Feature names must be strings.") + + def test_index(val): + error = False + try: + if val != int(val): + error = True + except: + error = True + + if error: + raise_type_error( + "Specified indices for feature %s must be integers." % k + ) + + if val < 0 or (num_dimensions is not None and val >= num_dimensions): + raise_type_error("Index in feature %s out of range." % k) + + iterable_types = [tuple, list, set] + iterable_types.append(range) + if isinstance(v, tuple(iterable_types)): + for idx in v: + test_index(idx) + + # Replace and update + features[k] = v = list(sorted(v)) + + elif isinstance(v, int): + test_index(v) + features[k] = v = [v] + else: + raise_type_error( + ( + "Value type for feature %s not recognized; " + "values must be either integers, lists or range objects." + ) + % k + ) + + # check to make sure things are contiguous + if v != list(range(v[0], v[-1] + 1)): + raise_type_error( + "Index list for feature %s must consist of " + "a contiguous range of indices." % k + ) + + if len(set(v)) != len(v): + raise_type_error("Index list for feature %s contains duplicates." 
% k) + + # Now, set num dimensions from the list if it's actually None + if num_dimensions is None: + from itertools import chain + num_dimensions = 1 + max(chain.from_iterable(features.values())) + + if ( + set().union(*features.values()) != set(range(num_dimensions)) + or sum(len(v) for v in features.values()) != num_dimensions + ): + raise_type_error( + "Supplied indices must cover entire range of 0, ..., num_dimensions-1." + ) + + # Define the output feature types + output_features = [None] * len(features) + + # Finally, go through and map all these things out as types. + # Sort by first value of the index range. + for i, (k, v) in enumerate(sorted(features.items(), key=lambda t: t[1][0])): + if k in feature_type_map: + output_features[i] = (k, feature_type_map[k]) + + elif len(v) == 1: + output_features[i] = (k, datatypes.Double()) + else: + output_features[i] = (k, datatypes.Array(len(v))) + + return output_features diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py new file mode 100644 index 00000000..e22ab742 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py @@ -0,0 +1,211 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..proto import Model_pb2 +from . import _feature_management as _fm +from . import datatypes + + +def set_classifier_interface_params( + spec, + features, + class_labels, + model_accessor_for_class_labels, + output_features=None, + training_features=None, +): + """ + Common utilities to set the regression interface params. + """ + # Normalize the features list. + features = _fm.process_or_validate_features(features) + + if class_labels is None: + raise ValueError("List of class labels must be provided.") + + n_classes = len(class_labels) + + output_features = _fm.process_or_validate_classifier_output_features( + output_features, class_labels + ) + + if len(output_features) == 1: + predicted_class_output, pred_cl_type = output_features[0] + score_output = None + elif len(output_features) == 2: + predicted_class_output, pred_cl_type = output_features[0] + score_output, score_output_type = output_features[1] + else: + raise ValueError( + "Provided output classes for a classifier must be " + "a list of features, predicted class and (optionally) class_score." + ) + + spec.description.predictedFeatureName = predicted_class_output + + # Are they out of order? + if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()): + raise ValueError( + "Provided predicted class output type not Int64 or String (%s)." + % repr(pred_cl_type) + ) + + if score_output is not None: + if not isinstance(score_output_type, datatypes.Dictionary): + raise ValueError( + "Provided class score output type not a Dictionary (%s)." + % repr(score_output_type) + ) + + if score_output_type.key_type != pred_cl_type: + raise ValueError( + ( + "Provided class score output (%s) key_type (%s) does not " + "match type of class prediction (%s)." 
+ ) + % (score_output, repr(score_output_type.key_type), repr(pred_cl_type)) + ) + + spec.description.predictedProbabilitiesName = score_output + + # add input + for index, (cur_input_name, input_type) in enumerate(features): + input_ = spec.description.input.add() + input_.name = cur_input_name + datatypes._set_datatype(input_.type, input_type) + + # add output + for index, (cur_output_name, output_type) in enumerate(output_features): + output_ = spec.description.output.add() + output_.name = cur_output_name + datatypes._set_datatype(output_.type, output_type) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + # Worry about the class labels + if pred_cl_type == datatypes.String(): + try: + for c in class_labels: + getattr( + spec, model_accessor_for_class_labels + ).stringClassLabels.vector.append(str(c)) + # Not all the classifiers have class labels; in particular the pipeline + # classifier. Thus it's not an error if we can't actually set them. + except AttributeError: + pass + + else: + for c in class_labels: + conv_error = False + try: + if not (int(c) == c): + conv_error = True + except: + conv_error = True + + if conv_error: + raise TypeError( + ("Cannot cast '%s' class to an int type " % str(c)) + + "(class type determined by type of first class)." + ) + + try: + getattr( + spec, model_accessor_for_class_labels + ).int64ClassLabels.vector.append(int(c)) + # Not all the classifiers have class labels; in particular the pipeline + # classifier. Thus it's not an error if we can't actually set them. + except AttributeError: + break + + # And we are done! + return spec + + +def set_regressor_interface_params( + spec, features, output_features, training_features=None +): + """ + Common utilities to set the regressor interface params. + """ + if output_features is None: + output_features = [("predicted_class", datatypes.Double())] + else: + output_features = _fm.process_or_validate_features(output_features, 1) + + if len(output_features) != 1: + raise ValueError( + "Provided output features for a regressor must be " "one Double feature." + ) + + if output_features[0][1] != datatypes.Double(): + raise ValueError("Output type of a regressor must be a Double.") + + prediction_name = output_features[0][0] + spec.description.predictedFeatureName = prediction_name + + # Normalize the features list. + features = _fm.process_or_validate_features(features) + + # add input and output features + for cur_input_name, feature_type in features: + input_ = spec.description.input.add() + input_.name = cur_input_name + datatypes._set_datatype(input_.type, feature_type) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + output_ = spec.description.output.add() + output_.name = prediction_name + datatypes._set_datatype(output_.type, "Double") + return spec + + +def set_transform_interface_params( + spec, + input_features, + output_features, + are_optional=False, + training_features=None, + array_datatype=Model_pb2.ArrayFeatureType.DOUBLE, +): + """ + Common utilities to set transform interface params. 
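+
+    A minimal illustrative sketch (hypothetical feature names, applied to a
+    freshly created spec):
+
+        >>> from coremltools.proto import Model_pb2
+        >>> from coremltools.models import datatypes
+        >>> spec = Model_pb2.Model()
+        >>> spec = set_transform_interface_params(
+        ...     spec, [("x", datatypes.Array(3))], [("y", datatypes.Double())]
+        ... )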
+ """ + input_features = _fm.process_or_validate_features(input_features) + output_features = _fm.process_or_validate_features(output_features) + + # Add input and output features + for (fname, ftype) in input_features: + input_ = spec.description.input.add() + input_.name = fname + datatypes._set_datatype(input_.type, ftype, array_datatype=array_datatype) + if are_optional: + input_.type.isOptional = are_optional + + for (fname, ftype) in output_features: + output_ = spec.description.output.add() + output_.name = fname + datatypes._set_datatype(output_.type, ftype, array_datatype=array_datatype) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + return spec + + +def set_training_features(spec, training_features): + for (fname, ftype) in training_features: + training_input_ = spec.description.trainingInput.add() + training_input_.name = fname + if ftype: + datatypes._set_datatype(training_input_.type, ftype) + + return spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py b/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py new file mode 100644 index 00000000..9363b6cb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py @@ -0,0 +1,60 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .. import SPECIFICATION_VERSION +from ..proto import Model_pb2 as _Model_pb2 +from . import datatypes +from ._interface_management import set_transform_interface_params + + +def create_array_feature_extractor( + input_features, output_name, extract_indices, output_type=None +): + """ + Creates a feature extractor from an input array ``(feature, return)``. + + Parameters + ---------- + input_features: + A list of one ``(name, array)`` tuple. + + extract_indices: + Either an integer or a list. + If it's an integer, the output type is by default a double (but may also be an integer). + If a list, the output type is an array. + """ + + # Make sure that our starting stuff is in the proper form. + assert len(input_features) == 1 + assert isinstance(input_features[0][1], datatypes.Array) + + # Create the model. 
+ spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + + if isinstance(extract_indices, int): + extract_indices = [extract_indices] + if output_type is None: + output_type = datatypes.Double() + + elif isinstance(extract_indices, (list, tuple)): + if not all(isinstance(x, int) for x in extract_indices): + raise TypeError("extract_indices must be an integer or a list of integers.") + + if output_type is None: + output_type = datatypes.Array(len(extract_indices)) + + else: + raise TypeError("extract_indices must be an integer or a list of integers.") + + output_features = [(output_name, output_type)] + + for idx in extract_indices: + assert idx < input_features[0][1].num_elements + spec.arrayFeatureExtractor.extractIndex.append(idx) + + set_transform_interface_params(spec, input_features, output_features) + + return spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py b/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py new file mode 100644 index 00000000..6656f76a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py @@ -0,0 +1,244 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Basic Data Types. +""" +import numpy as _np + +from ..proto import Model_pb2 + + +class _DatatypeBase: + def __init__(self, type_tag, full_tag, num_elements): + self.type_tag, self.full_tag = type_tag, full_tag + self.num_elements = num_elements + + def __eq__(self, other): + return hasattr(other, "full_tag") and self.full_tag == other.full_tag + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self.full_tag) + + def __repr__(self): + return self.full_tag + + +class Int64(_DatatypeBase): + """ + Int64 Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "Int64", "Int64", 1) + + +class Double(_DatatypeBase): + """ + Double Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "Double", "Double", 1) + + +class String(_DatatypeBase): + """ + String Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "String", "String", 1) + + +class Array(_DatatypeBase): + """ + Array Data Type + """ + + def __init__(self, *dimensions): + """ + Constructs a Array, given its dimensions + + Parameters + ---------- + dimensions: ints | longs + + Examples + -------- + # Create a single dimensions array of length five + >>> arr = coremltools.models.datatypes.Array(5) + + # Create a multi dimension array five by two by ten. 
+ >>> multi_arr = coremltools.models.datatypes.Array(5, 2, 10) + """ + assert len(dimensions) >= 1 + assert all( + isinstance(d, (int, _np.int64, _np.int32)) for d in dimensions + ), "Dimensions must be ints, not {}".format(str(dimensions)) + self.dimensions = dimensions + + num_elements = 1 + for d in self.dimensions: + num_elements *= d + + _DatatypeBase.__init__( + self, + "Array", + "Array({%s})" % (",".join("%d" % d for d in self.dimensions)), + num_elements, + ) + + +class Dictionary(_DatatypeBase): + """ + Dictionary Data Type + """ + + def __init__(self, key_type=None): + """ + Constructs a Dictionary, given its key type + + Parameters + ---------- + key_type: Int64 | String + + Examples + -------- + >>> from coremltools.models.datatypes import Dictionary, Int64, String + + # Create a dictionary with string keys + >>> str_key_dict = Dictionary(key_type=String) + + # Create a dictionary with int keys + >>> int_key_dict = Dictionary(Int64) + """ + # Resolve it to a class if it's + global _simple_type_remap + if key_type in _simple_type_remap: + key_type = _simple_type_remap[key_type] + + if not isinstance(key_type, (Int64, String)): + raise TypeError("Key type for dictionary must be either string or integer.") + + self.key_type = key_type + + _DatatypeBase.__init__( + self, "Dictionary", "Dictionary(%s)" % repr(self.key_type), None + ) + + +_simple_type_remap = { + int: Int64(), + str: String(), + float: Double(), + Double: Double(), + Int64: Int64(), + String: String(), + "Double": Double(), + "Int64": Int64(), + "String": String(), +} + + +def _is_valid_datatype(datatype_instance): + """ + Returns true if datatype_instance is a valid datatype object and false otherwise. + """ + + # Remap so we can still use the python types for the simple cases + global _simple_type_remap + if datatype_instance in _simple_type_remap: + return True + + # Now set the protobuf from this interface. + if isinstance(datatype_instance, (Int64, Double, String, Array)): + return True + + elif isinstance(datatype_instance, Dictionary): + kt = datatype_instance.key_type + + if isinstance(kt, (Int64, String)): + return True + + return False + + +def _normalize_datatype(datatype_instance): + """ + Translates a user specified datatype to an instance of the ones defined above. + + Valid data types are passed through, and the following type specifications + are translated to the proper instances: + + str, "String" -> String() + int, "Int64" -> Int64() + float, "Double" -> Double() + + If a data type is not recognized, then an error is raised. + """ + global _simple_type_remap + if datatype_instance in _simple_type_remap: + return _simple_type_remap[datatype_instance] + + # Now set the protobuf from this interface. + if isinstance(datatype_instance, (Int64, Double, String, Array)): + return datatype_instance + + elif isinstance(datatype_instance, Dictionary): + kt = datatype_instance.key_type + + if isinstance(kt, (Int64, String)): + return datatype_instance + + raise ValueError("Datatype instance not recognized.") + + +def _set_datatype( + proto_type_obj, datatype_instance, array_datatype=Model_pb2.ArrayFeatureType.DOUBLE +): + # Remap so we can still use the python types for the simple cases + global _simple_type_remap + if datatype_instance in _simple_type_remap: + datatype_instance = _simple_type_remap[datatype_instance] + + # Now set the protobuf from this interface. 
+    if isinstance(datatype_instance, Int64):
+        proto_type_obj.int64Type.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, Double):
+        proto_type_obj.doubleType.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, String):
+        proto_type_obj.stringType.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, Array):
+        proto_type_obj.multiArrayType.MergeFromString(b"")
+        proto_type_obj.multiArrayType.dataType = array_datatype
+
+        for n in datatype_instance.dimensions:
+            proto_type_obj.multiArrayType.shape.append(n)
+
+    elif isinstance(datatype_instance, Dictionary):
+        proto_type_obj.dictionaryType.MergeFromString(b"")
+
+        kt = datatype_instance.key_type
+
+        if isinstance(kt, Int64):
+            proto_type_obj.dictionaryType.int64KeyType.MergeFromString(b"")
+        elif isinstance(kt, String):
+            proto_type_obj.dictionaryType.stringKeyType.MergeFromString(b"")
+        else:
+            raise ValueError("Dictionary key type must be either string or int.")
+
+    else:
+        raise TypeError(
+            "Datatype parameter not recognized; must be an instance "
+            "of datatypes.{Double, Int64, String, Dictionary, Array}, or "
+            "python int, float, or str types."
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py
new file mode 100644
index 00000000..41f34ba6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .. import SPECIFICATION_VERSION
+from ..proto import Model_pb2 as _Model_pb2
+from . import datatypes
+from ._feature_management import (is_valid_feature_list,
+                                  process_or_validate_features)
+from ._interface_management import set_transform_interface_params
+
+
+def create_feature_vectorizer(input_features, output_feature_name, known_size_map={}):
+    """
+    Create a feature vectorizer from input features. This returns a 2-tuple
+    ``(spec, num_dimensions)`` for a feature vectorizer that puts everything into a
+    single array with a length equal to the total size of all the input features.
+
+    Parameters
+    ----------
+    input_features: [list of 2-tuples]
+        Name(s) of the input features, given as a list of ``('name', datatype)``
+        tuples. The datatypes entry is one of the data types defined in the
+        ``datatypes`` module. Allowed datatypes are ``datatypes.Int64``,
+        ``datatypes.Double``, ``datatypes.Dictionary``, and ``datatypes.Array``.
+
+        If the feature is a dictionary type, then the dictionary must have integer
+        keys, and the number of dimensions to expand it into must be provided by
+        ``known_size_map``.
+
+        Feature indices in the final array are counted sequentially from 0
+        through the total number of features.
+
+
+    output_feature_name: str
+        The name of the output feature. Its type is an ``Array`` whose length
+        equals the total dimension of the input features.
+
+    known_size_map: dict, optional
+        A dictionary mapping the feature name to the expanded size in the final
+        array. This is most useful for specifying the size of sparse vectors
+        given as dictionaries of index to value.
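+
+    A minimal illustrative sketch (hypothetical feature names):
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.feature_vectorizer import (
+        ...     create_feature_vectorizer)
+        >>> spec, num_dims = create_feature_vectorizer(
+        ...     [("a", datatypes.Double()), ("b", datatypes.Array(3))], "combined"
+        ... )
+        >>> num_dims
+        4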
+ + """ + + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + + input_features = process_or_validate_features(input_features) + + feature_vectorizer = spec.featureVectorizer + + num_output_dimensions = 0 + + for n, ft in input_features: + if n in known_size_map: + dim = known_size_map[n] + + if ft.num_elements is not None: + if dim != ft.num_elements: + raise ValueError( + "In feature {}, override size {} not compatible with inherent " + "value size {}.".format(n, dim, ft.num_elements) + ) + else: + if ft.num_elements is None: + raise ValueError( + "In feature {}, inherent size unknown so must be manually supplied.".format( + n + ) + ) + dim = ft.num_elements + + num_output_dimensions += dim + + new_feature = feature_vectorizer.inputList.add() + new_feature.inputColumn = n + new_feature.inputDimensions = dim + + if not isinstance(output_feature_name, str): + if ( + is_valid_feature_list(output_feature_name) + and len(output_feature_name) == 1 + and output_feature_name[0][1] == datatypes.Array(num_output_dimensions) + ): + + output_feature_name = output_feature_name[0][0] + + else: + raise TypeError( + "Output feature must be specified as a feature name or correct output feature list." + ) + + output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))] + set_transform_interface_params(spec, input_features, output_features) + + return spec, num_output_dimensions diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py new file mode 100644 index 00000000..9c0d8b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import compression_utils \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py new file mode 100644 index 00000000..ab63fc6f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py @@ -0,0 +1,609 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +from coremltools import _SPECIFICATION_VERSION_IOS_16 +from coremltools.converters.mil import Operation as _Operation +from coremltools.converters.mil.converter import mil_convert as _mil_convert +from coremltools.converters.mil.frontend.milproto.load import load as _milproto_to_pymil +from coremltools.converters.mil.mil.passes.defs.quantization import ( + AbstractQuantizationPass as _AbstractQuantizationPass, +) +from coremltools.converters.mil.mil.passes.defs.quantization import ( + WeightAffineQuantizer as _WeightAffineQuantizer, +) +from coremltools.converters.mil.mil.passes.defs.quantization import ( + WeightDecompressor as _WeightDecompressor, +) +from coremltools.converters.mil.mil.passes.defs.quantization import ( + WeightPalettizer as _WeightPalettizer, +) +from coremltools.converters.mil.mil.passes.defs.quantization import ( + WeightSparsifier as _WeightSparsifier, +) + +_DEFAULT_MIN_WEIGHT_SIZE_TO_COMPRESS = 2048 +_DEFAULT_SPECIFICATION_VERSION_FOR_COMPRESSION = _SPECIFICATION_VERSION_IOS_16 + + +def _default_op_selector(const_op): + if not isinstance(const_op, _Operation) or const_op.op_type != "const": + raise ValueError("Input of the op_selector must be type of const Operation, got {}.".format(type(const_op))) + return const_op.val.val.size > _DEFAULT_MIN_WEIGHT_SIZE_TO_COMPRESS + +def _apply_graph_pass(mlmodel, graph_pass): + # Utility function which compresses a coreml model + # convert the fully precision mlmodel into pymil program + model_spec = mlmodel.get_spec() + model_type = model_spec.WhichOneof("Type") + if model_type in ("neuralNetwork", "neuralNetworkClassifier", "neuralNetworkRegressor", "pipeline", "PipelineClassifier", "PipelineRegressor"): + msg = ("coremltools.compression_utils are meant to be used only with mlprogram typed coreml models. " + "This model has type {}. Please use coremltools.models.neural_network.quantization_utils.quantize_weights" + "instead to compress the weights of the model.") + raise TypeError(msg.format(model_type)) + elif model_type == "mlProgram": + pass + else: + raise TypeError("weight compression not applicable for model type {}".format(model_type)) + + assert isinstance(graph_pass, _AbstractQuantizationPass), "compression pass must be an AbstractQuantizationPass instance" + specification_version = max(model_spec.specificationVersion, _DEFAULT_SPECIFICATION_VERSION_FOR_COMPRESSION) + prog = _milproto_to_pymil( + model_spec=model_spec, + specification_version=specification_version, + file_weights_dir=mlmodel.weights_dir, + ) + + # apply compression graph pass + graph_pass.apply(prog) + + # convert the pymil program back to mlmodel + compressed_mlmodel = _mil_convert( + prog, + convert_to="mlprogram", + convert_from="milinternal", + specification_version=specification_version, + compute_units=mlmodel.compute_unit, + model_description=model_spec.description, + ) + return compressed_mlmodel + +def affine_quantize_weights(mlmodel, mode="linear_symmetric", op_selector=None, dtype=_np.int8): + """ + Utility function to convert a float precision MLModel of type ``mlprogram`` that uses + float-precision weights into a compressed MLModel that uses 8-bit weights. This is + achieved by converting the float weight values that are stored in the ``const`` op + into the ``constexpr_affine_dequantize`` op. 
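+
+    The arithmetic involved is small enough to sketch directly. The following is
+    an illustrative numpy version of the ``"linear_symmetric"`` int8 scheme
+    described below, not the library's internal implementation:
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        w_r = np.array([-0.5, 0.1, 0.3], dtype=np.float32)
+        R = np.max(np.abs(w_r))  # symmetric range [-R, R]
+        s = R / 127.0  # scale; the zero point is 0 for int8
+        w_q = np.clip(np.round(w_r / s), -127, 127).astype(np.int8)
+        w_hat = s * w_q.astype(np.float32)  # what the runtime reconstructs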
+ + This function uses affine quantization on the float weights, providing up to 2x + savings in storage compared to float 16, or up to 4x savings compared to float 32. + All computation at runtime uses float precision; the precision of the intermediate + tensors and the compute precision of the ops are not altered. + + For each weight, this utility function converts the weight into the int8 or uint8 type using + either `Linear interpolation` (``"linear"`` mode) or `Linear symmetric + interpolation` (``"linear_symmetric"`` mode, the default). + + **Linear interpolation** + + Linear interpolation (``"linear"`` mode) maps the min/max of the float + range to the 8-bit integer range ``[low, high]`` using a zero point (also called quantization bias, or + offset) and a scale factor. For the int8 quantization, ``[low, high] = [-128, 127]``, while uint8 + quantization uses range ``[0, 255]``. + + ``"linear"`` mode uses the quantization formula: + + .. math:: + w_r = s * (w_q - z) + + Where: + + * :math:`w_r` and :math:`s` are of type float. + * :math:`w_r`` represents the float precision weight. + * :math:`s` represents the scale. + * :math:`w_q` and :math:`z` are of type 8-bit integer. + * :math:`w_q` represents quantized weight. + * :math:`z` represents the zero point. + + Quantized weights are computed as follows: + + .. math:: + w_q = cast\_to\_8\_bit\_integer(w_r / s + cast\_to\_float(z)) + + Note: :math:`cast\_to\_8\_bit\_integer` is the process of clipping the input to range ``[low, high]`` followed by rounding and casting to 8-bit integer. + + In ``"linear"`` mode, ``s, z`` are computed by mapping the original float range + ``[A, B]`` into the 8-bit integer range ``[-128, 127]`` or ``[0, 255]``. That is, you are solving the + following linear equations: + + * ``B = s * (high - z)`` + * ``A = s * (low - z)`` + + The equations result in the following: + + * ``s = (B - A) / (high - low)`` + * ``z = cast_to_8_bit_integer((low * B - high * A) / (B - A))`` + + When the rank of weight ``w`` is 1, then ``s`` and ``z`` are both scalars. When the + rank of the weight is greater than 1, then ``s`` and ``z`` are both vectors. In that + case, scales are computed per `channel`, in which `channel` is the output dimension, + which corresponds to the first dimension for ops such as ``conv`` and ``linear``, and + the second dimension for the ``conv_transpose`` op. + + For ``"linear"`` mode, :math:`A = min(w_r)`, :math:`B = max(w_r)`. + + **Linear symmetric interpolation** + + With linear symmetric interpolation (``"linear_symmetric"`` mode, the default), rather than + mapping the exact min/max of the float range to the quantized range, + + the function chooses the maximum absolute value between the min/max, which results in a + floating-point range that is symmetric with respect to zero. This also makes the resulting zero + point ``0`` for int8 weight and ``127`` for uint8 weight. + + For ``"linear_symmetric"`` mode: + + * :math:`A = -R` and :math:`B = R`, where :math:`R = max(abs(w_r))`. + * This function maps to the range of ``[-127, 127]`` for int8 weight and ``[0, 254]`` for uint8 weight. + * The result is ``s=(B-A)/254`` -> ``s=2R/254`` -> ``s=R/127``. + * Solving for ``z``: + * int8: ``z = (-127 * R + 127 * R)/2R`` -> ``z=0``. + * uint8: ``z = (0 * R + 254 * R)/2R`` -> ``z=127``. + + Parameters + ---------- + mlmodel: MLModel + Model to be quantized. This MLModel should be of type ``mlprogram``. 
+
+    mode: str
+        Mode for linear quantization:
+
+        * ``"linear_symmetric"`` (default): Input data are quantized in the range
+          ``[-R, R]``, where :math:`R = max(abs(w_r))`.
+        * ``"linear"``: Input data are quantized in the range
+          :math:`[min(w_r), max(w_r)]`.
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Const``;
+        that is, a ``const`` operation. It returns a ``bool``: ``True`` to compress ``const_op``,
+        otherwise ``False``. See the following examples:
+
+        * All constants in the network are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return (
+                     const_op.val.val.size > 2048
+                     and const_op.val.child_ops[0].op_type == "conv"
+                     and const_op.val == const_op.val.child_ops[0].weight
+                 )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+          * ``const_op.val.val``: The numpy array holding the value of the const.
+          * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+          * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+            of the i-th child op.
+          * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+            i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights bigger than 2048 elements are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+    dtype: np.generic or mil.type type
+        Determines the quantized data type (int8/uint8).
+
+        * The allowed values are:
+            * ``np.int8`` (the default)
+            * ``np.uint8``
+            * ``coremltools.converters.mil.mil.types.int8``
+            * ``coremltools.converters.mil.mil.types.uint8``
+
+    Returns
+    -------
+    model: MLModel
+        The quantized MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.affine_quantize_weights(
+            model, mode="linear_symmetric"
+        )
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    affine_weight_quantizer = _WeightAffineQuantizer(fake_compression=False, mode=mode, op_selector=op_selector, dtype=dtype)
+    return _apply_graph_pass(mlmodel, affine_weight_quantizer)
+
+
+def palettize_weights(mlmodel, nbits=None, mode="kmeans", op_selector=None, lut_function=None):
+    """
+    Utility function to convert a float precision MLModel of type ``mlprogram`` to a
+    compressed MLModel by reducing the overall number of weights using a lookup table
+    (LUT). A LUT contains a list of float values. An `nbit` LUT has 2\ :sup:`nbits` entries.
+
+    For example, a float weight vector such as ``{0.3, 0.3, 0.5, 0.5}`` can be compressed
+    using a 1-bit LUT: ``{0.3, 0.5}``. In this case the float vector can be replaced
+    with a 1-bit vector ``{0, 0, 1, 1}``.
+
+    This function iterates over all the weights in the ``mlprogram``, discretizes their
+    values, and constructs the LUT according to the algorithm specified in ``mode``. The
+    float values are then converted to the `nbit` values, and the LUT is saved alongside
+    each weight. The ``const`` ops storing weight values are replaced by
+    ``constexpr_lut_to_dense`` ops.
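+
+    The storage format is easiest to see in a small illustrative numpy sketch
+    (not the library's internal implementation):
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        weight = np.array([0.3, 0.3, 0.5, 0.5], dtype=np.float32)
+        lut = np.unique(weight)  # [0.3, 0.5]: a 1-bit lookup table
+        indices = np.searchsorted(lut, weight)  # [0, 0, 1, 1], one entry per weight
+        reconstructed = lut[indices]  # what the runtime recovers at load time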
+
+    At runtime, the LUT and the `nbit` values are used to reconstruct the float weight
+    values, which are then used to perform the float operation that the weight is
+    feeding into.
+
+    Consider the following example of ``"uniform"`` mode (a linear histogram):
+
+    * ``nbits = 2``
+    * ``mode = "uniform"``
+    * ``weight = [0.11, 0.19, 0.3, 0.08, 0.0, 0.02]``
+
+    The weight can be converted to a palette with indices ``[0, 1, 2, 3]`` (2 bits). The
+    indices are a byte array.
+
+    The data range ``[0.0, 0.3]`` is divided into 4 partitions linearly, which is
+    ``[0.0, 0.1, 0.2, 0.3]``.
+
+    * The LUT would be ``[0.0, 0.1, 0.2, 0.3]``.
+
+    * The weight is rounded to ``[0.1, 0.2, 0.3, 0.1, 0.0, 0.0]``, and represented in
+      the palette as indices ``[01b, 10b, 11b, 01b, 00b, 00b]``.
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model to be converted by a LUT. This MLModel should be of type ``mlprogram``.
+
+    nbits: int
+        Number of bits per weight. Required for ``kmeans`` or ``uniform`` mode, but must
+        not be set for ``unique`` or ``custom`` mode. A LUT would have
+        2\ :sup:`nbits` entries, where `nbits` can be ``{1, 2, 4, 6, 8}``.
+
+    mode: str
+        Determine how the LUT is constructed by specifying one of the following:
+
+        * ``"kmeans"`` (default): The LUT is generated by `k-means clustering`, a method of vector
+          quantization that groups similar data points together to discover underlying
+          patterns by using a fixed number (`k`) of clusters in a dataset. A cluster
+          refers to a collection of data points aggregated together because of certain
+          similarities. `nbits` is required.
+
+        * ``"uniform"``: The LUT is generated by a linear histogram (a numpy sketch of
+          this construction follows the mode list below).
+
+           - ``[v_min, v_min + scale, v_min + 2 * scale, ..., v_max]``
+           - Where the weight is in the range ``[v_min, v_max]``, and
+             ``scale = (v_max - v_min) / ((1 << nbits) - 1)``.
+           - ``nbits`` is required.
+
+           A `histogram` is a representation of the distribution of a continuous variable,
+           in which the entire range of values is divided into a series of intervals (or
+           `bins`) and the representation displays how many values fall into each bin.
+           Linear histograms have one bin at even intervals, such as one bin per integer.
+
+        * ``"unique"``: The LUT is generated by unique values in the weights. The weights
+          are assumed to be on a discrete lattice but stored in a float data type. This
+          mode identifies the unique values and converts them into the palettized
+          representation.
+
+          Do not provide ``nbits`` for this mode. ``nbits`` is picked up automatically,
+          with the smallest possible value in ``{1, 2, 4, 6, 8}`` such that the
+          number of the unique values is ``<= (1 << nbits)``. If the weight has ``> 256``
+          unique values, the compression is skipped.
+
+          For example:
+
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4}`` and ``nbits=2``, the weights are
+            converted to ``{00b, 01b, 10b, 11b}``, and the generated LUT is
+            ``[0.1, 0.2, 0.3, 0.4]``.
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4}`` and ``nbits=1``, nothing happens
+            because the weights are not a 1-bit lattice.
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4, 0.5}`` and ``nbits=2``, nothing
+            happens because the weights are not a 2-bit lattice.
+
+        * ``"custom"``: The LUT and palettization parameters are calculated using a custom
+          function. If this mode is selected then ``lut_function`` must be provided.
+
+          Do not provide ``nbits`` for this mode. The user should customize ``nbits`` in the
+          ``lut_function`` implementation.
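+
+        As a reference for the ``"uniform"`` construction above, a minimal
+        illustrative numpy sketch (assumes a non-constant ``weight`` so that
+        ``scale`` is non-zero; not the library's internal implementation):
+
+        .. sourcecode:: python
+
+            import numpy as np
+
+            def uniform_lut(weight, nbits):
+                # LUT entries are evenly spaced between the min and max weight.
+                v_min, v_max = float(weight.min()), float(weight.max())
+                scale = (v_max - v_min) / ((1 << nbits) - 1)
+                lut = v_min + scale * np.arange(1 << nbits)
+                # Each weight maps to the index of its nearest LUT entry.
+                indices = np.round((weight - v_min) / scale).astype(np.uint8)
+                return lut, indices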
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Operation``.
+        It returns a ``bool``: ``True`` to compress ``const_op``, otherwise ``False``.
+        See the following examples:
+
+        * All constants in the network are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return (
+                     const_op.val.val.size > 2048
+                     and const_op.val.child_ops[0].op_type == "conv"
+                     and const_op.val == const_op.val.child_ops[0].weight
+                 )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+          * ``const_op.val.val``: The numpy array holding the value of the const.
+          * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+          * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+            of the i-th child op.
+          * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+            i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights bigger than 2048 elements are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+    lut_function: callable
+        A callable function which computes the weight palettization parameters. This must
+        be provided if the mode is set to ``"custom"``.
+
+        weight: np.ndarray
+            A float precision numpy array.
+
+        Returns:
+
+        lut: list[float]
+            The lookup table.
+
+        indices: list[int]
+            A list of indices for each element.
+
+        The following is an example that extracts the ``top_k`` elements as the LUT. Given
+        that ``weight = [0.1, 0.5, 0.3, 0.3, 0.5, 0.6, 0.7]``, the ``lut_function``
+        produces ``lut = [0, 0.5, 0.6, 0.7], indices = [0, 1, 0, 0, 1, 2, 3]``.
+
+        .. sourcecode:: python
+
+            import numpy as np
+
+            def lut_function(weight):
+                # In this example, we assume elements in the weights >= 0
+                weight = weight.flatten()
+                nbits = 2
+
+                # Get the LUT by extracting the top k maximum unique elements in the weight.
+                # Note that k = (1 << nbits) - 1, which reserves the first LUT entry for 0.
+                unique_elements = np.unique(weight)
+                k = (1 << nbits) - 1
+                top_k = np.partition(unique_elements, -k)[-k:]
+                top_k = np.sort(top_k)
+                lut = [0.0] + top_k.tolist()
+
+                # Compute the indices
+                mapping = {v: idx for idx, v in enumerate(lut)}
+                indices = [mapping[v] if v in mapping else 0 for v in weight]
+
+                return lut, indices
+
+    Returns
+    -------
+    model: MLModel
+        The palettized MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.palettize_weights(model, mode="kmeans", nbits=4)
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    weight_palettizer = _WeightPalettizer(nbits=nbits, fake_compression=False, op_selector=op_selector, mode=mode, lut_function=lut_function)
+    return _apply_graph_pass(mlmodel, weight_palettizer)
+
+
+def sparsify_weights(mlmodel, mode="threshold_based", threshold=1e-3, target_percentile=1.0, op_selector=None):
+    """
+    Utility function to convert a float precision MLModel of type ``mlprogram`` to a
+    compressed MLModel using a sparse representation. The ``const`` ops storing weight
+    values are replaced by ``constexpr_sparse_to_dense`` ops.
+
+    This function is useful if the model is trained with pruning techniques so that
+    a lot of weights have zero values. If a large percentage of weight values are zero,
+    a sparse representation is more efficient than a dense one (the default).
+
+    The sparsified weights are stored in a bit mask. If the weight values are
+    ``{0, 0, 0, 0, 0, 0, 0, 56.3}``, its sparse representation contains a bit mask with
+    ones on locations where the value is non-zero: ``00000001b``. This is accompanied by
+    non-zero data, which is a size-1 vector of value ``{56.3}``.
+
+    For example, given the following:
+
+    * ``weight = [0.3, 0, 0, 0.5, 0, 0]``
+    * ``non_zero_data, bit_mask = sparsify(weight)``
+
+    The resulting sparse representation is:
+
+    * ``non_zero_data = [0.3, 0.5]``
+    * ``bit_mask = "100100"``
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model to be sparsified. This MLModel should be of type ``mlprogram``.
+
+    mode: str
+        Determine the scheme to sparsify the model by specifying one of the following:
+
+        * ``"threshold_based"`` (default): All the absolute weight values that are smaller
+          than ``threshold`` are changed to 0, and the tensor is stored in a sparse format.
+          For example, given the following:
+
+          * ``weight = [0.3, -0.2, -0.01, 0.05]``
+          * ``threshold = 0.03``
+
+          The sparsified weight would be ``[0.3, -0.2, 0, 0.05]``.
+
+        * ``"percentile_based"``: Sparsify the weight with a constant sparsity percentile,
+          which is ``target_percentile``. Where
+          ``n = floor(size_of_weight_tensor * target_percentile)``, the ``n`` lowest
+          absolute weight values are changed to 0. For example, given the following:
+
+          * ``weight = [0.3, -0.2, -0.01, 0.05]``
+          * ``target_percentile = 0.75``
+
+          The sparsified weight would be ``[0.3, 0, 0, 0]``.
+
+    threshold: float
+        Required when ``mode = "threshold_based"``. The absolute threshold to sparsify the weight.
+
+    target_percentile: float
+        Required when ``mode = "percentile_based"``. The percentage of sparsity for
+        compression, which needs to be in the range [0, 1]. When 0, no sparsification
+        occurs. For 1, all weights become 0.
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Operation``.
+        It returns a ``bool``: ``True`` to compress ``const_op``, otherwise ``False``.
+        See the following examples:
+
+        * All constants in the network are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return (
+                     const_op.val.val.size > 2048
+                     and const_op.val.child_ops[0].op_type == "conv"
+                     and const_op.val == const_op.val.child_ops[0].weight
+                 )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+          * ``const_op.val.val``: The numpy array holding the value of the const.
+          * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+          * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+            of the i-th child op.
+          * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+            i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights bigger than 2048 elements are compressed:
+
+          .. sourcecode:: python
+
+             def op_selector(const_op):
+                 return const_op.val.val.size > 2048
+
+    Returns
+    -------
+    model: MLModel
+        The sparse MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.sparsify_weights(
+            model, mode="threshold_based", threshold=0.01
+        )
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    weight_sparsifier = _WeightSparsifier(mode=mode, threshold=threshold, target_percentile=target_percentile, op_selector=op_selector)
+    return _apply_graph_pass(mlmodel, weight_sparsifier)
+
+
+def decompress_weights(mlmodel):
+    """
+    Utility function to convert weights that are sparse, palettized, or affine
+    quantized back to the float format. That is, it converts any of the following
+    three ops:
+
+    (1) constexpr_affine_dequantize
+    (2) constexpr_lut_to_dense
+    (3) constexpr_sparse_to_dense
+
+    to mb.const.
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model which will be decompressed.
+
+    Returns
+    -------
+    model: MLModel
+        The MLModel with no constexpr ops included.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_compressed_model.mlpackage")
+        decompressed_model = ct.compression_utils.decompress_weights(model)
+
+    """
+    weight_decompressor = _WeightDecompressor(op_selector=lambda op: True)
+    return _apply_graph_pass(mlmodel, weight_decompressor)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/model.py b/__packaged__/coreml/.python_dependencies/coremltools/models/model.py
new file mode 100644
index 00000000..ac7f1a11
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/model.py
@@ -0,0 +1,670 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import atexit as _atexit +import os as _os +import shutil as _shutil +import tempfile as _tempfile +import warnings as _warnings +from copy import deepcopy as _deepcopy + +import numpy as _np +import numpy as _numpy + +from coremltools import ComputeUnit as _ComputeUnit +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH +from coremltools.converters.mil.mil.program import Program as _Program + +from ..proto import FeatureTypes_pb2 as _ft +from ..proto import MIL_pb2 as _MIL_pb2 +from ..proto import Model_pb2 as _Model_pb2 +from .utils import (_MLMODEL_EXTENSION, _MLPACKAGE_AUTHOR_NAME, + _MLPACKAGE_EXTENSION, _WEIGHTS_DIR_NAME, _create_mlpackage, + _has_custom_layer, _is_macos, _macos_version, + load_spec as _load_spec, save_spec as _save_spec, + ) + +if _HAS_TORCH: + import torch as _torch + +if _HAS_TF_1 or _HAS_TF_2: + import tensorflow as _tf + + +try: + from ..libmodelpackage import ModelPackage as _ModelPackage +except: + _ModelPackage = None + +_HAS_PIL = True +try: + from PIL import Image as _PIL_IMAGE +except: + _HAS_PIL = False + + +_MLMODEL_FULL_PRECISION = "float32" +_MLMODEL_HALF_PRECISION = "float16" +_MLMODEL_QUANTIZED = "quantized_model" + +_VALID_MLMODEL_PRECISION_TYPES = [ + _MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, + _MLMODEL_QUANTIZED, +] + +# Linear quantization +_QUANTIZATION_MODE_LINEAR_QUANTIZATION = "_linear_quantization" +# Linear quantization represented as a lookup table +_QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR = "_lookup_table_quantization_linear" +# Lookup table quantization generated by K-Means +_QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS = "_lookup_table_quantization_kmeans" +# Custom lookup table quantization +_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE = "_lookup_table_quantization_custom" +# Dequantization +_QUANTIZATION_MODE_DEQUANTIZE = "_dequantize_network" # used for testing +# Symmetric linear quantization +_QUANTIZATION_MODE_LINEAR_SYMMETRIC = "_linear_quantization_symmetric" + +_SUPPORTED_QUANTIZATION_MODES = [ + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, +] + +_LUT_BASED_QUANTIZATION = [ + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, +] + +_METADATA_VERSION = "com.github.apple.coremltools.version" +_METADATA_SOURCE = "com.github.apple.coremltools.source" + + + +class _FeatureDescription: + def __init__(self, fd_spec): + self._fd_spec = fd_spec + + def __repr__(self): + return "Features(%s)" % ",".join(map(lambda x: x.name, self._fd_spec)) + + def __len__(self): + return len(self._fd_spec) + + def __getitem__(self, key): + for f in self._fd_spec: + if key == f.name: + return f.shortDescription + raise KeyError("No feature with name %s." % key) + + def __contains__(self, key): + for f in self._fd_spec: + if key == f.name: + return True + return False + + def __setitem__(self, key, value): + for f in self._fd_spec: + if key == f.name: + f.shortDescription = value + return + raise AttributeError("No feature with name %s." 
% key)
+
+    def __iter__(self):
+        for f in self._fd_spec:
+            yield f.name
+
+
+def _get_proxy_and_spec(filename, compute_units, skip_model_load=False):
+    try:
+        from ..libcoremlpython import _MLModelProxy
+    except Exception:
+        _MLModelProxy = None
+
+    filename = _os.path.expanduser(filename)
+    specification = _load_spec(filename)
+
+    if _MLModelProxy and not skip_model_load:
+
+        # check if the version is supported
+        engine_version = _MLModelProxy.maximum_supported_specification_version()
+        if specification.specificationVersion > engine_version:
+            # in this case the specification is a newer kind of .mlmodel than this
+            # version of the engine can support, so we'll not try to create a proxy object
+            return None, specification, None
+
+        try:
+            return _MLModelProxy(filename, compute_units.name), specification, None
+        except RuntimeError as e:
+            _warnings.warn(
+                "You will not be able to run predict() on this Core ML model."
+                + " Underlying exception message was: "
+                + str(e),
+                RuntimeWarning,
+            )
+            return None, specification, e
+
+    return None, specification, None
+
+
+def _try_get_weights_dir_path(mlpackage_path):
+    """
+    Try to find the weights in the mlpackage and return the path to the weights directory if found.
+    Return None if not found.
+    :param mlpackage_path: str, path to the mlpackage directory
+    :return: path to the weights directory inside the mlpackage directory
+    """
+    weights_dir = None
+    try:
+        if _ModelPackage.isValid(mlpackage_path):
+            item_info = _ModelPackage(mlpackage_path).findItemByNameAuthor(_WEIGHTS_DIR_NAME, _MLPACKAGE_AUTHOR_NAME)
+            if item_info is not None:
+                weights_dir = item_info.path()
+    except:
+        pass
+    return weights_dir
+
+
+class MLModel:
+    """
+    This class defines the minimal interface to a CoreML object in Python.
+
+    At a high level, the protobuf specification consists of:
+
+    - Model description: Encodes names and type information of the inputs and outputs to the model.
+    - Model parameters: The set of parameters required to represent a specific instance of the model.
+    - Metadata: Information about the origin, license, and author of the model.
+
+    With this class, you can inspect a CoreML model, modify metadata, and make
+    predictions for the purposes of testing (on select platforms).
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # Load the model
+        model = MLModel("HousePricer.mlmodel")
+
+        # Set the model metadata
+        model.author = "Author"
+        model.license = "BSD"
+        model.short_description = "Predicts the price of a house in the Seattle area."
+
+        # Get the interface to the model
+        model.input_description
+        model.output_description
+
+        # Set feature descriptions manually
+        model.input_description["bedroom"] = "Number of bedrooms"
+        model.input_description["bathrooms"] = "Number of bathrooms"
+        model.input_description["size"] = "Size (in square feet)"
+
+        # Set the output description
+        model.output_description["price"] = "Price of the house"
+
+        # Make predictions
+        predictions = model.predict({"bedroom": 1.0, "bath": 1.0, "size": 1240})
+
+        # Get the spec of the model
+        spec = model.get_spec()
+
+        # Save the model
+        model.save("HousePricer.mlpackage")
+
+        # Load the model from the spec object
+        spec = model.get_spec()
+        # modify spec (e.g. rename inputs/outputs etc.)
+        model = MLModel(spec)
+        # if model type is mlprogram, i.e. spec.WhichOneof('Type') == "mlProgram", then:
+        model = MLModel(spec, weights_dir=model.weights_dir)
+
+    See Also
+    --------
+    predict
+    """
+
+    def __init__(
+        self,
+        model,
+        is_temp_package=False,
+        mil_program=None,
+        skip_model_load=False,
+        compute_units=_ComputeUnit.ALL,
+        weights_dir=None,
+    ):
+        """
+        Construct an MLModel from an ``.mlmodel`` file or an ``.mlpackage``.
+
+        Parameters
+        ----------
+        model: str or Model_pb2
+
+            For an ML program (``mlprogram``), the model can be a path string
+            (``.mlpackage``) or ``Model_pb2``.
+            If it is a path string, it must point to a directory containing bundle
+            artifacts (such as ``weights.bin``).
+            If it is of type ``Model_pb2`` (spec) and the model has weights, then
+            ``weights_dir`` must also be provided, since both the proto spec and the
+            weights are required to initialize and load the model. Unlike a
+            NeuralNetwork, the proto spec for an ML program does not contain the
+            weights; they are stored separately. If the model does not have weights,
+            an empty ``weights_dir`` can be provided.
+
+            For non-mlprogram model types, the model can be a path string (``.mlmodel``)
+            or of type ``Model_pb2``, i.e. a spec object.
+
+        is_temp_package: bool
+            Set to True if the input model package dir is temporary and can be deleted
+            upon interpreter termination.
+
+        mil_program: coremltools.converters.mil.Program
+            Set to the MIL program object, if available.
+            It is available whenever an MLModel object is constructed using
+            the unified converter API `coremltools.convert() <https://apple.github.io/coremltools/source/coremltools.converters.mil.html#coremltools.converters._converters_entry.convert>`_.
+
+        skip_model_load: bool
+            Set to True to prevent coremltools from calling into the Core ML framework
+            to compile and load the model. In that case, the returned model object cannot
+            be used to make a prediction. This flag may be used to load a newer model
+            type on an older Mac, in order to inspect or load/save the spec.
+
+            For example, an ML program can be loaded this way on macOS 11, even though
+            ML programs can be compiled and loaded only on macOS 12+.
+
+            Defaults to False.
+
+        compute_units: coremltools.ComputeUnit
+            An enum with the following possible values:
+            - ``coremltools.ComputeUnit.ALL``: Use all compute units available, including the
+              neural engine.
+            - ``coremltools.ComputeUnit.CPU_ONLY``: Limit the model to only use the CPU.
+            - ``coremltools.ComputeUnit.CPU_AND_GPU``: Use both the CPU and GPU,
+              but not the neural engine.
+            - ``coremltools.ComputeUnit.CPU_AND_NE``: Use both the CPU and neural engine,
+              but not the GPU. Available only on macOS 13.0 or newer.
+
+        weights_dir: str
+            Path to the weights directory. Required when loading an MLModel of type
+            mlprogram from a spec object, i.e. when the argument ``model`` is of type
+            ``Model_pb2``.
+
+        Notes
+        -----
+        Internally this maintains the following:
+
+        - ``_MLModelProxy``: A pybind wrapper around
+          CoreML::Python::Model (see
+          `coremltools/coremlpython/CoreMLPython.mm `_)
+
+        - ``package_path`` (mlprogram only): Directory containing all artifacts (``.mlmodel``,
+          weights, and so on).
+
+        - ``weights_dir`` (mlprogram only): Directory containing weights inside the package_path.
+ + Examples + -------- + loaded_model = MLModel('my_model.mlmodel') + loaded_model = MLModel("my_model.mlpackage") + """ + + def cleanup(package_path): + if _os.path.exists(package_path): + _shutil.rmtree(package_path) + + if not isinstance(compute_units, _ComputeUnit): + raise TypeError('"compute_units" parameter must be of type: coremltools.ComputeUnit') + elif (compute_units == _ComputeUnit.CPU_AND_NE + and _is_macos() + and _macos_version() < (13, 0) + ): + raise ValueError( + 'coremltools.ComputeUnit.CPU_AND_NE is only available on macOS >= 13.0' + ) + self.compute_unit = compute_units + + self.is_package = False + self.is_temp_package = False + self.package_path = None + self._weights_dir = None + if mil_program is not None and not isinstance(mil_program, _Program): + raise ValueError('"mil_program" must be of type "coremltools.converters.mil.Program"') + self._mil_program = mil_program + + if isinstance(model, str): + model = _os.path.abspath(_os.path.expanduser(_os.path.expandvars(model))) + if _os.path.isdir(model): + self.is_package = True + self.package_path = model + self.is_temp_package = is_temp_package + self._weights_dir = _try_get_weights_dir_path(model) + self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( + model, compute_units, skip_model_load=skip_model_load, + ) + elif isinstance(model, _Model_pb2.Model): + model_type = model.WhichOneof('Type') + if model_type in ("mlProgram", 'pipelineClassifier', 'pipelineRegressor', 'pipeline'): + if model_type == "mlProgram" and weights_dir is None: + raise Exception('MLModel of type mlProgram cannot be loaded just from the model spec object. ' + 'It also needs the path to the weights file. Please provide that as well, ' + 'using the \'weights_dir\' argument.') + self.is_package = True + self.is_temp_package = True + filename = _create_mlpackage(model, weights_dir) + self.package_path = filename + self._weights_dir = _try_get_weights_dir_path(filename) + else: + filename = _tempfile.mktemp(suffix=_MLMODEL_EXTENSION) + _save_spec(model, filename) + + self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( + filename, compute_units, skip_model_load=skip_model_load, + ) + try: + _os.remove(filename) + except OSError: + pass + else: + raise TypeError( + "Expected model to be a .mlmodel file, .mlpackage file or a Model_pb2 object" + ) + + self._input_description = _FeatureDescription(self._spec.description.input) + self._output_description = _FeatureDescription(self._spec.description.output) + + if self.is_package and self.is_temp_package: + _atexit.register(cleanup, self.package_path) + + @property + def short_description(self): + return self._spec.description.metadata.shortDescription + + @short_description.setter + def short_description(self, short_description): + self._spec.description.metadata.shortDescription = short_description + + @property + def input_description(self): + return self._input_description + + @property + def output_description(self): + return self._output_description + + @property + def user_defined_metadata(self): + return self._spec.description.metadata.userDefined + + @property + def author(self): + return self._spec.description.metadata.author + + @author.setter + def author(self, author): + self._spec.description.metadata.author = author + + @property + def license(self): + return self._spec.description.metadata.license + + @license.setter + def license(self, license): + self._spec.description.metadata.license = license + + @property + def version(self): + return 
self._spec.description.metadata.versionString
+
+    @property
+    def weights_dir(self):
+        return self._weights_dir
+
+    @version.setter
+    def version(self, version_string):
+        self._spec.description.metadata.versionString = version_string
+
+    def __repr__(self):
+        return self._spec.description.__repr__()
+
+    def __str__(self):
+        return self.__repr__()
+
+    def save(self, save_path: str):
+        """
+        Save the model to an ``.mlmodel`` file. For an ML program, the ``save_path``
+        is a package directory containing the ``mlmodel`` file and weights.
+
+        Parameters
+        ----------
+        save_path: Target file path / bundle directory for the model.
+
+        Examples
+        --------
+        model.save('my_model_file.mlmodel')
+        loaded_model = MLModel('my_model_file.mlmodel')
+        """
+        save_path = _os.path.expanduser(save_path)
+
+        # Clean up existing file or directory.
+        if _os.path.exists(save_path):
+            if _os.path.isdir(save_path):
+                _shutil.rmtree(save_path)
+            else:
+                _os.remove(save_path)
+
+        if self.is_package:
+            name, ext = _os.path.splitext(save_path)
+            if not ext:
+                save_path = "{}{}".format(save_path, _MLPACKAGE_EXTENSION)
+            elif ext != _MLPACKAGE_EXTENSION:
+                raise Exception("For an ML Program, extension must be {} (not {})".format(_MLPACKAGE_EXTENSION, ext))
+            _shutil.copytree(self.package_path, save_path)
+        else:
+            _save_spec(self._spec, save_path)
+
+    def get_spec(self):
+        """
+        Get a deep copy of the protobuf specification of the model.
+
+        Returns
+        -------
+        model: Model_pb2
+            Protobuf specification of the model.
+
+        Examples
+        --------
+        spec = model.get_spec()
+        """
+        return _deepcopy(self._spec)
+
+
+    def predict(self, data):
+        """
+        Return predictions for the model.
+
+        Parameters
+        ----------
+        data: dict[str, value]
+            Dictionary of data to make predictions from, where the keys are
+            the names of the input features. For array inputs, values of type
+            ``numpy.ndarray``, ``tensorflow.Tensor``, and ``torch.Tensor`` are
+            acceptable.
+
+        Returns
+        -------
+        dict[str, value]
+            Predictions as a dictionary where each key is the output feature
+            name.
+
+        Examples
+        --------
+        data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}
+        predictions = model.predict(data)
+        data = {'array': numpy.array([[1.0, 2.0], [3.0, 4.0]])}
+        predictions = model.predict(data)
+        data = {'array': torch.Tensor([[1.0, 2.0], [3.0, 4.0]])}
+        predictions = model.predict(data)
+        data = {'array': tensorflow.constant([[1.0, 2.0], [3.0, 4.0]])}
+        predictions = model.predict(data)
+        """
+        if self.is_package and _is_macos() and _macos_version() < (12, 0):
+            raise Exception(
+                "predict() for .mlpackage is not supported in macOS version older than 12.0."
+            )
+
+        if self.__proxy__:
+            self._verify_input_dict(data)
+            self._convert_tensor_to_numpy(data)
+            # TODO: remove the following call when this is fixed: rdar://92239209
+            self._update_float16_multiarray_input_to_float32(data)
+            return self.__proxy__.predict(data)
+        else:
+            if _macos_version() < (10, 13):
+                raise Exception(
+                    "Model prediction is only supported on macOS version 10.13 or later."
+                )
+
+            try:
+                from ..libcoremlpython import _MLModelProxy
+            except Exception as e:
+                print("Exception loading model proxy: %s\n" % e)
+                _MLModelProxy = None
+            except:
+                print("Exception while loading model proxy.\n")
+                _MLModelProxy = None
+
+            if not _MLModelProxy:
+                raise Exception("Unable to load CoreML.framework.
Cannot make predictions.") + elif ( + _MLModelProxy.maximum_supported_specification_version() + < self._spec.specificationVersion + ): + engineVersion = _MLModelProxy.maximum_supported_specification_version() + raise Exception( + "The specification has version " + + str(self._spec.specificationVersion) + + " but the Core ML framework version installed only supports Core ML model specification version " + + str(engineVersion) + + " or older." + ) + elif _has_custom_layer(self._spec): + raise Exception( + "This model contains a custom neural network layer, so predict is not supported." + ) + else: + if self._framework_error: + raise self._framework_error + else: + raise Exception("Unable to load CoreML.framework. Cannot make predictions.") + + + def _set_build_info_mil_attributes(self, metadata): + if self._spec.WhichOneof('Type') != "mlProgram": + # No MIL attributes to set + return + + ml_program_attributes = self._spec.mlProgram.attributes + build_info_proto = ml_program_attributes["buildInfo"] + + # Set ValueType to dictionary of string to string + str_type = _MIL_pb2.ValueType() + str_type.tensorType.dataType = _MIL_pb2.DataType.STRING + dict_type_str_to_str = _MIL_pb2.ValueType() + dict_type_str_to_str.dictionaryType.keyType.CopyFrom(str_type) + dict_type_str_to_str.dictionaryType.valueType.CopyFrom(str_type) + build_info_proto.type.CopyFrom(dict_type_str_to_str) + + # Copy the metadata + build_info_dict = build_info_proto.immediateValue.dictionary + for k, v in metadata.items(): + key_pair = _MIL_pb2.DictionaryValue.KeyValuePair() + key_pair.key.immediateValue.tensor.strings.values.append(k) + key_pair.key.type.CopyFrom(str_type) + key_pair.value.immediateValue.tensor.strings.values.append(v) + key_pair.value.type.CopyFrom(str_type) + build_info_dict.values.append(key_pair) + + + def _get_mil_internal(self): + """ + Get a deep copy of the MIL program object, if available. + It's available whenever an MLModel object is constructed using + the unified converter API [`coremltools.convert()`](https://apple.github.io/coremltools/source/coremltools.converters.mil.html#coremltools.converters._converters_entry.convert). + + Returns + ------- + program: coremltools.converters.mil.Program + + Examples + -------- + mil_prog = model._get_mil_internal() + """ + return _deepcopy(self._mil_program) + + def _verify_input_dict(self, input_dict): + # Check if the input name given by the user is valid. 
+ # Although this is checked during prediction inside CoreML Framework, + # we still check it here to return early and + # return a more verbose error message + self._verify_input_name_exists(input_dict) + + # verify that the pillow image modes are correct, for image inputs + self._verify_pil_image_modes(input_dict) + + def _verify_pil_image_modes(self, input_dict): + if not _HAS_PIL: + return + for input_desc in self._spec.description.input: + if input_desc.type.WhichOneof("Type") == "imageType": + input_val = input_dict.get(input_desc.name, None) + if not isinstance(input_val, _PIL_IMAGE.Image): + msg = "Image input, '{}' must be of type PIL.Image.Image in the input dict" + raise TypeError(msg.format(input_desc.name)) + if input_desc.type.imageType.colorSpace in (_ft.ImageFeatureType.BGR, _ft.ImageFeatureType.RGB): + if input_val.mode != 'RGB': + msg = "RGB/BGR image input, '{}', must be of type PIL.Image.Image with mode=='RGB'" + raise TypeError(msg.format(input_desc.name)) + elif input_desc.type.imageType.colorSpace == _ft.ImageFeatureType.GRAYSCALE: + if input_val.mode != 'L': + msg = "GRAYSCALE image input, '{}', must be of type PIL.Image.Image with mode=='L'" + raise TypeError(msg.format(input_desc.name)) + elif input_desc.type.imageType.colorSpace == _ft.ImageFeatureType.GRAYSCALE_FLOAT16: + if input_val.mode != 'F': + msg = "GRAYSCALE_FLOAT16 image input, '{}', must be of type PIL.Image.Image with mode=='F'" + raise TypeError(msg.format(input_desc.name)) + + def _verify_input_name_exists(self, input_dict): + model_input_names = [inp.name for inp in self._spec.description.input] + model_input_names_set = set(model_input_names) + for given_input in input_dict.keys(): + if given_input not in model_input_names_set: + err_msg = "Provided key \"{}\", in the input dict, " \ + "does not match any of the model input name(s), which are: {}" + raise KeyError(err_msg.format(given_input, ",".join(model_input_names))) + + def _update_float16_multiarray_input_to_float32(self, input_data): + for k, v in input_data.items(): + if isinstance(v, _np.ndarray) and v.dtype == _np.float16: + input_data[k] = v.astype(_np.float32) + + def _convert_tensor_to_numpy(self, input_dict): + def convert(given_input): + if isinstance(given_input, _numpy.ndarray): + sanitized_input = given_input + elif _HAS_TORCH and isinstance(given_input, _torch.Tensor): + sanitized_input = given_input.detach().numpy() + elif (_HAS_TF_1 or _HAS_TF_2) and isinstance(given_input, _tf.Tensor): + sanitized_input = given_input.eval(session=_tf.compat.v1.Session()) + else: + sanitized_input = _numpy.array(given_input) + return sanitized_input + + model_input_to_types = {} + for inp in self._spec.description.input: + type_value = inp.type.multiArrayType.dataType + type_name = inp.type.multiArrayType.ArrayDataType.Name(type_value) + if type_name != "INVALID_ARRAY_DATA_TYPE": + model_input_to_types[inp.name] = type_name + + for given_input_name, given_input in input_dict.items(): + if not given_input_name in model_input_to_types: + continue + input_dict[given_input_name] = convert(given_input) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py new file mode 100644 index 00000000..6dd839ad --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .builder import KNearestNeighborsClassifierBuilder diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py new file mode 100644 index 00000000..0026897d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py @@ -0,0 +1,664 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +import coremltools + +from ...proto import FeatureTypes_pb2 +from .. import datatypes + + +class KNearestNeighborsClassifierBuilder: + """ + Construct a CoreML KNearestNeighborsClassifier specification. + + Please see the Core ML Nearest Neighbors protobuf message for more information + on KNearestNeighborsClassifier parameters. + + Examples + -------- + .. sourcecode:: python + + from coremltools.models.nearest_neighbors import KNearestNeighborsClassifierBuilder + from coremltools.models.utils import save_spec + + # Create a KNearestNeighborsClassifier model that takes 4-dimensional input data and outputs a string label. + >>> builder = KNearestNeighborsClassifierBuilder(input_name='input', + ... output_name='output', + ... number_of_dimensions=4, + ... default_class_label='default_label') + + # save the spec by the builder + >>> save_spec(builder.spec, 'knnclassifier.mlmodel') + + + """ + + _VALID_INDEX_TYPES = ["linear", "kd_tree"] + + _VALID_WEIGHTING_SCHEMES = ["uniform", "inverse_distance"] + + _VALID_DISTANCE_METRICS = ["squared_euclidean"] + + # Optional parameter keys for constructor + _PARAMETER_KEY_NUMBER_OF_NEIGHBORS = "number_of_neighbors" + _PARAMETER_KEY_WEIGHTING_SCHEME = "weighting_scheme" + _PARAMETER_KEY_INDEX_TYPE = "index_type" + _PARAMETER_KEY_LEAF_SIZE = "leaf_size" + _PARAMETER_KEY_INPUT_TYPE = "input_type" + + # Optional parameter default values + _PARAMETER_DEFAULT_NUMBER_OF_NEIGHBORS = 5 + _PARAMETER_DEFAULT_WEIGHTING_SCHEME = "uniform" + _PARAMETER_DEFAULT_INDEX_TYPE = "linear" + _PARAMETER_DEFAULT_LEAF_SIZE = 30 + _PARAMETER_DEFAULT_INPUT_TYPE = "NotSpecified" + + def __init__( + self, + input_name, + output_name, + number_of_dimensions, + default_class_label, + **kwargs + ): + """ + Create a KNearestNeighborsClassifierBuilder object. + + Parameters + ---------- + input_name + Name of the model input. + + output_name + Name of the output. + + number_of_dimensions + Number of dimensions of the input data. + + default_class_label + The default class label to use for predictions. Must be either an + int64 or a string. + + number_of_neighbors + Number of neighbors to use for predictions. Default = 5 with allowed values + between 1-1000. + + weighting_scheme + Weight function used in prediction. One of ``'uniform'`` (default) or + ``'inverse_distance'``. + + index_type + Algorithm to compute nearest neighbors. One of ``'linear'`` (default), or + ``'kd_tree'``. + + leaf_size + Leaf size for the kd-tree. Ignored if index type is ``'linear'``. Default = 30. 
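+
+        Examples
+        --------
+        A minimal sketch of the optional keyword parameters described above
+        (the values here are illustrative only, not recommendations):
+
+        .. sourcecode:: python
+
+            builder = KNearestNeighborsClassifierBuilder(
+                input_name="input",
+                output_name="output",
+                number_of_dimensions=4,
+                default_class_label="default_label",
+                number_of_neighbors=3,
+                weighting_scheme="inverse_distance",
+                index_type="kd_tree",
+                leaf_size=30,
+            )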
+ """ + self.spec = coremltools.proto.Model_pb2.Model() + self.spec.specificationVersion = ( + coremltools._MINIMUM_NEAREST_NEIGHBORS_SPEC_VERSION + ) + + # the model is initially empty - assume it's updatable + self.is_updatable = True + + if number_of_dimensions <= 0: + raise ValueError("number_of_dimensions must be >= 0") + self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions = ( + number_of_dimensions + ) + + input_type = kwargs.get( + self._PARAMETER_KEY_INPUT_TYPE, self._PARAMETER_DEFAULT_INPUT_TYPE + ) + input_feature_type = FeatureTypes_pb2.ArrayFeatureType.FLOAT32 + if input_type == datatypes.Double: + input_feature_type = FeatureTypes_pb2.ArrayFeatureType.DOUBLE + + input_feature = self.spec.description.input.add() + input_feature.name = input_name + input_feature.type.multiArrayType.dataType = input_feature_type + input_feature.type.multiArrayType.shape.extend([number_of_dimensions]) + + training_features = self.spec.description.trainingInput.add() + training_features.name = input_name + training_features.type.multiArrayType.dataType = input_feature_type + training_features.type.multiArrayType.shape.extend([number_of_dimensions]) + + output_label = self.spec.description.output.add() + output_label.name = output_name + output_label_probs = self.spec.description.output.add() + output_label_probs.name = output_name + "Probs" + training_features = self.spec.description.trainingInput.add() + training_features.name = output_name + + if self._is_valid_text_type(default_class_label): + output_label.type.stringType.MergeFromString(b"") + training_features.type.stringType.MergeFromString(b"") + output_label_probs.type.dictionaryType.stringKeyType.MergeFromString(b"") + self.spec.kNearestNeighborsClassifier.stringClassLabels.MergeFromString(b"") + self.spec.kNearestNeighborsClassifier.defaultStringLabel = ( + default_class_label + ) + elif self._is_valid_number_type(default_class_label): + output_label.type.int64Type.MergeFromString(b"") + training_features.type.int64Type.MergeFromString(b"") + output_label_probs.type.dictionaryType.int64KeyType.MergeFromString(b"") + self.spec.kNearestNeighborsClassifier.int64ClassLabels.MergeFromString(b"") + self.spec.kNearestNeighborsClassifier.defaultInt64Label = ( + default_class_label + ) + else: + raise TypeError( + "default_class_label type ({}) is invalid. Must be either string or int64".format( + type(default_class_label) + ) + ) + + self.spec.description.predictedFeatureName = output_label.name + self.spec.description.predictedProbabilitiesName = output_label_probs.name + + number_of_neighbors = kwargs.get( + self._PARAMETER_KEY_NUMBER_OF_NEIGHBORS, + self._PARAMETER_DEFAULT_NUMBER_OF_NEIGHBORS, + ) + self.set_number_of_neighbors_with_bounds( + number_of_neighbors, allowed_range=(1, 1000) + ) # Can we think of a more sensible default value? + + self.weighting_scheme = kwargs.get( + self._PARAMETER_KEY_WEIGHTING_SCHEME, + self._PARAMETER_DEFAULT_WEIGHTING_SCHEME, + ) + + index_type = kwargs.get( + self._PARAMETER_KEY_INDEX_TYPE, self._PARAMETER_DEFAULT_INDEX_TYPE + ) + leaf_size = kwargs.get( + self._PARAMETER_KEY_LEAF_SIZE, self._PARAMETER_DEFAULT_LEAF_SIZE + ) + self.set_index_type(index_type, leaf_size) + + # SED is currently the only supported distance metric + self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.squaredEuclideanDistance.MergeFromString( + b"" + ) + + @property + def author(self): + """ + Get the author for the KNearestNeighborsClassifier model. 
+
+        Returns
+        -------
+        The author.
+        """
+        return self.spec.description.metadata.author
+
+    @author.setter
+    def author(self, author):
+        """
+        Set the author for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        author
+            The author.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.author = author
+
+    @property
+    def license(self):
+        """
+        Get the license for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The license.
+        """
+        return self.spec.description.metadata.license
+
+    @license.setter
+    def license(self, license):
+        """
+        Set the license for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        license
+            The license.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.license = license
+
+    @property
+    def description(self):
+        """
+        Get the description for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The description.
+        """
+        return self.spec.description.metadata.shortDescription
+
+    @description.setter
+    def description(self, description):
+        """
+        Set the description for the model.
+
+        Parameters
+        ----------
+        description
+            The description.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.shortDescription = description
+
+    @property
+    def is_updatable(self):
+        """
+        Check if the KNearestNeighborsClassifier is updatable.
+
+        Returns
+        -------
+        Is updatable.
+        """
+        return self.spec.isUpdatable
+
+    @is_updatable.setter
+    def is_updatable(self, is_updatable):
+        """
+        Set whether the KNearestNeighborsClassifier is updatable.
+
+        Parameters
+        ----------
+        is_updatable
+            Boolean.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.isUpdatable = is_updatable
+
+    @property
+    def weighting_scheme(self):
+        """
+        Get the weighting scheme for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The weighting scheme.
+        """
+        return self._weighting_scheme
+
+    @weighting_scheme.setter
+    def weighting_scheme(self, weighting_scheme):
+        """
+        Set the weighting scheme for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        weighting_scheme
+            One of [ ``'uniform'``, ``'inverse_distance'`` ].
+
+        Returns
+        -------
+        None
+        """
+        weighting_scheme = weighting_scheme.lower()
+        if weighting_scheme not in self._VALID_WEIGHTING_SCHEMES:
+            raise TypeError("Invalid weighting scheme")
+
+        if weighting_scheme == "inverse_distance":
+            self.spec.kNearestNeighborsClassifier.inverseDistanceWeighting.MergeFromString(
+                b""
+            )
+        else:
+            self.spec.kNearestNeighborsClassifier.uniformWeighting.MergeFromString(b"")
+
+        # storing this in the object is just a convenience
+        self._weighting_scheme = weighting_scheme
+
+    @property
+    def index_type(self):
+        """
+        Get the index type for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The index type.
+        """
+        return self._index_type
+
+    def set_index_type(self, index_type, leaf_size=30):
+        """
+        Set the index type for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        index_type
+            One of [ ``'linear'``, ``'kd_tree'`` ].
+
+        leaf_size
+            For kd_tree indexes, the leaf size to use (default = 30).
+
+        Returns
+        -------
+        None
+        """
+        index_type = index_type.lower()
+        if index_type not in self._VALID_INDEX_TYPES:
+            raise TypeError("Invalid index type")
+
+        if index_type == "kd_tree":
+            if leaf_size <= 0:
+                raise TypeError("leaf_size must be > 0")
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = (
+                leaf_size
+            )
+        else:
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.linearIndex.MergeFromString(
+                b""
+            )
+
+        # storing this in the object is just a convenience
+        self._index_type = index_type
+
+    @property
+    def leaf_size(self):
+        """
+        Get the leaf size for the KNearestNeighborsClassifier.
+
+        Returns
+        -------
+        The leaf size.
+        """
+        return (
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize
+        )
+
+    @leaf_size.setter
+    def leaf_size(self, leaf_size):
+        """
+        Set the leaf size for a KNearestNeighborsClassifier model. Only for kd-tree indexes.
+
+        Parameters
+        ----------
+        leaf_size
+            The leaf size.
+
+        Returns
+        -------
+        None
+        """
+        if leaf_size <= 0:
+            raise ValueError("leaf_size must be > 0")
+        self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = (
+            leaf_size
+        )
+
+    @property
+    def number_of_dimensions(self):
+        """
+        Get the number of dimensions of the input data for the
+        KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        Number of dimensions.
+        """
+        return (
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions
+        )
+
+    @property
+    def number_of_neighbors(self):
+        """
+        Get the number of neighbors value for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The number of neighbors default value.
+        """
+        return self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue
+
+    def set_number_of_neighbors_with_bounds(
+        self, number_of_neighbors, allowed_range=None, allowed_set=None
+    ):
+        """
+        Set the numberOfNeighbors parameter for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        number_of_neighbors
+            The default number of neighbors to use for predictions.
+
+        allowed_range
+            Tuple of (``min_value``, ``max_value``) defining the range of allowed values.
+
+        allowed_set
+            Set of allowed values for the number of neighbors.
+
+        Returns
+        -------
+        None
+        """
+        if number_of_neighbors <= 0:
+            raise ValueError("number_of_neighbors must be > 0")
+        if allowed_range is None and allowed_set is None:
+            raise ValueError(
+                "Exactly one of allowed_range or allowed_set must be provided"
+            )
+        if allowed_range is not None and allowed_set is not None:
+            raise ValueError(
+                "Exactly one of allowed_range or allowed_set must be provided"
+            )
+
+        if allowed_range is not None:
+            if not isinstance(allowed_range, tuple):
+                raise TypeError(
+                    "allowed_range expects a tuple of (min_value, max_value)"
+                )
+            if len(allowed_range) != 2:
+                raise TypeError(
+                    "allowed_range expects a tuple of (min_value, max_value)"
+                )
+
+            (min_value, max_value) = allowed_range
+            if min_value <= 0:
+                raise ValueError("allowed_range minimum must be > 0")
+            if max_value < min_value:
+                raise ValueError("allowed_range max_value must be >= min_value")
+            if number_of_neighbors < min_value or number_of_neighbors > max_value:
+                raise ValueError("number_of_neighbors is not within allowed range")
+
+            self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = (
+                number_of_neighbors
+            )
+            self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue = (
+                min_value
+            )
+            self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue = (
+                max_value
+            )
+
+        elif allowed_set is not None:
+            if not isinstance(allowed_set, set):
+                raise TypeError("allowed_set expects 'set' type")
+            if len(allowed_set) == 0:
+                raise TypeError("allowed_set cannot be empty")
+
+            found_match = False
+            for v in allowed_set:
+                if not self._is_valid_number_type(v):
+                    raise TypeError("allowed_set must contain only integer types")
+                if v <= 0:
+                    raise TypeError("allowed_set must only contain values > 0")
+                if number_of_neighbors == v:
+                    found_match = True
+
+            if found_match:
+                self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = (
+                    number_of_neighbors
+                )
+                for v in allowed_set:
+                    self.spec.kNearestNeighborsClassifier.numberOfNeighbors.set.values.append(
+                        v
+                    )
+            else:
+                raise ValueError("number_of_neighbors is not a valid value")
+
+    def number_of_neighbors_allowed_range(self):
+        """
+        Get the range of allowed values for the numberOfNeighbors parameter.
+
+        Returns
+        -------
+        Tuple of (``min_value``, ``max_value``) or ``None`` if the range hasn't been set.
+        """
+        if self.spec.kNearestNeighborsClassifier.numberOfNeighbors.HasField("range"):
+            return (
+                self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue,
+                self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue,
+            )
+        return None
+
+    def number_of_neighbors_allowed_set(self):
+        """
+        Get the set of allowed values for the numberOfNeighbors parameter.
+
+        Returns
+        -------
+        Set of allowed values or ``None`` if the set of allowed values hasn't been
+        populated.
+        """
+        if self.spec.kNearestNeighborsClassifier.numberOfNeighbors.HasField("set"):
+            spec_values = (
+                self.spec.kNearestNeighborsClassifier.numberOfNeighbors.set.values
+            )
+            allowed_values = set()
+            for v in spec_values:
+                allowed_values.add(v)
+            return allowed_values
+        return None
+
+    def add_samples(self, data_points, labels):
+        """
+        Add some samples to the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        data_points
+            List of input data points.
+
+        labels
+            List of corresponding labels.
+
+        Returns
+        -------
+        None
+        """
+        if len(data_points) == 0:
+            raise TypeError("data_points is empty")
+
+        if len(labels) == 0:
+            raise TypeError("labels is empty")
+
+        if len(data_points[0]) != self.number_of_dimensions:
+            raise TypeError(
+                "dimensionality of data_points != expected number of dimensions"
+            )
+
+        if len(data_points) != len(labels):
+            raise TypeError("len(data_points) != len(labels)")
+
+        # Validate the types of the labels before adding any points.
+        self._validate_label_types(labels)
+
+        for data_point in data_points:
+            sample = (
+                self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add()
+            )
+            for feature in data_point:
+                sample.vector.append(feature)
+
+        if self.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"):
+            for label in labels:
+                self.spec.kNearestNeighborsClassifier.int64ClassLabels.vector.append(
+                    label
+                )
+        else:
+            # string labels
+            for label in labels:
+                self.spec.kNearestNeighborsClassifier.stringClassLabels.vector.append(
+                    label
+                )
+
+    def _validate_label_types(self, labels):
+        """
+        Ensure the label types match the expected types.
+
+        Parameters
+        ----------
+        labels
+            The list of labels.
+
+        Returns
+        -------
+        None. Raises a ``TypeError`` if a label has an unexpected type.
+        """
+        if self.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"):
+            check_is_valid = KNearestNeighborsClassifierBuilder._is_valid_number_type
+        else:
+            check_is_valid = KNearestNeighborsClassifierBuilder._is_valid_text_type
+        for label in labels:
+            if not check_is_valid(label):
+                raise TypeError("Invalid type for label: {}".format(type(label)))
+
+    @staticmethod
+    def _is_valid_text_type(obj):
+        """
+        Checks if the object is a valid text type.
+
+        Parameters
+        ----------
+        obj
+            The object to check.
+
+        Returns
+        -------
+        True if a valid text type, False otherwise.
+        """
+        return isinstance(obj, str)
+
+    @staticmethod
+    def _is_valid_number_type(obj):
+        """
+        Checks if the object is a valid number type.
+
+        Parameters
+        ----------
+        obj
+            The object to check.
+
+        Returns
+        -------
+        True if a valid number type, False otherwise.
+        """
+        return isinstance(obj, (int, _np.integer))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py
new file mode 100644
index 00000000..3e3fe7e8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) 2018, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import (flexible_shape_utils, optimization_utils, printer,
+               quantization_utils, spec_inspection_utils,
+               update_optimizer_utils, utils)
+from .builder import NeuralNetworkBuilder
+from .update_optimizer_utils import AdamParams, SgdParams
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py
new file mode 100644
index 00000000..8500bf71
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py
@@ -0,0 +1,8857 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Neural network builder class to construct Core ML models. +""" +from math import floor as _math_floor + +import numpy as _np + +from ... import (_MINIMUM_NDARRAY_SPEC_VERSION, + _MINIMUM_UPDATABLE_SPEC_VERSION, + _SPECIFICATION_VERSION_IOS_14) +from ... import SPECIFICATION_VERSION as _SPECIFICATION_VERSION +from ...proto import FeatureTypes_pb2 as _FeatureTypes_pb2 +from ...proto import Model_pb2 as _Model_pb2 +from ...proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2 +from .. import datatypes +from .._interface_management import (set_training_features, + set_transform_interface_params) +from .quantization_utils import (_convert_array_to_nbit_quantized_bytes, + _unpack_to_bytes) +from .spec_inspection_utils import _summarize_network_layer_info +from .update_optimizer_utils import AdamParams, SgdParams + +_SUPPORTED_UPDATABLE_LAYERS = ["innerProduct", "convolution"] + + +def _set_recurrent_activation(param, activation): + if isinstance(activation, bytes): + activation = activation.decode("utf8") + + activation = ( + activation.upper() if isinstance(activation, str) else activation + ) + + if activation == "SIGMOID": + param.sigmoid.MergeFromString(b"") + elif activation == "TANH": + param.tanh.MergeFromString(b"") + elif activation == "LINEAR": + param.linear.MergeFromString(b"") + elif activation == "SIGMOID_HARD": + param.sigmoidHard.MergeFromString(b"") + elif activation == "SCALED_TANH": + param.scaledTanh.MergeFromString(b"") + elif activation == "RELU": + param.ReLU.MergeFromString(b"") + else: + raise TypeError( + "Unsupported activation type with Recurrent layer: %s." % activation + ) + + +def _verify_quantization_arguments(weight=bytes(), output_channels=1, **kwargs): + quantization_type = kwargs.get("quantization_type", "").lower() + nbits = kwargs.get("nbits", 8) + quant_scale = kwargs.get("quant_scale", None) + quant_bias = kwargs.get("quant_bias", None) + quant_lut = kwargs.get("quant_lut", None) + int_8_dynamic_quantize = kwargs.get("int_8_dynamic_quantize", False) + + if int_8_dynamic_quantize and nbits != 8: + raise ValueError("nbits must be 8 when 'int_8_dynamic_quantize' is true ") + + if int_8_dynamic_quantize and quant_bias is not None: + raise ValueError( + "quant_bias must be empty when 'int_8_dynamic_quantize' is true " + ) + + if int_8_dynamic_quantize and quant_scale.size != 1: + raise ValueError( + "quant_scale must be of size 1 when 'int_8_dynamic_quantize' is true " + ) + + if not isinstance(weight, bytes): + raise ValueError("Weight must be of type bytes() for quantization") + + if quantization_type == "linear": + if not int_8_dynamic_quantize: + if quant_scale is None or quant_bias is None: + raise ValueError( + "quant_scale and quant_bias parameters must be provided for linear quantization type" + ) + if not _np.isscalar(quant_scale) and (len(quant_scale) != 1 and len(quant_scale) != output_channels): + raise ValueError( + "quant_scale should be of type float or an array of length outputChannels" + ) + if not int_8_dynamic_quantize: + if not _np.isscalar(quant_scale) and len(quant_bias) != 1 and len(quant_bias) != output_channels: + raise ValueError( + "quant_bias should be of type float or an array of length outputChannels" + ) + elif quantization_type == "lut": + if quant_lut is None: + raise ValueError( + "quant_lut must be provided for look up table quantization type" + ) + if 
len(quant_lut) != 2 ** nbits:
+            raise ValueError("quant_lut must be an array of length 2^nbits")
+    else:
+        raise ValueError("quantization_type must be either linear or lut")
+
+    if quantization_type in ("linear", "lut"):
+        if nbits > 8 or nbits < 1:
+            raise ValueError("nbits must be between 1 and 8")
+
+
+def _fill_quantized_weights(weights_message=None, W=bytes(), use_int_8=False, **kwargs):
+    if use_int_8:
+        weights_message.int8RawValue = bytes()
+        weights_message.int8RawValue += W
+    else:
+        weights_message.rawValue = bytes()
+        weights_message.rawValue += W
+    nbits = kwargs.get("nbits", 8)
+    weights_message.quantization.numberOfBits = nbits
+    quantization_type = kwargs.get("quantization_type", "").lower()
+    if quantization_type == "linear":
+        quant_scale = kwargs.get("quant_scale", [1.0])
+        quant_bias = kwargs.get("quant_bias", [0.0])
+        weights_message.quantization.linearQuantization.scale.extend(quant_scale)
+        if not use_int_8:
+            weights_message.quantization.linearQuantization.bias.extend(quant_bias)
+    else:
+        quant_lut = kwargs.get("quant_lut", [0.0, 1.0])
+        weights_message.quantization.lookupTableQuantization.floatValue.extend(
+            quant_lut
+        )
+
+
+def _get_nn_spec(spec):
+    if spec.HasField("neuralNetworkClassifier"):
+        return spec.neuralNetworkClassifier
+    elif spec.HasField("neuralNetworkRegressor"):
+        return spec.neuralNetworkRegressor
+    elif spec.HasField("neuralNetwork"):
+        return spec.neuralNetwork
+    else:
+        return None
+
+
+def _get_lstm_weight_fields(lstm_wp):
+    """
+    Get LSTM weight fields.
+    lstm_wp: _NeuralNetwork_pb2.LSTMWeightParams
+    """
+    return [
+        lstm_wp.inputGateWeightMatrix,
+        lstm_wp.forgetGateWeightMatrix,
+        lstm_wp.blockInputWeightMatrix,
+        lstm_wp.outputGateWeightMatrix,
+        lstm_wp.inputGateRecursionMatrix,
+        lstm_wp.forgetGateRecursionMatrix,
+        lstm_wp.blockInputRecursionMatrix,
+        lstm_wp.outputGateRecursionMatrix,
+        lstm_wp.inputGateBiasVector,
+        lstm_wp.forgetGateBiasVector,
+        lstm_wp.blockInputBiasVector,
+        lstm_wp.outputGateBiasVector,
+        lstm_wp.inputGatePeepholeVector,
+        lstm_wp.forgetGatePeepholeVector,
+        lstm_wp.outputGatePeepholeVector,
+    ]
+
+
+def _fill_tensor_fields(tensor_field, ranks=None, shapes=None):
+    """
+    Fill the tensor fields.
+    ranks - ``None`` or a list of integers with the same length as the number of inputs/outputs
+    shapes - ``None`` or a list of shapes with the same length as the number of inputs/outputs. Each shape is a list or tuple
+    """
+    if ranks is None and shapes is None:
+        return
+
+    if ranks is None and shapes is not None:
+        ranks = [len(shape) for shape in shapes]
+
+    # Fill ranks only
+    for rank in ranks:
+        if rank is None:
+            continue
+
+        if not _np.issubclass_(type(rank), (int, _np.integer)):
+            rank = -1  # Variable rank set to -1
+
+        field = tensor_field.add()
+        field.rank = rank
+
+    if ranks is not None and shapes is not None:
+        if len(ranks) != len(shapes):
+            raise ValueError("Numbers of ranks and shapes of the tensor field do not match.")
+
+        for i in range(0, len(ranks)):
+            shape = shapes[i]
+            rank = ranks[i]
+
+            # Ignore incomplete info
+            if shape is None or rank is None:
+                continue
+
+            # Raise error on inconsistent input
+            if rank != len(shape):
+                raise ValueError("Rank and shape do not match")
+
+            # Add the shape to the proto
+            for s in shape:
+                if not _np.issubclass_(type(s), (int, _np.integer)):
+                    s = -1  # Symbolic shape set to -1
+                tensor_field[i].dimValue.append(s)
+
+
+class NeuralNetworkBuilder:
+    """
+    Neural network builder class to construct Core ML models.
+
+    The NeuralNetworkBuilder constructs a Core ML neural network specification
+    layer by layer. The layers should be added in such an order that the inputs
+    to each layer (referred to as blobs of each layer) have been previously
+    defined. The builder can also set preprocessing steps to handle
+    specialized input formats (such as images), and set class labels for neural
+    network classifiers.
+
+    Refer to the protobuf messages in the specification (NeuralNetwork.proto)
+    for more details.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        from coremltools.models.neural_network import datatypes, NeuralNetworkBuilder
+        from coremltools.models.utils import save_spec
+
+        # Create a neural network binary classifier that classifies
+        # 3-dimensional data points
+        # Specify input and output dimensions
+        >>> input_dim = (3,)
+        >>> output_dim = (2,)
+
+        # Specify input and output features
+        >>> input_features = [('data', datatypes.Array(*input_dim))]
+        >>> output_features = [('probs', datatypes.Array(*output_dim))]
+
+        # Build a simple neural network with 1 inner product layer
+        >>> builder = NeuralNetworkBuilder(input_features, output_features)
+        >>> builder.add_inner_product(name='ip_layer', W=weights, b=bias, input_channels=3, output_channels=2,
+        ...                           has_bias=True, input_name='data', output_name='probs')
+
+        # Save the spec built by the builder
+        >>> save_spec(builder.spec, 'network.mlmodel')
+    """
+
+    def __init__(
+        self,
+        input_features=None,
+        output_features=None,
+        mode=None,
+        spec=None,
+        nn_spec=None,
+        disable_rank5_shape_mapping=False,
+        training_features=None,
+        use_float_arraytype=False,
+    ):
+        """
+        Construct a NeuralNetworkBuilder object to build an MLModel specification with a
+        model interface, or a NeuralNetwork protobuf message, either from scratch or using an
+        existing specification.
+
+        Parameters
+        ----------
+
+        input_features: [(str, datatypes.Array)] or None
+            List of input features of the network.
+            Each feature is a ``(name, array)`` tuple, where ``name`` is the
+            name of the feature, and ``array`` is a ``datatypes.Array`` object
+            describing the feature type.
+
+            * When ``spec`` is ``None`` (building from scratch), ``input_features`` must not be ``None``.
+
+        output_features: [(str, datatypes.Array or None)] or None
+            List of output features of the network. Each feature is a
+            ``(name, array)`` tuple, where ``name`` is the name of the feature,
+            and ``array`` is a ``datatypes.Array`` object describing the feature type.
+
+            * The ``array`` can be ``None`` if not known.
+
+            * When ``spec`` is ``None`` (building from scratch), ``output_features`` must not be ``None``.
+
+        mode: str ('classifier', 'regressor' or None)
+            Mode (one of ``'classifier'``, ``'regressor'``, or ``None``).
+
+            When ``mode = 'classifier'``, a NeuralNetworkClassifier spec will be
+            constructed. When ``mode = 'regressor'``, a NeuralNetworkRegressor
+            spec will be constructed.
+
+        disable_rank5_shape_mapping: bool
+            Only applicable for neural networks.
+
+            If True, inputs are no longer forced to map to rank 5 tensors
+            (rank is equal to the length of the shape of the tensor).
+            Instead, for multi-array inputs ``"EXACT_ARRAY_MAPPING"`` mapping is used, whereas
+            for image inputs ``"RANK4_IMAGE_MAPPING"`` is used. For details,
+            see description of enums ``NeuralNetworkMultiArrayShapeMapping``
+            and ``NeuralNetworkImageShapeMapping`` in NeuralNetwork.proto.
+
+            When ``spec`` is not ``None``, this argument will be ignored.
+ + spec: None or coremltools.proto.Model_pb2 + If ``None``, a new MLModel spec will be created by the builder with + input and output features. + + Otherwise, the builder will continue to build on ``spec``. + This is useful when the MLModel is built incrementally. + + nn_spec: None or coremltools.proto.NeuralNetwork_pb2 + If ``None``, a new, empty NeuralNetwork proto will be created for spec. + + If ``nn_spec`` is not ``None`` and ``spec`` is ``None``, the builder will + build a NeuralNetwork spec without wrapping it within an MLModel. + This is useful to create nested NeuralNetworks for models + with control flow operations. + + use_float_arraytype: bool + If true, the datatype of input/output multiarrays is set to Float32 instead + of double. + + Examples + -------- + .. sourcecode:: python + + # Construct a builder that builds a neural network classifier with a 299 x 299 x 3 + # dimensional input and 1000 dimensional output + >>> input_features = [('data', datatypes.Array((299, 299, 3)))] + >>> output_features = [('probs', datatypes.Array((1000,)))] + >>> builder = NeuralNetworkBuilder(input_features, output_features, mode='classifier') + + See Also + -------- + set_input, set_output, set_class_labels + """ + self.spec = spec + self.nn_spec = nn_spec + self._disable_rank5_shape_mapping = disable_rank5_shape_mapping + self.layers = [] + self.layer_specs = {} + self.named_parameters = [] + self.rank_dict = {} + + if self.spec is not None: # Existing spec + if self.nn_spec is None: + self.nn_spec = _get_nn_spec(self.spec) + for layer_spec in self.nn_spec.layers: + self.layers.append(layer_spec.name) + self.layer_specs[layer_spec.name] = layer_spec + else: + # Both spec and nn_spec are not None + raise ValueError( + "Attempting to assign another NeuralNetwork Spec to an existing MLModel Spec" + ) + if input_features is None and output_features is None: + return + + if ( + self.spec is None and self.nn_spec is not None + ): # Building nested Neural Network + return + + # Set the interface params. 
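+        # (Orientation note for the block below: when no existing spec was
+        # passed in, a fresh Model proto is created, and its specification
+        # version is raised if rank-5 shape mapping is disabled. For example,
+        # a caller constructing the builder with hypothetical features
+        #     NeuralNetworkBuilder([("data", datatypes.Array(3))], [("probs", None)])
+        # reaches this path: the output feature with type None is temporarily
+        # given a dummy datatypes.Array(1) type so the interface can be set,
+        # and its multiArrayType shape field is cleared again further down.)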
+        if self.spec is None:
+            self.spec = _Model_pb2.Model()
+            self.spec.specificationVersion = _SPECIFICATION_VERSION
+            if disable_rank5_shape_mapping:
+                self.spec.specificationVersion = _MINIMUM_NDARRAY_SPEC_VERSION
+
+        # When an output feature's type is None, use a dummy-sized type
+        out_features_with_shape = []
+        for out_feature in output_features:
+            feat_name, feat_type = out_feature
+            if feat_type is None:
+                out_features_with_shape.append((str(feat_name), datatypes.Array(1)))
+            else:
+                out_features_with_shape.append(out_feature)
+
+        # Set interface inputs and outputs
+        if len(self.spec.description.input) > 0:
+            del self.spec.description.input[:]
+        if len(self.spec.description.output) > 0:
+            del self.spec.description.output[:]
+
+        if use_float_arraytype:
+            array_datatype = _Model_pb2.ArrayFeatureType.FLOAT32
+        else:
+            array_datatype = _Model_pb2.ArrayFeatureType.DOUBLE
+
+        self.spec = set_transform_interface_params(
+            self.spec,
+            input_features,
+            out_features_with_shape,
+            training_features=training_features,
+            array_datatype=array_datatype,
+        )
+
+        for input in input_features:
+            self.rank_dict[input[0]] = len(input[1].dimensions)
+
+        for idx, output_feature in enumerate(output_features):
+            if output_features[idx][1] is None:
+                self.spec.description.output[idx].type.multiArrayType.ClearField(
+                    "shape"
+                )
+
+        if self.nn_spec is None:
+            if mode == "classifier":
+                nn_spec = self.spec.neuralNetworkClassifier
+            elif mode == "regressor":
+                nn_spec = self.spec.neuralNetworkRegressor
+            else:
+                nn_spec = self.spec.neuralNetwork
+            self.nn_spec = nn_spec
+
+        if disable_rank5_shape_mapping and self.nn_spec:
+            self.nn_spec.arrayInputShapeMapping = _NeuralNetwork_pb2.NeuralNetworkMultiArrayShapeMapping.Value(
+                "EXACT_ARRAY_MAPPING"
+            )
+            self.nn_spec.imageInputShapeMapping = _NeuralNetwork_pb2.NeuralNetworkImageShapeMapping.Value(
+                "RANK4_IMAGE_MAPPING"
+            )
+
+    def set_input(self, input_names, input_dims):
+        """
+        Set the inputs of the network spec.
+
+        Parameters
+        ----------
+        input_names: list of str
+            The input names of the network.
+
+        input_dims: [tuple]
+            The input dimensions of the network. The ordering of ``input_dims``
+            is the same as ``input_names``.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec inputs to be 3 dimensional vector data1 and
+            # 4 dimensional vector data2.
+            >>> builder.set_input(input_names=['data1', 'data2'], input_dims=[(3,), (4,)])
+
+        See Also
+        --------
+        set_output, set_class_labels
+        """
+
+        if len(input_names) != len(input_dims):
+            raise ValueError("input_names and input_dims must be of the same sizes.")
+
+        spec = self.spec
+        for idx, dim in enumerate(input_dims):
+            if (
+                hasattr(self, "_disable_rank5_shape_mapping")
+                and self._disable_rank5_shape_mapping
+            ):
+                input_shape = dim
+            else:
+                if len(dim) == 3:
+                    input_shape = (dim[0], dim[1], dim[2])
+                elif len(dim) == 2:
+                    input_shape = (dim[1],)
+                elif len(dim) == 1:
+                    input_shape = tuple(dim)
+                else:
+                    raise RuntimeError(
+                        "Attempting to add a neural network "
+                        + "input with rank "
+                        + str(len(dim))
+                        + ". All networks should take inputs of rank 1, 2, or 3."
+                    )
+
+            spec.description.input[idx].type.multiArrayType.ClearField("shape")
+            spec.description.input[idx].type.multiArrayType.shape.extend(input_shape)
+
+            # TODO: if it's an embedding, this should be integer
+            spec.description.input[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+            spec.description.input[idx].name = input_names[idx]
+
+    def set_output(self, output_names, output_dims):
+        """
+        Set the outputs of the network spec.
+
+        Parameters
+        ----------
+        output_names: list of str
+            The output names of the network.
+
+        output_dims: [tuple]
+            The output dimensions of the network. The ordering of ``output_dims`` is the same
+            as ``output_names``.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec outputs to be 3 dimensional vector feature1 and
+            # 4 dimensional vector feature2.
+            >>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)])
+
+        See Also
+        --------
+        set_input, set_class_labels
+        """
+
+        if len(output_names) != len(output_dims):
+            raise ValueError("output_names and output_dims must be of the same size.")
+
+        spec = self.spec
+        for idx, dim in enumerate(output_dims):
+            spec.description.output[idx].type.multiArrayType.ClearField("shape")
+            spec.description.output[idx].type.multiArrayType.shape.extend(dim)
+            spec.description.output[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+            spec.description.output[idx].name = output_names[idx]
+
+    def set_training_input(self, training_input):
+        """
+        Set the training inputs of the network spec.
+
+        Parameters
+        ----------
+        training_input: [tuple]
+            The training input names and types of the network.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec training inputs to be a 3 dimensional vector
+            # for 'input' and a Double for 'target'.
+            >>> builder.set_training_input([('input', datatypes.Array(3)), ('target', 'Double')])
+        """
+        spec = self.spec
+        set_training_features(spec, training_input)
+
+    def set_class_labels(
+        self, class_labels, predicted_feature_name="classLabel", prediction_blob=""
+    ):
+        """
+        Set class labels to the model spec to make it a neural network classifier.
+
+        Parameters
+        ----------
+        class_labels: list of int or list of str
+            A list of integers or strings that map the index of the output of a
+            neural network to labels in a classifier.
+
+        predicted_feature_name: str
+            Name of the output feature for the class labels exposed in the
+            Core ML neural network classifier. Default: ``'classLabel'``.
+
+        prediction_blob: str
+            If provided, then this is the name of the neural network blob which
+            generates the probabilities for each class label (typically the output
+            of a softmax layer). If not provided, then the last output layer is
+            assumed.
+
+        See Also
+        --------
+        set_input, set_output, set_pre_processing_parameters
+        """
+        spec = self.spec
+        nn_spec = self.nn_spec
+
+        if len(spec.description.output) == 0:
+            raise ValueError(
+                "Model should have at least one output (the probabilities) to automatically make it a classifier."
+            )
+        probOutput = spec.description.output[0]
+        probOutput.type.dictionaryType.MergeFromString(b"")
+        if len(class_labels) == 0:
+            return
+        class_type = type(class_labels[0])
+        if not isinstance(class_labels[0], (int, str)):
+            raise TypeError(
+                "Class labels must be of type Integer or String. "
+                "(not %s)" % class_type
+            )
+
+        spec.description.predictedProbabilitiesName = probOutput.name
+        spec.description.predictedFeatureName = predicted_feature_name
+
+        classLabel = spec.description.output.add()
+        classLabel.name = predicted_feature_name
+        if class_type == int:
+            nn_spec.ClearField("int64ClassLabels")
+            probOutput.type.dictionaryType.int64KeyType.MergeFromString(b"")
+            classLabel.type.int64Type.MergeFromString(b"")
+            for c in class_labels:
+                nn_spec.int64ClassLabels.vector.append(c)
+        else:
+            nn_spec.ClearField("stringClassLabels")
+            probOutput.type.dictionaryType.stringKeyType.MergeFromString(b"")
+            classLabel.type.stringType.MergeFromString(b"")
+            for c in class_labels:
+                nn_spec.stringClassLabels.vector.append(c)
+
+        if prediction_blob != "":
+            # correctness here will be checked in the validator -- i.e. to
+            # make sure this string corresponds to a real blob
+            nn_spec.labelProbabilityLayerName = prediction_blob
+        else:  # not provided
+            # assume it's the last blob produced in the network
+            nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0]
+
+    def set_optional_input(self, input_idx, value=None, format="float"):
+        """
+        Marks the given input as optional.
+        Optionally, sets a default value for the input if ``value`` is not ``None``.
+
+        Parameters
+        ----------
+        input_idx: int
+            Index of the input to be marked optional and, if a value is given,
+            filled with that default value.
+        value: int/double/float/None
+            Value to be filled in as the default value.
+        format: str
+            Format of the default value.
+            Must be one of ``'float'``, ``'double'``, or ``'int'``.
+        """
+        if input_idx >= len(self.spec.description.input):
+            msg = (
+                str(input_idx)
+                + " out of "
+                + str(len(self.spec.description.input))
+                + " inputs!"
+            )
+            raise ValueError("Setting invalid input as optional! {}".format(msg))
+        self.spec.description.input[input_idx].type.isOptional = True
+        if value is None:
+            return
+        # Default value is supported from CoreML 4 onwards.
+        self.spec.specificationVersion = max(
+            self.spec.specificationVersion, _SPECIFICATION_VERSION_IOS_14
+        )
+        format = format.lower()
+        if format == "float":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.floatDefaultValue = value
+        elif format == "double":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.doubleDefaultValue = value
+        elif format == "int":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.intDefaultValue = value
+        else:
+            raise ValueError(
+                "Incorrect format for optional inputs! Expecting int/float/double, got {}!".format(
+                    format
+                )
+            )
+
+    def add_optionals(self, optionals_in, optionals_out):
+        """
+        Add optional inputs and outputs to the model spec.
+
+        Parameters
+        ----------
+        optionals_in: list of tuple
+            List of ``(name, dim)`` tuples for the optional inputs, where
+            ``dim`` is an ``int`` or a tuple of ints.
+
+        optionals_out: list of tuple
+            List of ``(name, dim)`` tuples for the optional outputs. ``dim``
+            may be falsy (for example ``0``) to leave the output shape unset.
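+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # A hypothetical sketch: 'opt_in' is an optional 3-dimensional
+            # input, and 'opt_out' is an optional output with no fixed shape
+            # (a falsy dim leaves the output type unset).
+            >>> builder.add_optionals([('opt_in', 3)], [('opt_out', 0)])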
+
+        See Also
+        --------
+        set_input, set_output
+
+        """
+        spec = self.spec
+        if (not optionals_in) and (not optionals_out):
+            return
+
+        input_types = [
+            datatypes.Array(dim) if isinstance(dim, int) else datatypes.Array(*dim)
+            for (name, dim) in optionals_in
+        ]
+        output_types = []
+        for name, dim in optionals_out:
+            if not dim:
+                output_types.append(None)
+            elif isinstance(dim, int):
+                output_types.append(datatypes.Array(dim))
+            else:
+                output_types.append(datatypes.Array(*dim))
+
+        input_names = [str(name) for (name, dim) in optionals_in]
+        output_names = [str(name) for (name, dim) in optionals_out]
+
+        input_features = list(zip(input_names, input_types))
+        output_features = list(zip(output_names, output_types))
+
+        len_before_in = len(spec.description.input)
+        len_before_out = len(spec.description.output)
+
+        # this appends to the existing model interface
+        set_transform_interface_params(spec, input_features, output_features, True)
+
+        # add types for any extra hidden inputs
+        for idx in range(len_before_in, len(spec.description.input)):
+            spec.description.input[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+        for idx in range(len_before_out, len(spec.description.output)):
+            spec.description.output[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+    def _check_fp16_weight_params_lstms(self, lstm_wp, has_peephole=True):
+        """
+        Checks whether an LSTM layer has at least one ``weight_param`` stored
+        in FP16 format.
+
+        Parameters
+        ----------
+        lstm_wp: LSTM weight parameters.
+        has_peephole: Whether the LSTM has peephole connections.
+        """
+        if len(lstm_wp.inputGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.forgetGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.blockInputWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.outputGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.inputGateRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.forgetGateRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.blockInputRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.outputGateRecursionMatrix.float16Value) > 0:
+            return True
+
+        if has_peephole:
+            if len(lstm_wp.inputGatePeepholeVector.float16Value) > 0:
+                return True
+            if len(lstm_wp.forgetGatePeepholeVector.float16Value) > 0:
+                return True
+            if len(lstm_wp.outputGatePeepholeVector.float16Value) > 0:
+                return True
+
+        return False
+
+    def _check_fp16_weight_param_exists(self, layers):
+        """
+        Checks whether the network has at least one ``weight_param`` stored in
+        FP16 format.
+
+        Parameters
+        ----------
+        layers: list of nn_spec.layer
+            List of layers.
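+
+        Returns
+        -------
+        bool
+            True if at least one ``weight_param`` stored in FP16 format is
+            found, False otherwise.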
+ """ + + for layer in layers: + layer_type = layer.WhichOneof("layer") + + # Convolution + if layer_type == "convolution": + if len(layer.convolution.weights.float16Value) > 0: + return True + if layer.convolution.hasBias and len(layer.convolution.bias.float16Value) > 0: + return True + # Batchnorm + elif layer_type == "batchnorm": + if len(layer.batchnorm.mean.float16Value) > 0: + return True + + # InnerProduct + elif layer_type == "innerProduct": + if len(layer.innerProduct.weights.float16Value) > 0: + return True + if layer.innerProduct.hasBias and len(layer.innerProduct.bias.float16Value) > 0: + return True + + # BatchedMatmul + elif layer_type == "batchedMatmul": + if len(layer.batchedMatmul.weights.float16Value) > 0: + return True + if layer.batchedMatmul.hasBias and len(layer.batchedMatmul.bias.float16Value) > 0: + return True + + # Embedding layer + elif layer_type == "embedding": + if len(layer.embedding.weights.float16Value) > 0: + return True + if layer.embedding.hasBias and len(layer.embedding.bias.float16Value) > 0: + return True + + # Embedding ND layer + elif layer_type == "embeddingND": + if len(layer.embeddingND.weights.float16Value) > 0: + return True + if layer.embeddingND.hasBias and len(layer.embeddingND.bias.float16Value) > 0: + return True + + # Scale layer + elif layer_type == "scale": + if len(layer.scale.shapeScale.float16Value) > 0: + return True + if layer.scale.hasBias and len(layer.scale.bias.float16Value) > 0: + return True + + # Bias layer + elif layer_type == "bias": + if len(layer.bias.bias.float16Value) > 0: + return True + + # LoadConstant layer + elif layer_type == "loadConstant": + if len(layer.loadConstant.data.float16Value) > 0: + return True + + # Simple Recurrent + elif layer_type == "simpleRecurrent": + if len(layer.simpleRecurrent.weightMatrix.float16Value) > 0: + return True + if layer.simpleRecurrent.hasBiasVector and len(layer.simpleRecurrent.biasVector.float16Value) > 0: + return True + + # GRU + elif layer_type == "gru": + if len(layer.gru.updateGateWeightMatrix.float16Value) > 0: + return True + if layer.gru.hasBiasVectors and len(layer.gru.outputGateBiasVector.float16Value) > 0: + return True + + # uniDirectionalLSTM Layers + elif layer_type == "uniDirectionalLSTM": + return self._check_fp16_weight_params_lstms(lstm_wp=layer.uniDirectionalLSTM.weightParams, + has_peephole=layer.uniDirectionalLSTM.params.hasPeepholeVectors) + + # biDirectionalLSTM Layers + elif layer_type == "biDirectionalLSTM": + for lstm_wp in layer.biDirectionalLSTM.weightParams: + if self._check_fp16_weight_params_lstms(lstm_wp=lstm_wp, + has_peephole=layer.biDirectionalLSTM.params.hasPeepholeVectors): + return True + + # branch Layers + elif layer_type == "branch": + if len(layer.branch.ifBranch.float16Value) > 0: + return True + if len(layer.branch.elseBranch.float16Value) > 0: + return True + + # loop Layers + elif layer_type == "loop": + if len(layer.loop.conditionNetwork.float16Value) > 0: + return True + if len(layer.loop.bodyNetwork.float16Value) > 0: + return True + + return False + + def make_updatable(self, trainables): + """ + Make the builder's NeuralNetwork spec updatable. + + Parameters + ---------- + trainables: list of str + List of layer names to be set trainable. + """ + if self.spec is None: + return + + # check if any layer weights/biases is in FP16 format + if self._check_fp16_weight_param_exists(self.nn_spec.layers): + raise ValueError("This model has at least one layer with FP16 weights or bias formats. 
These networks will "
+                "always be optimized to a full FP16 model format, which cannot be marked "
+                "updatable. Either make sure the model has no FP16 WeightParams, or split the "
+                "network into two models, with the updatable part as a separate model containing no "
+                "FP16 WeightParams. Note that an updatable pipeline model can only have its last "
+                "sub-model marked as updatable.")
+
+        self.spec.isUpdatable = True
+
+        if (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _MINIMUM_UPDATABLE_SPEC_VERSION
+        ):
+            self.spec.specificationVersion = _MINIMUM_UPDATABLE_SPEC_VERSION
+
+        self.nn_spec.updateParams.MergeFromString(b"")
+        self.set_shuffle()
+
+        for trainable in trainables:
+            if trainable not in self.layer_specs:
+                raise ValueError("Layer %s does not exist." % trainable)
+            spec_layer = self.layer_specs[trainable]
+            spec_layer_type = spec_layer.WhichOneof("layer")
+            if spec_layer_type not in _SUPPORTED_UPDATABLE_LAYERS:
+                raise ValueError(
+                    "Layer %s is not supported to be marked as updatable. Only %s layers "
+                    "are supported to be marked updatable."
+                    % (trainable, _SUPPORTED_UPDATABLE_LAYERS)
+                )
+            spec_layer.isUpdatable = True
+            typed_layer = getattr(spec_layer, spec_layer.WhichOneof("layer"))
+            for fd in typed_layer.DESCRIPTOR.fields:
+                field = getattr(typed_layer, fd.name)
+                if type(field) == _NeuralNetwork_pb2.LSTMWeightParams:
+                    wfs = _get_lstm_weight_fields(field)
+                    for wf in wfs:
+                        wf.isUpdatable = True
+                elif type(field) == _NeuralNetwork_pb2.WeightParams:
+                    field.isUpdatable = True
+                else:
+                    pass
+
+    def set_categorical_cross_entropy_loss(self, name, input):
+        r"""
+        Categorical cross-entropy is used for single-label categorization
+        (only one category is applicable for each data point).
+
+        Parameters
+        ----------
+        name: str
+            The name of the loss layer.
+        input: str
+            The name of the input.
+            The ``input`` should be a vector of length N representing the
+            distribution over N categories. This must be the output of a softmax.
+
+        Notes
+        -----
+
+        .. math::
+            \mathrm{Loss}_{CCE}(input, target) = -\sum_{i=1}^{N} [target = i] \log(input[i]) = -\log(input[target])
+        """
+        if self.spec is None:
+            return
+
+        if name in self.layer_specs:
+            raise ValueError("Name %s is already used." % name)
+
+        if input is None:
+            raise ValueError("Loss layer input must be specified.")
+
+        target = input + "_true"
+
+        if len(self.nn_spec.layers) < 1:
+            raise ValueError(
+                "Loss layer (%s) cannot be attached to an empty model." % name
+            )
+
+        # validate input
+        # input must be a softmax layer output
+        input_validated = False
+        for _, layer in enumerate(self.nn_spec.layers[::-1]):
+            layer_outputs = list(layer.output)
+            layer_type = layer.WhichOneof("layer")
+
+            if input in layer_outputs and layer_type == "softmax":
+                input_validated = True
+                break
+
+        if not input_validated:
+            raise ValueError(
+                "Categorical Cross Entropy loss layer input (%s) must be a softmax layer output."
+                % input
+            )
+
+        # validate target
+        output_names = [x.name for x in self.spec.description.output]
+        if target in output_names:
+            raise ValueError(
+                "Loss layer target (%s) must not be a model output."
+                % target
+            )
+
+        updating_classifier = False
+        predicted_probabilities_name = self.spec.description.predictedProbabilitiesName
+        predicted_feature_name = self.spec.description.predictedFeatureName
+        if (
+            self.spec.HasField("neuralNetworkClassifier")
+            and input == predicted_probabilities_name
+        ):
+            updating_classifier = True
+
+        loss_layer = self.nn_spec.updateParams.lossLayers.add()
+        self.layers.append(name)
+        self.layer_specs[name] = loss_layer
+        loss_layer.name = name
+        loss_layer.categoricalCrossEntropyLossLayer.input = input
+        loss_layer.categoricalCrossEntropyLossLayer.target = target
+
+        training_inputs = self.spec.description.trainingInput
+        training_inputs.extend(self.spec.description.input)
+        training_input = training_inputs.add()
+
+        if updating_classifier:
+            training_input.name = predicted_feature_name
+            classifier_output_type = [
+                x.type
+                for x in self.spec.description.output
+                if x.name == predicted_feature_name
+            ]
+
+            model_type = classifier_output_type[0].WhichOneof("Type")
+            if model_type == "stringType":
+                datatypes._set_datatype(training_input.type, datatypes.String())
+            elif model_type == "int64Type":
+                datatypes._set_datatype(training_input.type, datatypes.Int64())
+        else:
+            training_input.name = target
+            datatypes._set_datatype(training_input.type, datatypes.Array(1))
+            training_input.type.multiArrayType.dataType = (
+                _Model_pb2.ArrayFeatureType.INT32
+            )
+
+        print(
+            "Now adding input {} as target for categorical cross-entropy loss layer.".format(
+                target
+            )
+        )
+
+    def set_mean_squared_error_loss(self, name, input_feature=None):
+        """
+        Mean squared error is used for regression against a continuous target.
+
+        Parameters
+        ----------
+        name: str
+            The name of the loss layer.
+        input_feature: (str, datatypes.Array) or None
+            The input feature of the loss layer. The feature is a
+            ``(name, array)`` tuple, where ``name`` is the name of the model's
+            tensor our loss will be attached to, and ``array`` is a
+            ``datatypes.Array`` object describing the shape of that tensor.
+            Both the name and the array's shape must be provided in the tuple.
+
+        Examples
+        --------
+
+        >>> feature = ('output_tensor', datatypes.Array((299, 299, 3)))
+        """
+        if self.spec is None:
+            return
+
+        if name in self.layer_specs:
+            raise ValueError("Name %s is already used."
+                             % name)
+
+        if input_feature is None:
+            raise ValueError("Loss layer input must be specified.")
+
+        if not isinstance(input_feature, tuple):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+
+        (fname, ftype) = input_feature
+        if not isinstance(fname, str):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+        if not isinstance(ftype, datatypes.Array):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+
+        target = fname + "_true"
+
+        loss_layer = self.nn_spec.updateParams.lossLayers.add()
+        self.layers.append(name)
+        self.layer_specs[name] = loss_layer
+        loss_layer.name = name
+
+        output_names = [x.name for x in self.spec.description.output]
+        if target in output_names:
+            raise ValueError(
+                "Loss layer target (%s) must not be a model output." % target
+            )
+
+        loss_layer.meanSquaredErrorLossLayer.input = input_feature[0]
+        loss_layer.meanSquaredErrorLossLayer.target = target
+
+        training_inputs = self.spec.description.trainingInput
+        training_inputs.extend(self.spec.description.input)
+        training_input = training_inputs.add()
+        training_input.name = target
+
+        datatypes._set_datatype(training_input.type, input_feature[1])
+        training_input.type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+        print(
+            "Now adding input {} as target for mean squared error loss layer.".format(
+                target
+            )
+        )
+
+    def set_sgd_optimizer(self, sgd_params):
+        """
+        Set the SGD optimizer, and its hyperparameters, on the updatable model spec.
+        """
+        if self.spec is None:
+            return
+
+        if not isinstance(sgd_params, SgdParams):
+            raise Exception("sgd_params must be an instance of SgdParams")
+
+        sgd_optimizer = self.nn_spec.updateParams.optimizer.sgdOptimizer
+
+        # set learning rate
+        sgd_optimizer.learningRate.defaultValue = sgd_params.lr.value
+        sgd_optimizer.learningRate.range.minValue = sgd_params.lr.min
+        sgd_optimizer.learningRate.range.maxValue = sgd_params.lr.max
+
+        # set mini batch size
+        sgd_optimizer.miniBatchSize.defaultValue = sgd_params.batch.value
+        sgd_optimizer.miniBatchSize.set.values.extend(sgd_params.batch.allowed_set)
+
+        # set momentum
+        sgd_optimizer.momentum.defaultValue = sgd_params.momentum.value
+        sgd_optimizer.momentum.range.minValue = sgd_params.momentum.min
+        sgd_optimizer.momentum.range.maxValue = sgd_params.momentum.max
+
+    def set_adam_optimizer(self, adam_params):
+        """
+        Set the Adam optimizer, and its hyperparameters, on the updatable model spec.
+        """
+        if self.spec is None:
+            return
+
+        if not isinstance(adam_params, AdamParams):
+            raise Exception("adam_params must be an instance of AdamParams")
+
+        adam_optimizer = self.nn_spec.updateParams.optimizer.adamOptimizer
+
+        # set learning rate
+        adam_optimizer.learningRate.defaultValue = adam_params.lr.value
+        adam_optimizer.learningRate.range.minValue = adam_params.lr.min
+        adam_optimizer.learningRate.range.maxValue = adam_params.lr.max
+
+        # set mini batch size
+        adam_optimizer.miniBatchSize.defaultValue = adam_params.batch.value
+        adam_optimizer.miniBatchSize.set.values.extend(adam_params.batch.allowed_set)
+
+        # set beta1
+        adam_optimizer.beta1.defaultValue = adam_params.beta1.value
+        adam_optimizer.beta1.range.minValue = adam_params.beta1.min
+        adam_optimizer.beta1.range.maxValue = adam_params.beta1.max
+
+        # set beta2
+        adam_optimizer.beta2.defaultValue = adam_params.beta2.value
+        adam_optimizer.beta2.range.minValue = adam_params.beta2.min
+        adam_optimizer.beta2.range.maxValue = adam_params.beta2.max
+
+        # set eps
+        adam_optimizer.eps.defaultValue = adam_params.eps.value
+        adam_optimizer.eps.range.minValue = adam_params.eps.min
+        adam_optimizer.eps.range.maxValue = adam_params.eps.max
+
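+    # A minimal, hypothetical usage sketch for the training setters above and
+    # below (SgdParams is assumed to come from
+    # coremltools.models.neural_network.update_optimizer_utils):
+    #
+    #     builder.set_sgd_optimizer(SgdParams(lr=0.01, batch=8, momentum=0.0))
+    #     builder.set_epochs(10, allowed_set=[10, 20, 30])
+    #     builder.set_shuffle(seed=42)
+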
+    def set_epochs(self, epochs=1, allowed_set=None):
+        """
+        Set the default number of training epochs, and optionally the allowed
+        set of epoch values, on the updatable model spec.
+        """
+        if self.spec is None:
+            return
+
+        self.nn_spec.updateParams.epochs.defaultValue = epochs
+
+        if allowed_set is None:
+            self.nn_spec.updateParams.epochs.set.values.extend([epochs])
+        else:
+            self.nn_spec.updateParams.epochs.set.values.extend(allowed_set)
+
+    def set_shuffle(self, seed=None):
+        """
+        Enable shuffling of the training data, optionally with a fixed seed.
+        """
+        if self.spec is None:
+            return
+
+        # Validate that the seed passed in is an integer
+        if seed is not None:
+            if not isinstance(seed, int):
+                raise TypeError("Shuffle seed value must be integer")
+
+        self.nn_spec.updateParams.shuffle.defaultValue = True
+        if seed is not None:
+            self.nn_spec.updateParams.seed.defaultValue = seed
+
+    def _add_generic_layer(
+        self,
+        name,
+        input_names,
+        output_names,
+        input_ranks=None,
+        input_shapes=None,
+        output_ranks=None,
+        output_shapes=None,
+    ):
+        generic_layer = self.nn_spec.layers.add()
+        generic_layer.name = name
+        generic_layer.input.extend(input_names)
+        generic_layer.output.extend(output_names)
+        self.layers.append(name)
+        if name in self.layer_specs:
+            raise ValueError(
+                'Layer with name "%s" has already been added. Please use a unique name.'
+                % name
+            )
+        self.layer_specs[name] = generic_layer
+        _fill_tensor_fields(generic_layer.inputTensor, input_ranks, input_shapes)
+        _fill_tensor_fields(generic_layer.outputTensor, output_ranks, output_shapes)
+
+        # Pass rank information:
+        # a generic layer copies the rank of its first input to all of its outputs.
+        # Layers that change rank relative to the first input must override this.
+        if input_names is not None and len(input_names) > 0:
+            for output_ in output_names:
+                self.rank_dict[output_] = self._get_rank(input_names[0])
+        return generic_layer
+
+    def inspect_layers(self, last=-1, verbose=False):
+        """ Prints a summary of the last ``last`` layers.
+
+        Parameters
+        ----------
+        last: int
+            The number of layers to inspect, starting from the last one.
+        verbose: bool
+            Whether to display layer-specific parameters or not.
+        """
+        n_layers = len(self.nn_spec.layers)
+        if last < 0:
+            last = n_layers
+        for i, alayer in enumerate(self.nn_spec.layers[::-1]):
+            if i >= last:
+                break
+            (
+                layer_type,
+                name,
+                in_blobs,
+                out_blobs,
+                params_info,
+            ) = _summarize_network_layer_info(alayer)
+            print(
+                "[Id: {}], Name: {} (Type: {})".format(
+                    n_layers - i - 1, name, layer_type
+                )
+            )
+            print(" " * 10 + "Updatable: {}".format(alayer.isUpdatable))
+            print(" " * 10 + "Input blobs: {}".format(in_blobs))
+            print(" " * 10 + "Output blobs: {}".format(out_blobs))
+            if verbose and len(params_info) > 0:
+                print(" " * 10 + "Parameters: ")
+                for param in params_info:
+                    print(" " * 14 + "{} = {}".format(param[0], param[1]))
+
+    def inspect_loss_layers(self):
+        """ Prints a summary of the loss layers.
+ """ + n_loss_layers = len(self.nn_spec.updateParams.lossLayers) + if n_loss_layers < 1: + print("no loss layer detected.") + for i, loss_layer in enumerate(self.nn_spec.updateParams.lossLayers[::-1]): + loss_type = loss_layer.WhichOneof("LossLayerType") + loss_name = loss_layer.name + loss_input = None + loss_target = None + if loss_type == "categoricalCrossEntropyLossLayer": + loss_input = loss_layer.categoricalCrossEntropyLossLayer.input + loss_target = loss_layer.categoricalCrossEntropyLossLayer.target + elif loss_type == "meanSquaredErrorLossLayer": + loss_input = loss_layer.meanSquaredErrorLossLayer.input + loss_target = loss_layer.meanSquaredErrorLossLayer.target + + print( + "[Id: {}], Name: {} (Type: {})".format( + n_loss_layers - i - 1, loss_name, loss_type + ) + ) + print(" " * 10 + "Loss Input: {}".format(loss_input)) + print(" " * 10 + "Loss Target: {}".format(loss_target)) + + def inspect_optimizer(self): + """ Prints the summary for the optimizer. + """ + optimizer = self.nn_spec.updateParams.optimizer + optimizer_type = optimizer.WhichOneof("OptimizerType") + print("Optimizer Type: {}".format(optimizer_type)) + if optimizer_type == "sgdOptimizer": + lr = optimizer.sgdOptimizer.learningRate + batch = optimizer.sgdOptimizer.miniBatchSize + momentum = optimizer.sgdOptimizer.momentum + print( + "lr: {}, min: {}, max: {}".format( + lr.defaultValue, lr.range.minValue, lr.range.maxValue + ) + ) + print( + "batch: {}, allowed_set: {}".format( + batch.defaultValue, batch.set.values + ) + ) + print( + "momentum: {}, min: {}, max: {}".format( + momentum.defaultValue, + momentum.range.minValue, + momentum.range.maxValue, + ) + ) + elif optimizer_type == "adamOptimizer": + lr = optimizer.adamOptimizer.learningRate + batch = optimizer.adamOptimizer.miniBatchSize + beta1 = optimizer.adamOptimizer.beta1 + beta2 = optimizer.adamOptimizer.beta2 + eps = optimizer.adamOptimizer.eps + print( + "lr: {}, min: {}, max: {}".format( + lr.defaultValue, lr.range.minValue, lr.range.maxValue + ) + ) + print( + "batch: {}, allowed_set: {}".format( + batch.defaultValue, batch.set.values + ) + ) + print( + "beta1: {}, min: {}, max: {}".format( + beta1.defaultValue, beta1.range.minValue, beta1.range.maxValue + ) + ) + print( + "beta2: {}, min: {}, max: {}".format( + beta2.defaultValue, beta2.range.minValue, beta2.range.maxValue + ) + ) + print( + "epsilon: {}, min: {}, max: {}".format( + eps.defaultValue, eps.range.minValue, eps.range.maxValue + ) + ) + + def inspect_updatable_layers(self): + """ Prints all updatable layers with their inputs and outputs. + """ + for _, layer in enumerate(self.nn_spec.layers[::-1]): + if layer.isUpdatable: + ( + layer_type, + name, + in_blobs, + out_blobs, + _, + ) = _summarize_network_layer_info(layer) + print("Name: {} (Type: {})".format(name, layer_type)) + print(" " * 10 + "Input blobs: {}".format(in_blobs)) + print(" " * 10 + "Output blobs: {}".format(out_blobs)) + + def inspect_input_features(self): + """ Prints the name and type of input features. + """ + input_features = self.spec.description.input + n_input_features = len(input_features) + if n_input_features < 1: + return + for i, input_feature in enumerate(input_features[::-1]): + print( + "[Id: {}] Name: {}".format(n_input_features - i - 1, input_feature.name) + ) + print(" " * 10 + "Type: {}".format(input_feature.type)) + + def inspect_output_features(self): + """ Prints the name and type of output features. 
+ """ + output_features = self.spec.description.output + n_output_features = len(output_features) + if n_output_features < 1: + return + for i, output_feature in enumerate(output_features[::-1]): + print( + "[Id: {}] Name: {}".format( + n_output_features - i - 1, output_feature.name + ) + ) + print(" " * 10 + "Type: {}".format(output_feature.type)) + + def inspect_conv_channels(self, layer_name): + """ Prints the output and kernel channels of a convolution layer. + """ + if self.spec is None: + return + if layer_name not in self.layer_specs: + raise ValueError("Layer %s does not exist." % (layer_name)) + spec_layer = self.layer_specs[layer_name] + + if spec_layer.WhichOneof("layer") != "convolution": + raise ValueError("Layer %s is not a convolution layer." % (layer_name)) + + output_channels = spec_layer.convolution.outputChannels + kernel_channels = spec_layer.convolution.kernelChannels + print("outputChannels: {}".format(output_channels)) + print("kernelChannels: {}".format(kernel_channels)) + + def inspect_innerproduct_channels(self, layer_name): + """ Prints the output and kernel channels of an innerProduct layer. + """ + if self.spec is None: + return + if layer_name not in self.layer_specs: + raise ValueError("Layer %s does not exist." % (layer_name)) + spec_layer = self.layer_specs[layer_name] + + if spec_layer.WhichOneof("layer") != "innerProduct": + raise ValueError("Layer %s is not an innerProduct layer." % (layer_name)) + + input_channels = spec_layer.innerProduct.inputChannels + output_channels = spec_layer.innerProduct.outputChannels + print("inputChannels: {}".format(input_channels)) + print("outputChannels: {}".format(output_channels)) + + def _get_rank(self, name): + return self.rank_dict[name] if name in self.rank_dict else -1 + + def _set_max_input_rank(self, input_names, output_name): + if len(input_names) == 0: + raise ValueError("Input name list empty for collecting rank information") + self.rank_dict[output_name] = -1 + for i in range(0, len(input_names)): + input_rank = self._get_rank(input_names[i]) + if input_rank == -1: + self.rank_dict[output_name] = -1 + return + self.rank_dict[output_name] = max(self._get_rank(output_name), input_rank) + + def _set_rank_for_reduce_op( + self, input_name, output_name, axes, keepdims, reduce_all + ): + if keepdims: + self.rank_dict[output_name] = self._get_rank(input_name) + else: + if reduce_all or self._get_rank(input_name) == 1: + self.rank_dict[output_name] = 1 + elif axes is not None and len(axes) > 0: + rank = self._get_rank(input_name) - len(axes) + self.rank_dict[output_name] = rank if rank != 0 else 1 + else: + raise ValueError( + "Reduce Ops must provide axes to reduce on if reduce_all is False" + ) + + def add_inner_product( + self, + name, + W, + b, + input_channels, + output_channels, + has_bias, + input_name, + output_name, + int_8_dynamic_quantize=False, + is_quantized_weight=False, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + ): + """ + Add an inner product layer to the model. + Refer to the ``InnerProductLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: numpy.array or bytes() + Weight matrix of shape ``(output_channels, input_channels)``. + If ``W`` is of type ``bytes()`` (quantized), other quantization + related arguments must be provided as well (see below). + b: numpy.array + Bias vector of shape: ``(output_channels, )``. 
+ input_channels: int + Number of input channels. + output_channels: int + Number of output channels. + has_bias: boolean + Whether the bias vector of this layer is ignored in the spec. + + - If True, the bias vector of this layer is not ignored. + - If False, the bias vector is ignored. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + Quantization arguments, used when ``W`` is of type ``bytes()``: + int_8_dynamic_quantize: boolean + Whether to quantize and dequantize before and after inner product, respectively. + Expects byte weights, representing int8 values, if True. + See NeuralNetwork.proto for other validation conditions. + + is_quantized_weight: bool, optional + Set it to true when ``W`` is of type ``bytes()``, representing + quantized weights, default: false. + + quantization_type: str + When weights are quantized (that is, ``W`` is of type ``bytes()``), + this should be either ``"linear"`` or ``"lut"``. + + nbits: int + Should be between 1 and 8 (inclusive). Number of bits per weight + value. Only applicable when weights are quantized. + + quant_scale: numpy.array(dtype=numpy.float32) + scale vector to be used with linear quantization. Must be of + length either 1 or output_channels. + + quant_bias: numpy.array(dtype=numpy.float32) + bias vector to be used with linear quantization. Must be of + length either 1 or output_channels. + + quant_lut: numpy.array(dtype=numpy.float32) + the LUT (look up table) to be used with LUT quantization. + Must be of length 2^n bits. + + See Also + -------- + add_embedding, add_convolution, add_batched_mat_mul + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.innerProduct + + # Fill in the parameters + spec_layer_params.inputChannels = input_channels + spec_layer_params.outputChannels = output_channels + spec_layer_params.hasBias = has_bias + spec_layer_params.int8DynamicQuantize = int_8_dynamic_quantize + + weights = spec_layer_params.weights + if not is_quantized_weight and isinstance(W, _np.ndarray): + weights.floatValue.extend(W.flatten()) + else: + + _verify_quantization_arguments( + weight=W, + output_channels=output_channels, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + int_8_dynamic_quantize=int_8_dynamic_quantize, + ) + + _fill_quantized_weights( + weights_message=weights, + W=W, + use_int_8=int_8_dynamic_quantize, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + if has_bias: + bias = spec_layer_params.bias + bias.floatValue.extend(b.flatten()) + + return spec_layer + + def add_embedding( + self, + name, + W, + b, + input_dim, + output_channels, + has_bias, + input_name, + output_name, + is_quantized_weight=False, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + ): + """ + Add an embedding layer to the model. + Refer to the ``EmbeddingLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: float32 numpy.array or bytes() + Weight matrix of shape ``(output_channels, input_dim)``. + If ``W`` is of type ``bytes()`` (quantized to 1-8 bits), other + quantization related arguments must be provided as well (see below). + b: numpy.array + Bias vector of shape ``(output_channels, )``. 
+ input_dim: int + Size of the vocabulary (1 + maximum integer index of the words). + output_channels: int + Number of output channels. + has_bias: boolean + Whether the bias vector of this layer is ignored in the ``spec``. + + - If True, the bias vector of this layer is not ignored. + - If False, the bias vector is ignored. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + + Quantization arguments expected, when ``W`` is of type ``bytes()``: + + is_quantized_weight: bool + Set it to true when ``W`` is of type ``bytes()``, representing quantized weights. + + quantization_type: str + When weights are quantized (that is, ``W`` is of type ``bytes()``), + this should be either ``"linear"`` or ``"lut"``. + + nbits: int + Should be between 1 and 8 (inclusive). Number of bits per weight value. + + quant_scale: numpy.array(dtype=numpy.float32) + Scale vector to be used with linear quantization. + Must be of length either 1 or output_channels. + + quant_bias: numpy.array(dtype=numpy.float32) + Bias vector to be used with linear quantization. + Must be of length either 1 or output_channels. + + quant_lut: numpy.array(dtype=numpy.float32) + The LUT (look up table) to be used with LUT quantization. + Must be of length 2^n bits. + + See Also + -------- + add_inner_product + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + + # Fill in the parameters + spec_layer_params = spec_layer.embedding + + spec_layer_params.inputDim = input_dim + spec_layer_params.outputChannels = output_channels + spec_layer_params.hasBias = has_bias + + weights = spec_layer_params.weights + if not is_quantized_weight: + weights.floatValue.extend(W.flatten()) + else: + _verify_quantization_arguments( + weight=W, + output_channels=output_channels, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + _fill_quantized_weights( + weights_message=weights, + W=W, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + if has_bias: + bias = spec_layer_params.bias + bias.floatValue.extend(b.flatten()) + + return spec_layer + + def add_softmax(self, name, input_name, output_name): + """ + Add a softmax layer to the model. + Refer to the ``SoftmaxLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_activation, add_inner_product, add_convolution + """ + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.softmax.MergeFromString(b"") + return spec_layer + + def add_activation( + self, + name, + non_linearity, + input_name, + output_name, + params=None, + input_rank=None, + input_shape=None, + output_rank=None, + output_shape=None, + ): + """ + Add an activation layer to the model. + Refer to the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + non_linearity: str + The ``non_linearity`` (activation) function of this layer. + It can be one of the following: + + - ``'RELU'``: Rectified Linear Unit (ReLU) function. + - ``'SIGMOID'``: sigmoid function. + - ``'TANH'``: tanh function. 
+            - ``'SCALED_TANH'``: scaled tanh function, defined as:
+
+              ``f(x) = alpha * tanh(beta * x)``
+
+              where ``alpha`` and ``beta`` are constant scalars.
+
+            - ``'SOFTPLUS'``: softplus function.
+            - ``'SOFTSIGN'``: softsign function.
+            - ``'SIGMOID_HARD'``: hard sigmoid function, defined as:
+
+              ``f(x) = min(max(alpha * x + beta, -1), 1)``
+
+              where ``alpha`` and ``beta`` are constant scalars.
+
+            - ``'LEAKYRELU'``: leaky relu function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * alpha * x``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'PRELU'``: Parametric ReLU function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * alpha * x``
+
+              where ``alpha`` is a multi-dimensional array of same size as ``x``.
+
+            - ``'ELU'``: Exponential linear unit function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'PARAMETRICSOFTPLUS'``: Parametric softplus function, defined as:
+
+              ``f(x) = alpha * log(1 + exp(beta * x))``
+
+              where ``alpha`` and ``beta`` are two multi-dimensional arrays
+              of same size as ``x``.
+
+            - ``'THRESHOLDEDRELU'``: Thresholded ReLU function, defined as:
+
+              ``f(x) = (x >= alpha) * x``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'LINEAR'``: linear function.
+
+              ``f(x) = alpha * x + beta``
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        params: list of float or numpy.array
+            Parameters for the activation, depending on ``non_linearity``.
+
+            - When ``non_linearity`` is one of [``'RELU'``, ``'SIGMOID'``, ``'TANH'``, ``'SOFTPLUS'``, ``'SOFTSIGN'``],
+              ``params`` is ignored.
+            - When ``non_linearity`` is one of [``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``],
+              ``params`` is a list of 2 floats ``[alpha, beta]``.
+            - When ``non_linearity`` is one of [``'LEAKYRELU'``, ``'ELU'``, ``'THRESHOLDEDRELU'``],
+              ``params`` is a list of 1 float ``[alpha]``.
+            - When ``non_linearity`` is ``'PRELU'``, ``params`` is a list of 1 numpy array ``[alpha]``.
+              The shape of ``alpha`` is ``(C,)``, where ``C`` is either the number of input channels or
+              1. When ``C = 1``, the same ``alpha`` is applied to all channels.
+            - When ``non_linearity`` is ``'PARAMETRICSOFTPLUS'``, ``params`` is a
+              list of 2 numpy arrays ``[alpha, beta]``. The shape of ``alpha`` and
+              ``beta`` is ``(C, )``, where ``C`` is either
+              the number of input channels or 1. When ``C = 1``, the same ``alpha`` and
+              ``beta`` are applied to all channels.
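+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # A hypothetical sketch: append a LeakyReLU activation with
+            # alpha = 0.05 to the blob named 'conv1_out'.
+            >>> builder.add_activation('leaky_1', 'LEAKYRELU', 'conv1_out',
+            ...                        'leaky_1_out', params=[0.05])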
+ + See Also + -------- + add_convolution, add_softmax + """ + input_rank = ( + len(input_shape) if (input_shape and not input_rank) else input_rank + ) + output_rank = ( + len(output_shape) if (output_shape and not output_rank) else output_rank + ) + spec_layer = self._add_generic_layer( + name, + [input_name], + [output_name], + [input_rank] if input_rank else None, + [input_shape] if input_shape else None, + [output_rank] if output_rank else None, + [output_shape] if output_shape else None, + ) + spec_layer_params = spec_layer.activation + + # Fill in the parameters + non_linearity = ( + non_linearity.upper() + if isinstance(non_linearity, str) + else non_linearity + ) + if non_linearity == "RELU": + spec_layer_params.ReLU.MergeFromString(b"") + + elif non_linearity == "SIGMOID": + spec_layer_params.sigmoid.MergeFromString(b"") + + elif non_linearity == "TANH": + spec_layer_params.tanh.MergeFromString(b"") + + elif non_linearity == "SCALED_TANH": + spec_layer_params.scaledTanh.MergeFromString(b"") + if params is None: + alpha, beta = (0.0, 0.0) + else: + alpha, beta = params[0], params[1] + spec_layer_params.scaledTanh.alpha = alpha + spec_layer_params.scaledTanh.beta = beta + + elif non_linearity == "SOFTPLUS": + spec_layer_params.softplus.MergeFromString(b"") + + elif non_linearity == "SOFTSIGN": + spec_layer_params.softsign.MergeFromString(b"") + + elif non_linearity == "SIGMOID_HARD": + if params is None: + alpha, beta = (0.2, 0.5) + else: + alpha, beta = params[0], params[1] + spec_layer_params.sigmoidHard.alpha = alpha + spec_layer_params.sigmoidHard.beta = beta + + elif non_linearity == "LEAKYRELU": + if params is None: + alpha = 0.3 + else: + alpha = params[0] + spec_layer_params.leakyReLU.alpha = float(alpha) + + elif non_linearity == "PRELU": + # PReLU must provide an np array in params[0] + spec_layer_params.PReLU.alpha.floatValue.extend(params.flatten()) + + elif non_linearity == "ELU": + # ELU must provide an alpha in params[0] + spec_layer_params.ELU.alpha = float(params) + + elif non_linearity == "PARAMETRICSOFTPLUS": + # Parametric softplus must provide two np arrays for alpha and beta + alphas, betas = (params[0], params[1]) + # Weight alignment: Keras [H,W,C,F] + spec_layer_params.parametricSoftplus.alpha.floatValue.extend( + alphas.flatten() + ) + spec_layer_params.parametricSoftplus.beta.floatValue.extend(betas.flatten()) + + elif non_linearity == "THRESHOLDEDRELU": + if params is None: + theta = 1.0 + else: + theta = params + spec_layer_params.thresholdedReLU.alpha = float(theta) + + elif non_linearity == "LINEAR": + if params is None: + alpha, beta = (1.0, 0.0) + else: + alpha, beta = params[0], params[1] + spec_layer_params.linear.alpha = alpha + spec_layer_params.linear.beta = beta + else: + raise TypeError("Unknown activation type %s." % non_linearity) + return spec_layer + + def add_elementwise(self, name, input_names, output_name, mode, alpha=None): + """ + Add an element-wise operation layer to the model. + Refer to the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + A list of input blob names of this layer. The input blobs should have the same shape. + output_name: str + The output blob name of this layer. + mode: str + A string specifying the mode of the elementwise layer. It can be one of the following: + + - ``'CONCAT'``: Concatenate input blobs along the channel axis. + - ``'SEQUENCE_CONCAT'``: Concatenate input blobs along the sequence axis. 
+            - ``'ADD'``: Perform an element-wise summation over the input blobs.
+            - ``'MULTIPLY'``: Perform an element-wise multiplication over the input blobs.
+            - ``'DOT'``: Compute the dot product of the two input blobs.
+              In this mode, the length of ``input_names`` should be 2.
+            - ``'COS'``: Compute the cosine similarity of the two input blobs.
+              In this mode, the length of ``input_names`` should be 2.
+            - ``'MAX'``: Compute the element-wise maximum over the input blobs.
+            - ``'MIN'``: Compute the element-wise minimum over the input blobs.
+            - ``'AVE'``: Compute the element-wise average over the input blobs.
+
+        alpha: float
+            * if ``mode == 'ADD'`` and there is only one ``input_name``,
+              ``alpha`` is added to the input.
+            * if ``mode == 'MULTIPLY'`` and there is only one ``input_name``,
+              the input is multiplied by ``alpha``.
+
+        See Also
+        --------
+        add_upsample, add_sequence_repeat
+
+        """
+        input_names = input_names if isinstance(input_names, list) else [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        # add one of the following layers
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "CONCAT":
+            spec_layer.concat.sequenceConcat = False
+        elif mode == "SEQUENCE_CONCAT":
+            spec_layer.concat.sequenceConcat = True
+        elif mode == "ADD":
+            spec_layer.add.MergeFromString(b"")
+            if alpha:
+                spec_layer.add.alpha = alpha
+        elif mode == "MULTIPLY":
+            spec_layer.multiply.MergeFromString(b"")
+            if alpha:
+                spec_layer.multiply.alpha = alpha
+        elif mode == "COS":
+            spec_layer.dot.cosineSimilarity = True
+        elif mode == "DOT":
+            spec_layer.dot.cosineSimilarity = False
+        elif mode == "MAX":
+            spec_layer.max.MergeFromString(b"")
+        elif mode == "MIN":
+            spec_layer.min.MergeFromString(b"")
+        elif mode == "AVE":
+            spec_layer.average.MergeFromString(b"")
+        else:
+            raise ValueError("Unsupported elementwise mode %s" % mode)
+        return spec_layer
+
+    def add_upsample(
+        self,
+        name,
+        scaling_factor_h,
+        scaling_factor_w,
+        input_name,
+        output_name,
+        mode="NN",
+        linear_upsample_mode="DEFAULT",
+    ):
+        """
+        Add an upsample layer to the model.
+        Refer to the ``UpsampleLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        scaling_factor_h: int or float
+            Scaling factor in the vertical direction. Float values are only
+            supported with ``BILINEAR`` and ``ALIGN_CORNERS_*``.
+        scaling_factor_w: int or float
+            Scaling factor in the horizontal direction. Float values are only
+            supported with ``BILINEAR`` and ``ALIGN_CORNERS_*``.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str
+            Overall interpolation mode. The following values are supported:
+
+            * ``'NN'``: nearest neighbour
+            * ``'BILINEAR'``: bilinear interpolation
+
+        linear_upsample_mode: str
+            Specifies the behavior for linear upsampling. Only valid when
+            interpolation mode is ``BILINEAR``.
+ + If input grid is ``[0, Xin-1]`` (corresponding to an input size of + ``Xin``), and if the output size is ``Xout``, + then the grid points are sampled in the following manner: + + 'DEFAULT': + - ``spacing = (Xin-Xin/Xout) / (Xout-1)`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,..,Xout-1`` + + 'ALIGN_CORNERS_TRUE': + - ``spacing = (Xin-1) / (Xout-1)`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,..,Xout-1`` + + 'ALIGN_CORNERS_FALSE': + - ``spacing = Xin / Xout`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing + 0.5 * spacing - 0.5)), for i = 0,1,2,..,Xout-1`` + + See Also + -------- + add_resize_bilinear + + """ + + mode = mode.upper() if isinstance(mode, str) else mode + linear_upsample_mode = ( + linear_upsample_mode.upper() + if isinstance(linear_upsample_mode, str) + else linear_upsample_mode + ) + if not mode in ["NN", "BILINEAR"]: + raise ValueError("Unsupported upsampling mode %s" % mode) + if not linear_upsample_mode in [ + "DEFAULT", + "ALIGN_CORNERS_TRUE", + "ALIGN_CORNERS_FALSE", + ]: + raise ValueError( + "Unsupported linear upsampling mode %s" % linear_upsample_mode + ) + + # Default linear upsample mode is backwards compatible, else set spec to iOS14 + if ( + linear_upsample_mode != "DEFAULT" + and self.spec + and ( + not self.spec.specificationVersion + or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14 + ) + ): + self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14 + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.upsample + if ( + scaling_factor_h - _math_floor(scaling_factor_h) > 0.001 + or scaling_factor_w - _math_floor(scaling_factor_w) > 0.001 + ): + if mode != "BILINEAR" or linear_upsample_mode not in [ + "ALIGN_CORNERS_TRUE", + "ALIGN_CORNERS_FALSE", + ]: + raise ValueError( + "Fractional upsampling only compatible with BILINEAR and ALIGN_CORNERS_TRUE or ALIGN_CORNERS_FALSE" + ) + spec_layer_params.fractionalScalingFactor.append(float(scaling_factor_h)) + spec_layer_params.fractionalScalingFactor.append(float(scaling_factor_w)) + else: + spec_layer_params.scalingFactor.append(int(scaling_factor_h)) + spec_layer_params.scalingFactor.append(int(scaling_factor_w)) + + spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value( + mode + ) + spec_layer_params.linearUpsampleMode = _NeuralNetwork_pb2.UpsampleLayerParams.LinearUpsampleMode.Value( + linear_upsample_mode + ) + + return spec_layer + + def add_scale( + self, + name, + W, + b, + has_bias, + input_name, + output_name, + shape_scale=None, + shape_bias=None, + ): + """ + Add a scale layer to the model. + Refer to the ``ScaleLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: int or numpy.array + Scale of the input. + b: int or numpy.array + Bias to add to the input. + has_bias: boolean + Whether the bias vector of this layer is ignored in the ``spec``. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + shape_scale: list of int or tuple of int + List of ints that specifies the shape of the scale parameter. + Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``. + shape_bias: list of int + List of ints that specifies the shape of the bias parameter + (if present). Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``. 
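+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # A hypothetical sketch: scale a 3-channel blob by per-channel
+            # factors and add a per-channel bias.
+            >>> import numpy as np
+            >>> builder.add_scale('scale_1', W=np.array([1.0, 2.0, 3.0]),
+            ...                   b=np.array([0.0, 0.1, 0.2]), has_bias=True,
+            ...                   input_name='data', output_name='data_scaled',
+            ...                   shape_scale=[3], shape_bias=[3])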
+
+        See Also
+        --------
+        add_bias
+        """
+
+        if not shape_scale:
+            shape_scale = [1]
+        if not shape_bias:
+            shape_bias = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.scale
+
+        spec_layer_params.hasBias = has_bias
+
+        # add scale and its shape
+        scale = spec_layer_params.scale
+        spec_layer_params.shapeScale.extend(shape_scale)
+        if isinstance(W, int):
+            scale.floatValue.append(float(W))
+        else:
+            scale.floatValue.extend(W.flatten())
+        if len(scale.floatValue) != _np.prod(shape_scale):
+            raise ValueError(
+                "Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter"
+            )
+
+        # add bias and its shape
+        if has_bias:
+            bias = spec_layer_params.bias
+            spec_layer_params.shapeBias.extend(shape_bias)
+            if isinstance(b, int):
+                bias.floatValue.append(float(b))
+            else:
+                bias.floatValue.extend(b.flatten())
+            if len(bias.floatValue) != _np.prod(shape_bias):
+                raise ValueError(
+                    "Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter"
+                )
+        return spec_layer
+
+    def add_bias(self, name, b, input_name, output_name, shape_bias=None):
+        """
+        Add a bias layer to the model.
+        Refer to the ``BiasLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        b: int or numpy.array
+            Bias to add to the input.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        shape_bias: list of int
+            List of ints that specifies the shape of the bias parameter
+            (if present). Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``.
+
+        See Also
+        --------
+        add_scale
+        """
+
+        if not shape_bias:
+            shape_bias = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.bias
+
+        # add bias and its shape
+        bias = spec_layer_params.bias
+        if len(shape_bias) != 1 and len(shape_bias) != 3:
+            raise ValueError("Shape of bias layer must have length 1 or 3.")
+
+        spec_layer_params.shape.extend(shape_bias)
+        if isinstance(b, int):
+            bias.floatValue.append(float(b))
+        else:
+            bias.floatValue.extend(b.flatten())
+        if len(bias.floatValue) != _np.prod(shape_bias):
+            raise ValueError(
+                "Dimensions of 'shape_bias' do not match the size "
+                "of the provided 'b' parameter"
+            )
+        return spec_layer
+
+    def add_sequence_repeat(self, name, nrep, input_name, output_name):
+        """
+        Add a sequence repeat layer to the model.
+        Refer to the ``SequenceRepeatLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        nrep: int
+            Number of repetitions of the input blob along the sequence axis.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
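+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # A hypothetical sketch: repeat the input blob 5 times along the
+            # sequence axis.
+            >>> builder.add_sequence_repeat('repeat_1', nrep=5,
+            ...                             input_name='data', output_name='data_repeated')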
+
+        See Also
+        --------
+        add_upsample, add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.sequenceRepeat
+        spec_layer_params.nRepetitions = nrep
+        return spec_layer
+
+    def add_convolution(
+        self,
+        name,
+        kernel_channels,
+        output_channels,
+        height,
+        width,
+        stride_height,
+        stride_width,
+        border_mode,
+        groups,
+        W,
+        b,
+        has_bias,
+        is_deconv=False,
+        output_shape=None,
+        input_name="data",
+        output_name="out",
+        dilation_factors=[1, 1],
+        padding_top=0,
+        padding_bottom=0,
+        padding_left=0,
+        padding_right=0,
+        same_padding_asymmetry_mode="BOTTOM_RIGHT_HEAVY",
+        **kwargs
+    ):
+        """
+        Add a convolution layer to the network.
+        Refer to the ``ConvolutionLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+
+        name: str
+            The name of this layer.
+
+        kernel_channels: int
+            Number of channels for the convolution kernels.
+
+        output_channels: int
+            Number of filter kernels. This is equal to the number of channels in the output blob.
+
+        height: int
+            Height of each kernel.
+
+        width: int
+            Width of each kernel.
+
+        stride_height: int
+            Stride along the height direction.
+
+        stride_width: int
+            Stride along the width direction.
+
+        border_mode: str
+            Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
+
+        groups: int
+            Number of kernel groups. Input is divided into groups along the channel axis.
+            Each kernel group shares the same weights.
+
+        W: numpy.array or bytes() or None
+
+            Weight of the convolution kernels.
+
+            * If ``is_deconv`` is False, ``W`` should have
+              shape ``(height, width, kernel_channels, output_channels)``, where:
+              ``kernel_channels = input_channels / groups``
+            * If ``is_deconv`` is True, ``W`` should have
+              shape ``(height, width, kernel_channels, output_channels / groups)``, where:
+              ``kernel_channels = input_channels``
+
+            If ``W`` is of type ``bytes()`` (quantized), other quantization
+            related arguments must be provided as well (see below).
+
+            For Core ML specification version >=4, ``W`` can be ``None``. In this case,
+            the convolution layer takes 2 inputs, where the 1st input represents
+            the input feature map, and the 2nd input represents the weight blob.
+
+        b: numpy.array
+            Biases of the convolution kernels. ``b`` should have shape ``(outputChannels, )``.
+
+        has_bias: boolean
+            Whether bias is ignored.
+
+            - If True, bias is not ignored.
+            - If False, bias is ignored.
+
+        is_deconv: boolean
+            Whether the convolution layer is performing a convolution or a
+            transposed convolution (deconvolution).
+
+            - If True, the convolution layer is performing transposed convolution.
+            - If False, the convolution layer is performing regular convolution.
+
+        output_shape: tuple or None
+            Either ``None`` or a 2-tuple, specifying the output
+            shape ``(output_height, output_width)``.
+
+            - Used only when ``is_deconv == True``.
+            - When ``is_deconv == False``, this parameter is ignored.
+            - If it is ``None``, the output shape is calculated automatically using the ``border_mode``.
+
+        input_name: str or list of str
+            The input blob name(s) of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        dilation_factors: list of int
+            Dilation factors across height and width directions. Must be a list of two positive integers.
+            Defaults to ``[1, 1]``.
+
+ padding_top, padding_bottom, padding_left, padding_right: int
+ Values of height (top, bottom) and width (left, right) padding
+ to be used if ``border_mode`` is ``"valid"``.
+
+ same_padding_asymmetry_mode: str
+ Type of asymmetric padding to be used when ``border_mode`` is ``'same'``.
+ Can be either ``'BOTTOM_RIGHT_HEAVY'`` or ``'TOP_LEFT_HEAVY'``.
+
+ Quantization
+ Quantization arguments expected in ``kwargs``, when ``W`` is of type ``bytes()``.
+
+ quantization_type: str
+ When weights are quantized (that is, ``W`` is of type ``bytes()``),
+ this should be either ``"linear"`` or ``"lut"``.
+
+ nbits: int
+ Should be between 1 and 8 (inclusive). Number of bits per weight
+ value. Only applicable when weights are quantized.
+
+ quant_scale: numpy.array(dtype=numpy.float32)
+ Scale vector to be used with linear quantization. Must be of
+ length either 1 or ``output_channels``.
+
+ quant_bias: numpy.array(dtype=numpy.float32)
+ Bias vector to be used with linear quantization. Must be of
+ length either 1 or ``output_channels``.
+
+ quant_lut: numpy.array(dtype=numpy.float32)
+ The LUT (look up table) to be used with LUT quantization.
+ Must be of length ``2^nbits``.
+
+ Depthwise convolution
+ Depthwise convolution is a special case of convolution, in which:
+
+ * ``kernel_channels = 1 (== input_channels / groups)``
+ * ``output_channels = channel_multiplier * input_channels``
+ * ``groups = input_channels``
+ * ``W``: ``[Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]``
+
+ See Also
+ --------
+ add_convolution3d, add_pooling, add_activation, add_batchnorm
+
+ """
+
+ if isinstance(input_name, tuple):
+ input_names = list(input_name)
+ elif isinstance(input_name, list):
+ input_names = input_name
+ else:
+ input_names = [input_name]
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+ # Set the layer params
+ spec_layer_params = spec_layer.convolution
+ spec_layer_params.isDeconvolution = is_deconv
+
+ if is_deconv and output_shape:
+ spec_layer_params.outputShape.append(output_shape[0])
+ spec_layer_params.outputShape.append(output_shape[1])
+
+ spec_layer_params.outputChannels = output_channels
+ spec_layer_params.kernelChannels = kernel_channels
+ spec_layer_params.kernelSize.append(height)
+ spec_layer_params.kernelSize.append(width)
+ spec_layer_params.stride.append(stride_height)
+ spec_layer_params.stride.append(stride_width)
+
+ border_mode = (
+ border_mode.lower()
+ if isinstance(border_mode, str)
+ else border_mode
+ )
+ same_padding_asymmetry_mode = (
+ same_padding_asymmetry_mode.upper()
+ if isinstance(same_padding_asymmetry_mode, str)
+ else same_padding_asymmetry_mode
+ )
+
+ if border_mode == "valid":
+ height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+ height_border.startEdgeSize = padding_top
+ height_border.endEdgeSize = padding_bottom
+ width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+ width_border.startEdgeSize = padding_left
+ width_border.endEdgeSize = padding_right
+ elif border_mode == "same":
+ if not (
+ same_padding_asymmetry_mode == "BOTTOM_RIGHT_HEAVY"
+ or same_padding_asymmetry_mode == "TOP_LEFT_HEAVY"
+ ):
+ raise ValueError(
+ "Invalid value %s of same_padding_asymmetry_mode parameter"
+ % same_padding_asymmetry_mode
+ )
+ spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(
+ same_padding_asymmetry_mode
+ )
+ else:
+ raise NotImplementedError(
+ "Border mode %s is not implemented." % border_mode
+ )
+
+ spec_layer_params.nGroups = groups
+ spec_layer_params.hasBias = has_bias
+
+ # add dilation factors
+ spec_layer_params.dilationFactor.append(dilation_factors[0])
+ spec_layer_params.dilationFactor.append(dilation_factors[1])
+
+ # If the weight comes from a second input tensor, there is nothing further to set
+ if len(input_names) > 1:
+ return
+
+ # Weight assignments
+ quantization = kwargs.get("quantization_type") is not None
+ if quantization:
+ _verify_quantization_arguments(
+ weight=W, output_channels=output_channels, **kwargs
+ )
+
+ nbits = kwargs.get("nbits", 8)
+ num_weights = (output_channels * kernel_channels * height * width) // groups
+ if nbits < 8:
+ byte_arr = _np.frombuffer(W, dtype=_np.uint8)
+ W = _unpack_to_bytes(byte_arr, num_weights, nbits)
+ else:
+ W = _np.frombuffer(W, dtype=_np.uint8)
+
+ if is_deconv:
+ W = _np.reshape(
+ W, (height, width, kernel_channels, output_channels // groups)
+ )
+ else:
+ W = _np.reshape(W, (height, width, kernel_channels, output_channels))
+
+ # Weight alignment: MLModel Spec requires following weight arrangement:
+ # is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channels = input_channels / groups
+ # is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channels = input_channels
+ if not is_deconv:
+ Wt = W.transpose((3, 2, 0, 1))
+ Wt = Wt.flatten()
+ else:
+ Wt = W.transpose((2, 3, 0, 1)).flatten()
+
+ # Assign weights
+ weights = spec_layer_params.weights
+ if not quantization: # no quantization
+ weights.floatValue.extend(Wt.flatten())
+ else: # there is quantization
+ W_bytes = bytes()
+ if nbits == 8:
+ W_bytes += Wt.flatten().tobytes()
+ else:
+ W_bytes += _convert_array_to_nbit_quantized_bytes(
+ Wt.flatten(), nbits
+ ).tobytes()
+ _fill_quantized_weights(weights_message=weights, W=W_bytes, **kwargs)
+
+ # Assign biases
+ if has_bias:
+ bias = spec_layer_params.bias
+ for f in range(output_channels):
+ bias.floatValue.append(float(b[f]))
+
+ return spec_layer
+
+ def add_convolution3d(
+ self,
+ name,
+ input_channels,
+ output_channels,
+ depth,
+ height,
+ width,
+ W,
+ b,
+ has_bias,
+ groups=1,
+ stride_depth=1,
+ stride_height=1,
+ stride_width=1,
+ dilation_width=1,
+ dilation_height=1,
+ dilation_depth=1,
+ is_deconv=False,
+ output_shape=None,
+ padding_mode="valid",
+ padding_front=0,
+ padding_back=0,
+ padding_top=0,
+ padding_bottom=0,
+ padding_left=0,
+ padding_right=0,
+ input_name="data",
+ output_name="out",
+ ):
+ """
+ Add a 3-dimensional convolution layer to the network.
+ Refer to the ``Convolution3DLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+
+ name: str
+ The name of this layer.
+
+ input_channels: int
+ Number of input channels.
+
+ output_channels: int
+ Number of filter kernels. This is equal to the number of channels in the output blob.
+
+ depth: int
+ Depth of each kernel.
+
+ height: int
+ Height of each kernel.
+
+ width: int
+ Width of each kernel.
+
+ W: numpy.array or bytes()
+ Weight of the convolution kernels. ``W`` should have shape:
+
+ - If ``is_deconv`` is False:
+
+ ``(output_channels, kernel_channels, depth, height, width)``, where:
+
+ ``kernel_channels = input_channels / groups``
+
+ - If ``is_deconv`` is True:
+
+ ``(output_channels / groups, kernel_channels, depth, height, width)``, where:
+
+ ``kernel_channels = input_channels``
+
+ b: numpy.array
+ Biases of the convolution kernels. ``b`` should have shape ``(outputChannels, )``.
+
+ has_bias: boolean
+ Whether the layer uses a bias.
+ - If True, bias is used.
+ - If False, bias is ignored.
+
+ groups: int
+ Number of kernel groups. Input is divided into groups along the channel axis. Each
+ kernel group shares the same weights. Defaults to 1.
+
+ stride_depth, stride_height, stride_width: int
+ Stride along the depth, height, and width directions, respectively. Must all be positive
+ integers. Defaults to 1.
+
+ dilation_depth, dilation_width, dilation_height: int
+ Dilation factors across depth, height, and width directions. Must all be positive
+ integers. Defaults to 1 in each dimension.
+
+ is_deconv: bool
+ True if this is a transposed convolution (deconvolution), otherwise False.
+
+ output_shape: None or Tuple of int
+ Applicable only for Deconvolution layer.
+ ``None`` if Convolution.
+ Tuple of length 3 if Convolution Transpose.
+
+ padding_mode: str
+ Option for the padding type and output blob shape.
+ Can be ``'custom'``, ``'valid'``, or ``'same'``.
+ Defaults to ``'valid'``. Case-insensitive.
+
+ padding_front, padding_back, padding_top, padding_bottom, padding_left, padding_right: int
+ Values of depth (front, back), height (top, bottom), and width (left, right) padding to
+ be used. Must all be non-negative integers. All default to 0.
+
+ input_name: str or list of str
+ The input blob name(s) of this layer.
+
+ output_name: str
+ The output blob name of this layer.
+
+ Depthwise convolution
+ Depthwise convolution is a special case of convolution, in which:
+
+ * ``kernel_channels = 1`` (``== input_channels / groups``)
+ * ``output_channels = channel_multiplier * input_channels``
+ * ``groups = input_channels``
+ * ``W``: ``[Kernel_depth, Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]``
+
+ See Also
+ --------
+ add_convolution, add_pooling, add_activation, add_batchnorm
+
+ """
+ # Update spec version if necessary
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ if isinstance(input_name, tuple):
+ input_names = list(input_name)
+ elif isinstance(input_name, list):
+ input_names = input_name
+ else:
+ input_names = [input_name]
+
+ # 3D convolution doesn't currently support two inputs
+ if len(input_names) > 1:
+ raise ValueError("3D convolution only supports 1 input.")
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+ # Set the layer params
+ spec_layer_params = spec_layer.convolution3d
+ spec_layer_params.isDeconvolution = is_deconv
+ spec_layer_params.nGroups = groups
+
+ spec_layer_params.outputChannels = output_channels
+ spec_layer_params.inputChannels = input_channels
+
+ spec_layer_params.kernelDepth = depth
+ spec_layer_params.kernelHeight = height
+ spec_layer_params.kernelWidth = width
+
+ spec_layer_params.strideDepth = stride_depth
+ spec_layer_params.strideHeight = stride_height
+ spec_layer_params.strideWidth = stride_width
+
+ if is_deconv and output_shape:
+ spec_layer_params.outputShape.append(output_shape[0])
+ spec_layer_params.outputShape.append(output_shape[1])
+ spec_layer_params.outputShape.append(output_shape[2])
+
+ supported_padding_modes = {"CUSTOM", "VALID", "SAME"}
+ if padding_mode.upper() not in supported_padding_modes:
+ raise ValueError(
+ "Unsupported padding mode: %s. Must be one of %s"
+ % (padding_mode, supported_padding_modes)
+ )
+ if padding_mode.upper() == "CUSTOM":
+ spec_layer_params.customPaddingFront = padding_front
+ spec_layer_params.customPaddingBack = padding_back
+ spec_layer_params.customPaddingTop = padding_top
+ spec_layer_params.customPaddingBottom = padding_bottom
+ spec_layer_params.customPaddingLeft = padding_left
+ spec_layer_params.customPaddingRight = padding_right
+ spec_layer_params.paddingType = _NeuralNetwork_pb2.Convolution3DLayerParams.PaddingType.Value(
+ padding_mode.upper()
+ )
+
+ spec_layer_params.dilationDepth = dilation_depth
+ spec_layer_params.dilationHeight = dilation_height
+ spec_layer_params.dilationWidth = dilation_width
+
+ # Weight alignment: MLModel Spec requires following weight arrangement:
+ # is_deconv == False ==> (output_channels, kernel_channels, depth, height, width), where kernel_channels = input_channels / groups
+ # is_deconv == True ==> (kernel_channels, output_channels / groups, depth, height, width), where kernel_channels = input_channels
+ if is_deconv:
+ W = W.transpose((1, 0, 2, 3, 4))
+
+ # Assign weights
+ weights = spec_layer_params.weights
+ weights.floatValue.extend(W.flatten())
+
+ # Assign biases
+ spec_layer_params.hasBias = has_bias
+ if has_bias:
+ bias = spec_layer_params.bias
+ for f in range(output_channels):
+ bias.floatValue.append(float(b[f]))
+
+ return spec_layer
+
+ def add_pooling(
+ self,
+ name,
+ height,
+ width,
+ stride_height,
+ stride_width,
+ layer_type,
+ padding_type,
+ input_name,
+ output_name,
+ exclude_pad_area=True,
+ is_global=False,
+ padding_top=0,
+ padding_bottom=0,
+ padding_left=0,
+ padding_right=0,
+ same_padding_asymmetry_mode="BOTTOM_RIGHT_HEAVY",
+ ):
+ """
+ Add a pooling layer to the model that performs spatial pooling.
+ Refer to the ``PoolingLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+
+ name: str
+ The name of this layer.
+
+ height: int
+ Height of pooling region.
+
+ width: int
+ Width of pooling region.
+
+ stride_height: int
+ Stride along the height direction.
+
+ stride_width: int
+ Stride along the width direction.
+
+ layer_type: str
+ Type of pooling performed. Can either be ``'MAX'``, ``'AVERAGE'``, or ``'L2'``.
+
+ padding_type: str
+ Option for the type of padding and output blob shape. Can be either
+ ``'VALID'``, ``'SAME'``, or ``'INCLUDE_LAST_PIXEL'``.
+
+ input_name: str
+ The input blob name of this layer.
+
+ output_name: str
+ The output blob name of this layer.
+
+ exclude_pad_area: boolean
+ Whether to exclude padded area in the ``'AVERAGE'`` pooling operation.
+ Defaults to True. This flag is only used with average pooling.
+
+ - If True, the value of the padded area will be excluded.
+ - If False, the padded area will be included.
+
+ is_global: boolean
+ Whether the pooling operation is global. Defaults to False.
+
+ - If True, the pooling operation is global. The pooling region
+ is of the same size as the input blob.
+ Parameters ``height``, ``width``, ``stride_height``, and ``stride_width`` will be ignored.
+ - If False, the pooling operation is not global.
+
+ padding_top, padding_bottom, padding_left, padding_right: int
+ Values of height (top, bottom) and width (left, right) padding
+ to be used if padding type is ``"VALID"`` or ``"INCLUDE_LAST_PIXEL"``.
+
+ same_padding_asymmetry_mode: str
+ Type of asymmetric padding to be used when ``padding_type = 'SAME'``.
+ Can be either ``'BOTTOM_RIGHT_HEAVY'`` or ``'TOP_LEFT_HEAVY'``.
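+
+ Examples
+ --------
+ A minimal usage sketch; ``builder`` is assumed to be an existing
+ ``NeuralNetworkBuilder`` instance, and the blob names are illustrative:
+
+ .. code-block:: python
+
+     # 2x2 max pooling with stride 2 and no padding.
+     builder.add_pooling(
+         name="pool",
+         height=2, width=2,
+         stride_height=2, stride_width=2,
+         layer_type="MAX",
+         padding_type="VALID",
+         input_name="data",
+         output_name="out",
+     )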
+
+ See Also
+ --------
+ add_pooling3d, add_convolution, add_activation
+ """
+
+ # Create spec layer
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.pooling
+
+ # Set the parameters
+ spec_layer_params.type = _NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Value(
+ layer_type.upper()
+ )
+
+ padding_type = (
+ padding_type.upper()
+ if isinstance(padding_type, str)
+ else padding_type
+ )
+ same_padding_asymmetry_mode = (
+ same_padding_asymmetry_mode.upper()
+ if isinstance(same_padding_asymmetry_mode, str)
+ else same_padding_asymmetry_mode
+ )
+
+ if padding_type == "VALID":
+ height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+ height_border.startEdgeSize = padding_top
+ height_border.endEdgeSize = padding_bottom
+ width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+ width_border.startEdgeSize = padding_left
+ width_border.endEdgeSize = padding_right
+ elif padding_type == "SAME":
+ if not (
+ same_padding_asymmetry_mode == "BOTTOM_RIGHT_HEAVY"
+ or same_padding_asymmetry_mode == "TOP_LEFT_HEAVY"
+ ):
+ raise ValueError(
+ "Invalid value %s of same_padding_asymmetry_mode parameter"
+ % same_padding_asymmetry_mode
+ )
+ spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(
+ same_padding_asymmetry_mode
+ )
+ elif padding_type == "INCLUDE_LAST_PIXEL":
+ if padding_top != padding_bottom or padding_left != padding_right:
+ raise ValueError(
+ "Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type"
+ )
+ spec_layer_params.includeLastPixel.paddingAmounts.append(padding_top)
+ spec_layer_params.includeLastPixel.paddingAmounts.append(padding_left)
+ else:
+ raise ValueError("Unknown padding_type %s in pooling" % padding_type)
+
+ spec_layer_params.kernelSize.append(height)
+ spec_layer_params.kernelSize.append(width)
+ spec_layer_params.stride.append(stride_height)
+ spec_layer_params.stride.append(stride_width)
+ spec_layer_params.avgPoolExcludePadding = exclude_pad_area
+ spec_layer_params.globalPooling = is_global
+ return spec_layer
+
+ def add_pooling3d(
+ self,
+ name,
+ input_name,
+ output_name,
+ pooling_type,
+ kernel_depth,
+ kernel_height,
+ kernel_width,
+ stride_depth,
+ stride_height,
+ stride_width,
+ padding_mode="valid",
+ custom_padding_front=0,
+ custom_padding_back=0,
+ custom_padding_top=0,
+ custom_padding_bottom=0,
+ custom_padding_left=0,
+ custom_padding_right=0,
+ average_pooling_count_excludes_padding=False,
+ ):
+ """
+ Add a pooling layer to the model that performs spatial pooling across three dimensions.
+ Refer to the ``Pooling3DLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ pooling_type: str
+ Type of pooling performed. Can either be ``'MAX'`` or ``'AVERAGE'``.
+ kernel_depth: int
+ Depth of the pooling region.
+ kernel_height: int
+ Height of pooling region.
+ kernel_width: int
+ Width of pooling region.
+ stride_depth: int
+ Stride along the depth direction.
+ stride_height: int
+ Stride along the height direction.
+ stride_width: int
+ Stride along the width direction.
+ padding_mode: str
+ Option for the padding type and output blob shape.
+ Can be ``'VALID'``, ``'SAME'``, or ``'CUSTOM'``.
+ custom_padding_front: int
+ Padding before the input in the depth direction.
+ custom_padding_back: int
+ Padding after the input in the depth direction.
+ custom_padding_top: int
+ Padding before the input in the height direction.
+ custom_padding_bottom: int
+ Padding after the input in the height direction.
+ custom_padding_left: int
+ Padding before the input in the width direction.
+ custom_padding_right: int
+ Padding after the input in the width direction.
+ average_pooling_count_excludes_padding: boolean
+ If True, exclude zeros from padding in average pooling.
+ Can only be True for ``AVERAGE`` pooling.
+
+ See Also
+ --------
+ add_pooling, add_global_pooling3d
+ """
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.pooling3d
+
+ spec_layer_params.type = _NeuralNetwork_pb2.Pooling3DLayerParams.PoolingType3D.Value(
+ pooling_type.upper()
+ )
+
+ spec_layer_params.kernelDepth = kernel_depth
+ spec_layer_params.kernelHeight = kernel_height
+ spec_layer_params.kernelWidth = kernel_width
+
+ spec_layer_params.strideDepth = stride_depth
+ spec_layer_params.strideHeight = stride_height
+ spec_layer_params.strideWidth = stride_width
+
+ supported_padding_modes = {"CUSTOM", "VALID", "SAME"}
+ if padding_mode.upper() not in supported_padding_modes:
+ raise ValueError(
+ "Unsupported padding mode: %s. Must be one of %s"
+ % (padding_mode, supported_padding_modes)
+ )
+ if padding_mode.upper() == "CUSTOM":
+ spec_layer_params.customPaddingFront = custom_padding_front
+ spec_layer_params.customPaddingBack = custom_padding_back
+ spec_layer_params.customPaddingTop = custom_padding_top
+ spec_layer_params.customPaddingBottom = custom_padding_bottom
+ spec_layer_params.customPaddingLeft = custom_padding_left
+ spec_layer_params.customPaddingRight = custom_padding_right
+ spec_layer_params.paddingType = _NeuralNetwork_pb2.Pooling3DLayerParams.Pooling3DPaddingType.Value(
+ padding_mode.upper()
+ )
+
+ spec_layer_params.countExcludePadding = average_pooling_count_excludes_padding
+
+ return spec_layer
+
+ def add_global_pooling3d(self, name, input_name, output_name, pooling_type):
+ """
+ Add a layer to pool three spatial dimensions down to one value.
+ This behaves like a special case of ``Pooling3DLayerParams`` in which
+ the kernel is the size of the input and there is no padding.
+
+ Refer to the ``GlobalPooling3DLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ pooling_type: str
+ Type of pooling performed. Can either be ``'MAX'`` or ``'AVERAGE'``.
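+
+ Examples
+ --------
+ A minimal usage sketch; ``builder`` is assumed to be an existing
+ ``NeuralNetworkBuilder`` instance, and the blob names are illustrative:
+
+ .. code-block:: python
+
+     # Collapse each channel's D x H x W volume down to a single value.
+     builder.add_global_pooling3d(
+         name="global_avg_pool",
+         input_name="data",
+         output_name="out",
+         pooling_type="AVERAGE",
+     )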
+
+ See Also
+ --------
+ add_pooling, add_pooling3d
+ """
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.globalPooling3d
+
+ spec_layer_params.type = _NeuralNetwork_pb2.GlobalPooling3DLayerParams.GlobalPoolingType3D.Value(
+ pooling_type.upper()
+ )
+
+ return spec_layer
+
+ def add_padding(
+ self,
+ name,
+ left=0,
+ right=0,
+ top=0,
+ bottom=0,
+ value=0,
+ input_name="data",
+ output_name="out",
+ padding_type="constant",
+ ):
+ """
+ Add a padding layer to the model that performs padding along spatial dimensions.
+
+ Refer to the ``PaddingLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ left: int
+ Number of elements to be padded on the left side of the input blob.
+ right: int
+ Number of elements to be padded on the right side of the input blob.
+ top: int
+ Number of elements to be padded on the top of the input blob.
+ bottom: int
+ Number of elements to be padded on the bottom of the input blob.
+ value: float
+ Value of the padded elements. Used only when ``padding_type = 'constant'``.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ padding_type: str
+ Type of the padding. Can be one of ``'constant'``, ``'reflection'``, or ``'replication'``.
+
+ See Also
+ --------
+ add_crop, add_convolution, add_pooling, add_constant_pad
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.padding
+
+ # Set the parameters
+ padding_type = (
+ padding_type.lower()
+ if isinstance(padding_type, str)
+ else padding_type
+ )
+ if padding_type == "constant":
+ spec_layer_params.constant.value = value
+ elif padding_type == "reflection":
+ spec_layer_params.reflection.MergeFromString(b"")
+ elif padding_type == "replication":
+ spec_layer_params.replication.MergeFromString(b"")
+ else:
+ raise ValueError("Unknown padding_type %s" % padding_type)
+
+ height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
+ height_border.startEdgeSize = top
+ height_border.endEdgeSize = bottom
+ width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
+ width_border.startEdgeSize = left
+ width_border.endEdgeSize = right
+ return spec_layer
+
+ def add_crop(
+ self, name, left, right, top, bottom, offset, input_names, output_name
+ ):
+ """
+ Add a cropping layer to the model.
+ The cropping layer has two functional modes:
+
+ - When it has 1 input blob, it crops the input blob based
+ on the 4 parameters ``[left, right, top, bottom]``.
+ - When it has 2 input blobs, it crops the first input blob based
+ on the dimension of the second blob with an offset.
+
+ Refer to the ``CropLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ left: int
+ Number of elements to be cropped on the left side of the input blob.
+ When the crop layer takes 2 inputs, this parameter is ignored.
+ right: int
+ Number of elements to be cropped on the right side of the input blob.
+ When the crop layer takes 2 inputs, this parameter is ignored.
+ top: int
+ Number of elements to be cropped on the top of the input blob.
+ When the crop layer takes 2 inputs, this parameter is ignored.
+ bottom: int
+ Number of elements to be cropped on the bottom of the input blob.
+ When the crop layer takes 2 inputs, this parameter is ignored.
+ offset: list of int
+ Offset along the height and width directions when the crop layer takes 2 inputs. Must be a list of length 2.
+ When the crop layer takes 1 input, this parameter is ignored.
+ input_names: list of str
+ The input blob names of this layer. Must be either a list of 1 string (1 input crop layer),
+ or a list of 2 strings (2-input crop layer).
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_padding, add_convolution, add_pooling
+ """
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.crop
+
+ # Set the parameters
+ offset = [0, 0] if len(input_names) == 1 else offset
+ spec_layer_params.offset.extend(offset)
+ height_border = spec_layer_params.cropAmounts.borderAmounts.add()
+ height_border.startEdgeSize = top
+ height_border.endEdgeSize = bottom
+ width_border = spec_layer_params.cropAmounts.borderAmounts.add()
+ width_border.startEdgeSize = left
+ width_border.endEdgeSize = right
+ return spec_layer
+
+ def add_simple_rnn(
+ self,
+ name,
+ W_h,
+ W_x,
+ b,
+ hidden_size,
+ input_size,
+ activation,
+ input_names,
+ output_names,
+ output_all=False,
+ reverse_input=False,
+ ):
+ """
+ Add a simple recurrent layer to the model.
+ Refer to the ``SimpleRecurrentLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ W_h: numpy.array
+ Weights of the recurrent layer's hidden state.
+ Must be of shape ``(hidden_size, hidden_size)``.
+ W_x: numpy.array
+ Weights of the recurrent layer's input.
+ Must be of shape ``(hidden_size, input_size)``.
+ b: numpy.array or None
+ Bias of the recurrent layer's output. If ``None``, bias is ignored.
+ Otherwise it must be of shape ``(hidden_size, )``.
+ hidden_size: int
+ Number of hidden units. This is equal to the number of channels of the output shape.
+ input_size: int
+ The number of channels of the input shape.
+ activation: str
+ Activation function name. Can be one of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ See add_activation for a more detailed description.
+ input_names: list of str
+ The input blob names list of this layer, in the order of ``[x, h_input]``.
+ output_names: list of str
+ The output blob names list of this layer, in the order of ``[y, h_output]``.
+ output_all: boolean
+ Whether the recurrent layer should output at every time step.
+
+ - If False, the output is the result after the final state update.
+ - If True, the output is a sequence, containing outputs at all time steps.
+
+ reverse_input: boolean
+ Whether the recurrent layer should process the input sequence in the reverse order.
+
+ - If False, the input sequence order is not reversed.
+ - If True, the input sequence order is reversed.
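+
+ Examples
+ --------
+ A minimal usage sketch with random weights; ``builder`` is assumed to be
+ an existing ``NeuralNetworkBuilder`` instance, and the blob names are
+ illustrative:
+
+ .. code-block:: python
+
+     import numpy as np
+
+     hidden_size, input_size = 4, 3
+     builder.add_simple_rnn(
+         name="rnn",
+         W_h=np.random.rand(hidden_size, hidden_size),  # hidden-state weights
+         W_x=np.random.rand(hidden_size, input_size),   # input weights
+         b=None,                                        # no bias
+         hidden_size=hidden_size,
+         input_size=input_size,
+         activation="TANH",
+         input_names=["x", "h_in"],
+         output_names=["y", "h_out"],
+     )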
+
+ See Also
+ --------
+ add_activation, add_gru, add_unilstm, add_bidirlstm
+ """
+ spec_layer = self._add_generic_layer(name, input_names, output_names)
+ spec_layer_params = spec_layer.simpleRecurrent
+ spec_layer_params.reverseInput = reverse_input
+
+ # set the parameters
+ spec_layer_params.inputVectorSize = input_size
+ spec_layer_params.outputVectorSize = hidden_size
+ if b is not None:
+ spec_layer_params.hasBiasVector = True
+ spec_layer_params.sequenceOutput = output_all
+
+ activation_f = spec_layer_params.activation
+ _set_recurrent_activation(activation_f, activation)
+
+ # Write the weights
+ spec_layer_params.weightMatrix.floatValue.extend(W_x.flatten())
+ spec_layer_params.recursionMatrix.floatValue.extend(W_h.flatten())
+
+ if b is not None:
+ spec_layer_params.biasVector.floatValue.extend(b.flatten())
+ return spec_layer
+
+ def add_gru(
+ self,
+ name,
+ W_h,
+ W_x,
+ b,
+ hidden_size,
+ input_size,
+ input_names,
+ output_names,
+ activation="TANH",
+ inner_activation="SIGMOID_HARD",
+ output_all=False,
+ reverse_input=False,
+ ):
+ """
+ Add a Gated-Recurrent Unit (GRU) layer to the model.
+ Refer to the ``GRULayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ W_h: [numpy.array]
+ List of recursion weight matrices. The ordering is ``[R_z, R_r, R_o]``,
+ where ``R_z``, ``R_r`` and ``R_o`` are weight matrices at update gate,
+ reset gate and output gate.
+ The shapes of these matrices are ``(hidden_size, hidden_size)``.
+ W_x: [numpy.array]
+ List of input weight matrices. The ordering is ``[W_z, W_r, W_o]``,
+ where ``W_z``, ``W_r``, and ``W_o`` are weight matrices at update gate,
+ reset gate and output gate.
+ The shapes of these matrices are ``(hidden_size, input_size)``.
+ b: [numpy.array] or None
+ List of biases of the GRU layer. The ordering is ``[b_z, b_r, b_o]``,
+ where ``b_z``, ``b_r``, and ``b_o`` are biases at update gate,
+ reset gate and output gate.
+ If ``None``, biases are ignored. Otherwise the shapes of the biases are ``(hidden_size, )``.
+ hidden_size: int
+ Number of hidden units. This is equal to the number of channels of the output shape.
+ input_size: int
+ The number of channels of the input shape.
+ activation: str
+ Activation function used at the output gate. Can be one of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ Defaults to ``'TANH'``.
+ See add_activation for a more detailed description.
+ inner_activation: str
+ Inner activation function used at update and reset gates.
+ Can be one of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ Defaults to ``'SIGMOID_HARD'``.
+ See add_activation for a more detailed description.
+ input_names: list of str
+ The input blob names list of this layer, in the order of ``[x, h_input]``.
+ output_names: list of str
+ The output blob names list of this layer, in the order of ``[y, h_output]``.
+ output_all: boolean
+ Whether the recurrent layer should output at every time step.
+
+ - If False, the output is the result after the final state update.
+ - If True, the output is a sequence, containing outputs at all time steps.
+
+ reverse_input: boolean
+ Whether the recurrent layer should process the input sequence in the reverse order.
+
+ - If False, the input sequence order is not reversed.
+ - If True, the input sequence order is reversed.
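+
+ Examples
+ --------
+ A minimal usage sketch; the three matrices in each list follow the gate
+ ordering documented above, ``builder`` is assumed to be an existing
+ ``NeuralNetworkBuilder`` instance, and the blob names are illustrative:
+
+ .. code-block:: python
+
+     import numpy as np
+
+     h, d = 4, 3
+     builder.add_gru(
+         name="gru",
+         W_h=[np.random.rand(h, h) for _ in range(3)],  # [R_z, R_r, R_o]
+         W_x=[np.random.rand(h, d) for _ in range(3)],  # [W_z, W_r, W_o]
+         b=None,
+         hidden_size=h,
+         input_size=d,
+         input_names=["x", "h_in"],
+         output_names=["y", "h_out"],
+     )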
+
+ See Also
+ --------
+ add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
+ """
+ spec_layer = self._add_generic_layer(name, input_names, output_names)
+ spec_layer_params = spec_layer.gru
+
+ # set the parameters
+ spec_layer_params.inputVectorSize = input_size
+ spec_layer_params.outputVectorSize = hidden_size
+ if b is not None:
+ spec_layer_params.hasBiasVectors = True
+ spec_layer_params.sequenceOutput = output_all
+ spec_layer_params.reverseInput = reverse_input
+
+ activation_f = spec_layer_params.activations.add()
+ activation_g = spec_layer_params.activations.add()
+ _set_recurrent_activation(activation_f, inner_activation)
+ _set_recurrent_activation(activation_g, activation)
+
+ # Write the weights
+ R_z, R_r, R_o = W_h
+ W_z, W_r, W_o = W_x
+
+ spec_layer_params.updateGateWeightMatrix.floatValue.extend(W_z.flatten())
+ spec_layer_params.resetGateWeightMatrix.floatValue.extend(W_r.flatten())
+ spec_layer_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+
+ spec_layer_params.updateGateRecursionMatrix.floatValue.extend(R_z.flatten())
+ spec_layer_params.resetGateRecursionMatrix.floatValue.extend(R_r.flatten())
+ spec_layer_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+
+ if b is not None:
+ b_z, b_r, b_o = b
+ spec_layer_params.updateGateBiasVector.floatValue.extend(b_z.flatten())
+ spec_layer_params.resetGateBiasVector.floatValue.extend(b_r.flatten())
+ spec_layer_params.outputGateBiasVector.floatValue.extend(b_o.flatten())
+ return spec_layer
+
+ def add_unilstm(
+ self,
+ name,
+ W_h,
+ W_x,
+ b,
+ hidden_size,
+ input_size,
+ input_names,
+ output_names,
+ inner_activation="SIGMOID",
+ cell_state_update_activation="TANH",
+ output_activation="TANH",
+ peep=None,
+ output_all=False,
+ forget_bias=False,
+ coupled_input_forget_gate=False,
+ cell_clip_threshold=50000.0,
+ reverse_input=False,
+ ):
+ """
+ Add a Uni-directional LSTM layer to the model.
+ Refer to the ``UniDirectionalLSTMLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ W_h: [numpy.array]
+ List of recursion weight matrices. The ordering is [R_i, R_f, R_o, R_z],
+ where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are (hidden_size, hidden_size).
+ W_x: [numpy.array]
+ List of input weight matrices. The ordering is [W_i, W_f, W_o, W_z],
+ where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are (hidden_size, input_size).
+ b: [numpy.array] or None
+ List of biases. The ordering is [b_i, b_f, b_o, b_z],
+ where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
+ If ``None``, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
+ hidden_size: int
+ Number of hidden units. This is equal to the number of channels of the output shape.
+ input_size: int
+ The number of channels of the input shape.
+ input_names: list of str
+ The input blob names list of this layer, in the order of [x, h_input, c_input].
+ output_names: list of str
+ The output blob names list of this layer, in the order of [y, h_output, c_output].
+ inner_activation: str
+ Inner activation function used at input and forget gate. Can be one of the following options:
+ ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
+ cell_state_update_activation: str
+ Cell state update activation function used at the cell state update gate.
+ Can be one of the following options:
+ ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
+ output_activation: str
+ Activation function used at the output gate. Can be one of the following options:
+ ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
+ peep: [numpy.array] or None
+ List of peephole vectors. The ordering is [p_i, p_f, p_o],
+ where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
+ The shapes of the peephole vectors are (hidden_size,).
+ output_all: boolean
+ Whether the LSTM layer should output at every time step.
+
+ - If False, the output is the result after the final state update.
+ - If True, the output is a sequence, containing outputs at all time steps.
+
+ forget_bias: boolean
+ If True, a vector of 1s is added to forget gate bias.
+ coupled_input_forget_gate: boolean
+ If True, the input gate and forget gate are coupled. That is, the forget gate is not used.
+ cell_clip_threshold: float
+ The limit on the maximum and minimum values on the cell state.
+ Defaults to 50000.0.
+ reverse_input: boolean
+ Whether the LSTM layer should process the input sequence in the reverse order.
+
+ - If False, the input sequence order is not reversed.
+ - If True, the input sequence order is reversed.
+
+ See Also
+ --------
+ add_activation, add_simple_rnn, add_gru, add_bidirlstm
+ """
+ spec_layer = self._add_generic_layer(name, input_names, output_names)
+ spec_layer_params = spec_layer.uniDirectionalLSTM
+ params = spec_layer_params.params
+ weight_params = spec_layer_params.weightParams
+
+ # set the parameters
+ spec_layer_params.inputVectorSize = input_size
+ spec_layer_params.outputVectorSize = hidden_size
+ params.sequenceOutput = output_all
+ params.forgetBias = False
+ if b is not None:
+ params.hasBiasVectors = True
+ if peep is not None:
+ params.hasPeepholeVectors = True
+ params.coupledInputAndForgetGate = coupled_input_forget_gate
+ params.cellClipThreshold = cell_clip_threshold
+ params.forgetBias = forget_bias
+
+ spec_layer_params.reverseInput = reverse_input
+
+ activation_f = spec_layer_params.activations.add()
+ activation_g = spec_layer_params.activations.add()
+ activation_h = spec_layer_params.activations.add()
+ _set_recurrent_activation(activation_f, inner_activation)
+ _set_recurrent_activation(activation_g, cell_state_update_activation)
+ _set_recurrent_activation(activation_h, output_activation)
+
+ # Write the weights
+ R_i, R_f, R_o, R_z = W_h
+ W_i, W_f, W_o, W_z = W_x
+
+ weight_params.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+ weight_params.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+ weight_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+ weight_params.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+ weight_params.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+ weight_params.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+ weight_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+ weight_params.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+ if b is not None:
+ b_i, b_f, b_o, b_z = b
+ weight_params.inputGateBiasVector.floatValue.extend(b_i.flatten())
+ weight_params.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+ weight_params.outputGateBiasVector.floatValue.extend(b_o.flatten())
+ weight_params.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+ if peep is not None:
+ p_i, p_f, p_o = peep
+ weight_params.inputGatePeepholeVector.floatValue.extend(p_i.flatten())
+ weight_params.forgetGatePeepholeVector.floatValue.extend(p_f.flatten())
+ weight_params.outputGatePeepholeVector.floatValue.extend(p_o.flatten())
+
+ return spec_layer
+
+ def add_bidirlstm(
+ self,
+ name,
+ W_h,
+ W_x,
+ b,
+ W_h_back,
+ W_x_back,
+ b_back,
+ hidden_size,
+ input_size,
+ input_names,
+ output_names,
+ inner_activation="SIGMOID",
+ cell_state_update_activation="TANH",
+ output_activation="TANH",
+ peep=None,
+ peep_back=None,
+ output_all=False,
+ forget_bias=False,
+ coupled_input_forget_gate=False,
+ cell_clip_threshold=50000.0,
+ ):
+ """
+ Add a Bi-directional LSTM layer to the model.
+ Refer to the ``BiDirectionalLSTMLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ W_h: [numpy.array]
+ List of recursion weight matrices for the forward layer.
+ The ordering is ``[R_i, R_f, R_o, R_z]``,
+ where ``R_i``, ``R_f``, ``R_o``, and ``R_z`` are weight matrices at
+ input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are ``(hidden_size, hidden_size)``.
+ W_x: [numpy.array]
+ List of input weight matrices for the forward layer. The ordering
+ is ``[W_i, W_f, W_o, W_z]``,
+ where ``W_i``, ``W_f``, ``W_o``, and ``W_z`` are weight matrices at
+ input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are ``(hidden_size, input_size)``.
+ b: [numpy.array] or None
+ List of biases for the forward layer. The ordering is
+ ``[b_i, b_f, b_o, b_z]``,
+ where ``b_i``, ``b_f``, ``b_o``, and ``b_z`` are biases at input
+ gate, forget gate, output gate and cell gate.
+ If ``None``, biases are ignored. Otherwise the shapes of the biases
+ are ``(hidden_size, )``.
+ W_h_back: [numpy.array]
+ List of recursion weight matrices for the backward layer. The
+ ordering is ``[R_i, R_f, R_o, R_z]``,
+ where ``R_i``, ``R_f``, ``R_o``, and ``R_z`` are weight matrices
+ at input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are ``(hidden_size, hidden_size)``.
+ W_x_back: [numpy.array]
+ List of input weight matrices for the backward layer. The ordering
+ is ``[W_i, W_f, W_o, W_z]``,
+ where ``W_i``, ``W_f``, ``W_o``, and ``W_z`` are weight matrices
+ at input gate, forget gate, output gate and cell gate.
+ The shapes of these matrices are ``(hidden_size, input_size)``.
+ b_back: [numpy.array] or None
+ List of biases for the backward layer. The ordering is ``[b_i, b_f, b_o, b_z]``,
+ where ``b_i``, ``b_f``, ``b_o``, and ``b_z`` are biases at input
+ gate, forget gate, output gate and cell gate.
+ The shapes of the biases are ``(hidden_size, )``.
+ hidden_size: int
+ Number of hidden units. This is equal to the number of channels of the output shape.
+ input_size: int
+ The number of channels of the input shape.
+ input_names: list of str
+ The input blob names of this layer, in the order of
+ ``[x, h_input, c_input, h_reverse_input, c_reverse_input]``.
+ output_names: list of str
+ The output blob names of this layer, in the order of
+ ``[y, h_output, c_output, h_reverse_output, c_reverse_output]``.
+ inner_activation: str
+ Inner activation function used at input and forget gate. Can be one
+ of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ Defaults to ``'SIGMOID'``.
+ cell_state_update_activation: str
+ Cell state update activation function used at the cell state update gate.
+ Can be one of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ Defaults to ``'TANH'``.
+ output_activation: str
+ Activation function used at the output gate. Can be one of the following options:
+ [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+ Defaults to ``'TANH'``.
+ peep: [numpy.array] or None
+ List of peephole vectors for the forward layer. The ordering
+ is ``[p_i, p_f, p_o]``,
+ where ``p_i``, ``p_f``, and ``p_o`` are peephole vectors at input
+ gate, forget gate, and output gate.
+ The shapes of the peephole vectors are ``(hidden_size,)``. Defaults to ``None``.
+ peep_back: [numpy.array] or None
+ List of peephole vectors for the backward layer. The ordering
+ is ``[p_i, p_f, p_o]``,
+ where ``p_i``, ``p_f``, and ``p_o`` are peephole vectors at input
+ gate, forget gate, and output gate.
+ The shapes of the peephole vectors are ``(hidden_size,)``. Defaults to ``None``.
+ output_all: boolean
+ Whether the LSTM layer should output at every time step. Defaults to ``False``.
+
+ - If ``False``, the output is the result after the final state update.
+ - If ``True``, the output is a sequence, containing outputs at all time steps.
+
+ forget_bias: boolean
+ If ``True``, a vector of 1s is added to forget gate bias. Defaults to ``False``.
+ coupled_input_forget_gate: boolean
+ If ``True``, the input gate and forget gate are coupled. That is, the
+ forget gate is not used.
+ Defaults to ``False``.
+ cell_clip_threshold: float
+ The limit on the maximum and minimum values on the cell state.
+ Defaults to 50000.0.
+
+ See Also
+ --------
+ add_activation, add_simple_rnn, add_gru, add_unilstm
+ """
+ spec_layer = self._add_generic_layer(name, input_names, output_names)
+ spec_layer_params = spec_layer.biDirectionalLSTM
+ params = spec_layer_params.params
+ weight_params = spec_layer_params.weightParams.add()
+ weight_params_back = spec_layer_params.weightParams.add()
+
+ # set the parameters
+ spec_layer_params.inputVectorSize = input_size
+ spec_layer_params.outputVectorSize = hidden_size
+ if b is not None:
+ params.hasBiasVectors = True
+ params.sequenceOutput = output_all
+ params.forgetBias = forget_bias
+ if peep is not None:
+ params.hasPeepholeVectors = True
+ params.coupledInputAndForgetGate = coupled_input_forget_gate
+ params.cellClipThreshold = cell_clip_threshold
+
+ # set activations
+ activation_f = spec_layer_params.activationsForwardLSTM.add()
+ activation_g = spec_layer_params.activationsForwardLSTM.add()
+ activation_h = spec_layer_params.activationsForwardLSTM.add()
+ _set_recurrent_activation(activation_f, inner_activation)
+ _set_recurrent_activation(activation_g, cell_state_update_activation)
+ _set_recurrent_activation(activation_h, output_activation)
+
+ activation_f_back = spec_layer_params.activationsBackwardLSTM.add()
+ activation_g_back = spec_layer_params.activationsBackwardLSTM.add()
+ activation_h_back = spec_layer_params.activationsBackwardLSTM.add()
+ _set_recurrent_activation(activation_f_back, inner_activation)
+ _set_recurrent_activation(activation_g_back, cell_state_update_activation)
+ _set_recurrent_activation(activation_h_back, output_activation)
+
+ # Write the forward lstm weights
+ R_i, R_f, R_o, R_z = W_h
+ W_i, W_f, W_o, W_z = W_x
+
+ weight_params.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+ weight_params.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+ weight_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+ weight_params.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+ weight_params.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+ weight_params.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+ weight_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+ weight_params.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+ if b is not None:
+ b_i, b_f, b_o, b_z = b
+ weight_params.inputGateBiasVector.floatValue.extend(b_i.flatten())
+ weight_params.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+ weight_params.outputGateBiasVector.floatValue.extend(b_o.flatten())
+ weight_params.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+ if peep is not None:
+ p_i, p_f, p_o = peep
+ weight_params.inputGatePeepholeVector.floatValue.extend(p_i.flatten())
+ weight_params.forgetGatePeepholeVector.floatValue.extend(p_f.flatten())
+ weight_params.outputGatePeepholeVector.floatValue.extend(p_o.flatten())
+
+ # Write the backward lstm weights
+ R_i, R_f, R_o, R_z = W_h_back
+ W_i, W_f, W_o, W_z = W_x_back
+
+ weight_params_back.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+ weight_params_back.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+ weight_params_back.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+ weight_params_back.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+ weight_params_back.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+ weight_params_back.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+ weight_params_back.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+ weight_params_back.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+ if b_back is not None:
+ b_i, b_f, b_o, b_z = b_back
+ weight_params_back.inputGateBiasVector.floatValue.extend(b_i.flatten())
+ weight_params_back.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+ weight_params_back.outputGateBiasVector.floatValue.extend(b_o.flatten())
+ weight_params_back.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+ if peep_back is not None:
+ p_i, p_f, p_o = peep_back
+ weight_params_back.inputGatePeepholeVector.floatValue.extend(p_i.flatten())
+ weight_params_back.forgetGatePeepholeVector.floatValue.extend(p_f.flatten())
+ weight_params_back.outputGatePeepholeVector.floatValue.extend(p_o.flatten())
+ return spec_layer
+
+ def add_flatten(self, name, mode, input_name, output_name):
+ """
+ Add a flatten layer. Only flattens the channel, height and width axes.
+ Leaves the sequence axis as is.
+ Refer to the ``FlattenLayerParams`` message in the
+ specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ mode: int
+
+ - If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
+ - If mode == 1, the flatten layer is in CHANNEL_LAST mode.
+
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
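+
+ Examples
+ --------
+ A minimal usage sketch; ``builder`` is assumed to be an existing
+ ``NeuralNetworkBuilder`` instance, and the blob names are illustrative:
+
+ .. code-block:: python
+
+     # Flatten C, H, and W into a single channel axis, in CHANNEL_FIRST order.
+     builder.add_flatten(name="flatten", mode=0, input_name="data", output_name="out")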
+
+ See Also
+ --------
+ add_permute, add_reshape
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.flatten
+
+ # Set the parameters
+ if mode == 0:
+ spec_layer_params.mode = _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value(
+ "CHANNEL_FIRST"
+ )
+ elif mode == 1:
+ spec_layer_params.mode = _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value(
+ "CHANNEL_LAST"
+ )
+ else:
+ raise NotImplementedError("Unknown flatten mode %d " % mode)
+
+ return spec_layer
+
+ def add_slice(
+ self, name, input_name, output_name, axis, start_index=0, end_index=-1, stride=1
+ ):
+ """
+ Add a slice layer. Equivalent to the numpy slice ``[start_index:end_index:stride]``:
+ start_index is included, while end_index is exclusive.
+ Refer to the ``SliceLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: str
+ Axis along which the input is sliced.
+ Allowed values: 'channel', 'height', 'width'.
+ start_index: int
+ Must be non-negative.
+ end_index: int
+ Negative indexing is supported.
+ stride: int
+ Must be positive.
+
+ See Also
+ --------
+ add_permute, add_reshape
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.slice
+
+ # Set the parameters
+ if start_index < 0:
+ raise ValueError(
+ "Invalid start_index value %d. Must be non-negative." % start_index
+ )
+ if stride < 1:
+ raise ValueError("Invalid stride value %d. Must be positive." % stride)
+
+ spec_layer_params.startIndex = start_index
+ spec_layer_params.endIndex = end_index
+ spec_layer_params.stride = stride
+
+ axis = axis.lower() if isinstance(axis, str) else axis
+ if axis == "channel":
+ spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+ "CHANNEL_AXIS"
+ )
+ elif axis == "height":
+ spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+ "HEIGHT_AXIS"
+ )
+ elif axis == "width":
+ spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+ "WIDTH_AXIS"
+ )
+ else:
+ raise NotImplementedError("Unsupported Slice axis %s " % axis)
+ return spec_layer
+
+ def add_slice_by_size(self, name, input_names, output_name, axis, size):
+ """
+ Add a slice layer. Equivalent to the numpy slice ``[begin_id : begin_id + size]``
+ along the given axis. The input is a list of two blob names: ``[input_tensor, begin_id]``.
+
+ Assume input_tensor has shape (2, 3, 4), and axis=1, size=2.
+ This would produce input_tensor[:, begin_id:begin_id+2, :].
+
+ Refer to the ``SliceBySizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int
+ Axis along which the input is sliced.
+ size: int
+ The size of the slice to take from the input.
+
+ See Also
+ --------
+ add_slice, add_slice_static, add_slice_dynamic
+ """
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.sliceBySize
+
+ if size < 1:
+ raise ValueError("Invalid size value %d. Must be positive." % size)
+
+ spec_layer_params.axis = axis
+ spec_layer_params.size = size
+
+ return spec_layer
+
+ def add_reorganize_data(
+ self, name, input_name, output_name, mode="SPACE_TO_DEPTH", block_size=2
+ ):
+ """
+ Add a data reorganization layer of type "SPACE_TO_DEPTH", "DEPTH_TO_SPACE", or "PIXEL_SHUFFLE".
+ Refer to the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ mode: str
+
+ - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
+ Input is spatially divided into non-overlapping blocks of size block_size X block_size
+ and data from each block is moved to the channel dimension.
+ Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
+
+ - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
+ Reverse of the operation 'SPACE_TO_DEPTH'.
+ Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
+
+ - If mode == 'PIXEL_SHUFFLE': data is moved from the channel to the spatial dimension.
+ Reverse of the operation 'SPACE_TO_DEPTH'.
+ Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
+
+ block_size: int
+ Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
+ must divide C when mode is 'DEPTH_TO_SPACE' or 'PIXEL_SHUFFLE'.
+
+ See Also
+ --------
+ add_flatten, add_reshape
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reorganizeData
+
+ # Set the parameters
+ if block_size < 2:
+ raise ValueError(
+ "Invalid block_size value %d. Must be greater than 1." % block_size
+ )
+ spec_layer_params.blockSize = block_size
+
+ mode = mode.upper() if isinstance(mode, str) else mode
+ if mode == "SPACE_TO_DEPTH":
+ spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+ "SPACE_TO_DEPTH"
+ )
+ elif mode == "DEPTH_TO_SPACE":
+ spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+ "DEPTH_TO_SPACE"
+ )
+ elif mode == "PIXEL_SHUFFLE":
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+ spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+ "PIXEL_SHUFFLE"
+ )
+ else:
+ raise NotImplementedError("Unknown reorganization mode %s." % mode)
+ return spec_layer
+
+ def add_batchnorm(
+ self,
+ name,
+ channels,
+ gamma,
+ beta,
+ mean=None,
+ variance=None,
+ input_name="data",
+ output_name="out",
+ compute_mean_var=False,
+ instance_normalization=False,
+ epsilon=1e-5,
+ ):
+ """
+ Add a batch normalization layer. Batch normalization operation is
+ defined as:
+
+ ``y = gamma * (x - mean) / sqrt(variance + epsilon) + beta``
+
+ Refer to the ``BatchnormLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ channels: int
+ Number of channels of the input blob.
+ gamma: numpy.array
+ Values of gamma. Must be numpy array of shape ``(channels, )``.
+ beta: numpy.array
+ Values of beta. Must be numpy array of shape ``(channels, )``.
+ mean: numpy.array
+ Means of the input blob on each channel. Must be numpy array of shape ``(channels, )``.
+ variance: numpy.array
+ Variances of the input blob on each channel. Must be numpy array of shape ``(channels, )``.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ compute_mean_var: bool
+ Set to ``True`` if mean and variance are to be computed from the input data.
+ instance_normalization: bool
+ Set compute_mean_var and this to ``True`` to perform
+ instance normalization. That is, mean and variance are computed
+ from the single input instance.
+ epsilon: float
+ Value of epsilon. Defaults to ``1e-5`` if not specified.
+
+ See Also
+ --------
+ add_convolution, add_pooling, add_inner_product
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.batchnorm
+
+ # Set the parameters
+ spec_layer_params.channels = channels
+ spec_layer_params.gamma.floatValue.extend(gamma.flatten())
+ spec_layer_params.beta.floatValue.extend(beta.flatten())
+ spec_layer_params.epsilon = epsilon
+ spec_layer_params.computeMeanVar = compute_mean_var
+ spec_layer_params.instanceNormalization = instance_normalization
+
+ if compute_mean_var:
+ if not instance_normalization:
+ raise NotImplementedError(
+ "Batch-instance norm is currently not supported"
+ )
+
+ if not compute_mean_var:
+ spec_layer_params.mean.floatValue.extend(mean.flatten())
+ spec_layer_params.variance.floatValue.extend(variance.flatten())
+
+ return spec_layer
+
+ def add_permute(self, name, dim, input_name, output_name):
+ """
+ Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W].
+ Refer to the ``PermuteLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ dim: tuple
+ The order in which to permute the input dimensions = [seq,C,H,W].
+ Must have length 4 and be a permutation of ``[0, 1, 2, 3]``.
+
+ examples:
+
+ Let's say the input has shape: [seq, C, H, W].
+
+ If ``dim`` is set to ``[0, 3, 1, 2]``,
+ then the output has shape ``[W,C,H]``
+ and has the same sequence length as that of the input.
+
+ If ``dim`` is set to ``[3, 1, 2, 0]``,
+ and the input is a sequence of data
+ with length ``Seq`` and shape ``[C, 1, 1]``,
+ then the output is a unit sequence of data with shape ``[C, 1, Seq]``.
+
+ If ``dim`` is set to ``[0, 3, 2, 1]``,
+ the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.
+
+ If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
+ the output is the same as the input.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_flatten, add_reshape
+ """
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.permute
+
+ if len(dim) != 4:
+ raise ValueError("Length of the 'dim' parameter must be equal to 4")
+ spec_layer_params.axis.extend(list(dim))
+ return spec_layer
+
+ def add_reshape(self, name, input_name, output_name, target_shape, mode):
+ """
+ Add a reshape layer.
+ Refer to the ``ReshapeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ target_shape: tuple
+ Shape of the output blob. The product of ``target_shape`` must equal
+ the total number of elements in the input blob.
+ Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
+ mode: int
+
+ - If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
+            - If mode == 1, the reshape layer is in CHANNEL_LAST mode.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_flatten, add_permute
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reshape
+        spec_layer_params.targetShape.extend(target_shape)
+        if mode == 0:
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value(
+                "CHANNEL_FIRST"
+            )
+        else:
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value(
+                "CHANNEL_LAST"
+            )
+
+        if len(target_shape) != 4 and len(target_shape) != 3:
+            raise ValueError(
+                "Length of the 'target_shape' parameter must be equal to 3 or 4"
+            )
+        self.rank_dict[output_name] = len(target_shape)
+        return spec_layer
+
+    def add_reduce(self, name, input_name, output_name, axis, mode, epsilon=1e-6):
+        """
+        Add a reduce layer. Applies the function specified by the parameter ``mode``
+        along the dimension(s) specified by the parameter ``axis``.
+        Refer to the ``ReduceLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        axis: str
+            Dimensions along which the reduction operation is applied.
+            Allowed values: 'CHW', 'HW', 'C', 'H', 'W'
+
+        mode: str
+            Reduction operation to be applied.
+            Allowed values:
+            'sum', 'avg', 'prod', 'logsum', 'sumsquare', 'L1', 'L2', 'max', 'min', 'argmax'.
+            'argmax' is only supported with axis values 'C', 'H' and 'W'.
+
+        epsilon: float
+            Number added to the input when the 'logsum' function is applied.
+
+        See Also
+        --------
+        add_activation
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reduce
+        spec_layer_params.epsilon = epsilon
+
+        mode = mode.lower() if isinstance(mode, str) else mode
+        if mode == "sum":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "SUM"
+            )
+        elif mode == "avg":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "AVG"
+            )
+        elif mode == "prod":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "PROD"
+            )
+        elif mode == "logsum":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "LOGSUM"
+            )
+        elif mode == "sumsquare":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "SUMSQUARE"
+            )
+        elif mode == "l1":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "L1"
+            )
+        elif mode == "l2":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "L2"
+            )
+        elif mode == "max":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "MAX"
+            )
+        elif mode == "min":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "MIN"
+            )
+        elif mode == "argmax":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value(
+                "ARGMAX"
+            )
+        else:
+            raise NotImplementedError("Unknown reduction operation %s." % mode)
+
+        axis = axis.upper() if isinstance(axis, str) else axis
+        if axis == "CHW":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "CHW"
+            )
+        elif axis == "HW":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "HW"
+            )
+        elif axis == "C":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "C"
+            )
+        elif axis == "H":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "H"
+            )
+        elif axis == "W":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "W"
+            )
+        else:
+            raise NotImplementedError("Unknown reduction axis %s." % axis)
+        return spec_layer
+
+    def add_lrn(self, name, input_name, output_name, alpha, beta, local_size, k=1.0):
+        """
+        Add an LRN (local response normalization) layer. Supports "across"-channels normalization.
+        Refer to the ``LRNLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float
+            Multiplicative constant in the denominator.
+        beta: float
+            Exponent of the normalizing term in the denominator.
+        k: float
+            Bias term in the denominator. Must be positive.
+        local_size: int
+            Size of the neighborhood along the channel axis.
+
+        See Also
+        --------
+        add_l2_normalize, add_mvn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.lrn
+        spec_layer_params.alpha = alpha
+        spec_layer_params.beta = beta
+        spec_layer_params.localSize = local_size
+        spec_layer_params.k = k
+        return spec_layer
+
+    def add_mvn(
+        self,
+        name,
+        input_name,
+        output_name,
+        across_channels=True,
+        normalize_variance=True,
+        epsilon=1e-5,
+    ):
+        """
+        Add an MVN (mean variance normalization) layer. Computes mean and variance, and normalizes the input.
+        Refer to the ``MeanVarianceNormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        across_channels: boolean
+            If False, each channel plane is normalized separately.
+            If True, mean/variance is computed across all C, H and W dimensions.
+
+        normalize_variance: boolean
+            If False, only mean subtraction is performed.
+
+        epsilon: float
+            Small bias to avoid division by zero.
+
+        See Also
+        --------
+        add_l2_normalize, add_lrn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        spec_layer_params = spec_layer.mvn
+        spec_layer_params.acrossChannels = across_channels
+        spec_layer_params.normalizeVariance = normalize_variance
+        spec_layer_params.epsilon = epsilon
+        return spec_layer
+
+    def add_l2_normalize(self, name, input_name, output_name, epsilon=1e-5):
+        """
+        Add an L2 normalize layer. Normalizes the input by the L2 norm; that is, divides by the
+        square root of the sum of squares of all elements of the input along the C, H and W dimensions.
+        Refer to the ``L2NormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        epsilon: float
+            Small bias to avoid division by zero.
+
+        See Also
+        --------
+        add_mvn, add_lrn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.l2normalize
+        spec_layer_params.epsilon = epsilon
+        return spec_layer
+
+    def add_unary(
+        self,
+        name,
+        input_name,
+        output_name,
+        mode,
+        alpha=1.0,
+        shift=0,
+        scale=1.0,
+        epsilon=None,
+    ):
+        """
+        Add a unary layer. Applies the specified function (``mode``) to all elements of the input.
+        Prior to the application of the function, the input can be scaled and shifted
+        using the ``scale`` and ``shift`` parameters.
+        Refer to the ``UnaryFunctionLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        mode: str
+            Unary function.
+            Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', 'threshold'.
+
+        alpha: float
+            Constant used with modes 'power' and 'threshold'.
+
+        shift, scale: float
+            The input is modified by ``scale`` and ``shift`` prior to the application of the unary function.
+
+        epsilon: float
+            Small bias to prevent division by zero.
+
+        See Also
+        --------
+        add_activation
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.unary
+        if epsilon is None:
+            # Default epsilon is 1e-4 for "rsqrt" and "inverse", 1e-45 for "log",
+            # and 1e-6 otherwise.
+            if mode == "inverse" or mode == "rsqrt":
+                epsilon = 1e-4
+            elif mode == "log":
+                epsilon = 1e-45
+            else:
+                epsilon = 1e-6
+        spec_layer_params.epsilon = epsilon
+        spec_layer_params.alpha = alpha
+        spec_layer_params.shift = shift
+        spec_layer_params.scale = scale
+
+        mode = mode.lower() if isinstance(mode, str) else mode
+        if mode == "sqrt":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "SQRT"
+            )
+        elif mode == "rsqrt":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "RSQRT"
+            )
+        elif mode == "inverse":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "INVERSE"
+            )
+        elif mode == "power":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "POWER"
+            )
+        elif mode == "exp":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "EXP"
+            )
+        elif mode == "log":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "LOG"
+            )
+        elif mode == "abs":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "ABS"
+            )
+        elif mode == "threshold":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "THRESHOLD"
+            )
+        else:
+            raise NotImplementedError("Unknown unary function %s." % mode)
+        return spec_layer
+
+    def add_split(self, name, input_name, output_names):
+        """
+        Add a split layer that uniformly splits the input along the channel dimension
+        to produce multiple outputs.
+        Refer to the ``SplitLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_names: list of str
+            List of output blob names of this layer.
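+
+        Examples
+        --------
+        A minimal usage sketch; the builder instance ``builder`` and the input
+        blob ``"features"`` are assumptions, not part of this patch::
+
+            # Split "features" uniformly along the channel axis into two blobs.
+            builder.add_split(
+                name="split_1",
+                input_name="features",
+                output_names=["features_a", "features_b"],
+            )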
+
+        See Also
+        --------
+        add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], output_names)
+        spec_layer_params = spec_layer.split
+        spec_layer_params.nOutputs = len(output_names)
+        return spec_layer
+
+    def add_load_constant(self, name, output_name, constant_value, shape):
+        """
+        Add a load constant layer.
+        Refer to the ``LoadConstantLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        constant_value: numpy.array
+            Value of the constant as a numpy array.
+
+        shape: list of int or tuple of int
+            List of ints representing the shape of the constant. Must be of length 3: [C,H,W].
+
+        See Also
+        --------
+        add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.loadConstant
+
+        data = spec_layer_params.data
+        data.floatValue.extend(constant_value.flatten())
+
+        spec_layer_params.shape.extend(shape)
+
+        self.rank_dict[output_name] = 5
+        if len(data.floatValue) != _np.prod(shape):
+            raise ValueError(
+                "Dimensions of 'shape' do not match the size of the provided constant"
+            )
+        if not self._disable_rank5_shape_mapping:
+            if len(shape) != 3:
+                raise ValueError("'shape' must be of length 3")
+        return spec_layer
+
+    def add_custom(self, name, input_names, output_names, custom_proto_spec=None):
+        """
+        Add a custom layer.
+        Refer to the ``CustomLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            The input blob names to this layer.
+
+        output_names: list of str
+            The output blob names from this layer.
+
+        custom_proto_spec: CustomLayerParams
+            A protobuf CustomLayerParams message. This can also be left blank and filled in later.
+        """
+        # Custom layers require a newer specification version.
+        from coremltools import _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
+
+        if self.spec:
+            self.spec.specificationVersion = max(
+                self.spec.specificationVersion, _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
+            )
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+
+        spec_layer.custom.MergeFromString(b"")
+        if custom_proto_spec:
+            spec_layer.custom.CopyFrom(custom_proto_spec)
+        return spec_layer
+
+    def add_resize_bilinear(
+        self,
+        name,
+        input_name,
+        output_name,
+        target_height=1,
+        target_width=1,
+        mode="ALIGN_ENDPOINTS_MODE",
+    ):
+        """
+        Add a resize bilinear layer to the model: a layer that resizes the input to a given spatial size using bilinear interpolation.
+        Refer to the ``ResizeBilinearLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        target_height: int
+            Output height dimension.
+        target_width: int
+            Output width dimension.
+        mode: str
+            The following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
+            This parameter determines the sampling grid used for bilinear interpolation.
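+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the input blob ``"x"`` are
+        assumptions, not part of this patch::
+
+            # Upsample "x" to a fixed 64 x 64 spatial size.
+            builder.add_resize_bilinear(
+                name="resize_1",
+                input_name="x",
+                output_name="x_resized",
+                target_height=64,
+                target_width=64,
+                mode="UPSAMPLE_MODE",
+            )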
+
+        See Also
+        --------
+        add_upsample
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.resizeBilinear
+        spec_layer_params.targetSize.append(target_height)
+        spec_layer_params.targetSize.append(target_width)
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "ALIGN_ENDPOINTS_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "ALIGN_ENDPOINTS_MODE"
+            )
+        elif mode == "STRICT_ALIGN_ENDPOINTS_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "STRICT_ALIGN_ENDPOINTS_MODE"
+            )
+        elif mode == "UPSAMPLE_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "UPSAMPLE_MODE"
+            )
+        elif mode == "ROI_ALIGN_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "ROI_ALIGN_MODE"
+            )
+        else:
+            raise ValueError("Unsupported resize bilinear mode %s" % mode)
+        return spec_layer
+
+    def add_crop_resize(
+        self,
+        name,
+        input_names,
+        output_name,
+        target_height=1,
+        target_width=1,
+        mode="STRICT_ALIGN_ENDPOINTS_MODE",
+        normalized_roi=False,
+        box_indices_mode="CORNERS_HEIGHT_FIRST",
+        spatial_scale=1.0,
+    ):
+        """
+        Add a crop resize layer to the model: a layer that extracts cropped spatial patches or RoIs (regions of interest)
+        from the input and resizes them to a pre-specified size using bilinear interpolation.
+        Note that the RoI Align layer can be implemented with this layer followed by a pooling layer.
+        Refer to the ``CropResizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            * Must be a list of two names: image feature map and crop indices/RoI input.
+            * First input corresponds to a blob with shape ``[1, Batch, C, H_in, W_in]``.
+              This represents a batch of input image feature data with ``C`` channels.
+            * The second input shape must be ``[N, 1, 4, 1, 1]`` or ``[N, 1, 5, 1, 1]``.
+              This represents the bounding box coordinates for ``N`` patches/RoIs.
+            * ``N``: number of patches/RoIs to be extracted.
+            * If RoI shape = ``[N, 1, 4, 1, 1]``, the channel axis corresponds
+              to the four coordinates specifying the bounding box.
+              All the N RoIs are extracted from all the batches of the input.
+            * If RoI shape = ``[N, 1, 5, 1, 1]``, the first element of the
+              channel axis specifies the input batch id from which to extract the RoI and
+              must be in the interval ``[0, Batch - 1]``. That is, the ``n``-th RoI is
+              extracted from the ``RoI[n,0,0,0]``-th input batch id.
+              The last four elements of the channel axis specify the
+              bounding box coordinates.
+
+        output_name: str
+            The output blob name of this layer.
+
+        target_height: int
+            Output height dimension.
+
+        target_width: int
+            Output width dimension.
+
+        mode: str
+            * The following values are supported:
+              ``'STRICT_ALIGN_ENDPOINTS_MODE'``, ``'ALIGN_ENDPOINTS_MODE'``,
+              ``'UPSAMPLE_MODE'``, ``'ROI_ALIGN_MODE'``.
+            * This parameter determines the sampling grid used for bilinear interpolation.
+
+        normalized_roi: bool
+            * If true the bounding box coordinates must be in the interval ``[0, 1]``.
+              They are scaled by ``(input_height - 1)``, ``(input_width - 1)``;
+              that is, based on the input spatial dimensions.
+ * If false the bounding box coordinates must be in the interval + ``[0, input_height - 1]`` and ``[0, input_width - 1]``, + respectively for height and width dimensions. + + box_indices_mode: str + * The following values are supported: + ``'CORNERS_HEIGHT_FIRST'``, ``'CORNERS_WIDTH_FIRST'``, + ``'CENTER_SIZE_HEIGHT_FIRST'``, ``'CENTER_SIZE_WIDTH_FIRST'``. + * Representation used to interpret the bounding box coordinates (RoI) input. + * ``'CORNERS_HEIGHT_FIRST'``: ``[h_start, w_start, h_end, w_end]`` + * ``'CORNERS_WIDTH_FIRST'``: ``[w_start, h_start, w_end, h_end]`` + * ``'CENTER_SIZE_HEIGHT_FIRST'``: ``[h_center, w_center, box_height, box_width]`` + * ``'CENTER_SIZE_WIDTH_FIRST'``: ``[w_center, h_center, box_width, box_height]`` + + spatial_scale: float + Additional spatial scale that multiplies the bounding box coordinates. + Generally used while implementing the RoI Align layer, + which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1. + + See Also + -------- + add_resize_bilinear, add_crop + """ + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.cropResize + spec_layer_params.targetSize.append(target_height) + spec_layer_params.targetSize.append(target_width) + spec_layer_params.normalizedCoordinates = normalized_roi + spec_layer_params.spatialScale = spatial_scale + + mode = mode.upper() if isinstance(mode, str) else mode + box_indices_mode = ( + box_indices_mode.upper() + if isinstance(box_indices_mode, str) + else box_indices_mode + ) + + if mode == "ALIGN_ENDPOINTS_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "ALIGN_ENDPOINTS_MODE" + ) + elif mode == "STRICT_ALIGN_ENDPOINTS_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "STRICT_ALIGN_ENDPOINTS_MODE" + ) + elif mode == "UPSAMPLE_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "UPSAMPLE_MODE" + ) + elif mode == "ROI_ALIGN_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "ROI_ALIGN_MODE" + ) + else: + raise ValueError("Unsupported crop resize mode %s" % mode) + + if box_indices_mode == "CORNERS_HEIGHT_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CORNERS_HEIGHT_FIRST" + ) + elif box_indices_mode == "CORNERS_WIDTH_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CORNERS_WIDTH_FIRST" + ) + elif box_indices_mode == "CENTER_SIZE_HEIGHT_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CENTER_SIZE_HEIGHT_FIRST" + ) + elif box_indices_mode == "CENTER_SIZE_WIDTH_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CENTER_SIZE_WIDTH_FIRST" + ) + else: + raise ValueError( + "Unsupported crop resize box indices mode %s" % box_indices_mode + ) + return spec_layer + + def set_pre_processing_parameters( + self, + image_input_names=None, + is_bgr=False, + red_bias=0.0, + green_bias=0.0, + blue_bias=0.0, + gray_bias=0.0, + image_scale=1.0, + image_format="NCHW", + ): + """ + Add a pre-processing parameters layer to the neural network object. 
+
+        Parameters
+        ----------
+        image_input_names: list of str
+            Names of input blobs that are images.
+
+        is_bgr: boolean or dict()
+            Channel order for input blobs that are images. BGR if True else RGB.
+            To specify a different value for each image input,
+            provide a dictionary with input names as keys.
+
+        red_bias: float or dict()
+            Image re-centering parameter (red channel)
+
+        blue_bias: float or dict()
+            Image re-centering parameter (blue channel)
+
+        green_bias: float or dict()
+            Image re-centering parameter (green channel)
+
+        gray_bias: float or dict()
+            Image re-centering parameter (for grayscale images)
+
+        image_scale: float or dict()
+            Value by which to scale the images.
+
+        image_format: str
+            Image format: either 'NCHW' or 'NHWC'.
+
+        See Also
+        --------
+        set_input, set_output, set_class_labels
+        """
+        if not image_input_names:
+            return  # nothing to do here
+
+        image_format = (
+            image_format.upper()
+            if isinstance(image_format, str)
+            else image_format
+        )
+        if image_format != "NCHW" and image_format != "NHWC":
+            raise ValueError(
+                "Input image format must be either 'NCHW' or 'NHWC'. Provided {}".format(
+                    image_format
+                )
+            )
+
+        if not isinstance(is_bgr, dict):
+            is_bgr = dict.fromkeys(image_input_names, is_bgr)
+        if not isinstance(red_bias, dict):
+            red_bias = dict.fromkeys(image_input_names, red_bias)
+        if not isinstance(blue_bias, dict):
+            blue_bias = dict.fromkeys(image_input_names, blue_bias)
+        if not isinstance(green_bias, dict):
+            green_bias = dict.fromkeys(image_input_names, green_bias)
+        if not isinstance(gray_bias, dict):
+            gray_bias = dict.fromkeys(image_input_names, gray_bias)
+        if not isinstance(image_scale, dict):
+            image_scale = dict.fromkeys(image_input_names, image_scale)
+
+        # Raise an error if any key in the image preprocessing parameters
+        # is not in image_input_names.
+        def check_valid_preprocessing_keys(params, target, param_name):
+            for key in params:
+                if key not in target:
+                    raise ValueError("Invalid key {} in {}.".format(key, param_name))
+
+        target = image_input_names
+        check_valid_preprocessing_keys(is_bgr, target, "is_bgr")
+        check_valid_preprocessing_keys(red_bias, target, "red_bias")
+        check_valid_preprocessing_keys(blue_bias, target, "blue_bias")
+        check_valid_preprocessing_keys(green_bias, target, "green_bias")
+        check_valid_preprocessing_keys(gray_bias, target, "gray_bias")
+        check_valid_preprocessing_keys(image_scale, target, "image_scale")
+
+        spec = self.spec
+
+        # Add image inputs
+        for input_ in spec.description.input:
+            if input_.name in image_input_names:
+                if input_.type.WhichOneof("Type") == "multiArrayType":
+                    array_shape = tuple(input_.type.multiArrayType.shape)
+
+                    if len(array_shape) == 4:
+                        input_indices = (
+                            [0, 1, 2, 3] if image_format == "NCHW" else [0, 3, 1, 2]
+                        )
+                    elif len(array_shape) == 3:
+                        # Adding dummy index for 'batch' for compatibility
+                        input_indices = (
+                            [0, 0, 1, 2] if image_format == "NCHW" else [0, 2, 0, 1]
+                        )
+                    else:
+                        raise ValueError(
+                            "Invalid input shape. Input of rank {}, but expecting input of either rank 3 or rank 4".format(
+                                len(array_shape)
+                            )
+                        )
+
+                    # Extract image shape depending on input format
+                    _, channels, height, width = [array_shape[e] for e in input_indices]
+
+                    if image_format == "NHWC":
+                        # If the input format is 'NHWC' for a TF model, it will be
+                        # 'NCHW' for the Core ML model.
Therefore, add a transpose to
+                        # NHWC after the input and replace all uses of the input.
+                        layers = self.nn_spec.layers
+                        complement_transpose = True
+                        transpose_names = set()
+                        transpose_outputs = []
+                        for layer_ in layers:
+                            if (
+                                layer_.HasField("transpose")
+                                and layer_.input[0] == input_.name
+                            ):
+                                transpose_order = list(layer_.transpose.axes)
+                                if transpose_order == [
+                                    0,
+                                    3,
+                                    1,
+                                    2,
+                                ] or transpose_order == [2, 0, 1]:
+                                    transpose_names.add(layer_.name)
+                                    transpose_outputs += list(layer_.output)
+                                else:
+                                    complement_transpose = False
+                                    break
+                            else:
+                                for i in layer_.input:
+                                    if i == input_.name:
+                                        complement_transpose = False
+                                        break
+                        if complement_transpose:
+                            for layer_ in layers:
+                                for i in range(len(layer_.input)):
+                                    if layer_.input[i] in transpose_names:
+                                        layer_.input[i] = input_.name
+                            for layer_ in layers:
+                                if layer_.name == input_.name:
+                                    del layer_.output[:]
+                                    layer_.output.extend(transpose_outputs)
+                                    break
+                            while len(transpose_names) > 0:
+                                for idx, layer_ in enumerate(layers):
+                                    if layer_.name in transpose_names:
+                                        del layers[idx]
+                                        transpose_names.remove(layer_.name)
+                        else:
+                            axes = [1, 2, 0]
+                            if len(array_shape) == 4:
+                                axes = [0, 2, 3, 1]
+                            input_transpose = input_.name + "_to_nhwc"
+                            transpose_layer = self.add_transpose(
+                                name=input_transpose,
+                                axes=axes,
+                                input_name=input_.name,
+                                output_name=input_transpose,
+                            )
+                            layers.insert(0, layers.pop())
+                            for layer_ in layers:
+                                for i in range(len(layer_.input)):
+                                    if layer_.name == input_transpose:
+                                        continue
+                                    if layer_.input[i] == input_.name:
+                                        layer_.input[i] = input_transpose
+
+                    # TODO: Handle inputs that are not rank 3 or 4 accordingly;
+                    # e.g. for a rank-2 grayscale input, squeeze the extra dimension.
+                    if channels == 1:
+                        input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(
+                            "GRAYSCALE"
+                        )
+                    elif channels == 3:
+                        if input_.name in is_bgr:
+                            if is_bgr[input_.name]:
+                                input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(
+                                    "BGR"
+                                )
+                            else:
+                                input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(
+                                    "RGB"
+                                )
+                        else:
+                            input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(
+                                "RGB"
+                            )
+                    else:
+                        raise ValueError(
+                            "Channel Value %d not supported for image inputs" % channels
+                        )
+                    input_.type.imageType.width = width
+                    input_.type.imageType.height = height
+
+                preprocessing = self.nn_spec.preprocessing.add()
+                preprocessing.featureName = input_.name
+                scaler = preprocessing.scaler
+                if input_.name in image_scale:
+                    scaler.channelScale = image_scale[input_.name]
+                else:
+                    scaler.channelScale = 1.0
+                if input_.name in red_bias:
+                    scaler.redBias = red_bias[input_.name]
+                if input_.name in blue_bias:
+                    scaler.blueBias = blue_bias[input_.name]
+                if input_.name in green_bias:
+                    scaler.greenBias = green_bias[input_.name]
+                if input_.name in gray_bias:
+                    scaler.grayBias = gray_bias[input_.name]
+
+    def add_transpose(self, name, axes, input_name, output_name):
+        """
+        Add an N-D transpose layer with axes as a parameter.
+        Refer to the ``TransposeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        axes: list of int or tuple of int
+            A permutation of ``[0, 1, 2, ..., N-1]``, where N is the rank of the input/output tensor.
+
+        input_name: str
+            The input blob name of this layer.
+
+        output_name: str
+            The output blob name of this layer.
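+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the rank-4 input blob ``"x"``
+        are assumptions, not part of this patch::
+
+            # Move the channel axis of an NCHW tensor to the end (NHWC).
+            builder.add_transpose(
+                name="transpose_1",
+                axes=[0, 2, 3, 1],
+                input_name="x",
+                output_name="x_nhwc",
+            )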
+
+        See Also
+        --------
+        add_permute, add_reshape
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        rank = len(axes)
+        axes = [rank + axis if axis < 0 else axis for axis in axes]
+        spec_layer.transpose.axes.extend(axes)
+
+        return spec_layer
+
+    def add_softmax_nd(self, name, input_name, output_name, axis):
+        """
+        Add a softmax_nd layer to the model that performs the softmax operation along
+        the given axis.
+        Refer to the ``SoftmaxNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the softmax operation on.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.softmaxND
+        spec_layer_params.axis = axis
+        return spec_layer
+
+    def add_concat_nd(self, name, input_names, output_name, axis, interleave=False):
+        """
+        Add a concat_nd layer to the model that performs concatenation along the
+        given axis.
+        Refer to the ``ConcatNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the concat operation on.
+        interleave: bool
+            (Only available in Core ML specification >= 5; that is, iOS >= 14 and macOS >= 11.0.)
+            If True, concatenate by interleaving the inputs.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.concatND
+        spec_layer_params.axis = axis
+        if interleave:
+            spec_layer_params.interleave = True
+            if self.spec:
+                self.spec.specificationVersion = max(self.spec.specificationVersion, _SPECIFICATION_VERSION_IOS_14)
+        return spec_layer
+
+    def add_erf(self, name, input_name, output_name):
+        """
+        Add an erf function (Gaussian error function) layer to the model.
+        Refer to the ``ErfLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.erf.MergeFromString(b"")
+        return spec_layer
+
+    def add_gelu(self, name, input_name, output_name, mode="EXACT"):
+        """
+        Add a GELU (Gaussian error linear unit) activation layer, which is:
+        ``0.5 * x * (1 + erf(x / sqrt(2)))``.
+        Refer to the ``GeluLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str, optional
+            Gelu mode in [EXACT | TANH_APPROXIMATION | SIGMOID_APPROXIMATION], default EXACT.
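+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the input blob ``"x"`` are
+        assumptions, not part of this patch::
+
+            # Apply the tanh approximation of GELU to "x".
+            builder.add_gelu(
+                name="gelu_1",
+                input_name="x",
+                output_name="x_gelu",
+                mode="TANH_APPROXIMATION",
+            )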
+ """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.gelu + + if mode == "EXACT": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "EXACT" + ) + elif mode == "TANH_APPROXIMATION": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "TANH_APPROXIMATION" + ) + elif mode == "SIGMOID_APPROXIMATION": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "SIGMOID_APPROXIMATION" + ) + else: + raise ValueError("Unsupported Gelu mode %s" % mode) + return spec_layer + + def add_sin(self, name, input_name, output_name): + """ + Add a sin layer to the model that computes element-wise sine for the + input tensor. + Refer to the ``SinLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_sinh, add_asin, add_asinh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.sin.MergeFromString(b"") + return spec_layer + + def add_cos(self, name, input_name, output_name): + """ + Add a cos layer to the model that computes element-wise cosine for the + input tensor. + Refer to the ``CosLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_cosh, add_acos, add_acosh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.cos.MergeFromString(b"") + return spec_layer + + def add_tan(self, name, input_name, output_name): + """ + Add a tan layer to the model that computes element-wise tangent for the + input tensor. + Refer to the ``TanLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_tanh, add_atan, add_atanh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.tan.MergeFromString(b"") + return spec_layer + + def add_asin(self, name, input_name, output_name): + """ + Add an asin layer to the model that computes element-wise arc-sine for + the input tensor. + Refer to the ``AsinLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_sin, add_sinh, add_asinh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.asin.MergeFromString(b"") + return spec_layer + + def add_acos(self, name, input_name, output_name): + """ + Add an acos layer to the model that computes element-wise arc-cosine + for the input tensor. + Refer to the ``AcosLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. 
+
+        See Also
+        --------
+        add_cos, add_cosh, add_acosh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.acos.MergeFromString(b"")
+        return spec_layer
+
+    def add_atan(self, name, input_name, output_name):
+        """
+        Add an atan layer to the model that computes element-wise arc-tangent
+        for the input tensor.
+        Refer to the ``AtanLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_tanh, add_atanh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.atan.MergeFromString(b"")
+        return spec_layer
+
+    def add_sinh(self, name, input_name, output_name):
+        """
+        Add a sinh layer to the model that computes element-wise hyperbolic sine for the input tensor.
+        Refer to the ``SinhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_sin, add_asin, add_asinh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.sinh.MergeFromString(b"")
+        return spec_layer
+
+    def add_cosh(self, name, input_name, output_name):
+        """
+        Add a cosh layer to the model that computes element-wise hyperbolic
+        cosine for the input tensor.
+        Refer to the ``CoshLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_cos, add_acos, add_acosh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.cosh.MergeFromString(b"")
+        return spec_layer
+
+    def add_tanh(self, name, input_name, output_name):
+        """
+        Add a tanh layer to the model that computes element-wise hyperbolic
+        tangent for the input tensor.
+        Refer to the ``TanhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_atan, add_atanh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.tanh.MergeFromString(b"")
+        return spec_layer
+
+    def add_asinh(self, name, input_name, output_name):
+        """
+        Add an asinh layer to the model that computes element-wise inverse
+        hyperbolic sine for the input tensor.
+        Refer to the ``AsinhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
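+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the input blob ``"x"`` are
+        assumptions, not part of this patch::
+
+            builder.add_asinh(name="asinh_1", input_name="x", output_name="x_asinh")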
+
+        See Also
+        --------
+        add_sin, add_sinh, add_asin
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.asinh.MergeFromString(b"")
+        return spec_layer
+
+    def add_acosh(self, name, input_name, output_name):
+        """
+        Add an acosh layer to the model that computes element-wise inverse
+        hyperbolic cosine for the input tensor.
+        Refer to the ``AcoshLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_cos, add_cosh, add_acos
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.acosh.MergeFromString(b"")
+        return spec_layer
+
+    def add_atanh(self, name, input_name, output_name):
+        """
+        Add an atanh layer to the model that computes element-wise inverse
+        hyperbolic tangent for the input tensor.
+        Refer to the ``AtanhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_tanh, add_atan
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.atanh.MergeFromString(b"")
+        return spec_layer
+
+    def add_exp2(self, name, input_name, output_name):
+        """
+        Add an exp2 layer to the model that performs the element-wise base-2
+        exponential operation, ``2^x``.
+        Refer to the ``Exp2LayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.exp2.MergeFromString(b"")
+        return spec_layer
+
+    def add_add_broadcastable(self, name, input_names, output_name):
+        """
+        Add an add_broadcastable layer to the model that performs the element-wise
+        addition operation with broadcast support.
+        Refer to the ``AddBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.addBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_multiply_broadcastable(self, name, input_names, output_name):
+        """
+        Add a multiply_broadcastable layer to the model that performs the element-wise
+        multiplication operation with broadcast support.
+        Refer to the ``MultiplyBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
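+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the two broadcast-compatible
+        input blobs ``"a"`` and ``"b"`` are assumptions, not part of this patch::
+
+            builder.add_multiply_broadcastable(
+                name="mul_1", input_names=["a", "b"], output_name="a_times_b"
+            )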
+ """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.multiplyBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_divide_broadcastable(self, name, input_names, output_name): + """ + Add a divide_broadcastable layer to the model that performs element-wise + division operation with broadcast support. + Refer to the ``DivideBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.divideBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_subtract_broadcastable(self, name, input_names, output_name): + """ + Add a subtract_broadcastable layer to the model that performs element-wise + subtraction operation with broadcast support. + Refer to the ``SubtractBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.subtractBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_max_broadcastable(self, name, input_names, output_name): + """ + Add a max_broadcastable layer to the model that performs element-wise + maximum operation with broadcast support. + Refer to the ``MaxBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.maxBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_min_broadcastable(self, name, input_names, output_name): + """ + Add a min_broadcastable layer to the model that performs element-wise + minimum operation with broadcast support. + Refer to the ``MinBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.minBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_floor_div_broadcastable(self, name, input_names, output_name): + """ + Add a floor_div_broadcastable layer to the model that performs floor + division operation with broadcast support. + Refer to the ``FloorDivBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. 
+
+        See Also
+        --------
+        add_divide_broadcastable
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.floorDivBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_mod_broadcastable(self, name, input_names, output_name):
+        """
+        Add a mod_broadcastable layer to the model that performs the element-wise
+        modulo operation with broadcast support.
+        Refer to the ``ModBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.modBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_pow_broadcastable(self, name, input_names, output_name):
+        """
+        Add a pow_broadcastable layer to the model that performs the element-wise
+        power operation with broadcast support.
+        Refer to the ``PowBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.powBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_stack(self, name, input_names, output_name, axis=0):
+        """
+        Add a stack layer to the model that stacks a list of rank-R tensors
+        into one rank-(R+1) tensor along the given axis.
+        Refer to the ``StackLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis to perform the stack operation on, default: 0.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.stack.axis = axis
+        self.rank_dict[output_name] = self._get_rank(input_names[0]) + 1
+        return spec_layer
+
+    def add_ceil(self, name, input_name, output_name):
+        """
+        Add a ceil layer to the model that performs the element-wise ceil operation
+        on the input tensor; that is, it rounds each value to the smallest integer not
+        less than x.
+        Refer to the ``CeilLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_floor, add_clip
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.ceil.MergeFromString(b"")
+        return spec_layer
+
+    def add_floor(self, name, input_name, output_name):
+        """
+        Add a floor layer to the model that performs the element-wise floor operation
+        on the input tensor; that is, it rounds each value to the largest integer not
+        greater than x.
+        Refer to the ``FloorLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_ceil, add_clip
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.floor.MergeFromString(b"")
+        return spec_layer
+
+    def add_round(self, name, input_name, output_name):
+        """
+        Add a round layer to the model that performs the element-wise round operation
+        on the input tensor; that is, it rounds each value to the nearest integer.
+        Refer to the ``RoundLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.round.MergeFromString(b"")
+        return spec_layer
+
+    def add_sign(self, name, input_name, output_name):
+        """
+        Add a sign layer to the model that performs the element-wise sign operation
+        (+1 for positive values, -1 for negative values, 0 for zeroes).
+        Refer to the ``SignLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.sign.MergeFromString(b"")
+        return spec_layer
+
+    def add_clip(self, name, input_name, output_name, min_value=0.0, max_value=1.0):
+        """
+        Add a clip layer to the model that performs the element-wise clip operation:
+        clip the values in the input tensor to the range [min_value, max_value].
+        Refer to the ``ClipLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        min_value: float, optional
+            Lower bound / minimum value for clip, default: 0.0.
+        max_value: float, optional
+            Upper bound / maximum value for clip, default: 1.0.
+
+        See Also
+        --------
+        add_floor, add_ceil
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.clip.MergeFromString(b"")
+        spec_params = spec_layer.clip
+
+        spec_params.minVal = float(min_value)
+        spec_params.maxVal = float(max_value)
+
+        return spec_layer
+
+    def add_split_nd(
+        self, name, input_name, output_names, axis, num_splits=2, split_sizes=None
+    ):
+        """
+        Add a split layer to the model that splits the input tensor into multiple
+        output tensors. It either uniformly splits the input tensor into ``num_splits``
+        tensors, or splits it according to the given list of sizes ``split_sizes``.
+        Refer to the ``SplitNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_names: list of str
+            The output blob names of this layer.
+        axis: int
+            Axis to perform the split on.
+        num_splits: int, optional
+            Number of splits, default: 2.
+        split_sizes: list of int or tuple of int, optional
+            List of sizes to split into, default: ``[]`` or ``None``.
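+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` and the input blob ``"x"`` (with at
+        least 6 elements along its last axis) are assumptions, not part of this patch::
+
+            # Split "x" along its last axis into chunks of sizes 1, 2 and 3.
+            builder.add_split_nd(
+                name="split_nd_1",
+                input_name="x",
+                output_names=["x_a", "x_b", "x_c"],
+                axis=-1,
+                split_sizes=[1, 2, 3],
+            )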
+ """ + + if not split_sizes: + split_sizes = [] + + spec_layer = self._add_generic_layer(name, [input_name], output_names) + spec_layer_params = spec_layer.splitND + spec_layer_params.axis = axis + + if split_sizes and len(split_sizes) > 0: + spec_layer_params.splitSizes.extend(split_sizes) + spec_layer_params.numSplits = len(split_sizes) + else: + spec_layer_params.numSplits = num_splits + + assert len(output_names) == spec_layer_params.numSplits + return spec_layer + + def add_slice_static( + self, + name, + input_name, + output_name, + begin_ids, + end_ids, + strides, + begin_masks, + end_masks, + squeeze_masks=None, + ): + """ + Add a slice_static layer to the model that extracts a slice of size + ``(end - begin) / stride`` from the given input tensor. + Refer to the ``SliceStaticLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + begin_ids: list of int or tuple of int + Begin offsets for slice layer. + end_ids: list of int or tuple of int + End offsets for slice layer. + strides: list of int or tuple of int + Strides for slice layer. + begin_masks: list of bool + Boolean masks for begin offsets. + end_masks: list of bool + Boolean masks for end offsets. + squeeze_masks: list of bool + Boolean masks for squeezing axis. + + See Also + -------- + add_slice_dynamic + """ + + rank = len(begin_ids) + assert len(end_ids) == rank + assert len(strides) == rank + assert len(begin_masks) == rank + assert len(end_masks) == rank + assert squeeze_masks is None or len(squeeze_masks) == rank + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.sliceStatic + + spec_layer_params.beginIds.extend(begin_ids) + spec_layer_params.endIds.extend(end_ids) + spec_layer_params.strides.extend(strides) + spec_layer_params.beginMasks.extend(begin_masks) + spec_layer_params.endMasks.extend(end_masks) + + if not (squeeze_masks and any(squeeze_masks)): + return spec_layer + + if self.spec and ( + not self.spec.specificationVersion + or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14 + ): + self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14 + spec_layer_params.squeezeMasks.extend(squeeze_masks) + + return spec_layer + + def add_slice_dynamic( + self, + name, + input_names, + output_name, + end_ids=None, + strides=None, + begin_masks=None, + end_masks=None, + squeeze_masks=None, + ): + """ + Add a slice_dynamic layer to the model that extracts a slice of size + ``(end - begin) / stride`` from the given input tensor. + Refer to the ``SliceDynamicLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + end_ids: list of int or tuple of int, optional + End offsets for slice layer, default: [1]. + strides: list of int or tuple of int, optional + Strides for slice layer, default: [1]. + begin_masks: list of bool, optional + Boolean masks for begin offsets, default: [false]. + end_masks: list of bool, optional + Boolean masks for end offsets, default: [false]. + squeeze_masks: list of bool, optional + Boolean masks for squeezing axis, default: [false]. 
+
+        See Also
+        --------
+        add_slice_static
+        """
+
+        if not end_ids:
+            end_ids = [1 for _ in range(5)]
+        if not strides:
+            strides = [1 for _ in range(5)]
+        if not begin_masks:
+            begin_masks = [False for _ in range(5)]
+        if not end_masks:
+            end_masks = [False for _ in range(5)]
+        if not squeeze_masks:
+            squeeze_masks = [False for _ in range(5)]
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.sliceDynamic
+
+        spec_layer_params.endIds.extend(end_ids)
+        spec_layer_params.strides.extend(strides)
+        spec_layer_params.beginMasks.extend(begin_masks)
+        spec_layer_params.endMasks.extend(end_masks)
+        if not any(squeeze_masks):
+            return spec_layer
+
+        if self.spec and (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+        ):
+            self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+        spec_layer_params.squeezeMasks.extend(squeeze_masks)
+
+        return spec_layer
+
+    def add_tile(self, name, input_name, output_name, reps=None):
+        """
+        Add a tile layer to the model that constructs a tensor by repeating the
+        input tensor a given number of times.
+        Refer to the ``TileLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str or list[str]
+            The input blob name(s) of this layer.
+            If a second input is provided, the reps parameter is ignored.
+        output_name: str
+            The output blob name of this layer.
+        reps: list of int or tuple of int
+            Number of times to replicate along each dimension.
+            If ``input_name`` provides two inputs, the second input is used as
+            reps and this parameter is ignored.
+
+        See Also
+        --------
+        add_stack, add_concat_nd
+        """
+        if isinstance(input_name, tuple):
+            input_names = list(input_name)
+        elif isinstance(input_name, list):
+            input_names = input_name
+        else:
+            input_names = [input_name]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        spec_layer_params = spec_layer.tile
+        if reps is None:
+            reps = []
+        # If two inputs are provided,
+        # ignore the reps attribute.
+        if len(input_names) == 2:
+            reps = []
+            if self.spec and (
+                not self.spec.specificationVersion
+                or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+            ):
+                self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+        assert all(i > 0 for i in reps)
+        spec_layer_params.reps.extend(reps)
+        return spec_layer
+
+    def add_range_static(
+        self, name, output_name, input_names=None, end=1, start=0, step=1
+    ):
+        """
+        Add a range_static layer that returns a tensor that contains evenly spaced values.
+        This layer has no inputs and three parameters.
+        Refer to the ``RangeStaticLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        end: int, optional
+            Range parameter: end, default: 1.
+        start: int, optional
+            Range parameter: start, default: 0.
+        step: int, optional
+            Range parameter: step size, default: 1.
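+
+        Examples
+        --------
+        A minimal usage sketch; ``builder`` is an assumption, not part of this
+        patch::
+
+            # Produce the rank-1 tensor [0, 2, 4, 6, 8].
+            builder.add_range_static(
+                name="range_1", output_name="indices", input_names=[], end=10, start=0, step=2
+            )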
+
+    def add_range_static(
+        self, name, output_name, input_names=None, end=1, start=0, step=1
+    ):
+        """
+        Add a range_static layer that returns a tensor that contains evenly spaced values.
+        This layer has no mandatory input and three parameters.
+        Refer to the ``RangeStaticLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        input_names: list of str, optional
+            The input blob names of this layer.
+        end: int, optional
+            Range parameter: end, default: 1.
+        start: int, optional
+            Range parameter: start, default: 0.
+        step: int, optional
+            Range parameter: step size, default: 1.
+
+        See Also
+        --------
+        add_range_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.rangeStatic.MergeFromString(b"")
+        spec_params = spec_layer.rangeStatic
+
+        spec_params.endValue = float(end)
+        spec_params.startValue = float(start)
+        spec_params.stepSizeValue = float(step)
+
+        self.rank_dict[output_name] = 1
+        return spec_layer
+
+    def add_range_dynamic(self, name, input_names, output_name, start=0, step=1):
+        """
+        Add a range_dynamic layer that returns a tensor that contains evenly spaced values.
+        This layer has between one and three inputs and two parameters.
+        Refer to the ``RangeDynamicLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names.
+            If input size == 1: end is the input; start and step are read from the parameters.
+            If input size == 2: end and start are inputs; step is read from the parameters.
+            If input size == 3: start, end, and step are all inputs; none of the parameters are used.
+        output_name: str
+            The output blob name of this layer.
+        start: int, optional
+            Range parameter: start. Ignored if start is provided as input, default: 0.
+        step: int, optional
+            Range parameter: step. Ignored if step is provided as input, default: 1.
+
+        See Also
+        --------
+        add_range_static
+        """
+
+        if len(input_names) < 1 or len(input_names) > 3:
+            raise ValueError("RangeDynamic layer must have either 1, 2 or 3 inputs.")
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.rangeDynamic.MergeFromString(b"")
+        spec_params = spec_layer.rangeDynamic
+
+        spec_params.startValue = float(start)
+        spec_params.stepSizeValue = float(step)
+
+        self.rank_dict[output_name] = 1
+        return spec_layer
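+
+    # Example (a minimal sketch, not from the source): emit 0, 1, ..., n-1 where
+    # the end value comes from a blob "n" computed earlier in the graph.
+    # `builder` is an assumed NeuralNetworkBuilder instance.
+    #
+    #     builder.add_range_dynamic(
+    #         name="range_0",
+    #         input_names=["n"],      # a single input supplies `end`
+    #         output_name="indices",  # rank-1 output: start, start+step, ...
+    #         start=0,
+    #         step=1,
+    #     )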
+
+    def add_branch(self, name, input_name, if_branch=None, else_branch=None):
+        """
+        Add a branch layer to the model that provides the functionality of
+        branching, or an ``if-else`` block.
+        Refer to the ``BranchLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        if_branch: NeuralNetwork, optional
+            Neural network to execute if the absolute value of the input tensor is greater than 1e-6.
+        else_branch: NeuralNetwork, optional
+            Neural network to execute if the absolute value of the input tensor is less than or equal to 1e-6.
+
+        See Also
+        --------
+        add_loop, add_loop_continue, add_loop_break
+        """
+
+        layer = self._add_generic_layer(name, [input_name], [])
+        branch = layer.branch
+        if if_branch:
+            branch.ifBranch = if_branch
+        else:
+            branch.ifBranch.MergeFromString(b"")
+        if else_branch:
+            branch.elseBranch = else_branch
+        else:
+            branch.elseBranch.MergeFromString(b"")
+        return layer
+
+    def add_loop(
+        self,
+        name,
+        body_network=None,
+        input_name=None,
+        condition=None,
+        condition_network=None,
+        max_iterations=None,
+    ):
+        """
+        Add a loop layer to the model that provides the functionality of a ``for``
+        loop, or a ``while`` loop.
+        Refer to the ``LoopLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        body_network: NeuralNetwork, optional
+            Neural network to execute for the body of the loop.
+        input_name: str, optional
+            The input blob name of this layer.
+        condition: str, optional
+            Condition of the loop.
+        condition_network: NeuralNetwork, optional
+            Neural network to execute for the condition of the loop.
+        max_iterations: int, optional
+            Maximum number of iterations of the loop.
+
+        See Also
+        --------
+        add_loop_break, add_loop_continue, add_branch
+        """
+
+        input_names = [] if input_name is None else [input_name]
+        spec_layer = self._add_generic_layer(name, input_names, [])
+        loop = spec_layer.loop
+        if condition_network is None:
+            loop.conditionNetwork.MergeFromString(b"")
+        else:
+            loop.conditionNetwork = condition_network
+
+        if condition is not None:
+            loop.conditionVar = str(condition)
+        if max_iterations is not None:
+            loop.maxLoopIterations = max_iterations
+
+        if body_network is None:
+            loop.bodyNetwork.MergeFromString(b"")
+        else:
+            loop.bodyNetwork = body_network
+        return spec_layer
+
+    def add_loop_break(self, name):
+        """
+        Add a loop_break layer to the model that terminates the loop that
+        contains this layer. Must reside in the ``bodyNetwork`` of the loop layer.
+        Refer to the ``LoopBreakLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        See Also
+        --------
+        add_loop, add_loop_continue, add_branch
+        """
+
+        spec_layer = self.nn_spec.layers.add()
+        spec_layer.name = name
+        spec_layer.loopBreak.MergeFromString(b"")
+        return spec_layer
+
+    def add_loop_continue(self, name):
+        """
+        Add a loop_continue layer to the model that stops the current loop
+        iteration and continues with the next iteration. Must reside in the
+        ``bodyNetwork`` of the loop layer.
+        Refer to the ``LoopContinueLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        See Also
+        --------
+        add_loop, add_loop_break, add_branch
+        """
+
+        spec_layer = self.nn_spec.layers.add()
+        spec_layer.name = name
+        spec_layer.loopContinue.MergeFromString(b"")
+        return spec_layer
+
+    def add_copy(self, name, input_name, output_name):
+        """
+        Add a copy layer to the model that copies its input tensor to the output
+        tensor. Input tensor and output tensor must have distinct names.
+        Refer to the ``CopyLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.copy.MergeFromString(b"")
+        # If the output name's rank differs from the one recorded earlier,
+        # mark it as unknown.
+        if output_name in self.rank_dict and self._get_rank(
+            output_name
+        ) != self._get_rank(input_name):
+            self.rank_dict[output_name] = -1
+        else:
+            self.rank_dict[output_name] = self._get_rank(input_name)
+        return spec_layer
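+
+    # Example (a minimal sketch, not definitive): a fixed-count loop shell. By
+    # default an empty body network is created, which the caller then fills in
+    # via spec_layer.loop.bodyNetwork. `builder` is an assumed
+    # NeuralNetworkBuilder instance.
+    #
+    #     loop_layer = builder.add_loop("loop_0", max_iterations=10)
+    #     body = loop_layer.loop.bodyNetwork  # populate with the loop body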
+
+    def add_greater_than(
+        self, name, input_names, output_name, use_greater_than_equal=False, alpha=0.0
+    ):
+        """
+        Add a greater_than layer to the model that performs the element-wise
+        greater-than (>) operation or greater-than-or-equal-to (>=) operation.
+        Broadcasting is supported.
+        Refer to the ``GreaterThanLayerParams``, ``GreaterEqualLayerParams`` messages
+        in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        use_greater_than_equal: bool, optional
+            Whether or not to allow greater than or equal to, default: false.
+        alpha: float, optional
+            y = x1 > alpha (or y = x1 >= alpha if use_greater_than_equal is true),
+            if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_equal, add_not_equal, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        if use_greater_than_equal:
+            spec_layer.greaterEqual.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.greaterEqual.alpha = alpha
+        else:
+            spec_layer.greaterThan.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.greaterThan.alpha = alpha
+
+        return spec_layer
+
+    def add_less_than(
+        self, name, input_names, output_name, use_less_than_equal=False, alpha=0.0
+    ):
+        """
+        Add a less_than layer to the model that performs the element-wise
+        less-than (<) operation or less-than-or-equal-to (<=) operation.
+        Broadcasting is supported.
+        Refer to the ``LessThanLayerParams``, ``LessEqualLayerParams`` messages in the
+        specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        use_less_than_equal: bool, optional
+            Whether or not to allow less than or equal to, default: false.
+        alpha: float, optional
+            y = x1 < alpha (or y = x1 <= alpha if use_less_than_equal is true),
+            if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_equal, add_not_equal, add_greater_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        if use_less_than_equal:
+            spec_layer.lessEqual.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.lessEqual.alpha = alpha
+        else:
+            spec_layer.lessThan.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.lessThan.alpha = alpha
+        return spec_layer
+
+    def add_equal(self, name, input_names, output_name, alpha=0.0):
+        """
+        Add an equal layer to the model that performs the element-wise equal
+        (==) operation. Broadcasting is supported.
+        Refer to the ``EqualLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float, optional
+            y = x1 == alpha, if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_not_equal, add_greater_than, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.equal.MergeFromString(b"")
+        if len(input_names) == 1:
+            spec_layer.equal.alpha = alpha
+        return spec_layer
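+
+    # Example (a minimal sketch, not from the source): element-wise x > 0.5 as a
+    # boolean-valued blob, using the single-input form where `alpha` is the
+    # comparison constant. `builder` is an assumed NeuralNetworkBuilder instance.
+    #
+    #     builder.add_greater_than("x_gt_half", input_names=["x"],
+    #                              output_name="mask", alpha=0.5)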
+
+    def add_not_equal(self, name, input_names, output_name, alpha=0.0):
+        """
+        Add a not_equal layer to the model that performs the element-wise not
+        equal (!=) operation. Broadcasting is supported.
+        Refer to the ``NotEqualLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float, optional
+            y = x1 != alpha, if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_equal, add_greater_than, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.notEqual.MergeFromString(b"")
+        if len(input_names) == 1:
+            spec_layer.notEqual.alpha = alpha
+        return spec_layer
+
+    def add_logical(self, name, input_names, output_name, mode):
+        """
+        Add a logical layer to the model that performs an element-wise logical
+        AND/OR/XOR/NOT operation. Broadcasting is supported.
+        Refer to the ``LogicalAndLayerParams``, ``LogicalOrLayerParams``,
+        ``LogicalXorLayerParams``, and ``LogicalNotLayerParams`` messages in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str
+            Logical operation mode in [AND | OR | XOR | NOT].
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        if mode in ["AND", "OR", "XOR"] and len(input_names) != 2:
+            raise ValueError('Logical operation "%s" requires 2 inputs' % name)
+        if mode in ["NOT"] and len(input_names) != 1:
+            raise ValueError('Logical operation "%s" requires 1 input' % name)
+
+        if mode == "AND":
+            spec_layer.logicalAnd.MergeFromString(b"")
+        elif mode == "OR":
+            spec_layer.logicalOr.MergeFromString(b"")
+        elif mode == "XOR":
+            spec_layer.logicalXor.MergeFromString(b"")
+        elif mode == "NOT":
+            spec_layer.logicalNot.MergeFromString(b"")
+        else:
+            raise ValueError('Logical operation "%s" is not supported' % mode)
+
+        return spec_layer
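+
+    # Example (a minimal sketch, not from the source): combine two boolean masks
+    # produced by the comparison layers above. NOT takes exactly one input;
+    # AND/OR/XOR take exactly two. `builder` is an assumed instance.
+    #
+    #     builder.add_logical("mask_and", input_names=["mask_a", "mask_b"],
+    #                         output_name="mask_ab", mode="AND")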
+
+    def add_sliding_windows(
+        self, name, input_name, output_name, axis, window_size, step=1
+    ):
+        """
+        Add a sliding_windows layer to the model that returns a tensor containing
+        all windows of size ``window_size`` separated by ``step`` along the dimension ``axis``.
+        Refer to the ``SlidingWindowsLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the operation.
+        window_size: int
+            Number of elements in the sliding window.
+        step: int, optional
+            The stride of the input elements in the sliding window, default: 1.
+
+        See Also
+        --------
+        add_slice, add_slice_static, add_slice_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        spec_layer_params = spec_layer.slidingWindows
+        spec_layer_params.axis = axis
+        spec_layer_params.windowSize = window_size
+        spec_layer_params.step = step
+
+        self.rank_dict[output_name] = self._get_rank(input_name) + 1
+        return spec_layer
+
+    def add_reverse(self, name, input_name, output_name, reverse_dim=None):
+        """
+        Add a reverse layer to the model that reverses specific dimensions of
+        the input tensor.
+        Refer to the ``ReverseLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        reverse_dim: list of int or tuple of int
+            Reverse along the dimension, default: [1].
+
+        See Also
+        --------
+        add_reverse_sequence
+        """
+
+        if not reverse_dim:
+            reverse_dim = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reverse
+        spec_layer_params.reverseDim.extend(map(bool, reverse_dim))
+        return spec_layer
+
+    def add_reverse_sequence(
+        self, name, input_names, output_name, batch_axis=0, seq_axis=-1
+    ):
+        """
+        Add a reverse sequence layer to the model that reverses variable length slices.
+        Refer to the ``ReverseSeqLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        batch_axis: int, optional
+            Slices input along the dimension batch_axis, default: 0.
+        seq_axis: int, optional
+            Reverse along the dimension seq_axis, default: -1.
+
+        See Also
+        --------
+        add_reverse
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.reverseSeq.batchAxis = batch_axis
+        spec_layer.reverseSeq.sequenceAxis = seq_axis
+
+        return spec_layer
+
+    def add_gather(self, name, input_names, output_name, axis=0):
+        """
+        Add a gather layer to the model that gathers elements or slices from
+        data and stores them in a tensor whose shape is defined by the indices
+        from the input.
+        Refer to the ``GatherLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation performs on, default: 0.
+
+        See Also
+        --------
+        add_gather_nd, add_gather_along_axis, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gather.axis = axis
+        self.rank_dict[output_name] = (
+            self._get_rank(input_names[0]) - 1 + self._get_rank(input_names[1])
+        )
+        return spec_layer
+
+    def add_scatter(self, name, input_names, output_name, axis=0, mode="UPDATE"):
+        """
+        Add a scatter layer to the model that scatters data into a new tensor
+        according to indices from the input.
+        Refer to the ``ScatterLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            The axis the operation performs on, default: 0.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
+
+        See Also
+        --------
+        add_scatter_nd, add_scatter_along_axis, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatter
+        spec_layer_params.axis = axis
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported Scatter mode %s" % mode)
+
+        return spec_layer
+
+    def add_gather_along_axis(self, name, input_names, output_name, axis=0):
+        """
+        Add a gather_along_axis layer to the model that gathers elements or slices
+        from data along the given axis and stores them in a tensor whose shape is
+        defined by the indices from the input.
+        Refer to the ``GatherAlongAxisLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation performs on, default: 0.
+
+        See Also
+        --------
+        add_gather, add_gather_nd, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gatherAlongAxis.axis = axis
+        self.rank_dict[output_name] = self._get_rank(input_names[1])
+        return spec_layer
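+
+    # Example (a minimal sketch, not from the source): accumulate `updates` into
+    # `data` at positions `indices` along axis 0, i.e. a scatter-add. The
+    # (data, indices, updates) input order is assumed here; `builder` is an
+    # assumed NeuralNetworkBuilder instance and the blob names are illustrative.
+    #
+    #     builder.add_scatter("scatter_add_0",
+    #                         input_names=["data", "indices", "updates"],
+    #                         output_name="data_out", axis=0, mode="ADD")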
+
+    def add_scatter_along_axis(
+        self, name, input_names, output_name, axis=0, mode="UPDATE"
+    ):
+        """
+        Add a scatter_along_axis layer to the model that scatters data into a new
+        tensor according to indices from the input along the given axis into the
+        output tensor.
+        Refer to the ``ScatterAlongAxisLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            The axis the operation performs on, default: 0.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
+
+        See Also
+        --------
+        add_scatter, add_scatter_nd, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatterAlongAxis
+        spec_layer_params.axis = axis
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported scatter_along_axis mode %s" % mode)
+
+        return spec_layer
+
+    def add_gather_nd(self, name, input_names, output_name):
+        """
+        Add a gather_nd layer to the model that gathers elements or slices from
+        data and stores them in a tensor whose shape is defined by the indices
+        from the input. This is the reverse operation of the scatter operation.
+        Refer to the ``GatherNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_gather, add_gather_along_axis, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gatherND.MergeFromString(b"")
+        # NOTE: ideally, the following is the formula for computing the output rank:
+        # self.rank_dict[output_name] = self._get_rank(input_names[1]) - 1 + self._get_rank(input_names[0])
+        #                               + shape_dict[input_names[1]][-1]
+        # But the shape of the indices (input_names[1]) is unknown, hence marking as -1.
+        # The converter should update the rank if the indices are known.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_scatter_nd(self, name, input_names, output_name, mode="UPDATE"):
+        """
+        Add a scatter_nd layer to the model that scatters data into a new tensor
+        according to indices from the input. This is the reverse operation of the
+        gather operation.
+        Refer to the ``ScatterNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
+
+        See Also
+        --------
+        add_scatter, add_scatter_along_axis, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatterND
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported scatter mode %s" % mode)
+
+        return spec_layer
+
+    def add_topk(
+        self, name, input_names, output_names, k=0, axis=0, use_bottom_k=False
+    ):
+        """
+        Add a topk layer to the model that returns the top or bottom k values and
+        the corresponding indices of the input tensor along a given axis.
+        Refer to the ``TopKLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer. It must be of length 1 or 2.
+            The optional second input corresponds to the value of K.
+        output_names: list of str
+            The output blob names of this layer. First and second correspond to
+            values and indices, respectively.
+        k: int, optional
+            Number of values/indices to be computed along the axis.
+            Need not be given if there are two inputs, default: 0.
+        axis: int, optional
+            Axis along which the topk values/indices are computed.
+            Negative indexing is supported, default: 0.
+        use_bottom_k: bool, optional
+            If true, bottom k values are computed instead, default: false.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.topK
+        spec_layer_params.axis = axis
+        spec_layer_params.K = k
+        spec_layer_params.useBottomK = use_bottom_k
+        return spec_layer
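+
+    # Example (a minimal sketch, not from the source): the 5 largest values of
+    # "scores" along the last axis, plus their indices. `builder` is an assumed
+    # NeuralNetworkBuilder instance; the blob names are illustrative.
+    #
+    #     builder.add_topk("top5", input_names=["scores"],
+    #                      output_names=["top_values", "top_indices"],
+    #                      k=5, axis=-1)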
+
+    def add_argmax(self, name, input_name, output_name, axis, keepdims=True):
+        """
+        Add an argmax layer to the model that returns the indices of the maximum
+        value along a specified axis in the input tensor.
+        Refer to the ``ArgMaxLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis along which the argmax is computed. Negative indexing is supported.
+        keepdims: bool, optional
+            If true, the output rank is the same as the input rank, default: true.
+
+        See Also
+        --------
+        add_argmin
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.argMax
+        spec_layer_params.axis = axis
+        spec_layer_params.removeDim = not keepdims
+
+        input_rank = self._get_rank(input_name)
+        if input_rank == 1:
+            self.rank_dict[output_name] = 1
+        else:
+            if keepdims:
+                self.rank_dict[output_name] = input_rank
+            else:
+                self.rank_dict[output_name] = input_rank - 1
+        return spec_layer
+
+    def add_argmin(self, name, input_name, output_name, axis, keepdims=True):
+        """
+        Add an argmin layer to the model that returns the indices of the minimum
+        value along a specified axis in the input tensor.
+        Refer to the ``ArgMinLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis along which the argmin is computed. Negative indexing is supported.
+        keepdims: bool, optional
+            If true, the output rank is the same as the input rank, default: true.
+
+        See Also
+        --------
+        add_argmax
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.argMin
+        spec_layer_params.axis = axis
+        spec_layer_params.removeDim = not keepdims
+
+        input_rank = self._get_rank(input_name)
+        if input_rank == 1:
+            self.rank_dict[output_name] = 1
+        else:
+            if keepdims:
+                self.rank_dict[output_name] = input_rank
+            else:
+                self.rank_dict[output_name] = input_rank - 1
+        return spec_layer
+
+    def add_constant_pad(
+        self,
+        name,
+        input_names,
+        output_name,
+        value=0.0,
+        pad_to_given_output_size_mode=False,
+        pad_amounts=[],
+    ):
+        """
+        Add a constant pad layer.
+        Refer to the ``ConstantPaddingLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob name(s) of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float
+            Value to be used for padding.
+        pad_to_given_output_size_mode: bool
+            If true, pad_amounts are interpreted as output shapes (see example in NeuralNetwork.proto).
+        pad_amounts: [int], optional
+            Must be non-negative. Amount to pad in each dimension. Length of the list must be twice the input/output rank.
+            Not required if the second input is present.
+
+        See Also
+        --------
+        add_padding
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.constantPad
+        spec_layer_params.value = value
+        spec_layer_params.padToGivenOutputSizeMode = pad_to_given_output_size_mode
+        if len(pad_amounts) > 0:
+            spec_layer_params.padAmounts.extend(map(int, pad_amounts))
+        if len(input_names) == 1 and len(pad_amounts) == 0:
+            raise ValueError(
+                "Constant_pad layer: pad_amounts must be provided when there is a single input"
+            )
+        return spec_layer
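+
+    # Example (a minimal sketch, not from the source): zero-pad a rank-2 blob
+    # "x"; pad_amounts must carry twice as many entries as the input rank (the
+    # exact ordering is defined in NeuralNetwork.proto). `builder` is an assumed
+    # NeuralNetworkBuilder instance.
+    #
+    #     builder.add_constant_pad("pad_x", input_names=["x"],
+    #                              output_name="x_padded", value=0.0,
+    #                              pad_amounts=[1, 1, 1, 1])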
+
+    def add_nms(
+        self,
+        name,
+        input_names,
+        output_names,
+        iou_threshold=0.5,
+        score_threshold=0.0,
+        max_boxes=1,
+        per_class_suppression=False,
+    ):
+        """
+        Add a non-maximum suppression layer.
+        Refer to the ``NonMaximumSuppressionLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer. Must be of length at least 2 and at most 5.
+        output_names: list of str
+            The output blob names of this layer. Must be of length 4 exactly.
+        iou_threshold: float
+            Intersection-over-union threshold for suppression. Ignored if the 3rd input is present.
+        score_threshold: float
+            Threshold for selecting boxes to be used for the NMS algorithm. Ignored if the 4th input is present.
+        max_boxes: int
+            Maximum number of boxes to output. Ignored if the 5th input is present.
+        per_class_suppression: bool
+            If true, boxes are organized into classes and suppression is applied to each class group separately.
+
+        See Also
+        --------
+        add_constant_pad
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.NonMaximumSuppression
+        spec_layer_params.iouThreshold = iou_threshold
+        spec_layer_params.scoreThreshold = score_threshold
+        spec_layer_params.maxBoxes = max_boxes
+        spec_layer_params.perClassSuppression = per_class_suppression
+
+        self.rank_dict[output_names[0]] = 3
+        self.rank_dict[output_names[1]] = 3
+        self.rank_dict[output_names[2]] = 2
+        self.rank_dict[output_names[3]] = 1
+        return spec_layer
+
+    def add_embedding_nd(
+        self,
+        name,
+        input_name,
+        output_name,
+        vocab_size,
+        embedding_size,
+        W,
+        b=None,
+        is_quantized_weight=False,
+        quantization_type="linear",
+        nbits=8,
+        quant_scale=None,
+        quant_bias=None,
+        quant_lut=None,
+    ):
+        """
+        Add an embedding layer to the model that performs a matrix lookup and
+        optionally adds a bias.
+        Refer to the ``EmbeddingNDLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        vocab_size: int
+            Size of the vocabulary (1 + maximum integer index of the words).
+        embedding_size: int
+            Size of the embedded vector.
+        W: float32 numpy.array or bytes()
+            Weight matrix of shape (embedding_size, vocab_size).
+            If W is of type bytes(), i.e. quantized to 1-8 bits, the other
+            quantization-related arguments must be provided as well (see below).
+        b: numpy.array, optional
+            Bias vector of shape (embedding_size, ).
+
+        Quantization arguments, expected when W is of type bytes():
+
+        is_quantized_weight: bool
+            Set it to true when W is of type bytes(), representing quantized weights.
+        quantization_type: str
+            When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
+        nbits: int
+            Should be between 1 and 8 (inclusive). Number of bits per weight value.
+        quant_scale: numpy.array(dtype=numpy.float32)
+            Scale vector to be used with linear quantization. Must be of length either 1 or embedding_size.
+        quant_bias: numpy.array(dtype=numpy.float32)
+            Bias vector to be used with linear quantization. Must be of length either 1 or embedding_size.
+        quant_lut: numpy.array(dtype=numpy.float32)
+            The LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
+
+        See Also
+        --------
+        add_inner_product, add_embedding
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        # Fill in the parameters
+        spec_layer_params = spec_layer.embeddingND
+
+        spec_layer_params.vocabSize = vocab_size
+        spec_layer_params.embeddingSize = embedding_size
+        spec_layer_params.hasBias = b is not None
+
+        weights = spec_layer_params.weights
+        if not is_quantized_weight:
+            weights.floatValue.extend(W.flatten())
+        else:
+            _verify_quantization_arguments(
+                weight=W,
+                output_channels=embedding_size,
+                quantization_type=quantization_type,
+                nbits=nbits,
+                quant_scale=quant_scale,
+                quant_bias=quant_bias,
+                quant_lut=quant_lut,
+            )
+
+            _fill_quantized_weights(
+                weights_message=weights,
+                W=W,
+                quantization_type=quantization_type,
+                nbits=nbits,
+                quant_scale=quant_scale,
+                quant_bias=quant_bias,
+                quant_lut=quant_lut,
+            )
+
+        if b is not None:
+            bias = spec_layer_params.bias
+            bias.floatValue.extend(b.flatten())
+        return spec_layer
+
+    def add_batched_mat_mul(
+        self,
+        name,
+        input_names,
+        output_name,
+        transpose_a=False,
+        transpose_b=False,
+        weight_matrix_rows=0,
+        weight_matrix_columns=0,
+        W=None,
+        bias=None,
+        int_8_dynamic_quantize=False,
+        is_quantized_weight=False,
+        quantization_type="linear",
+        nbits=8,
+        quant_scale=None,
+        quant_bias=None,
+        quant_lut=None,
+    ):
+        """
+        Add an N-D Batched Matrix Multiplication layer with NumPy-like broadcasting.
+        Refer to the ``BatchedMatMulLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            The input blob names of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        transpose_a: bool, optional
+            Whether or not to transpose A, default: false.
+
+        transpose_b: bool, optional
+            Whether or not to transpose B, default: false.
+
+        weight_matrix_rows: int, optional
+            Must be equal to the last dimension of the input, default: 0.
+
+        weight_matrix_columns: int, optional
+            Must be equal to the last dimension of the output, default: 0.
+
+        W: float32 numpy.array or bytes(), optional
+            Weight matrix of shape ``(weight_matrix_rows, weight_matrix_columns)``.
+            If ``W`` is of type ``bytes()`` (quantized to 1-8 bits), the other
+            quantization-related arguments must be provided as well (see below).
+
+        bias: float32 numpy.array, optional
+            Bias vector of shape (weight_matrix_columns,).
+
+        Quantization arguments, used when ``W`` is of type ``bytes()``:
+
+        is_quantized_weight: bool, optional
+            Set it to true when ``W`` is of type ``bytes()``, representing
+            quantized weights, default: false.
+
+        quantization_type: str, optional
+            When weights are quantized (that is, ``W`` is of type ``bytes()``),
+            this should be either ``"linear"`` or ``"lut"``, default: ``"linear"``.
+
+        nbits: int, optional
+            Should be between 1 and 8 (inclusive). Number of bits per weight value, default: 8.
+
+        quant_scale: numpy.array(dtype=numpy.float32), optional
+            Scale vector to be used with linear quantization.
+            Must be of length either 1 or ``weight_matrix_columns``, default: ``None``.
+
+        quant_bias: numpy.array(dtype=numpy.float32), optional
+            Bias vector to be used with linear quantization.
+            Must be of length either 1 or ``weight_matrix_columns``, default: ``None``.
+
+        quant_lut: numpy.array(dtype=numpy.float32), optional
+            The LUT (look up table) to be used with LUT quantization.
+            Must be of length 2^nbits, default: ``None``.
+
+        int_8_dynamic_quantize: bool
+            Whether to quantize and dequantize before and after
+            batched matmul, respectively.
+            Expects byte weights, representing int8 values, if True.
+            See NeuralNetwork.proto for other validation conditions.
+
+        See Also
+        --------
+        add_inner_product
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        spec_layer_params = spec_layer.batchedMatmul
+        spec_layer_params.transposeA = transpose_a
+        spec_layer_params.transposeB = transpose_b
+        spec_layer_params.int8DynamicQuantize = int_8_dynamic_quantize
+
+        if ((W is not None) or (bias is not None)) and len(input_names) == 2:
+            raise ValueError(
+                "batched_mat_mul: Weight and/or bias must not be provided when there are two inputs"
+            )
+
+        if (W is None) and len(input_names) == 1:
+            raise ValueError(
+                "batched_mat_mul: Weight parameter must be provided when there is one input"
+            )
+
+        self.rank_dict[output_name] = 2
+        for input_ in input_names:
+            self.rank_dict[output_name] = max(
+                self._get_rank(output_name), self._get_rank(input_)
+            )
+
+        if len(input_names) == 1:
+            spec_layer_params.weightMatrixFirstDimension = weight_matrix_rows
+            spec_layer_params.weightMatrixSecondDimension = weight_matrix_columns
+            spec_layer_params.hasBias = bias is not None
+
+            weights = spec_layer_params.weights
+
+            if not is_quantized_weight:
+                weights.floatValue.extend(_np.transpose(W).flatten())
+            else:
+                _verify_quantization_arguments(
+                    weight=W,
+                    output_channels=weight_matrix_columns,
+                    quantization_type=quantization_type,
+                    nbits=nbits,
+                    quant_scale=quant_scale,
+                    quant_bias=quant_bias,
+                    quant_lut=quant_lut,
+                    int_8_dynamic_quantize=int_8_dynamic_quantize,
+                )
+
+                if nbits < 8:
+                    num_weights = weight_matrix_rows * weight_matrix_columns
+                    byte_arr = _np.frombuffer(W, dtype=_np.uint8)
+                    W = _unpack_to_bytes(byte_arr, num_weights, nbits)
+                elif int_8_dynamic_quantize:
+                    W = _np.frombuffer(W, dtype=_np.int8)
+                else:
+                    W = _np.frombuffer(W, dtype=_np.uint8)
+
+                W = _np.reshape(W, (weight_matrix_rows, weight_matrix_columns))
+                W = _np.transpose(W)
+
+                W_bytes = bytes()
+                if nbits == 8:
+                    W_bytes += W.flatten().tobytes()
+                else:
+                    W_bytes += _convert_array_to_nbit_quantized_bytes(
+                        W.flatten(), nbits
+                    ).tobytes()
+
+                _fill_quantized_weights(
+                    weights_message=weights,
+                    W=W_bytes,
+                    use_int_8=int_8_dynamic_quantize,
+                    quantization_type=quantization_type,
+                    nbits=nbits,
+                    quant_scale=quant_scale,
+                    quant_bias=quant_bias,
+                    quant_lut=quant_lut,
+                )
+
+            if bias is not None:
+                bias_param = spec_layer_params.bias
+                bias_param.floatValue.extend(bias.flatten())
+
+        return spec_layer
+
+    def add_get_shape(self, name, input_name, output_name):
+        """
+        Add a get_shape layer to the model.
+        Refer to the ``GetShapeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_reshape, add_reshape_like, add_reshape_static, add_reshape_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.getShape.MergeFromString(b"")
+        self.rank_dict[output_name] = 1
+        return spec_layer
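+
+    # Example (a minimal sketch, not from the source): multiply a batched input
+    # "x" whose last dimension is 64 by a fixed float32 weight matrix of shape
+    # (64, 32). numpy (as np) and the builder instance `builder` are assumed to
+    # be in scope; the blob names are illustrative.
+    #
+    #     W = np.random.rand(64, 32).astype(np.float32)
+    #     builder.add_batched_mat_mul("proj", input_names=["x"],
+    #                                 output_name="y",
+    #                                 weight_matrix_rows=64,
+    #                                 weight_matrix_columns=32, W=W)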
+
+    def add_load_constant_nd(self, name, output_name, constant_value, shape):
+        """
+        Add a load_constant_nd layer that loads data as a parameter and provides it
+        as an output.
+        Refer to the ``LoadConstantNDLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        constant_value: numpy.array()
+            Value of the constant as a numpy array.
+        shape: list of int or tuple of int
+            List of ints representing the shape of the constant.
+
+        See Also
+        --------
+        add_elementwise
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.loadConstantND
+
+        data = spec_layer_params.data
+        data.floatValue.extend(constant_value.flatten())
+        spec_layer_params.shape.extend(shape)
+
+        # Rank information
+        self.rank_dict[output_name] = len(shape)
+
+        if len(data.floatValue) != _np.prod(shape):
+            raise ValueError(
+                "Dimensions of 'shape' do not match the size of the provided constant"
+            )
+        return spec_layer
+
+    def add_fill_like(self, name, input_name, output_name, value=0.0):
+        """
+        Add a fill_like layer to the model that outputs a tensor filled with a
+        scalar value.
+        Refer to the ``FillLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.
+
+        See Also
+        --------
+        add_fill_static, add_fill_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.fillLike
+        spec_layer_params.value = value
+        return spec_layer
+
+    def add_fill_static(self, name, output_name, output_shape, value=0.0):
+        """
+        Add a fill_static layer to the model that outputs a tensor filled
+        with a scalar value, given the shape as a parameter.
+        Refer to the ``FillStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            The target shape of the output tensor.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.
+
+        See Also
+        --------
+        add_fill_like, add_fill_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.fillStatic
+        spec_layer_params.value = value
+        spec_layer_params.targetShape.extend(output_shape)
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
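+
+    # Example (a minimal sketch, not from the source): a constant 2x3 tensor of
+    # ones produced with no inputs; `builder` is an assumed NeuralNetworkBuilder
+    # instance and the blob name is illustrative.
+    #
+    #     builder.add_fill_static("ones_2x3", output_name="ones",
+    #                             output_shape=[2, 3], value=1.0)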
+
+    def add_fill_dynamic(self, name, input_name, output_name, value=0.0):
+        """
+        Add a fill_dynamic layer to the model that outputs a tensor filled
+        with a scalar value.
+        Refer to the ``FillDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.
+
+        See Also
+        --------
+        add_fill_like, add_fill_static
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.fillDynamic
+        spec_layer_params.value = value
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_broadcast_to_like(self, name, input_names, output_name):
+        """
+        Add a broadcast_to_like layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_broadcast_to_static, add_broadcast_to_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.broadcastToLike.MergeFromString(b"")
+
+        if len(input_names) != 2:
+            raise ValueError("BroadcastToLikeLayer must have two inputs")
+
+        self.rank_dict[output_name] = self._get_rank(input_names[1])
+        return spec_layer
+
+    def add_broadcast_to_static(self, name, input_name, output_name, output_shape):
+        """
+        Add a broadcast_to_static layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            The target shape of the output tensor.
+
+        See Also
+        --------
+        add_broadcast_to_like, add_broadcast_to_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.broadcastToStatic
+        spec_layer_params.targetShape.extend(output_shape)
+
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_broadcast_to_dynamic(self, name, input_names, output_name):
+        """
+        Add a broadcast_to_dynamic layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_broadcast_to_like, add_broadcast_to_static
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.broadcastToDynamic.MergeFromString(b"")
+        # Setting rank to -1 is a hint that the rank was not computed;
+        # the converter can modify it if the shape is a known constant.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_expand_dims(self, name, input_name, output_name, axes):
+        """
+        Add an expand dims layer to the model that increases the rank of the
+        input tensor by adding unit dimensions.
+        Refer to the ``ExpandDimsLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axes: list of int or tuple of int
+            Dimensions the operation performs on.
+
+        See Also
+        --------
+        add_squeeze
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.expandDims
+        spec_layer_params.axes.extend(axes)
+        self.rank_dict[output_name] = self._get_rank(input_name) + len(axes)
+        return spec_layer
+
+    def add_squeeze(self, name, input_name, output_name, axes=None, squeeze_all=False):
+        """
+        Add a squeeze layer to the model that decreases the rank of the input
+        tensor by removing unit dimensions.
+        Refer to the ``SqueezeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axes: list of int or tuple of int, optional
+            Dimensions to perform the operation, default: ``None`` (squeeze_all).
+        squeeze_all: bool, optional
+            If true, all dimensions that are 1 are squeezed, default: false.
+
+        See Also
+        --------
+        add_expand_dims
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.squeeze
+        if axes is not None:
+            spec_layer_params.axes.extend(axes)
+        spec_layer_params.squeezeAll = squeeze_all
+
+        if squeeze_all or axes is None:
+            # All the dimensions that are 1 will be squeezed;
+            # the converter should update the rank if the shape is known.
+            self.rank_dict[output_name] = -1
+        else:
+            rank = self._get_rank(input_name) - len(axes)
+            self.rank_dict[output_name] = rank if rank != 0 else 1
+        return spec_layer
+
+    def add_flatten_to_2d(self, name, input_name, output_name, axis=1):
+        """
+        Add a flatten_to_2d layer to the model that flattens the input tensor
+        into a 2-dimensional matrix.
+        Refer to the ``FlattenTo2DLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            Axis to perform the operation, default: 1.
+
+        See Also
+        --------
+        add_flatten
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.flattenTo2D
+        spec_layer_params.axis = axis
+        self.rank_dict[output_name] = 2
+        return spec_layer
+
+    def add_reshape_like(self, name, input_names, output_name):
+        """
+        Add a reshape_like layer to the model that reshapes a tensor.
+        Refer to the ``ReshapeLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_reshape, add_reshape_static, add_reshape_dynamic, add_rank_preserving_reshape
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.reshapeLike.MergeFromString(b"")
+        self.rank_dict[output_name] = self._get_rank(input_names[1])
+        return spec_layer
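+
+    # Example (a minimal sketch, not from the source): insert a leading unit
+    # axis and then remove it again, a common adapter pattern around
+    # rank-sensitive layers. `builder` is an assumed NeuralNetworkBuilder
+    # instance and the blob names are illustrative.
+    #
+    #     builder.add_expand_dims("x_unsq", "x", "x_expanded", axes=[0])
+    #     builder.add_squeeze("x_sq", "x_expanded", "x_back", axes=[0])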
+
+    def add_reshape_static(self, name, input_name, output_name, output_shape):
+        """
+        Add a reshape_static layer to the model that reshapes a tensor.
+        Refer to the ``ReshapeStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            Target shape of the output tensor.
+
+        See Also
+        --------
+        add_reshape, add_reshape_like, add_reshape_dynamic, add_rank_preserving_reshape
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reshapeStatic
+        spec_layer_params.targetShape.extend(output_shape)
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_reshape_dynamic(self, name, input_names, output_name):
+        """
+        Add a reshape_dynamic layer to the model that reshapes a tensor.
+        Refer to the ``ReshapeDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_reshape, add_reshape_like, add_reshape_static, add_rank_preserving_reshape
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.reshapeDynamic.MergeFromString(b"")
+        # Setting rank to -1 is a hint that the rank was not computed;
+        # the converter can modify it if the shape is a known constant.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_rank_preserving_reshape(self, name, input_name, output_name, output_shape):
+        """
+        Add a rank_preserving_reshape layer to the model that reshapes the input
+        tensor without altering the rank of the tensor.
+        Refer to the ``RankPreservingReshapeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            Determines the shape of the output blob.
+            0: copy the dimension of the input to output.
+            -1: calculate dimensions from the rest of the shape.
+
+        See Also
+        --------
+        add_reshape, add_reshape_like, add_reshape_static, add_reshape_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(
+            name,
+            [input_name],
+            [output_name],
+            input_ranks=[len(output_shape)],
+            input_shapes=[[int(x) for x in output_shape]],
+            output_ranks=[len(output_shape)],
+            output_shapes=[[int(x) for x in output_shape]],
+        )
+
+        spec_layer_params = spec_layer.rankPreservingReshape
+        spec_layer_params.targetShape.extend(map(int, output_shape))
+        return spec_layer
+
+    def add_random_normal_like(
+        self, name, input_name, output_name, mean=0.0, stddev=0.0, seed=-1
+    ):
+        """
+        Add a random_normal_like layer to the model that fills the output
+        tensor with random values from a normal distribution.
+        Refer to the ``RandomNormalLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mean: float, optional
+            The mean of the normal distribution, default: 0.0.
+        stddev: float, optional
+            The standard deviation of the normal distribution, default: 0.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_normal_static, add_random_normal_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.randomNormalLike
+
+        spec_layer_params.mean = mean
+        spec_layer_params.stdDev = stddev
+        spec_layer_params.seed = seed
+
+        return spec_layer
+
+    def add_random_normal_static(
+        self, name, output_name, output_shape, mean=0.0, stddev=0.0, seed=-1
+    ):
+        """
+        Add a random_normal_static layer to the model that fills the output
+        tensor with random values from a normal distribution.
+        Refer to the ``RandomNormalStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            Target shape of the output tensor.
+        mean: float, optional
+            The mean of the normal distribution, default: 0.0.
+        stddev: float, optional
+            The standard deviation of the normal distribution, default: 0.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_normal_like, add_random_normal_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.randomNormalStatic
+
+        spec_layer_params.outputShape.extend(output_shape)
+        spec_layer_params.mean = mean
+        spec_layer_params.stdDev = stddev
+        spec_layer_params.seed = seed
+
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_random_normal_dynamic(
+        self, name, input_names, output_name, mean=0.0, stddev=0.0, seed=-1
+    ):
+        """
+        Add a random_normal_dynamic layer to the model that fills the output
+        tensor with random values from a normal distribution.
+        Refer to the ``RandomNormalDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mean: float, optional
+            The mean of the normal distribution, default: 0.0.
+        stddev: float, optional
+            The standard deviation of the normal distribution, default: 0.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_normal_like, add_random_normal_static
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.randomNormalDynamic
+
+        spec_layer_params.mean = mean
+        spec_layer_params.stdDev = stddev
+        spec_layer_params.seed = seed
+        # Setting rank to -1 is a hint that the rank was not computed;
+        # the converter can modify it if the shape is a known constant.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_random_uniform_like(
+        self, name, input_name, output_name, minval=0.0, maxval=1.0, seed=-1
+    ):
+        """
+        Add a random_uniform_like layer to the model that fills the output
+        tensors with random values from a uniform distribution.
+        Refer to the ``RandomUniformLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        minval: float, optional
+            Lower bound / minimum value of the uniform distribution, default: 0.0.
+        maxval: float, optional
+            Upper bound / maximum value of the uniform distribution, default: 1.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_uniform_static, add_random_uniform_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.randomUniformLike
+
+        spec_layer_params.minVal = minval
+        spec_layer_params.maxVal = maxval
+        spec_layer_params.seed = seed
+
+        return spec_layer
+
+    def add_random_uniform_static(
+        self, name, output_name, output_shape, minval=0.0, maxval=1.0, seed=-1
+    ):
+        """
+        Add a random_uniform_static layer to the model that fills the output
+        tensors with random values from a uniform distribution.
+        Refer to the ``RandomUniformStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            Target shape of the output tensor.
+        minval: float, optional
+            Lower bound / minimum value of the uniform distribution, default: 0.0.
+        maxval: float, optional
+            Upper bound / maximum value of the uniform distribution, default: 1.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_uniform_like, add_random_uniform_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.randomUniformStatic
+
+        spec_layer_params.outputShape.extend(output_shape)
+        spec_layer_params.minVal = minval
+        spec_layer_params.maxVal = maxval
+        spec_layer_params.seed = seed
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_random_uniform_dynamic(
+        self, name, input_names, output_name, minval=0.0, maxval=1.0, seed=-1
+    ):
+        """
+        Add a random_uniform_dynamic layer to the model that fills the output
+        tensors with random values from a uniform distribution.
+        Refer to the ``RandomUniformDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        minval: float, optional
+            Lower bound / minimum value of the uniform distribution, default: 0.0.
+        maxval: float, optional
+            Upper bound / maximum value of the uniform distribution, default: 1.0.
+        seed: int, optional
+            Used to create a random seed for the distribution, default: -1 (random).
+
+        See Also
+        --------
+        add_random_uniform_like, add_random_uniform_static
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.randomUniformDynamic
+
+        spec_layer_params.minVal = minval
+        spec_layer_params.maxVal = maxval
+        spec_layer_params.seed = seed
+        # Setting rank to -1 is a hint that the rank was not computed;
+        # the converter can modify it if the shape is a known constant.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_random_bernoulli_like(
+        self, name, input_name, output_name, prob=0.5, seed=-1
+    ):
+        """
+        Add a random_bernoulli_like layer to the model that fills the output
+        tensor with random values from a Bernoulli distribution.
+        Refer to the ``RandomBernoulliLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+ + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_static, add_random_bernoulli_dynamic + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.randomBernoulliLike + + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + return spec_layer + + def add_random_bernoulli_static( + self, name, output_name, output_shape, prob=0.5, seed=-1 + ): + """ + Add a random_bernoulli_static layer to the model that fills the output + tensor with random values from Bernoulli distribution. + Refer to the ``RandomBernoulliStaticLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + output_name: str + The output blob name of this layer. + output_shape: list of int or tuple of int + Target shape of the output tensor. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_like, add_random_bernoulli_dynamic + """ + + spec_layer = self._add_generic_layer(name, [], [output_name]) + spec_layer_params = spec_layer.randomBernoulliStatic + + spec_layer_params.outputShape.extend(output_shape) + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + self.rank_dict[output_name] = len(output_shape) + return spec_layer + + def add_random_bernoulli_dynamic( + self, name, input_names, output_name, prob=0.5, seed=-1 + ): + """ + Add a random_bernoulli_dynamic layer to the model that fills the output + tensor with random values from Bernoulli distribution. + Refer to the ``RandomBernoulliDynamicLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_like, add_random_bernoulli_static + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.randomBernoulliDynamic + + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + # Setting rank to -1 is a hint that Rank was not computed + # converter can modify if it's a constant and known + self.rank_dict[output_name] = -1 + return spec_layer + + def add_categorical_distribution( + self, + name, + input_name, + output_name, + num_samples, + is_logits=True, + eps=1e-10, + temperature=1.0, + seed=-1, + ): + """ + Add a categorical_distribution layer to the model that fills the output + tensor with random values from categorical distribution. + Refer to the ``CategoricalDistributionLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. 
+ output_name: str
+ The output blob name of this layer.
+ num_samples: int
+ Number of samples to draw from the distribution.
+ is_logits: bool, optional
+ If True, the input is interpreted as log probabilities. If False,
+ the input is interpreted as probabilities, default: True.
+ eps: float, optional
+ Epsilon parameter for the categorical distribution, default: 1e-10.
+ temperature: float, optional
+ Temperature parameter for the categorical distribution, default: 1.0.
+ seed: int, optional
+ Used to create a random seed for the distribution. Default -1 (random).
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.categoricalDistribution
+
+ spec_layer_params.numSamples = num_samples
+ spec_layer_params.isLogits = is_logits
+ spec_layer_params.eps = eps
+ spec_layer_params.temperature = temperature
+ spec_layer_params.seed = seed
+
+ return spec_layer
+
+ def add_reduce_sum(
+ self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False
+ ):
+ """
+ Add a reduce_sum layer to the model that reduces the input tensor
+ using ``sum(elements across given dimensions)``.
+ Refer to the ``ReduceSumLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int, optional
+ List of dimensions for the reduce operations.
+ Each should be in range ``[-rank(input), rank(input))``, default: ``None`` (``reduce_all``).
+ keepdims: bool, optional
+ Whether or not to retain the reduced dimensions with length 1, default: true.
+ reduce_all: bool, optional
+ Whether or not to reduce on all axes, default: false.
+
+ See Also
+ --------
+ add_reduce_l1, add_reduce_l2, add_reduce_min, add_reduce_prod,
+ add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp,
+ add_reduce_sumsquare
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reduceSum
+
+ if axes is not None and len(axes) != 0:
+ spec_layer_params.axes.extend(map(int, axes))
+ else:
+ reduce_all = True
+
+ spec_layer_params.keepDims = keepdims
+ spec_layer_params.reduceAll = reduce_all
+
+ self._set_rank_for_reduce_op(
+ input_name, output_name, axes, keepdims, reduce_all
+ )
+ return spec_layer
+
+ def add_reduce_prod(
+ self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False
+ ):
+ """
+ Add a reduce_prod layer to the model that reduces the input tensor
+ using ``prod(elements across given dimensions)``.
+ Refer to the ``ReduceProdLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int, optional
+ List of dimensions for the reduce operations.
+ Each should be in range ``[-rank(input), rank(input))``, default: ``None`` (``reduce_all``).
+ keepdims: bool, optional
+ Whether or not to retain the reduced dimensions with length 1, default: true.
+ reduce_all: bool, optional
+ Whether or not to reduce on all axes. If the axes list is empty, it will
+ be set to true, default: false.
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, + add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, + add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceProd + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_mean( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_mean layer to the model that reduces the input tensor + using ``mean(elements across given dimensions)``. + Refer to the ``ReduceMeanLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMean + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_max( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_max layer to the model that reduces the input tensor + using ``max(elements across given dimensions)``. + Refer to the ``ReduceMaxLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMax + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_min( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_min layer to the model that reduces the input tensor + using ``min(elements across given dimensions)``. + Refer to the ``ReduceMinLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMin + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_l2( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_l2 layer to the model that reduces the input tensor + using ``l2_normalization(elements across given dimensions)``. + Refer to the ``ReduceL2LayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_sum, add_reduce_min, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceL2 + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_l1( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_l1 layer to the model that reduces the input tensor + using ``l1_normalization(elements across given dimensions)``. + Refer to the ``ReduceL1LayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceL1 + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_sumsquare( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_sumsquare layer to the model that reduces the input tensor + using ``sum(square(elements across given dimensions))``. + Refer to the ``ReduceSumSquareLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceSumSquare + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_logsum( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_logsum layer to the model that reduces the input tensor + using log(sum(elements across given dimensions)). + Refer to the ``ReduceLogSumLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_mean, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceLogSum + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_logsumexp( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_logsumexp layer to the model that computes ``log(sum(exp(tensor)))`` + and reduces along the given axis. + Refer to the ``ReduceLogSumExpLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+
+ See Also
+ --------
+ add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod,
+ add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_sumsquare
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reduceLogSumExp
+
+ if axes is not None and len(axes) != 0:
+ spec_layer_params.axes.extend(map(int, axes))
+ else:
+ reduce_all = True
+
+ spec_layer_params.keepDims = keepdims
+ spec_layer_params.reduceAll = reduce_all
+
+ self._set_rank_for_reduce_op(
+ input_name, output_name, axes, keepdims, reduce_all
+ )
+ return spec_layer
+
+ def add_where_nonzero(self, name, input_name, output_name):
+ """
+ Add a where_nonzero layer to the model that returns a tensor containing
+ the indices of all non-zero elements of the input tensor.
+ Refer to the ``WhereNonZeroLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_where_broadcastable
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.whereNonZero.MergeFromString(b"")
+
+ self.rank_dict[output_name] = 2
+ return spec_layer
+
+ def add_matrix_band_part(
+ self, name, input_name, output_name, num_lower=-1, num_upper=-1
+ ):
+ """
+ Add a matrix_band_part layer to the model that copies a tensor, setting
+ everything outside a central band in each inner-most matrix to zero.
+ Refer to the ``MatrixBandPartLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ num_lower: int, optional
+ Number of lower sub-diagonals to keep.
+ Default: -1 (keep entire lower triangle).
+ num_upper: int, optional
+ Number of upper sub-diagonals to keep.
+ Default: -1 (keep entire upper triangle).
+
+ See Also
+ --------
+ add_lower_triangular, add_upper_triangular
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.matrixBandPart
+ spec_layer_params.numLower = num_lower
+ spec_layer_params.numUpper = num_upper
+ return spec_layer
+
+ def add_lower_triangular(self, name, input_name, output_name, k=0):
+ """
+ Add a lower_triangular layer to the model that copies a tensor, setting
+ everything outside the lower triangular part to zero.
+ Refer to the ``LowerTriangularLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ k: int, optional
+ Boundary diagonal, default: 0 (the main diagonal). Elements above
+ the k-th diagonal are zeroed; k < 0 moves the boundary below the
+ main diagonal, k > 0 above it.
+
+ See Also
+ --------
+ add_upper_triangular, add_matrix_band_part
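+
+ Examples
+ --------
+ A minimal sketch; ``builder`` is assumed to be an existing
+ ``NeuralNetworkBuilder`` instance and the blob names are illustrative:
+
+ .. sourcecode:: python
+
+ >>> # keep the main diagonal and everything below it, zero out the rest
+ >>> builder.add_lower_triangular('tril', input_name='data', output_name='data_tril', k=0)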
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.lowerTriangular
+ spec_layer_params.k = k
+ return spec_layer
+
+ def add_upper_triangular(self, name, input_name, output_name, k=0):
+ """
+ Add an upper_triangular layer to the model that copies a tensor, setting
+ everything outside the upper triangular part to zero.
+ Refer to the ``UpperTriangularLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ k: int, optional
+ Boundary diagonal, default: 0 (the main diagonal). Elements below
+ the k-th diagonal are zeroed; k < 0 moves the boundary below the
+ main diagonal, k > 0 above it.
+
+ See Also
+ --------
+ add_lower_triangular, add_matrix_band_part
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.upperTriangular
+ spec_layer_params.k = k
+ return spec_layer
+
+ def add_where_broadcastable(self, name, input_names, output_name):
+ """
+ Add a where_broadcastable layer to the model that returns the elements
+ either from tensor x or tensor y, depending on the value in the
+ condition tensor.
+ Refer to the ``WhereBroadcastableLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_where_nonzero
+ """
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer.whereBroadcastable.MergeFromString(b"")
+
+ self._set_max_input_rank(input_names, output_name)
+
+ return spec_layer
+
+ def add_layer_normalization(
+ self, name, input_name, output_name, normalized_shape, gamma, beta, eps=1e-5
+ ):
+ """
+ Add a layer normalization layer to the model that applies layer
+ normalization over the input tensor.
+ Refer to the ``LayerNormalizationLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ normalized_shape: list of int or tuple of int
+ Shape of the trailing input dimensions over which normalization
+ is applied.
+ gamma: numpy.ndarray
+ Weight (scale) parameters, with shape equal to ``normalized_shape``.
+ beta: numpy.ndarray
+ Bias parameters, with shape equal to ``normalized_shape``.
+ eps: float, optional
+ Constant value added to the denominator, default: 1e-5.
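+
+ Examples
+ --------
+ A minimal sketch; ``builder`` and the blob names are illustrative, and
+ the gamma/beta arrays must match ``normalized_shape``:
+
+ .. sourcecode:: python
+
+ >>> import numpy as np
+ >>> gamma, beta = np.ones((10,)), np.zeros((10,))
+ >>> builder.add_layer_normalization('ln', input_name='x', output_name='x_ln',
+ ... normalized_shape=[10], gamma=gamma, beta=beta, eps=1e-5)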
+ """
+
+ if gamma.shape != tuple(normalized_shape):
+ raise ValueError("Shape of parameter gamma should match normalized_shape")
+
+ if beta.shape != tuple(normalized_shape):
+ raise ValueError("Shape of parameter beta should match normalized_shape")
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.layerNormalization
+
+ spec_layer_params.normalizedShape.extend(normalized_shape)
+
+ weights = spec_layer_params.gamma
+ weights.floatValue.extend(gamma.flatten())
+
+ bias = spec_layer_params.beta
+ bias.floatValue.extend(beta.flatten())
+
+ spec_layer_params.eps = eps
+
+ return spec_layer
+
+ def add_one_hot(
+ self,
+ name,
+ input_names,
+ output_name,
+ one_hot_vector_size=None,
+ axis=-1,
+ on_value=1.0,
+ off_value=0.0,
+ ):
+ """
+ Add a one hot layer to the model that computes the one hot representation of the input tensor.
+ Refer to the ``OneHotLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ one_hot_vector_size: int > 0
+ Size of the one hot vector.
+ axis: int, optional
+ Axis in the output tensor along which the one-hot encoding is added, default: -1.
+ on_value: float, optional
+ Constant value at locations selected by the first input, default: 1.0.
+ off_value: float, optional
+ Constant value at all other locations, default: 0.0.
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.oneHot
+ spec_layer_params.axis = axis
+ if one_hot_vector_size:
+ spec_layer_params.oneHotVectorSize = one_hot_vector_size
+ spec_layer_params.onValue = on_value
+ spec_layer_params.offValue = off_value
+ return spec_layer
+
+ def add_cumsum(
+ self, name, input_names, output_name, axis=-1, reverse=False, exclusive=False
+ ):
+ """
+ Add a cumsum layer to the model that computes the cumulative sum of the input along a given axis.
+ Refer to the ``CumSumLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int, optional
+ Axis along which to perform the operation, default: -1.
+ reverse: bool, optional
+ If True, the cumulative sum is computed in the opposite direction, default: False.
+ exclusive: bool, optional
+ Whether to perform exclusive (True) or inclusive (False) cumulative summation, default: False.
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.cumSum
+ spec_layer_params.axis = axis
+ spec_layer_params.reverse = reverse
+ spec_layer_params.excludeFinalSum = exclusive
+ return spec_layer
+
+ def add_clamped_relu(self, name, input_name, output_name, alpha=0.0, beta=6.0):
+ """
+ Add a clamped ReLU layer to the model.
+ The clamped ReLU formula is ``f(x) = min((x >= 0 ? x : alpha * x), beta)``.
+ Refer to the ``ClampedReluLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ alpha: float, optional
+ Slope of the output when the input is negative, default: 0.0.
+ beta: float, optional
+ Upper bound on the output value, default: 6.0.
+
+ See Also
+ --------
+ add_clip
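+
+ Examples
+ --------
+ A minimal sketch; ``builder`` and the blob names are illustrative. With
+ the defaults ``alpha=0.0`` and ``beta=6.0``, the layer behaves like ReLU6:
+
+ .. sourcecode:: python
+
+ >>> builder.add_clamped_relu('relu6', input_name='x', output_name='x_clamped')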
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.clampedReLU.MergeFromString(b"")
+ spec_params = spec_layer.clampedReLU
+
+ spec_params.alpha = float(alpha)
+ spec_params.beta = float(beta)
+
+ return spec_layer
+
+ def add_argsort(self, name, input_name, output_name, axis=0, descending=False):
+ """
+ Add an argsort layer to the model.
+ Refer to the ``ArgsortLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int, optional
+ Axis along which to compute the sorting indices, default: 0.
+ descending: bool, optional
+ If True, sort in descending order; default: False (ascending).
+
+ See Also
+ --------
+ add_topk
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.argSort.MergeFromString(b"")
+ spec_params = spec_layer.argSort
+
+ spec_params.axis = int(axis)
+ spec_params.descending = descending
+
+ return spec_layer
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py
new file mode 100644
index 00000000..f2e42794
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py
@@ -0,0 +1,738 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+Utilities to annotate Neural Network Features with flexible shape information.
+"""
+
+from ... import (_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
+ _MINIMUM_NDARRAY_SPEC_VERSION)
+from ..utils import _get_feature
+
+_SEQUENCE_KEY = "S"
+_BATCH_KEY = "B"
+_CHANNEL_KEY = "C"
+_HEIGHT_KEY = "H"
+_WIDTH_KEY = "W"
+
+_CONSTRAINED_KEYS = [_CHANNEL_KEY, _HEIGHT_KEY, _WIDTH_KEY]
+
+
+class Shape:
+ def __init__(self, shape_value):
+ if shape_value < 1:
+ raise Exception("Invalid value. Size/Shape values must be > 0")
+ self._value = shape_value
+
+ @property
+ def value(self):
+ return self._value
+
+
+class Size(Shape):
+ def __init__(self, size_value):
+ super(Size, self).__init__(size_value)
+
+
+class NeuralNetworkMultiArrayShape:
+ """
+ An object representing a shape for a multiArray feature in a
+ neural network. Valid shapes must have only the Channel [C]
+ shape or the Channel, Height and Width [C, H, W] shapes populated.
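+
+ Examples
+ --------
+ A short illustration; the values are arbitrary:
+
+ .. sourcecode:: python
+
+ >>> shape = NeuralNetworkMultiArrayShape()
+ >>> shape.set_channel_shape(3)
+ >>> shape.set_height_shape(10)
+ >>> shape.set_width_shape(15)
+ >>> shape.multiarray_shape
+ [3, 10, 15]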
+ """
+
+ def __init__(self, channel=None, height=None, width=None):
+ self._shape = {
+ _CHANNEL_KEY: Shape(int(channel)) if channel else None,
+ _HEIGHT_KEY: Shape(int(height)) if height else None,
+ _WIDTH_KEY: Shape(int(width)) if width else None,
+ }
+
+ def set_channel_shape(self, channel_shape):
+ self._shape[_CHANNEL_KEY] = Shape(channel_shape)
+
+ def set_height_shape(self, height_shape):
+ self._shape[_HEIGHT_KEY] = Shape(height_shape)
+
+ def set_width_shape(self, width_shape):
+ self._shape[_WIDTH_KEY] = Shape(width_shape)
+
+ def _validate_multiarray_shape(self):
+ num_dims = len([v for v in self._shape.values() if v])
+ if num_dims != 1 and num_dims != 3:
+ raise Exception(
+ "For neural networks, shape must be of length 1 or 3"
+ ", representing input shape [C] or [C,H,W], respectively"
+ )
+
+ if num_dims == 1:
+ if not self._shape["C"]:
+ raise Exception("Channel Shape not specified")
+
+ @property
+ def multiarray_shape(self):
+ num_dims = len([v for v in self._shape.values() if v])
+ if num_dims == 1:
+ return [self._shape[_CHANNEL_KEY].value]
+ elif num_dims == 3:
+ return [
+ self._shape[_CHANNEL_KEY].value,
+ self._shape[_HEIGHT_KEY].value,
+ self._shape[_WIDTH_KEY].value,
+ ]
+ else:
+ raise Exception("Invalid multiarray shape for neural network")
+
+
+class NeuralNetworkImageSize:
+ """
+ An object representing a size for an image feature inside a
+ neural network. Valid sizes for height and width are > 0.
+ """
+
+ def __init__(self, height=None, width=None):
+ self._height = Size(height)
+ self._width = Size(width)
+
+ def set_width(self, width):
+ self._width = Size(width)
+
+ def set_height(self, height):
+ self._height = Size(height)
+
+ @property
+ def width(self):
+ return self._width.value
+
+ @property
+ def height(self):
+ return self._height.value
+
+
+class ShapeRange:
+ def __init__(self, lowerBound, upperBound):
+ unBounded = False
+
+ if upperBound == -1:
+ unBounded = True
+
+ if not unBounded and lowerBound > upperBound:
+ raise Exception(
+ "lowerBound > upperBound for range ({},{})".format(
+ lowerBound, upperBound
+ )
+ )
+
+ if not unBounded and upperBound < 1:
+ raise Exception("Invalid upperBound: {} ".format(upperBound))
+
+ if lowerBound == 0:
+ lowerBound = 1
+
+ if lowerBound < 1:
+ raise Exception("Invalid lowerBound: {}".format(lowerBound))
+
+ self._lowerBound = lowerBound
+ self._upperBound = upperBound
+ self._unBounded = unBounded
+
+ @property
+ def lowerBound(self):
+ return self._lowerBound
+
+ @property
+ def upperBound(self):
+ return self._upperBound
+
+ @property
+ def isUnbounded(self):
+ return self._unBounded
+
+ @property
+ def isFlexible(self):
+ return not (self._lowerBound == self._upperBound)
+
+
+class NeuralNetworkMultiArrayShapeRange:
+ """
+ An object representing a range of shapes for a multiArray feature in a
+ neural network. Valid shape ranges must have only the Channel [C]
+ range or the Channel, Height and Width [C, H, W] ranges populated. A "-1"
+ value in an upper bound represents an unbounded range.
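+
+ Examples
+ --------
+ A short illustration; the bounds are arbitrary, and a "-1" upper bound
+ marks that dimension as unbounded:
+
+ .. sourcecode:: python
+
+ >>> shape_range = NeuralNetworkMultiArrayShapeRange()
+ >>> shape_range.add_channel_range((1, 3))
+ >>> shape_range.add_height_range((64, 128))
+ >>> shape_range.add_width_range((64, -1))
+ >>> shape_range.isFlexible()
+ True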
+ """
+
+ def __init__(self, input_ranges=None):
+ self.arrayShapeRange = {}
+
+ if input_ranges:
+ if not isinstance(input_ranges, dict):
+ raise Exception(
+ "Attempting to initialize a shape range with something other than a dictionary of shapes."
+ )
+ for key, value in input_ranges.items():
+ if key in _CONSTRAINED_KEYS:
+ self.arrayShapeRange[key] = self._create_shape_range(value)
+ self.validate_array_shape_range()
+
+ def _create_shape_range(self, r):
+ if not isinstance(r, tuple):
+ raise Exception("Range should be a ShapeRange or a tuple object")
+ elif len(r) != 2:
+ raise Exception("Range tuple should be of length 2")
+ return ShapeRange(r[0], r[1])
+
+ def add_channel_range(self, channel_range):
+ if not isinstance(channel_range, ShapeRange):
+ channel_range = self._create_shape_range(channel_range)
+ self.arrayShapeRange[_CHANNEL_KEY] = channel_range
+
+ def add_height_range(self, height_range):
+ if not isinstance(height_range, ShapeRange):
+ height_range = self._create_shape_range(height_range)
+ self.arrayShapeRange[_HEIGHT_KEY] = height_range
+
+ def add_width_range(self, width_range):
+ if not isinstance(width_range, ShapeRange):
+ width_range = self._create_shape_range(width_range)
+ self.arrayShapeRange[_WIDTH_KEY] = width_range
+
+ def get_shape_range_dims(self):
+ return len(self.arrayShapeRange.keys())
+
+ def validate_array_shape_range(self):
+ num_dims = self.get_shape_range_dims()
+ if num_dims != 1 and num_dims != 3:
+ raise Exception(
+ "For neural networks, shape must be of length 1 or 3"
+ ", representing input shape [C] or [C,H,W], respectively"
+ )
+
+ if num_dims == 1:
+ if _CHANNEL_KEY not in self.arrayShapeRange.keys():
+ raise Exception("Channel Shape Range not specified")
+
+ if num_dims == 3:
+ if (
+ _CHANNEL_KEY not in self.arrayShapeRange.keys()
+ or _HEIGHT_KEY not in self.arrayShapeRange.keys()
+ or _WIDTH_KEY not in self.arrayShapeRange.keys()
+ ):
+ raise Exception(
+ "Shape range constraint missing for either channel, height, or width."
+ )
+
+ def get_channel_range(self):
+ return self.arrayShapeRange[_CHANNEL_KEY]
+
+ def get_height_range(self):
+ return self.arrayShapeRange[_HEIGHT_KEY]
+
+ def get_width_range(self):
+ return self.arrayShapeRange[_WIDTH_KEY]
+
+ def isFlexible(self):
+ """
+ Returns True if any one of the channel, height, or width ranges of this shape allows more than one input value.
+ """
+ for key, value in self.arrayShapeRange.items():
+ if key in _CONSTRAINED_KEYS:
+ if value.isFlexible:
+ return True
+
+ return False
+
+
+class NeuralNetworkImageSizeRange:
+ """
+ An object representing a range of sizes for an image feature inside a
+ neural network. Valid ranges for height and width are > 0. A "-1"
+ upper bound value for either width or height represents an unbounded size
+ for that dimension.
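+
+ Examples
+ --------
+ A short illustration using the tuple form accepted by the ``add_*_range``
+ methods; the sizes are arbitrary:
+
+ .. sourcecode:: python
+
+ >>> size_range = NeuralNetworkImageSizeRange()
+ >>> size_range.add_height_range((64, 128))
+ >>> size_range.add_width_range((128, -1))  # -1: unbounded width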
+ """ + + def __init__(self, height_range=None, width_range=None): + if height_range and not isinstance(height_range, ShapeRange): + if not isinstance(height_range, tuple): + raise Exception("Height range should be a ShapeRange or a tuple object") + elif len(height_range) != 2: + raise Exception("Height range tuple should be at least length 2") + height_range = ShapeRange(height_range[0], height_range[1]) + + if width_range and not isinstance(width_range, ShapeRange): + if not isinstance(width_range, tuple): + raise Exception("Width range should be a ShapeRange or a tuple object") + elif len(width_range) != 2: + raise Exception("Width range tuple should be at least length 2") + width_range = ShapeRange(width_range[0], width_range[1]) + + self._height_range = height_range + self._width_range = width_range + + def add_width_range(self, width_range): + if not isinstance(width_range, ShapeRange): + if not isinstance(width_range, tuple): + raise Exception("Width range should be a ShapeRange or a tuple object") + elif len(width_range) != 2: + raise Exception("Width range tuple should be at least length 2") + + self._width_range = ShapeRange(width_range[0], width_range[1]) + + def add_height_range(self, height_range): + if not isinstance(height_range, ShapeRange): + if not isinstance(height_range, tuple): + raise Exception("Height range should be a ShapeRange or a tuple object") + elif len(height_range) != 2: + raise Exception("Height range tuple should be at least length 2") + + self._height_range = ShapeRange(height_range[0], height_range[1]) + + def get_width_range(self): + return self._width_range + + def get_height_range(self): + return self._height_range + + +def add_enumerated_multiarray_shapes(spec, feature_name, shapes): + """ + Annotate an input or output multiArray feature in a Neural Network spec to + to accommodate a list of enumerated array shapes + + :param spec: MLModel + The MLModel spec containing the feature + + :param feature_name: str + The name of the image feature for which to add shape information. + If the feature is not found in the input or output descriptions then + an exception is thrown + + :param shapes: [] | NeuralNetworkMultiArrayShape + A single or a list of NeuralNetworkImageSize objects which encode valid + size information for a image feature + + Examples + -------- + .. sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import flexible_shape_utils + >>> spec = coremltools.utils.load_spec('mymodel.mlmodel') + >>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)] + >>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape() + >>> second_shape.set_channel_shape(3) + >>> second_shape.set_height_shape(10) + >>> second_shape.set_width_shape(15) + >>> array_shapes.append(second_shape) + >>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes) + + :return: + None. 
+ """
+
+ if not isinstance(shapes, list):
+ shapes = [shapes]
+
+ for shape in shapes:
+ if not isinstance(shape, NeuralNetworkMultiArrayShape):
+ raise Exception(
+ "Shapes should be of type NeuralNetworkMultiArrayShape"
+ )
+ shape._validate_multiarray_shape()
+
+ feature = _get_feature(spec, feature_name)
+ if feature.type.WhichOneof("Type") != "multiArrayType":
+ raise Exception(
+ "Trying to add enumerated shapes to a non-multiArray feature type"
+ )
+
+ if feature.type.multiArrayType.WhichOneof("ShapeFlexibility") != "enumeratedShapes":
+ feature.type.multiArrayType.ClearField("ShapeFlexibility")
+
+ eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
+
+ # Add the default array shape to the list of enumerated shapes if the
+ # enumerated shapes field is currently empty
+ if eshape_len == 0:
+ fixed_shape = feature.type.multiArrayType.shape
+ if len(fixed_shape) == 1:
+ fs = NeuralNetworkMultiArrayShape(fixed_shape[0])
+ shapes.append(fs)
+ elif len(fixed_shape) == 3:
+ fs = NeuralNetworkMultiArrayShape()
+ fs.set_channel_shape(fixed_shape[0])
+ fs.set_height_shape(fixed_shape[1])
+ fs.set_width_shape(fixed_shape[2])
+ shapes.append(fs)
+ else:
+ raise Exception(
+ "Original fixed multiArray shape for {} is invalid".format(feature_name)
+ )
+
+ for shape in shapes:
+ s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
+ s.shape.extend(shape.multiarray_shape)
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+ )
+
+
+def add_enumerated_image_sizes(spec, feature_name, sizes):
+ """
+ Annotate an input or output image feature in a Neural Network spec
+ to accommodate a list of enumerated image sizes.
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the image feature for which to add size information.
+ If the feature is not found in the input or output descriptions then
+ an exception is thrown
+
+ :param sizes: [] | NeuralNetworkImageSize
+ A single NeuralNetworkImageSize object or a list of them, encoding
+ valid size information for an image feature
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> image_sizes = [flexible_shape_utils.NeuralNetworkImageSize(128, 128)]
+ >>> image_sizes.append(flexible_shape_utils.NeuralNetworkImageSize(256, 256))
+ >>> flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='my_image_featurename', sizes=image_sizes)
+
+ :return:
+ None. The spec object is updated
+ """
+ if not isinstance(sizes, list):
+ sizes = [sizes]
+
+ for size in sizes:
+ if not isinstance(size, NeuralNetworkImageSize):
+ raise Exception("Sizes should be of type NeuralNetworkImageSize")
+
+ feature = _get_feature(spec, feature_name)
+ if feature.type.WhichOneof("Type") != "imageType":
+ raise Exception("Trying to add enumerated sizes to a non-image feature type")
+
+ if feature.type.imageType.WhichOneof("SizeFlexibility") != "enumeratedSizes":
+ feature.type.imageType.ClearField("SizeFlexibility")
+
+ esizes_len = len(feature.type.imageType.enumeratedSizes.sizes)
+
+ # Add the default image size to the list of enumerated sizes if the
+ # enumerated sizes field is currently empty
+ if esizes_len == 0:
+ fixed_height = feature.type.imageType.height
+ fixed_width = feature.type.imageType.width
+ sizes.append(NeuralNetworkImageSize(fixed_height, fixed_width))
+
+ shapes_added_so_far = []
+ for size in sizes:
+ if [size.height, size.width] not in shapes_added_so_far:
+ s = feature.type.imageType.enumeratedSizes.sizes.add()
+ s.height = size.height
+ s.width = size.width
+ shapes_added_so_far.append([s.height, s.width])
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+ )
+
+
+def update_image_size_range(spec, feature_name, size_range):
+ """
+ Annotate an input or output image feature in a Neural Network spec
+ to accommodate a range of image sizes.
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the image feature for which to add size range information.
+ If the feature is not found in the input or output descriptions then
+ an exception is thrown
+
+ :param size_range: NeuralNetworkImageSizeRange
+ A NeuralNetworkImageSizeRange object with the populated image size
+ range information.
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
+ >>> img_size_ranges.add_height_range((64, 128))
+ >>> img_size_ranges.add_width_range((128, -1))
+ >>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_image_featurename', size_range=img_size_ranges)
+
+ :return:
+ None. The spec object is updated
+ """
+ if not isinstance(size_range, NeuralNetworkImageSizeRange):
+ raise Exception("size_range should be of type NeuralNetworkImageSizeRange")
+
+ feature = _get_feature(spec, feature_name)
+ if feature.type.WhichOneof("Type") != "imageType":
+ raise Exception("Trying to add size ranges for a non-image feature type")
+
+ feature.type.imageType.ClearField("SizeFlexibility")
+ feature.type.imageType.imageSizeRange.heightRange.lowerBound = (
+ size_range.get_height_range().lowerBound
+ )
+ feature.type.imageType.imageSizeRange.heightRange.upperBound = (
+ size_range.get_height_range().upperBound
+ )
+
+ feature.type.imageType.imageSizeRange.widthRange.lowerBound = (
+ size_range.get_width_range().lowerBound
+ )
+ feature.type.imageType.imageSizeRange.widthRange.upperBound = (
+ size_range.get_width_range().upperBound
+ )
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+ )
+
+
+def update_multiarray_shape_range(spec, feature_name, shape_range):
+ """
+ Annotate an input or output MLMultiArray feature in a Neural Network spec
+ to accommodate a range of shapes
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the feature for which to add shape range
+ information. If the feature is not found in the input or output
+ descriptions then an exception is thrown
+
+ :param shape_range: NeuralNetworkMultiArrayShapeRange
+ A NeuralNetworkMultiArrayShapeRange object with the populated shape
+ range information. The shape_range object must contain shape
+ information either for channel only, or for channel, height and
+ width. If the object is invalid then an exception is thrown
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
+ >>> shape_range.add_channel_range((1, 3))
+ >>> shape_range.add_width_range((128, 256))
+ >>> shape_range.add_height_range((128, 256))
+ >>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range)
+
+ :return:
+ None. The spec is updated
+ """
+ if not isinstance(shape_range, NeuralNetworkMultiArrayShapeRange):
+ raise Exception("Shape range should be of type NeuralNetworkMultiArrayShapeRange")
+
+ shape_range.validate_array_shape_range()
+ feature = _get_feature(spec, feature_name)
+
+ if feature.type.WhichOneof("Type") != "multiArrayType":
+ raise Exception(
+ "Trying to update the shape range for a non-multiArray feature type"
+ )
+
+ # Add channel range
+ feature.type.multiArrayType.ClearField("ShapeFlexibility")
+ s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
+ s.lowerBound = shape_range.get_channel_range().lowerBound
+ s.upperBound = shape_range.get_channel_range().upperBound
+
+ if shape_range.get_shape_range_dims() > 1:
+ # Add height range
+ s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
+ s.lowerBound = shape_range.get_height_range().lowerBound
+ s.upperBound = shape_range.get_height_range().upperBound
+ # Add width range
+ s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
+ s.lowerBound = shape_range.get_width_range().lowerBound
+ s.upperBound = shape_range.get_width_range().upperBound
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+ )
+
+
+def set_multiarray_ndshape_range(spec, feature_name, lower_bounds, upper_bounds):
+ """
+ Annotate an input or output MLMultiArray feature in a Neural Network spec
+ to accommodate a range of shapes.
+ This is different from "update_multiarray_shape_range", which works with
+ the rank 5 SBCHW mapping.
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the feature for which to add shape range
+ information. If the feature is not found in the input or output
+ descriptions then an exception is thrown
+
+ :param lower_bounds: List[int]
+ List of integers specifying the lower bounds of each dimension.
+ The length must be the same as the rank (length of the default shape)
+ of the feature.
+
+ :param upper_bounds: List[int]
+ List of integers specifying the upper bounds of each dimension.
+ -1 corresponds to an unbounded range.
+ The length must be the same as the rank (length of the default shape)
+ of the feature.
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> # say, the default shape of "my_multiarray_featurename" is (2,3)
+ >>> flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='my_multiarray_featurename', lower_bounds=[1,2], upper_bounds=[10,-1])
+
+ :return:
+ None. The spec is updated
+ """
+ if not isinstance(lower_bounds, list):
+ raise Exception("lower_bounds must be a list")
+ if not isinstance(upper_bounds, list):
+ raise Exception("upper_bounds must be a list")
+
+ feature = _get_feature(spec, feature_name)
+
+ if feature.type.WhichOneof("Type") != "multiArrayType":
+ raise Exception(
+ "Trying to update the shape range for a non-multiArray feature type"
+ )
+
+ shape = feature.type.multiArrayType.shape
+
+ if len(shape) != len(lower_bounds):
+ raise Exception(
+ "Length of lower_bounds is not equal to the number of dimensions in the default shape"
+ )
+ if len(shape) != len(upper_bounds):
+ raise Exception(
+ "Length of upper_bounds is not equal to the number of dimensions in the default shape"
+ )
+
+ feature.type.multiArrayType.ClearField("ShapeFlexibility")
+
+ for i in range(len(lower_bounds)):
+ if shape[i] < lower_bounds[i]:
+ raise Exception(
+ "Default shape in %d-th dimension, which is %d, is smaller"
+ " than the lower bound of %d" % (i, int(shape[i]), lower_bounds[i])
+ )
+ if upper_bounds[i] != -1:
+ if shape[i] > upper_bounds[i]:
+ raise Exception(
+ "Default shape in %d-th dimension, which is %d, is greater"
+ " than the upper bound of %d" % (i, int(shape[i]), upper_bounds[i])
+ )
+
+ s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
+ s.lowerBound = lower_bounds[i]
+ s.upperBound = upper_bounds[i]
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_NDARRAY_SPEC_VERSION, spec.specificationVersion
+ )
+
+
+def add_multiarray_ndshape_enumeration(spec, feature_name, enumerated_shapes):
+ """
+ Annotate an input or output MLMultiArray feature in a Neural Network spec
+ to accommodate a list of enumerated shapes.
+ The provided enumerated shapes are added to the list of shapes already
+ present.
+ This method is different from "add_enumerated_multiarray_shapes", which
+ applies to rank 5, SBCHW-mapped arrays.
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the feature for which to add enumerated shape
+ information. If the feature is not found in the input or output
+ descriptions then an exception is thrown
+
+ :param enumerated_shapes: List[Tuple(int)]
+ List of shapes, where each shape is specified as a tuple of integers.
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> # say, the default shape of "my_multiarray_featurename" is (2,3)
+ >>> flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='my_multiarray_featurename', enumerated_shapes=[(2,4), (2,6)])
+
+ :return:
+ None. The spec is updated
+ """
+ if not isinstance(enumerated_shapes, list):
+ raise Exception("enumerated_shapes must be a list")
+ if len(enumerated_shapes) == 0:
+ raise Exception("enumerated_shapes is empty")
+
+ feature = _get_feature(spec, feature_name)
+ if feature.type.WhichOneof("Type") != "multiArrayType":
+ raise Exception(
+ "Trying to add enumerated shapes to a non-multiArray feature type"
+ )
+
+ if feature.type.multiArrayType.WhichOneof("ShapeFlexibility") != "enumeratedShapes":
+ feature.type.multiArrayType.ClearField("ShapeFlexibility")
+
+ eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
+
+ shapes_added_so_far = []
+
+ # Add the default array shape to the list of enumerated shapes if the
+ # enumerated shapes field is currently empty
+ if eshape_len == 0:
+ fixed_shape = feature.type.multiArrayType.shape
+ s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
+ s.shape.extend(fixed_shape)
+ shapes_added_so_far.append(list(fixed_shape))
+
+ for shape in enumerated_shapes:
+ if not isinstance(shape, tuple):
+ raise Exception("An element in 'enumerated_shapes' is not a tuple")
+ if list(shape) not in shapes_added_so_far:
+ s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
+ s.shape.extend(list(shape))
+ shapes_added_so_far.append(list(shape))
+
+ # Bump up the specification version
+ spec.specificationVersion = max(
+ _MINIMUM_NDARRAY_SPEC_VERSION, spec.specificationVersion
+ )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py
new file mode 100644
index 00000000..61772c57
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py
@@ -0,0 +1,255 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+Neural Network optimization utilities.
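+
+The helpers in this module fold adjacent layer pairs (convolution or
+innerProduct followed by scale or bias, convolution followed by batchnorm,
+and batchnorm followed by scale) into single layers with merged weights.
+A minimal usage sketch, assuming ``spec`` holds a plain ``neuralNetwork``
+(``_optimize_nn`` below is private, so its interface may change):
+
+.. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import optimization_utils
+ >>> spec = coremltools.utils.load_spec("mymodel.mlmodel")
+ >>> optimization_utils._optimize_nn(spec.neuralNetwork.layers)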
+""" + +import numpy as _np + + +def _fuse_layer_with_scale_layer(layer_idx, scale_idx, layers): + layer_type = layers[layer_idx].WhichOneof("layer") + if layer_type == "convolution": + layer = layers[layer_idx].convolution + elif layer_type == "innerProduct": + layer = layers[layer_idx].innerProduct + else: + raise Exception( + "Scale fusion not supper for layer " "type {} ".format(layer_type) + ) + + scale = layers[scale_idx].scale + + # Update weights + sw = _np.array(scale.scale.floatValue) + w = _np.array(layer.weights.floatValue) + w = w.reshape(layer.outputChannels, int(len(w) / layer.outputChannels)) + wp = w * sw[:, None] + del layer.weights.floatValue[:] + layer.weights.floatValue.extend(wp.flatten()) + + # Update biases + if scale.hasBias: + sb = _np.array(scale.bias.floatValue) + if not layer.hasBias: + layer.bias.floatValue.extend(sb) + layer.hasBias = True + else: + lb = _np.array(layer.bias.floatValue) + bp = sw * lb + sb + del layer.bias.floatValue[:] + layer.bias.floatValue.extend(bp) + + # re-wire outputs and delete scale layer + print("Fused {}->{}".format(layers[layer_idx].name, layers[scale_idx].name)) + del layers[layer_idx].output[:] + layers[layer_idx].output.extend(layers[scale_idx].output) + del layers[scale_idx] + + +def _fuse_layer_with_bias_layer(layer_idx, bias_idx, layers): + layer_type = layers[layer_idx].WhichOneof("layer") + if layer_type == "convolution": + layer = layers[layer_idx].convolution + elif layer_type == "innerProduct": + layer = layers[layer_idx].innerProduct + else: + raise Exception( + "Bias fusion not supper for layer " "type {} ".format(layer_type) + ) + + bias = layers[bias_idx].bias + + bb = _np.array(bias.bias.floatValue) + if not layer.hasBias: + layer.bias.floatValue.extend(bb) + layer.hasBias = True + else: + lb = _np.array(layer.bias.floatValue) + bp = lb + bb + del layer.bias.floatValue[:] + layer.bias.floatValue.extend(bp) + + # re-wire outputs and delete bias layer + print("Fused {}->{}".format(layers[layer_idx].name, layers[bias_idx].name)) + del layers[layer_idx].output[:] + layers[layer_idx].output.extend(layers[bias_idx].output) + del layers[bias_idx] + + +def _bn_scale_fusion(bn_idx, scale_idx, layers): + bn = layers[bn_idx].batchnorm + scale = layers[scale_idx].scale + + gamma = _np.array(bn.gamma.floatValue) + beta = _np.array(bn.beta.floatValue) + sw = _np.array(scale.scale.floatValue) + + gamma = gamma * sw + beta = beta * sw + + if scale.hasBias: + sb = _np.array(scale.bias.floatValue) + beta = beta + sb + + del bn.gamma.floatValue[:] + del bn.beta.floatValue[:] + + bn.gamma.floatValue.extend(gamma) + bn.beta.floatValue.extend(beta) + + # re-wire outputs and delete scale layer + print("Fused {}->{}".format(layers[bn_idx].name, layers[scale_idx].name)) + del layers[bn_idx].output[:] + layers[bn_idx].output.extend(layers[scale_idx].output) + del layers[scale_idx] + + +def _conv_bn_fusion(conv_idx, bn_idx, layers): + conv = layers[conv_idx].convolution + bn = layers[bn_idx].batchnorm + + mean = _np.array(bn.mean.floatValue) + variance = _np.array(bn.variance.floatValue) + bn.epsilon + gamma = _np.array(bn.gamma.floatValue) + beta = _np.array(bn.beta.floatValue) + w = _np.array(conv.weights.floatValue) + + if conv.hasBias: + b = _np.array(conv.bias.floatValue) + else: + b = _np.zeros(conv.outputChannels) + + w = w.reshape(conv.outputChannels, int(len(w) / conv.outputChannels)) + wp = (gamma / _np.sqrt(variance))[:, None] * w + bp = (gamma * b / _np.sqrt(variance)) - (gamma * mean / _np.sqrt(variance)) + beta + + del 
conv.weights.floatValue[:] + if conv.hasBias: + del conv.bias.floatValue[:] + + conv.weights.floatValue.extend(wp.flatten()) + conv.bias.floatValue.extend(bp) + conv.hasBias = True + + print("Fused {}->{}".format(layers[conv_idx].name, layers[bn_idx].name)) + # re-wire outputs and delete batchnorm layer + del layers[conv_idx].output[:] + layers[conv_idx].output.extend(layers[bn_idx].output) + del layers[bn_idx] + + +def _get_nn_mappings(layers): + layer_map = {} + type_map = {} + output_map = {} + input_map = {} + for idx, layer in enumerate(layers): + layer_name = "{}".format(idx) + layer_map[layer_name] = {"outputs": [], "inputs": []} + layer_type = layer.WhichOneof("layer") + if layer_type not in type_map.keys(): + type_map[layer_type] = [] + type_map[layer_type].append(layer_name) + + # Add inputs and outputs for layer + for o in layer.output: + layer_map[layer_name]["outputs"].append(o) + for i in layer.input: + layer_map[layer_name]["inputs"].append(i) + + # Construct input/output graph dict + for l in layer_map.keys(): + output_map[l] = [] + input_map[l] = [] + for cl in layer_map.keys(): + if any(x in layer_map[l]["outputs"] for x in layer_map[cl]["inputs"]): + output_map[l].append(cl) + if any(x in layer_map[l]["inputs"] for x in layer_map[cl]["outputs"]): + input_map[l].append(cl) + + return type_map, output_map, input_map + + +def _optimize_nn(layers): + type_map, output_map, input_map = _get_nn_mappings(layers) + bn_layers = [] + conv_layers = [] + ip_layers = [] + bias_layers = [] + scale_layers = [] + + # Only fuse with non-instance batchnorm layers + if "batchnorm" in type_map.keys(): + for bn_layer_idx in type_map["batchnorm"]: + if not layers[int(bn_layer_idx)].batchnorm.instanceNormalization: + bn_layers.append(bn_layer_idx) + + if "convolution" in type_map.keys(): + conv_layers = type_map["convolution"] + + if "innerProduct" in type_map.keys(): + ip_layers = type_map["innerProduct"] + + if "bias" in type_map.keys(): + bias_layers = type_map["bias"] + + if "scale" in type_map.keys(): + scale_layers = type_map["scale"] + + # Convolution optimizations + for conv_idx in conv_layers: + if len(output_map[conv_idx]) != 1: + continue + output_idx = output_map[conv_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Batchnorm fusion + if output_idx in bn_layers: + _conv_bn_fusion(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Scale fusion + if output_idx in scale_layers: + _fuse_layer_with_scale_layer(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Bias fusion + if output_idx in bias_layers: + _fuse_layer_with_bias_layer(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Inner Product optimizations + for ip_idx in ip_layers: + if len(output_map[ip_idx]) != 1: + continue + output_idx = output_map[ip_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Scale Fusion + if output_idx in scale_layers: + _fuse_layer_with_scale_layer(int(ip_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Bias Fusion + if output_idx in bias_layers: + _fuse_layer_with_bias_layer(int(ip_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Batchnorm optimizations + for bn_idx in bn_layers: + if len(output_map[bn_idx]) != 1: + continue + output_idx = output_map[bn_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Scale Fusion + if output_idx in scale_layers: + _bn_scale_fusion(int(bn_idx), int(output_idx), layers) + return _optimize_nn(layers) diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py new file mode 100644 index 00000000..69cb8406 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py @@ -0,0 +1,114 @@ +# Copyright (c) 2018, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .spec_inspection_utils import (_get_feature_description_summary, + _summarize_neural_network_spec, + _summarize_neural_network_spec_code_style) + + +def _print_network_spec_parameter_info_style(mlmodel_spec, interface_only=False): + """ Print the network information summary. + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + """ + inputs, outputs, layers_info = _summarize_neural_network_spec(mlmodel_spec) + + print("Inputs:") + for i in inputs: + name, description = i + print(" {} {}".format(name, description)) + + print("Outputs:") + for o in outputs: + name, description = o + print(" {} {}".format(name, description)) + + if layers_info is None: + print( + "\n(This MLModel is not a neural network model or does not contain any layers)" + ) + + if layers_info and not interface_only: + print("\nLayers:") + for idx, l in enumerate(layers_info): + layer_type, name, in_blobs, out_blobs, params_info = l + print("[{}] ({}) {}".format(idx, layer_type, name)) + print(" Input blobs: {}".format(in_blobs)) + print(" Output blobs: {}".format(out_blobs)) + if len(params_info) > 0: + print(" Parameters: ") + for param in params_info: + print(" {} = {}".format(param[0], param[1])) + + print("\n") + + +def _print_network_spec_coding_style(mlmodel_spec, interface_only=False): + """ + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + """ + + inputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.input + ] + outputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.output + ] + + input_names = [] + print("Inputs:") + for i in inputs: + name, description = i + print(" {} {}".format(name, description)) + input_names.append(name) + + output_names = [] + print("Outputs:") + for o in outputs: + name, description = o + print(" {} {}".format(name, description)) + output_names.append(name) + + if interface_only: + return + + nn_spec = None + + if mlmodel_spec.HasField("neuralNetwork"): + nn_spec = mlmodel_spec.neuralNetwork + elif mlmodel_spec.HasField("neuralNetworkClassifier"): + nn_spec = mlmodel_spec.neuralNetworkClassifier + elif mlmodel_spec.HasField("neuralNetworkRegressor"): + nn_spec = mlmodel_spec.neuralNetworkRegressor + + if nn_spec is None: + print("\n(This MLModel is not a neural network model)") + return + + print("\n") + _summarize_neural_network_spec_code_style( + nn_spec, input_names=input_names, output_names=output_names + ) + + +def print_network_spec(mlmodel_spec, interface_only=False, style=""): + """ Print the network information summary. + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + style : str. Either 'coding' or default, which prints information on parameters of layers. 
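Both printing styles share the same public entry point, ``print_network_spec``, at the bottom of this file. A usage sketch (the model path is hypothetical):

.. code-block:: python

    import coremltools
    from coremltools.models.neural_network.printer import print_network_spec

    spec = coremltools.models.MLModel("my_model.mlmodel").get_spec()
    print_network_spec(spec, style="coding")       # code-style layer summary
    print_network_spec(spec, interface_only=True)  # inputs/outputs only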
+ """ + + if style == "coding": + _print_network_spec_coding_style(mlmodel_spec, interface_only=interface_only) + else: + _print_network_spec_parameter_info_style( + mlmodel_spec, interface_only=interface_only + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py new file mode 100644 index 00000000..cef3aff8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py @@ -0,0 +1,1651 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Utilities to compress Neural Network Models. +Only available in coremltools 2.0b1 and onwards +""" +from os import listdir as _listdir +from sys import stdout as _stdout + +import numpy as _np + +from coremltools import ComputeUnit as _ComputeUnit +from coremltools.models import (_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _SUPPORTED_QUANTIZATION_MODES) +from coremltools.models import MLModel as _MLModel + +from ... import (_MINIMUM_FP16_SPEC_VERSION, + _MINIMUM_QUANTIZED_MODEL_SPEC_VERSION, + _SPECIFICATION_VERSION_IOS_14) +from ..._deps import _HAS_SKLEARN as _HAS_SKLEARN +from ..utils import _get_model, _macos_version, _wp_to_fp16wp +from .optimization_utils import _optimize_nn + + +class QuantizedLayerSelector: + """ + This is the base class to implement custom selectors to skip certain + layers during quantization. To implement a custom selector, create a class + that inherits this class and override `do_quantize()` method. + + Examples + -------- + .. highlight:: python + .. code-block:: python + + class MyLayerSelector(QuantizedLayerSelector): + def __init__(self): + super().__init__() + + def do_quantize(self, layer, **kwargs): + ret = super().do_quantize(layer) + if not ret or layer.name == 'dense_2': + return False + return True + + selector = MyLayerSelector() + quantized_model = quantize_weights(mlmodel, 8, quantization_mode='linear', selector=selector) + + """ + + def __init__(self): + self.quantizable_layer_types = { + "convolution", + "innerProduct", + "embedding", + "embeddingND", + "batchnorm", + "scale", + "bias", + "loadConstant", + "simpleRecurrent", + "gru", + "uniDirectionalLSTM", + "biDirectionalLSTM", + "batchedMatmul", + "depthwiseConv", + "loop", + "branch", + } + + def do_quantize(self, layer, **kwargs): + return layer.WhichOneof("layer") in self.quantizable_layer_types + + +class AdvancedQuantizedLayerSelector(QuantizedLayerSelector): + """ Quantized layer selector allowing the user to specify some types of + layers to skip during quantization process and the minimum size parameters + in quantized convolution layers. + + Examples + -------- + .. highlight:: python + .. 
code-block:: python

+        from coremltools.models.neural_network.quantization_utils import AdvancedQuantizedLayerSelector
+        selector = AdvancedQuantizedLayerSelector(
+            skip_layer_types=['batchnorm', 'bias', 'depthwiseConv'],
+            minimum_conv_kernel_channels=4,
+            minimum_conv_weight_count=4096)
+        quantized_model = quantize_weights(model, 8, selector=selector)
+
+    """
+
+    def __init__(
+        self,
+        skip_layer_types=[],
+        minimum_conv_kernel_channels=4,
+        minimum_conv_weight_count=4096,
+    ):
+
+        super().__init__()
+        self.skip_layer_types = skip_layer_types
+
+        # Error checking
+        invalid_skip_types = []
+        for lt in skip_layer_types:
+            if lt not in self.quantizable_layer_types:
+                invalid_skip_types.append(lt)
+        if len(invalid_skip_types) > 0:
+            err_msg = "Skip quantization layer types ({}) are not supported.\n".format(
+                ",".join(invalid_skip_types)
+            )
+            err_msg += "Supported quantization layers: ({})".format(
+                ",".join(self.quantizable_layer_types)
+            )
+            raise ValueError(err_msg)
+
+        self.minimum_conv_kernel_channels = minimum_conv_kernel_channels
+        self.minimum_conv_weight_count = minimum_conv_weight_count
+
+    def do_quantize(self, layer, weight_param=None):
+        """ weight_param - should be the name of the WeightParam field
+        """
+        ret = super().do_quantize(layer)
+        if not ret:
+            return False
+
+        layer_type = layer.WhichOneof("layer")
+        if layer_type in self.skip_layer_types:
+            return False
+
+        if layer_type == "convolution":
+            oc = layer.convolution.outputChannels
+            kc = layer.convolution.kernelChannels
+            kh = layer.convolution.kernelSize[0]
+            kw = layer.convolution.kernelSize[1]
+            groups = layer.convolution.nGroups
+            counts = oc * kc * kh * kw
+            has_bias = layer.convolution.hasBias
+
+            if weight_param is None or weight_param == "weights":
+                if "depthwiseConv" in self.skip_layer_types and kc == 1 and groups > 1:
+                    return False
+
+                if (
+                    kc < self.minimum_conv_kernel_channels
+                    or counts < self.minimum_conv_weight_count
+                ):
+                    return False
+
+            elif weight_param == "bias":
+                return "bias" not in self.skip_layer_types
+            else:
+                raise ValueError(
+                    "Unrecognized quantization weight field {}".format(weight_param)
+                )
+
+        elif layer_type in ("innerProduct", "batchedMatmul"):
+            if weight_param is None or weight_param == "weights":
+                return True
+            if weight_param == "bias":
+                return "bias" not in self.skip_layer_types
+            else:
+                raise ValueError(
+                    "Unrecognized quantization weight field {}".format(weight_param)
+                )
+
+        return True
+
+
+class MatrixMultiplyLayerSelector(QuantizedLayerSelector):
+    """
+    Layer selector object that allows users to select matrix multiplication layers
+    with one of the matrices being constant, based on criteria such as the total
+    number of parameters/weights, the number of input or output channels, and/or
+    layer names. A layer that fails any of these criteria is not selected.
+    """
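Unlike the selectors above, this class carries no example in its docstring; it is consumed by ``activate_int8_int8_matrix_multiplications`` later in this file. A plausible usage sketch (the model object and layer names are hypothetical):

.. code-block:: python

    from coremltools.models.neural_network.quantization_utils import (
        MatrixMultiplyLayerSelector,
        activate_int8_int8_matrix_multiplications,
    )

    selector = MatrixMultiplyLayerSelector(
        minimum_weight_count=4096,                        # skip small matmuls
        include_layers_with_names=["dense_1", "dense_2"],
    )
    spec = model.get_spec()
    spec = activate_int8_int8_matrix_multiplications(spec, selector=selector)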
+ """ + + def __init__( + self, + minimum_weight_count=1, + minimum_input_channels=1, + minimum_output_channels=1, + maximum_input_channels=None, + maximum_output_channels=None, + include_layers_with_names=None, + ): + + super().__init__() + + # weight count refers to number of parameters/weights and is equal to product of input & output channels + self.minimum_weight_count = minimum_weight_count + self.minimum_input_channels = minimum_input_channels + self.minimum_output_channels = minimum_output_channels + self.maximum_input_channels = maximum_input_channels + self.maximum_output_channels = maximum_output_channels + if include_layers_with_names is None: + self.include_layers_with_names = [] + + if not ( + isinstance(self.include_layers_with_names, (list, tuple)) + and all( + [isinstance(s, str) for s in self.include_layers_with_names] + ) + ): + raise ValueError( + "Property 'include_layers_with_names' must be a list/tuple of str objects" + ) + + def do_quantize(self, layer, weight_param=None): + """ + weight_param - should be name of the WeightParam field + """ + ret = super().do_quantize(layer) + if not ret: + return False + + layer_type = layer.WhichOneof("layer") + + if layer_type in ["innerProduct", "batchedMatmul"]: + if weight_param == "bias": + return True + elif weight_param is None or weight_param == "weights": + + if layer_type == "innerProduct": + ic = layer.innerProduct.inputChannels + oc = layer.innerProduct.outputChannels + else: + ic = layer.batchedMatmul.weightMatrixFirstDimension + oc = layer.batchedMatmul.weightMatrixSecondDimension + + wc = ic * oc + + if wc < self.minimum_weight_count: + return False + if ic < self.minimum_input_channels: + return False + if oc < self.minimum_output_channels: + return False + if self.maximum_input_channels and ic > self.maximum_input_channels: + return False + if self.maximum_output_channels and oc > self.maximum_output_channels: + return False + if ( + self.include_layers_with_names + and layer.name not in self.include_layers_with_names + ): + return False + + return True + else: + raise ValueError( + "Unrecognized quantization weight field {}".format(weight_param) + ) + + elif layer_type in ["loop", "branch"]: + return True + + return False + + +def _convert_1bit_array_to_byte_array(arr): + """ + Convert bit array to byte array. 
+ + arr: list + Bits as a list where each element is an integer of 0 or 1 + + Returns + ------- + numpy.array + 1D numpy array of type uint8 + """ + # Padding if necessary + while len(arr) < 8 or len(arr) % 8: + arr.append(0) + + arr = _np.array(arr, dtype="uint8") + bit_arr = [] + idx = 0 + # Iterate and combine 8-bits into a uint8 + for arr_idx in range(int(len(arr) / 8)): + bit_arr.append( + ((arr[idx] << 7) & (1 << 7)) + | ((arr[idx + 1] << 6) & (1 << 6)) + | ((arr[idx + 2] << 5) & (1 << 5)) + | ((arr[idx + 3] << 4) & (1 << 4)) + | ((arr[idx + 4] << 3) & (1 << 3)) + | ((arr[idx + 5] << 2) & (1 << 2)) + | ((arr[idx + 6] << 1) & (1 << 1)) + | ((arr[idx + 7] << 0) & (1 << 0)) + ) + idx += 8 + return _np.array(bit_arr, dtype="uint8") + + +def _convert_array_to_nbit_quantized_bytes(arr, nbits): + split_arr = [] + for idx in range(len(arr)): + for i in reversed(range(nbits)): + split_arr.append((arr[idx] >> i) & (1 << 0)) + + return _convert_1bit_array_to_byte_array(split_arr) + + +def _decompose_bytes_to_bit_arr(arr): + """ + Unpack bytes to bits + + arr: list + Byte Stream, as a list of uint8 values + + Returns + ------- + bit_arr: list + Decomposed bit stream as a list of 0/1s of length (len(arr) * 8) + """ + bit_arr = [] + for idx in range(len(arr)): + for i in reversed(range(8)): + bit_arr.append((arr[idx] >> i) & (1 << 0)) + return bit_arr + + +def _get_linear_lookup_table_and_weight(nbits, wp): + """ + Generate a linear lookup table. + + nbits: int + Number of bits to represent a quantized weight value + + wp: numpy.array + Weight blob to be quantized + + Returns + ------- + lookup_table: numpy.array + Lookup table of shape (2^nbits, ) + qw: numpy.array + Decomposed bit stream as a list of 0/1s of length (len(arr) * 8) + """ + w = wp.reshape(1, -1) + qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0) + indices = _np.array(range(0, 2 ** nbits)) + lookup_table = indices * scales[0] + biases[0] + return lookup_table, qw + + +def _get_kmeans_lookup_table_and_weight( + nbits, w, init="k-means++", tol=1e-2, n_init=1, rand_seed=0 +): + """ + Generate K-Means lookup table given a weight parameter field + + nbits: + Number of bits for quantization + + w: + Weight as numpy array + + Returns + ------- + lut: numpy.array + Lookup table, numpy array of shape (1 << nbits, ); + wq: numpy.array + Quantized weight of type numpy.uint8 + """ + if _HAS_SKLEARN: + from sklearn.cluster import KMeans + else: + raise ModuleNotFoundError( + "scikit-learn is required for k-means quantization." + " To install, run: \"pip install -U scikit-learn\"." + ) + units = _np.prod(w.shape) + lut_len = 1 << nbits + n_clusters = units if (units < lut_len) else lut_len + wf = w.reshape(-1, 1) + kmeans = KMeans( + n_clusters=n_clusters, init=init, tol=tol, n_init=n_init, random_state=rand_seed + ).fit(wf) + wq = kmeans.labels_[:units] + lut = _np.zeros(lut_len) + lut[:n_clusters] = kmeans.cluster_centers_.flatten() + return lut, wq + + +def _quantize_channelwise_linear(weight, nbits, axis=0, symmetric=False): + """ + Linearly quantize weight blob. + + weight: numpy.array + Weight to be quantized. + + nbits: int + Number of bits per weight element + + axis: int + Axis of the weight blob to compute channel-wise quantization, can be 0 or 1 + + symmetric: bool + If true, set quantization range to be symmetrical to 0. + Otherwise, set quantization range to be the minimum and maximum of + weight parameters. 
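In the asymmetric case, each channel with minimum ``a`` and maximum ``b`` is mapped with ``scale = (b - a) / (2 ** nbits - 1)`` and ``q = round((w - a) / scale)``, so a weight is recovered as ``q * scale + a``. A hand-checked single-channel sketch (illustration only, not part of the patch):

.. code-block:: python

    import numpy as np

    w = np.array([-1.0, 0.0, 3.0])                   # one channel of weights
    nbits = 8
    scale = (w.max() - w.min()) / (2 ** nbits - 1)   # 4 / 255
    q = np.round((w - w.min()) / scale)              # [0., 64., 255.]
    w_hat = q * scale + w.min()                      # ~[-1.0, 0.0039, 3.0]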
+
+    Returns
+    -------
+    quantized_weight: numpy.array
+        quantized weight as float numpy array, with the same shape as weight
+    scale: numpy.array
+        per channel scale
+    bias: numpy.array
+        per channel bias
+    """
+    if len(weight.shape) == 1:  # vector situation, treat as 1 channel
+        weight = weight.reshape((1, weight.shape[0]))
+
+    rank = len(weight.shape)
+    if axis == 1:
+        transposed_axis_order = (1, 0) + tuple(range(2, rank))
+        weight = _np.transpose(weight, transposed_axis_order)
+
+    num_channels = weight.shape[0]
+    shape = weight.shape
+    weight = weight.reshape((num_channels, -1))  # [C, L]
+
+    a = _np.amin(weight, axis=-1)  # [C,]
+    b = _np.amax(weight, axis=-1)  # [C,]
+
+    if symmetric:
+        r = _np.maximum(_np.abs(a), _np.abs(b))
+        scale = r / ((1 << nbits) / 2.0 - 1)
+        bias = -(1 << nbits) / 2.0 * scale
+        num = weight - bias[:, None]
+        denom = scale[:, None]
+        qw = _np.divide(
+            num, denom, out=_np.zeros_like(num), where=(_np.abs(denom) > 1e-6)
+        )
+        qw = _np.round(qw)
+    else:
+        qb = (1 << nbits) - 1
+        scale = (b - a) / qb
+        inv_scale = _np.divide(
+            1.0, scale, out=_np.zeros_like(scale), where=(_np.abs(scale) > 1e-6)
+        )
+        bias = a
+        qw = (weight - a[:, None]) * inv_scale[:, None]
+        qw = _np.round(qw)
+
+    # Reshape
+    quantized_weight = qw.reshape(shape)
+    if axis == 1:
+        quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
+
+    return (quantized_weight, scale, bias)
+
+
+def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
+    """
+    Quantize the weight blob
+
+    wp: numpy.array
+        Weight parameters
+    nbits: int
+        Number of bits
+    qm:
+        Quantization mode
+    lut_function: (``callable function``)
+        Python callable representing a look-up table
+
+    Returns
+    -------
+    scale: numpy.array
+        Per-channel scale
+    bias: numpy.array
+        Per-channel bias
+    lut: numpy.array
+        Lookup table
+    quantized_wp: numpy.array
+        Quantized weight of same shape as wp, with dtype numpy.uint8
+    """
+
+    scale = bias = lut = None
+
+    # Linear Quantization
+    if qm in [
+        _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
+        _QUANTIZATION_MODE_LINEAR_SYMMETRIC,
+    ]:
+        symmetric = qm == _QUANTIZATION_MODE_LINEAR_SYMMETRIC
+        qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis, symmetric)
+    # Lookup tables
+    elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
+        lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
+    elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
+        if "lut_function" not in kwargs.keys():
+            raise Exception(
+                "Custom lookup table quantization mode "
+                "selected but no lookup table function passed"
+            )
+        lut_function = kwargs["lut_function"]
+        if not callable(lut_function):
+            raise Exception(
+                "Argument for Lookup Table passed in but is not callable"
+            )
+        try:
+            lut, qw = lut_function(nbits, wp)
+        except Exception as e:
+            raise Exception(
+                "{}\nCall to Lookup Table function failed".format(str(e))
+            )
+    elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
+        lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
+    else:
+        raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
+
+    quantized_wp = _np.uint8(qw)
+    return scale, bias, lut, quantized_wp
+
+
+def _quantize_wp_field(wp, nbits, qm, shape, axis=0, **kwargs):
+    """
+    Quantize WeightParam field in Neural Network Protobuf
+
+    wp: MLModel.NeuralNetwork.WeightParam
+        WeightParam field
+    nbits: int
+        Number of bits to be quantized
+    qm: str
+        Quantization mode
+    shape: tuple
+        Tensor shape held by wp
+    axis: int
+        Axis over which quantization is performed, can be either 0 or 1
+    lut_function: (``callable function``)
+        Python callable representing a LUT table function
+    """
+
+    # De-quantization
+    if qm == _QUANTIZATION_MODE_DEQUANTIZE:
+        return _dequantize_wp(wp, shape, axis)
+
+    # If the float32 field is empty do nothing and return
+    if len(wp.floatValue) == 0:
+        return
+
+    # Half precision (16-bit) quantization
+    if nbits == 16:
+        return _wp_to_fp16wp(wp)
+
+    if nbits > 8:
+        raise Exception("Only 8-bit and lower quantization is supported")
+
+    if qm not in _SUPPORTED_QUANTIZATION_MODES:
+        raise Exception("Quantization mode {} not supported".format(qm))
+
+    # axis parameter check
+    if axis == 1 and len(shape) != 4:
+        raise Exception(
+            "Quantization on second axis is only supported for rank-4 weight blob."
+        )
+    if axis != 0 and axis != 1:
+        raise Exception(
+            "Invalid quantization axis {} passed in. Allowed "
+            "values are 0 (first axis) and 1 (second axis)".format(axis)
+        )
+
+    # WeightParam size check - non-linear quantizations are applied on layer level
+    num_channels = (
+        shape[axis]
+        if qm
+        in [_QUANTIZATION_MODE_LINEAR_QUANTIZATION, _QUANTIZATION_MODE_LINEAR_SYMMETRIC]
+        else 1
+    )
+    if len(wp.floatValue) % num_channels:
+        raise Exception(
+            "Number of quantization channels does not divide evenly into weights"
+        )
+
+    qparams = wp.quantization
+    qparams.numberOfBits = nbits
+
+    weights = _np.array(wp.floatValue).reshape(shape)
+    scale, bias, lut, uint8_weights = _quantize_wp(weights, nbits, qm, axis, **kwargs)
+    uint8_weights = uint8_weights.flatten()
+    if qm in [
+        _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
+        _QUANTIZATION_MODE_LINEAR_SYMMETRIC,
+    ]:
+        qparams.linearQuantization.scale.extend(scale)
+        qparams.linearQuantization.bias.extend(bias)
+    else:
+        qparams.lookupTableQuantization.floatValue.extend(lut)
+
+    wp.rawValue = bytes()
+    if nbits == 8:
+        wp.rawValue += uint8_weights.tobytes()
+    else:
+        wp.rawValue += _convert_array_to_nbit_quantized_bytes(
+            uint8_weights, nbits
+        ).tobytes()
+    del wp.floatValue[:]
+
+
+def _unpack_to_bytes(byte_arr, num_weights, nbits):
+    assert num_weights % 1 == 0
+    num_weights = int(num_weights)
+    bit_arr = _decompose_bytes_to_bit_arr(byte_arr.flatten().tolist())
+    bit_arr = _np.array(bit_arr[: num_weights * nbits]).reshape((num_weights, nbits))
+    expo = 2 ** _np.array(list(reversed(range(0, nbits))))
+    byte_arr = _np.sum(bit_arr * expo, axis=1)
+    return byte_arr
+
+
+def _dequantize_linear(weight_8bit, scale, bias, axis=0):
+    if len(weight_8bit.shape) == 1:  # vector situation, treat as 1 channel
+        weight_8bit = weight_8bit.reshape((1, weight_8bit.shape[0]))
+
+    rank = len(weight_8bit.shape)
+    if axis == 1:
+        transposed_axis_order = (1, 0) + tuple(range(2, rank))
+        weight_8bit = _np.transpose(weight_8bit, transposed_axis_order)
+
+    num_channels = weight_8bit.shape[0]
+    broadcast_shape = (num_channels,) + (1,) * (rank - 1)
+    scale = scale.reshape(broadcast_shape)
+    bias = bias.reshape(broadcast_shape)
+    weight = weight_8bit.astype("float") * scale + bias
+    if axis == 1:
+        weight = _np.transpose(weight, transposed_axis_order)
+
+    return weight
+
+
+def _dequantize_lut(weight_8bit, lut):
+    return lut[weight_8bit.astype("uint8")]
+
+
+def _dequantize_wp(wp, shape, axis=0):
+    if len(wp.floatValue) != 0:
+        return
+
+    is_linear = wp.quantization.WhichOneof("QuantizationType") == "linearQuantization"
+    if is_linear:
+        if len(wp.quantization.linearQuantization.scale) != len(
+            wp.quantization.linearQuantization.bias
+        ):
+            raise Exception(
+                "Linear quantization scale and bias vectors are different lengths"
+            )
+
+    # axis parameter check
+    if axis == 1 and len(shape) != 4:
+        raise Exception(
+            "Dequantization on second axis is only supported for rank-4 weight blob."
+        )
+    if axis != 0 and axis != 1:
+        raise Exception(
+            "Invalid quantization axis {} passed in. Allowed "
+            "values are 0 (first axis) and 1 (second axis)".format(axis)
+        )
+
+    nbits = wp.quantization.numberOfBits
+    num_weights = _np.prod(shape)
+    byte_arr = _np.frombuffer(wp.rawValue, dtype=_np.uint8)
+
+    weight_8bit = (
+        byte_arr if nbits == 8 else _unpack_to_bytes(byte_arr, num_weights, nbits)
+    )
+    weight_8bit = weight_8bit.reshape(shape)
+
+    if is_linear:
+        scale = _np.array(wp.quantization.linearQuantization.scale)
+        bias = _np.array(wp.quantization.linearQuantization.bias)
+        dequantized_weight = _dequantize_linear(weight_8bit, scale, bias, axis)
+    else:
+        lut = _np.array(wp.quantization.lookupTableQuantization.floatValue)
+        dequantized_weight = _dequantize_lut(weight_8bit, lut)
+
+    wp.rawValue = bytes()
+    wp.quantization.Clear()
+    wp.floatValue.extend(dequantized_weight.flatten())
+
+
+def _dequantize_nn_spec(spec):
+    """
+    Dequantize weights in NeuralNetwork type mlmodel specifications.
+    """
+    _quantize_nn_spec(spec, None, _QUANTIZATION_MODE_DEQUANTIZE)
+
+
+def _quantize_nn_spec(nn_spec, nbits, qm, **kwargs):
+    """
+    Quantize weights in NeuralNetwork type mlmodel specifications.
+    """
+    selector = kwargs.get("selector", QuantizedLayerSelector())
+
+    if qm not in _SUPPORTED_QUANTIZATION_MODES:
+        raise Exception("Quantization mode {} not supported".format(qm))
+
+    if qm != _QUANTIZATION_MODE_DEQUANTIZE:
+        if nbits is None:
+            raise Exception('Missing argument "nbits"')
+        if not (nbits > 0 and nbits <= 8 or nbits == 16):
+            raise Exception(
+                "Only half precision (16-bit) and 1 to 8-bit quantization is supported"
+            )
+
+    if qm == _QUANTIZATION_MODE_LINEAR_SYMMETRIC and nbits != 8:
+        raise Exception("Symmetric quantization is only applicable for 8-bit linear")
+
+    layers = nn_spec.layers
+
+    # Perform optimization step
+    if nbits is not None and nbits < 16 and qm != _QUANTIZATION_MODE_DEQUANTIZE:
+        print("Optimizing Neural Network before Quantization:")
+        _optimize_nn(layers)
+        print("Finished optimizing network. 
Quantizing neural network..") + + # Quantize each layer + for layer in layers: + layer_type = layer.WhichOneof("layer") + if not selector.do_quantize(layer): + continue + print("Quantizing layer {} of type {}".format(layer.name, layer_type)) + + # Convolution + if layer_type == "convolution": + output_channels = layer.convolution.outputChannels + kernel_channels = layer.convolution.kernelChannels + kernel_height = layer.convolution.kernelSize[0] + kernel_width = layer.convolution.kernelSize[1] + groups = layer.convolution.nGroups + counts = output_channels * kernel_channels * kernel_height * kernel_width + has_bias = layer.convolution.hasBias + if layer.convolution.isDeconvolution: + shape = ( + kernel_channels, + int(output_channels / groups), + kernel_height, + kernel_width, + ) + _quantize_wp_field( + layer.convolution.weights, nbits, qm, shape, axis=1, **kwargs + ) + else: + shape = (output_channels, kernel_channels, kernel_height, kernel_width) + _quantize_wp_field( + layer.convolution.weights, nbits, qm, shape, **kwargs + ) + + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.convolution.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # Batchnorm + elif layer_type == "batchnorm": + nw = layer.batchnorm.channels + _quantize_wp_field(layer.batchnorm.gamma, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field(layer.batchnorm.beta, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field(layer.batchnorm.mean, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field( + layer.batchnorm.variance, nbits, qm, shape=(nw,), **kwargs + ) + + # InnerProduct + elif layer_type == "innerProduct": + output_channels = layer.innerProduct.outputChannels + input_channels = layer.innerProduct.inputChannels + _quantize_wp_field( + layer.innerProduct.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + has_bias = layer.innerProduct.hasBias + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.innerProduct.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # BatchedMatmul + elif layer_type == "batchedMatmul": + x1 = layer.batchedMatmul.weightMatrixFirstDimension + x2 = layer.batchedMatmul.weightMatrixSecondDimension + _quantize_wp_field( + layer.batchedMatmul.weights, nbits, qm, shape=(x2, x1), **kwargs + ) + has_bias = layer.batchedMatmul.hasBias + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.batchedMatmul.bias, nbits, qm, shape=(x2,), **kwargs + ) + + # Embedding layer + elif layer_type == "embedding": + output_channels = layer.embedding.outputChannels + input_channels = layer.embedding.inputDim + _quantize_wp_field( + layer.embedding.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + if layer.embedding.hasBias: + _quantize_wp_field( + layer.embedding.bias, nbits, qm, shape=(output_channels,), **kwargs + ) + + # Embedding ND layer + elif layer_type == "embeddingND": + output_channels = layer.embeddingND.embeddingSize + input_channels = layer.embeddingND.vocabSize + _quantize_wp_field( + layer.embeddingND.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + if layer.embeddingND.hasBias: + _quantize_wp_field( + layer.embeddingND.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # Scale layer + elif layer_type == "scale": + nw = _np.prod(layer.scale.shapeScale) + _quantize_wp_field(layer.scale.scale, nbits, qm, shape=(nw,), 
**kwargs) + if layer.scale.hasBias: + nw = _np.prod(layer.scale.shapeBias) + _quantize_wp_field(layer.scale.bias, nbits, qm, shape=(nw,), **kwargs) + + # Bias layer + elif layer_type == "bias": + nw = _np.prod(layer.bias.shape) + _quantize_wp_field(layer.bias.bias, nbits, qm, shape=(nw,), **kwargs) + + # LoadConstant layer + elif layer_type == "loadConstant": + nw = _np.prod(layer.loadConstant.shape) + _quantize_wp_field( + layer.loadConstant.data, nbits, qm, shape=(nw,), **kwargs + ) + + # Simple Recurrent + elif layer_type == "simpleRecurrent": + i_size = layer.simpleRecurrent.inputVectorSize + o_size = layer.simpleRecurrent.outputVectorSize + _quantize_wp_field( + layer.simpleRecurrent.weightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.simpleRecurrent.recursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + if layer.simpleRecurrent.hasBiasVector: + _quantize_wp_field( + layer.simpleRecurrent.biasVector, + nbits, + qm, + shape=(o_size,), + **kwargs + ) + + # GRU + elif layer_type == "gru": + i_size = layer.gru.inputVectorSize + o_size = layer.gru.outputVectorSize + # Weight Matrix + _quantize_wp_field( + layer.gru.updateGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + # Recursion Weights + _quantize_wp_field( + layer.gru.updateGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + # Bias + if layer.gru.hasBiasVectors: + _quantize_wp_field( + layer.gru.updateGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + + # LSTM Layers + elif layer_type in ["uniDirectionalLSTM", "biDirectionalLSTM"]: + + def _lstmwp_to_fp16_lstmwp( + lstm_wp, nbits, qm, i_size, o_size, has_peephole=True + ): + assert lstm_wp + _quantize_wp_field( + lstm_wp.inputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.blockInputWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + + _quantize_wp_field( + lstm_wp.inputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.blockInputRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + + _quantize_wp_field( + lstm_wp.inputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + 
lstm_wp.blockInputBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + + if has_peephole: + _quantize_wp_field( + lstm_wp.inputGatePeepholeVector, + nbits, + qm, + shape=(o_size,), + **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGatePeepholeVector, + nbits, + qm, + shape=(o_size,), + **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGatePeepholeVector, + nbits, + qm, + shape=(o_size,), + **kwargs + ) + + if layer_type == "uniDirectionalLSTM": + _lstmwp_to_fp16_lstmwp( + lstm_wp=layer.uniDirectionalLSTM.weightParams, + nbits=nbits, + qm=qm, + i_size=layer.uniDirectionalLSTM.inputVectorSize, + o_size=layer.uniDirectionalLSTM.outputVectorSize, + has_peephole=layer.uniDirectionalLSTM.params.hasPeepholeVectors, + ) + + elif layer_type == "biDirectionalLSTM": + for lstm_wp in layer.biDirectionalLSTM.weightParams: + _lstmwp_to_fp16_lstmwp( + lstm_wp=lstm_wp, + nbits=nbits, + qm=qm, + i_size=layer.biDirectionalLSTM.inputVectorSize, + o_size=layer.biDirectionalLSTM.outputVectorSize, + has_peephole=layer.biDirectionalLSTM.params.hasPeepholeVectors, + ) + + elif layer_type == "custom": + print( + "Skipping custom layer {}. Weights for this layer need to" + "be converted manually".format(layer.name) + ) + elif layer_type == "branch": + _quantize_nn_spec(layer.branch.ifBranch, nbits, qm, **kwargs) + _quantize_nn_spec(layer.branch.elseBranch, nbits, qm, **kwargs) + elif layer_type == "loop": + _quantize_nn_spec(layer.loop.conditionNetwork, nbits, qm, **kwargs) + _quantize_nn_spec(layer.loop.bodyNetwork, nbits, qm, **kwargs) + else: + raise Exception("Unknown layer " + layer_type + " to be quantized") + + +def _quantize_spec_weights(spec, nbits, quantization_mode, **kwargs): + nn_model_types = [ + "neuralNetwork", + "neuralNetworkClassifier", + "neuralNetworkRegressor", + ] + + model_type = spec.WhichOneof("Type") + + # Neural network models + if model_type in nn_model_types: + # Bump up to appropriate spec version if required + if nbits == 16: + spec.specificationVersion = max( + _MINIMUM_FP16_SPEC_VERSION, spec.specificationVersion + ) + else: + spec.specificationVersion = max( + _MINIMUM_QUANTIZED_MODEL_SPEC_VERSION, spec.specificationVersion + ) + + if spec.WhichOneof("Type") == "neuralNetwork": + _quantize_nn_spec(spec.neuralNetwork, nbits, quantization_mode, **kwargs) + + elif spec.WhichOneof("Type") in "neuralNetworkClassifier": + _quantize_nn_spec( + spec.neuralNetworkClassifier, nbits, quantization_mode, **kwargs + ) + + elif spec.WhichOneof("Type") in "neuralNetworkRegressor": + _quantize_nn_spec( + spec.neuralNetworkRegressor, nbits, quantization_mode, **kwargs + ) + + # Recursively convert all pipeline models + elif spec.WhichOneof("Type") == "pipeline": + for model_spec in spec.pipeline.models: + _quantize_spec_weights(model_spec, nbits, quantization_mode, **kwargs) + + elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]: + _quantize_spec_weights(spec.pipeline, nbits, quantization_mode, **kwargs) + + return spec + + +def _load_and_resize_image(image_path, size): + from PIL import Image + + img = Image.open(image_path) + return img.resize(size, Image.ANTIALIAS) + + +class TopKMetrics: + def __init__(self, topk): + self._topk = topk + self._correct_count = 0 + self._total_count = 0 + + def add_metric(self, output1, output2): + self._total_count += 1 + if self._topk == 1: + if output1 == output2: + self._correct_count += 1 + else: + self._topk = min(len(output1.keys()), 
self._topk) + out1_topk = sorted(output1, key=output1.get, reverse=True)[: self._topk] + out2_topk = sorted(output2, key=output2.get, reverse=True)[: self._topk] + if out1_topk[0] in out2_topk: + self._correct_count += 1 + + def display_metrics(self): + pcorrect = (float(self._correct_count) / float(self._total_count)) * 100 + pcorrect = _np.round(pcorrect, decimals=2) + if self._topk == 1: + print("Top 1 Agreement: {}%\n".format(pcorrect)) + else: + print("Top {} Agreement: {}%\n".format(self._topk, pcorrect)) + + +class NoiseMetrics: + def __init__(self): + self._snr = [] + self._psnr = [] + + @staticmethod + def _compute_snr(arr1, arr2): + noise = arr1 - arr2 + noise_var = _np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = _np.sum(arr2 ** 2) / len(arr2) + max_signal_energy = _np.amax(arr2 ** 2) + snr = 10 * _np.log10(signal_energy / noise_var) + psnr = 10 * _np.log10(max_signal_energy / noise_var) + return snr, psnr + + def add_metric(self, output1, output2): + import PIL + + # Output is Image + if isinstance(output1, PIL.Image.Image): + if output1.mode == "RGBA": + output1 = output1.convert("RGB") + output2 = output2.convert("RGB") + arr1 = _np.array(output1).flatten() + arr2 = _np.array(output2).flatten() + snr, psnr = self._compute_snr(arr1, arr2) + self._snr.append(snr) + self._psnr.append(psnr) + + # Output is multiArray + else: + arr1 = output1.flatten() + arr2 = output2.flatten() + snr, psnr = self._compute_snr(arr1, arr2) + self._snr.append(snr) + self._psnr.append(psnr) + + def display_metrics(self): + print("SNR: {} +/- {}".format(_np.mean(self._snr), _np.var(self._snr))) + print("PSNR: {} +/- {}\n".format(_np.mean(self._psnr), _np.var(self._psnr))) + + +class OutputMetric: + """ + Utility class to calculate and hold metrics between + two model outputs + """ + + def __init__(self, name, type): + self.name = name + self._metrics = [] + + if type == "stringType": + self._metrics.append(TopKMetrics(topk=1)) + + elif type == "dictionaryType": + self._metrics.append(TopKMetrics(topk=5)) + + elif type == "imageType" or type == "multiArrayType": + self._metrics.append(NoiseMetrics()) + + else: + raise Exception( + """Unable to determine which metric to + compute for output: {}""".format( + name + ) + ) + + def add_metric(self, output1, output2): + for metric in self._metrics: + metric.add_metric(output1, output2) + + def display_metrics(self): + for metric in self._metrics: + metric.display_metrics() + + +class ModelMetrics: + """ + A utility class to hold evaluation metrics + """ + + def __init__(self, spec): + self.model_metrics = {} + for output in spec.description.output: + output_type = output.type.WhichOneof("Type") + self.model_metrics[output.name] = OutputMetric(output.name, output_type) + + def add_metrics(self, model1_output, model2_output): + outputs = model1_output.keys() + for output in outputs: + self.model_metrics[output].add_metric( + model1_output[output], model2_output[output] + ) + + def display_metrics(self): + for metric in self.model_metrics: + print("Output {}:".format(metric)) + dash = "----------" + for x in range(0, len(metric)): + dash += "-" + print(dash) + self.model_metrics[metric].display_metrics() + + +def _characterize_qmodel_perf_with_data_dir(fpmodel, qspec, data_dir): + supported_image_exts = ["jpg", "bmp", "png", "jpeg"] + test_image_paths = [ + "{}/{}".format(data_dir, fn) + for fn in _listdir(data_dir) + if any(fn.endswith(ext) for ext in supported_image_exts) + ] + + if not test_image_paths: + raise Exception( + "{} contains no supported 
image files. " + "Supported file types include jpg, bmp, png and jpeg.".format( + data_dir + ) + ) + + qmodel = _get_model(qspec, compute_units=_ComputeUnit.CPU_ONLY) + model_metrics = ModelMetrics(qspec) + + input_name = qspec.description.input[0].name + input_size = ( + qspec.description.input[0].type.imageType.width, + qspec.description.input[0].type.imageType.height, + ) + + print("\n\n") + print("Analyzing {} images".format(len(test_image_paths))) + print("Running Analysis this may take a while ...") + print("\n") + + analyzed = 0 + tried = 0 + if fpmodel.compute_unit != _ComputeUnit.CPU_ONLY: + fpmodel = _MLModel(fpmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + for image in test_image_paths: + try: + input = {input_name: _load_and_resize_image(image, input_size)} + fp_pred = fpmodel.predict(input) + q_pred = qmodel.predict(input) + analyzed += 1 + model_metrics.add_metrics(fp_pred, q_pred) + + except Exception as e: + print(e) + continue + + # Update Progress + tried += 1 + if tried % 10 == 0: + _stdout.write("\r") + _stdout.write("Analyzed {}/{}".format(tried, len(test_image_paths))) + _stdout.flush() + + print("\n") + model_metrics.display_metrics() + + +def _characterize_quantized_model_perf(fpmodel, qspec, sample_data): + qmodel = _get_model(qspec) + model_metrics = ModelMetrics(qspec) + + print("\n\n") + print("Analyzing {} samples".format(len(sample_data))) + print("Running Analysis this may take a while ...") + print("\n") + + analyzed = 0 + tried = 0 + fpmodel = _MLModel(fpmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + qmodel = _MLModel(qmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + for data in sample_data: + try: + fp_pred = fpmodel.predict(data) + q_pred = qmodel.predict(data) + analyzed += 1 + model_metrics.add_metrics(fp_pred, q_pred) + + except Exception as e: + print(e) + continue + + # Update Progress + tried += 1 + if tried % 10 == 0: + _stdout.write("\r") + _stdout.write("Analyzed {}/{}".format(tried, len(sample_data))) + _stdout.flush() + + print("\n") + model_metrics.display_metrics() + + +def compare_models(full_precision_model, quantized_model, sample_data): + """ + Utility function to compare the performance of a full precision vs quantized model + + full_precision_model: MLModel + The full precision model with float32 weights + + quantized_model: MLModel + Quantized version of the model with quantized weights + + sample_data: str | [dict] + Data used to characterize performance of the quantized model in + comparison to the full precision model. Either a list of sample input + dictionaries or an absolute path to a directory containing images. + Path to a directory containing images is only valid for models with + one image input. For all other models a list of sample inputs must be + provided. + + :return: + None. Performance metrics are printed out + """ + emessage = """ + Invalid sample data provided. Only a list of dictionaries + containing sample data or path to a folder containing images is + supported""" + + spec = full_precision_model.get_spec() + num_inputs = len(spec.description.input) + if isinstance(sample_data, str): + input_type = spec.description.input[0].type.WhichOneof("Type") + if num_inputs != 1 or input_type != "imageType": + raise Exception( + """Unable to analyze quantized models. Sample data + was a path to a directory which is only supported with models with + one image type input. Please try passing in a list of sample inputs + as sample data. 
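For reference, the two accepted forms of ``sample_data`` look like this in practice (the model objects, path, and input names here are hypothetical):

.. code-block:: python

    from coremltools.models.neural_network.quantization_utils import (
        compare_models,
        quantize_weights,
    )

    quantized = quantize_weights(model, 8, quantization_mode="linear")
    # Single image-input models: a directory of images
    compare_models(model, quantized, "/path/to/sample/images")
    # All other models: a list of input dictionaries
    compare_models(model, quantized, [{"input": x} for x in samples])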
+ """ + ) + _characterize_qmodel_perf_with_data_dir( + full_precision_model, quantized_model.get_spec(), sample_data + ) + + elif isinstance(sample_data, list): + if not all(type(d) is dict for d in sample_data): + raise Exception(emessage) + _characterize_quantized_model_perf( + full_precision_model, quantized_model.get_spec(), sample_data + ) + + else: + raise Exception(emessage) + + +def activate_int8_int8_matrix_multiplications(spec, selector=None): + """ + Utility function that takes in either a full precision (float) spec or + an nbit quantized spec to selectively enable int8 activation + weight quantization + of matrix multiplication operations where the second matrix represents a constant weight. + + spec: MLModel.get_spec() + Currently conversion for only neural network models is supported. + If a pipeline model is passed in then all embedded neural network models embedded within + will be modified. + + selector: (optional) MatrixMultiplyLayerSelector + A MatrixMultiplyLayerSelector object that enables int8 activation + weight quantization + only on those layers for which the user-specified criterion on the minimum/maximum number + of size/channels in constant weight parameters is met. + It can also be derived to provide custom selection. + + """ + + # Recursively convert all pipeline models + if spec.WhichOneof("Type") == "pipeline": + for model_spec in spec.pipeline.models: + activate_int8_int8_matrix_multiplications(model_spec, selector=selector) + return spec + + elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]: + activate_int8_int8_matrix_multiplications(spec.pipeline, selector=selector) + return spec + + # Neural network models + elif spec.WhichOneof("Type") in [ + "neuralNetwork", + "neuralNetworkClassifier", + "neuralNetworkRegressor", + ]: + + if selector is None: + selector = MatrixMultiplyLayerSelector() + + # Dequantize all the selected matrix multiplication layers + spec = _quantize_spec_weights( + spec, + nbits=None, + quantization_mode=_QUANTIZATION_MODE_DEQUANTIZE, + selector=selector, + ) + + def _quantized_weight_and_scale(W): + W_max = max(_np.abs(_np.min(W)), _np.abs(_np.max(W))) + W_normalized = W / W_max # [-1,1] + W_quantized_int8 = 127.0 * W_normalized # [-127, 127] + W_quantized_int8 = W_quantized_int8.astype(_np.int8) + quant_scale = W_max / 127.0 + return W_quantized_int8, quant_scale + + if spec.WhichOneof("Type") == "neuralNetwork": + nn_spec = spec.neuralNetwork + + elif spec.WhichOneof("Type") in "neuralNetworkClassifier": + nn_spec = spec.neuralNetworkClassifier + + elif spec.WhichOneof("Type") in "neuralNetworkRegressor": + nn_spec = spec.neuralNetworkRegressor + + def _process_nn_layers(nn_spec): + layers = nn_spec.layers + + # Replacing each matrix multiplication + for layer in layers: + layer_type = layer.WhichOneof("layer") + if not selector.do_quantize(layer): + continue + + if layer_type == "branch": + _process_nn_layers(layer.branch.ifBranch) + _process_nn_layers(layer.branch.elseBranch) + + elif layer_type == "loop": + _process_nn_layers(layer.loop.conditionNetwork) + _process_nn_layers(layer.loop.bodyNetwork) + + elif layer_type in ["innerProduct", "batchedMatmul"]: + # Bump up to appropriate spec version if at least one replacement occurs + spec.specificationVersion = max( + _SPECIFICATION_VERSION_IOS_14, spec.specificationVersion, + ) + + # InnerProduct + if layer_type == "innerProduct": + matmul_layer = layer.innerProduct + + # BatchedMatmul + elif layer_type == "batchedMatmul": + matmul_layer = 
layer.batchedMatmul + + wp = matmul_layer.weights + + if len(wp.floatValue) == 0: + continue + else: + qw, qs = _quantized_weight_and_scale(wp.floatValue) + + print( + "Modifying layer {} with size of weights {}, to use Int8 * Int8 matrix multiplication".format( + layer.name, qw.size + ) + ) + + matmul_layer.int8DynamicQuantize = True + wp.quantization.numberOfBits = 8 + wp.quantization.linearQuantization.scale.extend(map(float, [qs])) + wp.int8RawValue = bytes() + wp.int8RawValue += qw.tobytes() + del wp.floatValue[:] + + _process_nn_layers(nn_spec) + + return spec + + else: + raise ValueError("Model Type {} not supported.".format(spec.WhichOneof("Type"))) + + +def quantize_weights( + full_precision_model, nbits, quantization_mode="linear", sample_data=None, **kwargs +): + """ + Utility function to convert a full precision (float) MLModel to a + nbit quantized MLModel (float16). + + full_precision_model: MLModel + Model which will be converted to half precision. Currently conversion + for only neural network models is supported. If a pipeline model is + passed in then all embedded neural network models embedded within + will be converted. + + nbits: int + Number of bits per quantized weight. Only 16-bit float point and + 1-8 bit is supported + + quantization_mode: str + One of the following: + + "linear": + Linear quantization with scale and bias assuming the range of weight + values is [A, B], where A = min(weight), B = max(weight) + "linear_lut": + Simple linear quantization represented as a lookup table + "kmeans_lut": + LUT based quantization, where LUT is generated by K-Means clustering + "custom_lut": + LUT quantization where LUT and quantized weight params are + calculated using a custom function. If this mode is selected then + a custom function must be passed in kwargs with key lut_function. + The function must have input params (nbits, wp) where nbits is the + number of quantization bits and wp is the list of weights for a + given layer. The function should return two parameters (lut, qw) + where lut is an array of length (2^n bits)containing LUT values and + qw is the list of quantized weight parameters. See + ``_get_linear_lookup_table_and_weight`` for a sample implementation. + "linear_symmetric": + Linear quantization with scale and bias assuming the range of weight + values is [-A, A], where A = max(abs(weight)). + + sample_data: str | [dict] + Data used to characterize performance of the quantized model in + comparison to the full precision model. Either a list of sample input + dictionaries or an absolute path to a directory containing images. + Path to a directory containing images is only valid for models with + one image input. For all other models a list of sample inputs must be + provided. + + kwargs: keyword arguments + *lut_function* : (``callable function``) + A callable function provided when quantization mode is set to + ``_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE``. See ``quantization_mode`` + for more details. + *selector*: QuantizedLayerSelector + A QuanatizedLayerSelector object that can be derived to provide + custom quantization selection. + + Returns + ------- + model: MLModel + The quantized MLModel instance if running on macOS 10.14 or later, + otherwise the quantized model specification is returned + + Examples + -------- + .. 
sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import quantization_utils + >>> model = coremltools.models.MLModel('my_model.mlmodel') + >>> quantized_model = quantization_utils.quantize_weights(model, 8, "linear") + """ + + qmode_mapping = { + "linear": _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + "kmeans": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + "kmeans_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + "linear_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + "custom_lut": _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + "dequantization": _QUANTIZATION_MODE_DEQUANTIZE, + "linear_symmetric": _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + } + try: + qmode = qmode_mapping[quantization_mode] + except KeyError: + # kmeans is deprecated. Instead kmeans_lut is used. No need to show it. + del qmode_mapping["kmeans"] + raise Exception( + "Invalid quantization mode. Quantization mode must be " + "one of {}".format(qmode_mapping) + ) + + print("Quantizing using {} quantization".format(quantization_mode)) + spec = full_precision_model.get_spec() + if nbits == 16 and spec.isUpdatable: + raise Exception("updatable models cannot get quantized to FP16.") + + qspec = _quantize_spec_weights(spec, nbits, qmode, **kwargs) + quantized_model = _get_model(qspec, compute_units=full_precision_model.compute_unit) + + if _macos_version() >= (10, 14) and sample_data: + compare_models(full_precision_model, quantized_model, sample_data) + + return quantized_model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py new file mode 100644 index 00000000..52d481dd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py @@ -0,0 +1,297 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
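Stepping back to ``activate_int8_int8_matrix_multiplications`` above: the symmetric int8 scheme it applies to each constant weight matrix reduces to the following (a numpy restatement of its inner ``_quantized_weight_and_scale`` helper, for illustration only):

.. code-block:: python

    import numpy as np

    def quantized_weight_and_scale(W):
        W = np.asarray(W, dtype=np.float32)
        w_max = np.abs(W).max()                    # symmetric range [-w_max, w_max]
        W_q = (127.0 * W / w_max).astype(np.int8)  # int8 codes in [-127, 127]
        scale = w_max / 127.0                      # dequantize: W ~= W_q * scale
        return W_q, scale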
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ...proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2 + + +def _get_weight_param_summary(wp): + """Get a summary of _NeuralNetwork_pb2.WeightParams + Args: + wp : _NeuralNetwork_pb2.WeightParams - the _NeuralNetwork_pb2.WeightParams message to display + Returns: + a str summary for wp + """ + summary_str = "" + if wp.HasField("quantization"): + nbits = wp.quantization.numberOfBits + quant_type = ( + "linearly" + if wp.quantization.HasField("linearQuantization") + else "lookup-table" + ) + summary_str += "{}-bit {} quantized".format(nbits, quant_type) + + if len(wp.floatValue) > 0: + summary_str += "({} floatValues)".format(len(wp.floatValue)) + if len(wp.float16Value) > 0: + summary_str += "({} bytes float16Values)".format(len(wp.float16Value)) + if len(wp.rawValue) > 0: + summary_str += "({} bytes rawValues)".format(len(wp.rawValue)) + + return summary_str + + +def _get_lstm_weight_param_summary(lstm_wp): + weight_name_list = [ + "W_i", + "W_f", + "W_z", + "W_o", + "H_i", + "H_f", + "H_z", + "H_o", + "b_i", + "b_f", + "b_z", + "b_o", + "p_i", + "p_f", + "p_o", + ] + wp_summary_list = [ + _get_weight_param_summary(lstm_wp.inputGateWeightMatrix), + _get_weight_param_summary(lstm_wp.forgetGateWeightMatrix), + _get_weight_param_summary(lstm_wp.blockInputWeightMatrix), + _get_weight_param_summary(lstm_wp.outputGateWeightMatrix), + _get_weight_param_summary(lstm_wp.inputGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.forgetGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.blockInputRecursionMatrix), + _get_weight_param_summary(lstm_wp.outputGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.inputGateBiasVector), + _get_weight_param_summary(lstm_wp.forgetGateBiasVector), + _get_weight_param_summary(lstm_wp.blockInputBiasVector), + _get_weight_param_summary(lstm_wp.outputGateBiasVector), + _get_weight_param_summary(lstm_wp.inputGatePeepholeVector), + _get_weight_param_summary(lstm_wp.forgetGatePeepholeVector), + _get_weight_param_summary(lstm_wp.outputGatePeepholeVector), + ] + lstm_wp_summary_list = [] + for idx, summary in enumerate(wp_summary_list): + if len(summary) > 0: + lstm_wp_summary_list.append(weight_name_list[idx] + ", " + summary) + + return ("\n" + " " * 8).join(lstm_wp_summary_list) + + +def _get_feature_description_summary(feature): + if feature.type.HasField("multiArrayType"): + shape = list(feature.type.multiArrayType.shape) + int_shape = [int(x) for x in shape] + return str(int_shape) + else: + return ("({})".format(str(feature.type))).replace("\n", "") + + +def _summarize_network_layer_info(layer): + """ + Args: + layer - an MLModel NeuralNetwork Layer protobuf message + Returns: + layer_type : str - type of layer + layer_name : str - name of the layer + layer_inputs : list[str] - a list of strings representing input blobs of the layer + layer_outputs : list[str] - a list of strings representing output blobs of the layer + layer_field_content : list[(str, str)] - a list of two-tuple of (parameter_name, content) + """ + layer_type_str = layer.WhichOneof("layer") + + layer_name = layer.name + layer_inputs = list(layer.input) + layer_outputs = list(layer.output) + + typed_layer = getattr(layer, layer_type_str) + layer_field_names = [l.name for l in typed_layer.DESCRIPTOR.fields] + layer_field_content = [] + + for name in layer_field_names: + field = getattr(typed_layer, name) + 
summary_str = "" + if type(field) == _NeuralNetwork_pb2.LSTMWeightParams: + summary_str = _get_lstm_weight_param_summary(field) + elif type(field) == _NeuralNetwork_pb2.WeightParams: + summary_str = _get_weight_param_summary(field) + else: + field_str = str(field) + if len(field_str) > 0: + summary_str = field_str.replace("\n", " ") + if len(summary_str) > 0: + layer_field_content.append([name, summary_str]) + + return layer_type_str, layer_name, layer_inputs, layer_outputs, layer_field_content + + +def _summarize_neural_network_spec(mlmodel_spec): + """ Summarize network into the following structure. + Args: + mlmodel_spec : mlmodel spec + Returns: + inputs : list[(str, str)] - a list of two tuple (name, descriptor) for each input blob. + outputs : list[(str, str)] - a list of two tuple (name, descriptor) for each output blob + layers : list[(str, list[str], list[str], list[(str, str)])] - a list of layers represented by + layer name, input blobs, output blobs, a list of (parameter name, content) + """ + inputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.input + ] + outputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.output + ] + nn = None + + if mlmodel_spec.HasField("neuralNetwork"): + nn = mlmodel_spec.neuralNetwork + elif mlmodel_spec.HasField("neuralNetworkClassifier"): + nn = mlmodel_spec.neuralNetworkClassifier + elif mlmodel_spec.HasField("neuralNetworkRegressor"): + nn = mlmodel_spec.neuralNetworkRegressor + + layers = ( + [_summarize_network_layer_info(layer) for layer in nn.layers] + if nn != None + else None + ) + return (inputs, outputs, layers) + + +def _prRed(skk, end=None): + print("\033[91m {}\033[00m".format(skk), end=end) + + +def _prLightPurple(skk, end=None): + print("\033[94m {}\033[00m".format(skk), end=end) + + +def _prPurple(skk, end=None): + print("\033[95m {}\033[00m".format(skk), end=end) + + +def _prGreen(skk, end=None): + print("\033[92m {}\033[00m".format(skk), end=end) + + +def _print_layer_type_and_arguments( + layer_type_str, layer_inputs, indentation, to_indent=True, shape=None, value=None +): + if to_indent: + _prRed(indentation * "\t" + "{}".format(layer_type_str), end="") + else: + _prRed("{}".format(layer_type_str), end="") + + if shape is None: + _prLightPurple("({})".format(", ".join(layer_inputs))) + elif value is not None: + _prLightPurple("(shape = ", end="") + print("{}, ".format(str(shape)), end="") + _prLightPurple("value = ", end="") + values = ",".join(["{0: 0.1f}".format(v) for v in value]).lstrip() + print("[{}]".format(values), end="") + _prLightPurple(")") + else: + _prLightPurple("(shape = ", end="") + print("{}".format(str(shape)), end="") + _prLightPurple(")") + + +def _find_size(arr): + s = 1 + for a in arr: + s *= a + return s + + +def _summarize_neural_network_spec_code_style( + nn_spec, indentation=0, input_names=None, output_names=None +): + """ + print nn_spec as if writing code + """ + indentation_size = 1 + + if input_names: + print("def model({}):".format(", ".join(input_names))) + indentation += indentation_size + + for i, layer in enumerate(nn_spec.layers): + layer_type_str = layer.WhichOneof("layer") + layer_inputs = list(layer.input) + layer_outputs = list(layer.output) + + if layer_type_str == "loop": + if len(layer.loop.conditionNetwork.layers) > 0: + _prPurple(indentation * "\t" + "Condition Network: ") + _summarize_neural_network_spec_code_style( + layer.loop.conditionNetwork, indentation=indentation + ) + if 
layer.loop.conditionVar: + layer_inputs.append(layer.loop.conditionVar) + _print_layer_type_and_arguments(layer_type_str, layer_inputs, indentation) + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.loop.bodyNetwork, indentation=indentation + ) + if len(layer.loop.conditionNetwork.layers) > 0: + _prPurple(indentation * "\t" + "Condition Network: ") + _summarize_neural_network_spec_code_style( + layer.loop.conditionNetwork, indentation=indentation + ) + indentation -= indentation_size + continue + + if layer_type_str == "branch": + _print_layer_type_and_arguments(layer_type_str, layer_inputs, indentation) + _prRed(indentation * "\t" + "IfBranch:") + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.branch.ifBranch, indentation=indentation + ) + indentation -= indentation_size + if len(layer.branch.elseBranch.layers) > 0: + _prRed(indentation * "\t" + "ElseBranch:") + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.branch.elseBranch, indentation=indentation + ) + indentation -= indentation_size + continue + + if layer_type_str == "loopBreak" or layer_type_str == "loopContinue": + _prRed(indentation * "\t" + layer_type_str) + continue + + shape = None + value = None + if layer_type_str == "loadConstant": + shape = layer.loadConstant.shape + shape = list(shape) + int_shape = [int(x) for x in shape] + shape = tuple([1, 1] + int_shape) + size = _find_size(shape) + if size < 4 and len(layer.loadConstant.data.floatValue) > 0: + value = map(float, list(layer.loadConstant.data.floatValue)) + + if layer_type_str == "loadConstantND": + shape = layer.loadConstantND.shape + shape = tuple(map(int, list(shape))) + size = _find_size(shape) + if size < 4 and len(layer.loadConstantND.data.floatValue) > 0: + value = map(float, list(layer.loadConstantND.data.floatValue)) + + print(indentation * "\t", end="") + print("{} =".format(", ".join(layer_outputs)), end="") + _print_layer_type_and_arguments( + layer_type_str, + layer_inputs, + indentation, + to_indent=False, + shape=shape, + value=value, + ) + + if output_names: + _prRed("\n" + indentation * "\t" + "return ", end="") + print("{}".format(", ".join(output_names))) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py new file mode 100644 index 00000000..760946ea --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py @@ -0,0 +1,191 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Neural Network optimizer utilities. +""" + + +class AdamParams: + """ + Adam - A Method for Stochastic Optimization. + + Attributes + ---------- + lr: float + The learning rate that controls learning step size. Adjustable in progress, default: 0.01. + batch: int + The mini-batch size, number of examples used to compute single gradient step, default: 10. + beta1: float + Controls the exponential decay rate for the first moment estimates, default: 0.9. + beta2: float + Controls the exponential decay rate for the second moment estimates, default: 0.999. + eps: float + The epsilon, a very small number to prevent any division by zero in the implementation, default: 1e-8. 
+ + Methods + ------- + set_lr(value, min, max) + Set value for learning rate. + set_batch(value, allow_set) + Set value for batch size. + set_beta1(value, min, max) + Set value for beta1. + set_beta2(value, min, max) + Set value for beta2. + set_eps(value, min, max) + Set value for epsilon. + """ + + def __init__(self, lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8): + self._lr = RangeParam(lr) + self._batch = Batch(batch) + self._beta1 = RangeParam(beta1) + self._beta2 = RangeParam(beta2) + self._eps = RangeParam(eps) + + def set_lr(self, value, min, max): + self._lr = RangeParam(value, min, max) + + def set_batch(self, value, allowed_set): + self._batch = Batch(value, allowed_set) + + def set_beta1(self, value, min, max): + self._beta1 = RangeParam(value, min, max) + + def set_beta2(self, value, min, max): + self._beta2 = RangeParam(value, min, max) + + def set_eps(self, value, min, max): + self._eps = RangeParam(value, min, max) + + @property + def lr(self): + return self._lr + + @property + def batch(self): + return self._batch + + @property + def beta1(self): + return self._beta1 + + @property + def beta2(self): + return self._beta2 + + @property + def eps(self): + return self._eps + + +class SgdParams: + """ + SGD - Stochastic Gradient Descent optimizer. + + Attributes + ---------- + lr: float + The learning rate that controls learning step size. Adjustable in progress, default: 0.01. + batch: int + The mini-batch size, number of examples used to compute single gradient step, default: 10. + momentum: float + The momentum factor that helps accelerate gradients vectors in the right direction, default 0. + + Methods + ------- + set_lr(value, min, max) + Set value for learning rate. + set_batch(value, allow_set) + Set value for batch size. + set_momentum(value, min, max) + Set value for momentum. + """ + + def __init__(self, lr=1e-2, batch=10, momentum=0): + self._lr = RangeParam(lr) + self._batch = Batch(batch) + self._momentum = RangeParam(momentum) + + def set_lr(self, value, min, max): + self._lr = RangeParam(value, min, max) + + def set_batch(self, value, allowed_set): + self._batch = Batch(value, allowed_set) + + def set_momentum(self, value, min, max): + self._momentum = RangeParam(value, min, max) + + @property + def lr(self): + return self._lr + + @property + def batch(self): + return self._batch + + @property + def momentum(self): + return self._momentum + + +class RangeParam: + """ + Range Parameter optimizer. + + Attributes + ---------- + value: float + min: float + max: float + """ + + def __init__(self, value, min=0, max=1): + self._value = value + if min >= max: + raise ValueError("min value must be less than max value.") + self._min = min + self._max = max + + @property + def value(self): + return self._value + + @property + def min(self): + return self._min + + @property + def max(self): + return self._max + + +class Batch: + """ + Batch optimizer. 
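+    Describes the mini-batch size used when updating the model, together with
+    the set of batch sizes the model is allowed to use.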
+ + Attributes + ---------- + value: float + allowed_set: float + """ + + def __init__(self, value, allowed_set=None): + self._value = value + if allowed_set is None: + self._allowed_set = [value] + else: + if len(allowed_set) > len(set(allowed_set)): + raise ValueError("values in allowed_set must be unique.") + self._allowed_set = allowed_set + + @property + def value(self): + return self._value + + @property + def allowed_set(self): + return self._allowed_set diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py new file mode 100644 index 00000000..ed96300d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py @@ -0,0 +1,140 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy as _copy + +from coremltools.models.utils import _get_model + +from .builder import NeuralNetworkBuilder + + +def make_image_input( + model, + input_name, + is_bgr=False, + red_bias=0.0, + blue_bias=0.0, + green_bias=0.0, + gray_bias=0.0, + scale=1.0, + image_format="NHWC", +): + """ + Convert input of type multiarray to type image + + Parameters + ---------- + TODO + + Returns + ------- + model: MLModel + A coreML MLModel object + + Examples + -------- + TODO + """ + + spec = model.get_spec() + + if spec.WhichOneof("Type") not in [ + "neuralNetwork", + "neuralNetworkClassifier", + "neuralNetworkRegressor", + ]: + raise ValueError( + "Provided model must be of type neuralNetwork, neuralNetworkClassifier or neuralNetworkRegressor" + ) + + if not isinstance(input_name, list): + input_name = [input_name] + + spec_inputs = [i.name for i in spec.description.input] + for name in input_name: + if name not in spec_inputs: + msg = "Provided input_name: {}, is not an existing input to the model" + raise ValueError(msg.format(name)) + + builder = NeuralNetworkBuilder(spec=spec) + builder.set_pre_processing_parameters( + image_input_names=input_name, + is_bgr=is_bgr, + red_bias=red_bias, + green_bias=green_bias, + blue_bias=blue_bias, + gray_bias=gray_bias, + image_scale=scale, + image_format=image_format, + ) + return _get_model(spec) + + +def make_nn_classifier( + model, + class_labels, + predicted_feature_name=None, + predicted_probabilities_output=None, +): + """ + Convert a model of type "neuralNetwork" to type "neuralNetworkClassifier" + + Parameters + ---------- + TODO + + Returns + ------- + model: MLModel + A coreML MLModel object + + Examples + -------- + TODO + """ + + spec = model.get_spec() + + if spec.WhichOneof("Type") != "neuralNetwork": + raise ValueError('Provided model must be of type "neuralNetwork"') + + # convert type to "neuralNetworkClassifier" and copy messages from "neuralNetwork" + nn_spec = _copy.deepcopy(spec.neuralNetwork) + spec.ClearField("neuralNetwork") + for layer in nn_spec.layers: + spec.neuralNetworkClassifier.layers.add().CopyFrom(layer) + for preprocessing in nn_spec.preprocessing: + spec.neuralNetworkClassifier.preprocessing.add().CopyFrom(preprocessing) + spec.neuralNetworkClassifier.arrayInputShapeMapping = nn_spec.arrayInputShapeMapping + spec.neuralNetworkClassifier.imageInputShapeMapping = nn_spec.imageInputShapeMapping + spec.neuralNetworkClassifier.updateParams.CopyFrom(nn_spec.updateParams) + + # set properties related to 
classifier
+    builder = NeuralNetworkBuilder(spec=spec)
+    message = "Class labels must be a list of integers / strings or a file path"
+    classes_in = class_labels
+    if isinstance(classes_in, str):
+        import os
+
+        if not os.path.isfile(classes_in):
+            raise ValueError("Path to class labels (%s) does not exist." % classes_in)
+        with open(classes_in, "r") as f:
+            classes = f.read()
+        classes = classes.splitlines()
+    elif isinstance(classes_in, list):  # list[int or str]
+        classes = classes_in
+        assert all([isinstance(x,
+                               (int, str)) for x in classes]), message
+    else:
+        raise ValueError(message)
+
+    kwargs = {}
+    if predicted_feature_name is not None:
+        kwargs["predicted_feature_name"] = predicted_feature_name
+    if predicted_probabilities_output is not None:
+        kwargs["prediction_blob"] = predicted_probabilities_output
+    builder.set_class_labels(classes, **kwargs)
+
+    return _get_model(spec)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py
new file mode 100644
index 00000000..487c2466
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py
@@ -0,0 +1,305 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+Pipeline utils for this package.
+"""
+from .. import SPECIFICATION_VERSION as _SPECIFICATION_VERSION
+from ..proto import Model_pb2 as _Model_pb2
+from . import _feature_management
+from . import model as _model
+from ._interface_management import (set_classifier_interface_params,
+                                    set_regressor_interface_params,
+                                    set_training_features,
+                                    set_transform_interface_params)
+
+
+class Pipeline:
+    """
+    A pipeline model that exposes a sequence of models as a single model.
+    It requires a set of inputs, a sequence of other models, and a set of outputs.
+
+    This class is the base class for :py:class:`PipelineClassifier` and
+    :py:class:`PipelineRegressor`, which contain a sequence ending in a classifier
+    or regressor and themselves behave like a classifier or regressor. This class
+    may be used directly for a sequence of feature transformer objects.
+
+    """
+
+    def __init__(self, input_features, output_features, training_features=None):
+        """
+        Create a pipeline of models to be executed sequentially.
+
+        Parameters
+        ----------
+
+        input_features: [list of 2-tuples]
+            Name(s) of the input features, given as a list of `('name', datatype)`
+            tuples. The datatypes entry can be any of the data types defined in the
+            :py:mod:`models.datatypes` module.
+
+        output_features: [list of features]
+            Name(s) of the output features, given as a list of
+            `('name', datatype)` tuples. The datatypes entry can be any of the
+            data types defined in the :py:mod:`models.datatypes` module. All features
+            must be either defined in the inputs or be produced by one of the
+            contained models.
+
+        """
+        spec = _Model_pb2.Model()
+        spec.specificationVersion = _SPECIFICATION_VERSION
+
+        # Access this to declare it as a pipeline
+        spec.pipeline
+
+        spec = set_transform_interface_params(
+            spec, input_features, output_features, training_features
+        )
+
+        # Save the spec as a member variable.
+ self.spec = spec + + def _validate_updatable_pipeline_on_add_model(self, spec): + if spec.isUpdatable: + raise ValueError( + "New sub-models cannot be added after the pipeline has been marked as updatable" + ) + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. + + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. + """ + + self._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + + pipeline = self.spec.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def _validate_sub_models_and_make_updatable(self, pipeline, spec): + + num_models = len(pipeline.models) + if num_models < 1: + raise ValueError( + "Pipeline does not seem to have any models. It should be marked as updatable only after adding all sub-models." + ) + + for model in pipeline.models[:-1]: + if model.isUpdatable: + raise ValueError( + "Only the last model can be updatable in an updatable pipeline." + ) + + last_model = pipeline.models[num_models - 1] + if not last_model.isUpdatable: + raise ValueError( + "A pipeline can be made updatable only if the last model is updatable." + ) + + spec.isUpdatable = True + + def make_updatable(self): + self._validate_sub_models_and_make_updatable(self.spec.pipeline, self.spec) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) + + +class PipelineRegressor(Pipeline): + """ + A pipeline model that exposes a sequence of models as a single model, + It requires a set of inputs, a sequence of other models and a set of outputs. + In this case the pipeline itself behaves as a regression model by designating + a real valued output feature as its 'predicted feature'. + """ + + def __init__(self, input_features, output_features, training_features=None): + """ + Create a set of pipeline models given a set of model specs. The final + output model must be a regression model. + + Parameters + ---------- + + input_features: [list of 2-tuples] + Name(s) of the input features, given as a list of `('name', datatype)` + tuples. The datatypes entry can be any of the data types defined in the + :py:mod:`models.datatypes` module. + + output_features: [list of features] + Name(s) of the output features, given as a list of + `('name',datatype)` tuples. The datatypes entry can be any of the + data types defined in the :py:mod:`models.datatypes` module. All features + must be either defined in the inputs or be produced by one of the + contained models. + + """ + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + + # Access this to declare it as a pipeline + spec.pipelineRegressor + spec = set_regressor_interface_params( + spec, input_features, output_features, training_features + ) + + # Save as a member variable + self.spec = spec + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. 
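+
+        Models are executed in the order in which they are added, so sub-models
+        must be appended in pipeline order.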
+ + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. + """ + + super()._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + + pipeline = self.spec.pipelineRegressor.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def make_updatable(self): + super()._validate_sub_models_and_make_updatable( + self.spec.pipelineRegressor.pipeline, self.spec + ) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) + + +class PipelineClassifier(Pipeline): + """ + A pipeline model that exposes a sequence of models as a single model, + It requires a set of inputs, a sequence of other models and a set of outputs. + In this case the pipeline itself behaves as a classification model by designating + a discrete categorical output feature as its 'predicted feature'. + """ + + def __init__( + self, input_features, class_labels, output_features=None, training_features=None + ): + """ + Create a set of pipeline models given a set of model specs. The last + model in this list must be a classifier model. + + Parameters + ---------- + input_features: [list of 2-tuples] + Name(s) of the input features, given as a list of `('name', datatype)` + tuples. The datatypes entry can be any of the data types defined in the + :py:mod:`models.datatypes` module. + + class_labels: [list] + A list of string or integer class labels to use in making predictions. + This list must match the class labels in the model outputting the categorical + predictedFeatureName + + output_features: [list] + A string or a list of two strings specifying the names of the two + output features, the first being a class label corresponding + to the class with the highest predicted score, and the second being + a dictionary mapping each class to its score. If `output_features` + is a string, it specifies the predicted class label and the class + scores is set to the default value of `"classProbability."` + + """ + + output_features = _feature_management.process_or_validate_classifier_output_features( + output_features, class_labels + ) + + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + spec = set_classifier_interface_params( + spec, + input_features, + class_labels, + "pipelineClassifier", + output_features, + training_features, + ) + + # Access this to declare it as a pipeline + spec.pipelineClassifier + + # Save as a member variable + self.spec = spec + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. + + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. 
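+
+        Examples
+        --------
+        A minimal sketch, assuming ``feature_extractor`` and ``classifier`` are
+        already-loaded ``MLModel`` instances whose outputs and inputs line up:
+
+        .. sourcecode:: python
+
+            >>> pipeline = PipelineClassifier(input_features, class_labels)
+            >>> pipeline.add_model(feature_extractor)
+            >>> pipeline.add_model(classifier)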
+ """ + + super()._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + pipeline = self.spec.pipelineClassifier.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def make_updatable(self): + super(PipelineClassifier, self)._validate_sub_models_and_make_updatable( + self.spec.pipelineClassifier.pipeline, self.spec + ) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py new file mode 100644 index 00000000..7cd023a0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py @@ -0,0 +1,426 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Tree ensemble builder class to construct CoreML models. +""" +import collections as _collections + +from .. import SPECIFICATION_VERSION as _SPECIFICATION_VERSION +from ..proto import Model_pb2 as _Model_pb2 +from ..proto import TreeEnsemble_pb2 as _TreeEnsemble_pb2 +from ._interface_management import (set_classifier_interface_params, + set_regressor_interface_params) + + +class TreeEnsembleBase: + """ + Base class for the tree ensemble builder class. This should be instantiated + either through the :py:class:`TreeEnsembleRegressor` or + :py:class:`TreeEnsembleClassifier` classes. + """ + + def __init__(self): + """ + High level Python API to build a tree ensemble model for Core ML. + """ + # Set inputs and outputs + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + + # Save the spec in the protobuf + self.spec = spec + + def set_default_prediction_value(self, values): + """ + Set the default prediction value(s). + + The values given here form the base prediction value that the values + at activated leaves are added to. If values is a scalar, then + the output of the tree must also be 1 dimensional; otherwise, values + must be a list with length matching the dimension of values in the tree. + + Parameters + ---------- + values: [int | double | list[double]] + Default values for predictions. + + """ + if type(values) is not list: + values = [float(values)] + self.tree_parameters.numPredictionDimensions = len(values) + for value in values: + self.tree_parameters.basePredictionValue.append(value) + + def set_post_evaluation_transform(self, value): + r""" + Set the post processing transform applied after the prediction value + from the tree ensemble. + + Parameters + ---------- + + value: str + + A value denoting the transform applied. Possible values are: + + - ``"NoTransform"`` (default). Do not apply a transform. + + - ``"Classification_SoftMax"``. + + Apply a softmax function to the outcome to produce normalized, + non-negative scores that sum to 1. The transformation applied to + dimension `i` is equivalent to: + + .. math:: + + \frac{e^{x_i}}{\sum_j e^{x_j}} + + Note: This is the output transformation applied by the XGBoost package + with multiclass classification. + + - ``"Regression_Logistic"``. 
+
+              Applies a logistic transform to the predicted value, specifically:
+
+              .. math::
+
+                  (1 + e^{-v})^{-1}
+
+              This is the transformation used in binary classification.
+
+
+        """
+        self.tree_spec.postEvaluationTransform = _TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(
+            value
+        )
+
+    def add_branch_node(
+        self,
+        tree_id,
+        node_id,
+        feature_index,
+        feature_value,
+        branch_mode,
+        true_child_id,
+        false_child_id,
+        relative_hit_rate=None,
+        missing_value_tracks_true_child=False,
+    ):
+        """
+        Add a branch node to the tree ensemble.
+
+        Parameters
+        ----------
+        tree_id: int
+            ID of the tree to add the node to.
+
+        node_id: int
+            ID of the node within the tree.
+
+        feature_index: int
+            Index of the feature in the input being split on.
+
+        feature_value: double or int
+            The value used in the feature comparison determining the traversal
+            direction from this node.
+
+        branch_mode: str
+            Branch mode of the node, specifying the condition under which the node
+            referenced by ``true_child_id`` is evaluated next.
+
+            Must be one of the following:
+
+            - ``"BranchOnValueLessThanEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] <= feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueLessThan"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] < feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueGreaterThanEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] >= feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueGreaterThan"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] > feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] == feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueNotEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] != feature_value``, and ``false_child_id``
+              otherwise.
+
+        true_child_id: int
+            ID of the child under the true condition of the split. An error will
+            be raised at model validation if this does not match the ``node_id``
+            of a node instantiated by ``add_branch_node`` or ``add_leaf_node`` within
+            this ``tree_id``.
+
+        false_child_id: int
+            ID of the child under the false condition of the split. An error will
+            be raised at model validation if this does not match the ``node_id``
+            of a node instantiated by ``add_branch_node`` or ``add_leaf_node`` within
+            this ``tree_id``.
+
+        relative_hit_rate: float [optional]
+            When the model is compiled by Core ML, this gives hints to Core ML
+            about which node is more likely to be hit on evaluation, allowing for
+            additional optimizations. The values can be on any scale, with the
+            values between child nodes being compared relative to each other.
+
+        missing_value_tracks_true_child: bool [optional]
+            If the training data contains NaN values or missing values, then this
+            flag determines which direction a NaN value traverses.
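+
+        Examples
+        --------
+        A minimal sketch, assuming ``tm`` is an already-constructed tree ensemble
+        builder with a single array input: route ``input[0] <= 0.5`` to leaf
+        node 1, everything else to leaf node 2.
+
+        .. sourcecode:: python
+
+            >>> tm.add_branch_node(0, 0, 0, 0.5, "BranchOnValueLessThanEqual", 1, 2)
+            >>> tm.add_leaf_node(0, 1, -1.0)
+            >>> tm.add_leaf_node(0, 2, 1.0)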
+ + """ + spec_node = self.tree_parameters.nodes.add() + spec_node.treeId = tree_id + spec_node.nodeId = node_id + spec_node.branchFeatureIndex = int(feature_index) + spec_node.branchFeatureValue = feature_value + spec_node.trueChildNodeId = true_child_id + spec_node.falseChildNodeId = false_child_id + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + branch_mode + ) + + if relative_hit_rate is not None: + spec_node.relativeHitRate = relative_hit_rate + spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child + + def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate=None): + """ + Add a leaf node to the tree ensemble. + + Parameters + ---------- + tree_id: int + ID of the tree to add the node to. + + node_id: int + ID of the node within the tree. + + values: [float | int | list | dict] + Value(s) at the leaf node to add to the prediction when this node is + activated. If the prediction dimension of the tree is 1, then the + value is specified as a float or integer value. + + For multidimensional predictions, the values can be a list of numbers + with length matching the dimension of the predictions or a dictionary + mapping index to value added to that dimension. + + Note that the dimension of any tree must match the dimension given + when :py:meth:`set_default_prediction_value` is called. + + """ + spec_node = self.tree_parameters.nodes.add() + spec_node.treeId = tree_id + spec_node.nodeId = node_id + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + "LeafNode" + ) + + if not isinstance(values, _collections.abc.Iterable): + values = [values] + + if relative_hit_rate is not None: + spec_node.relativeHitRate = relative_hit_rate + + if type(values) == dict: + iter = values.items() + else: + iter = enumerate(values) + + for index, value in iter: + ev_info = spec_node.evaluationInfo.add() + ev_info.evaluationIndex = index + ev_info.evaluationValue = float(value) + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + "LeafNode" + ) + + +class TreeEnsembleRegressor(TreeEnsembleBase): + """ + Tree Ensemble builder class to construct a Tree Ensemble regression model. + + The TreeEnsembleRegressor class constructs a Tree Ensemble model incrementally + using methods to add branch and leaf nodes specifying the behavior of the model. + + Examples + -------- + + In the following example, the code saves the model to disk, which is a + recommended practice but not required. + + .. sourcecode:: python + + >>> # Required inputs + >>> import coremltools + >>> from coremltools.models import datatypes + >>> from coremltools.models.tree_ensemble import TreeEnsembleRegressor + >>> import numpy as np + + >>> # Define input features + >>> input_features = [("a", datatypes.Array(3)), ("b", (datatypes.Double()))] + + >>> # Define output_features + >>> output_features = [("predicted_values", datatypes.Double())] + + >>> tm = TreeEnsembleRegressor(features = input_features, target = output_features) + + >>> # Split on a[2] <= 3 + >>> tm.add_branch_node(0, 0, 2, 3, "BranchOnValueLessThanEqual", 1, 2) + + >>> # Add leaf to the true branch of node 0 that subtracts 1. + >>> tm.add_leaf_node(0, 1, -1) + + >>> # Add split on b == 0 to the false branch of node 0, which is index 3 + >>> tm.add_branch_node(0, 2, 3, 0, "BranchOnValueEqual", 3, 4) + + >>> # Add leaf to the true branch of node 2 that adds 1 to the result. 
+ >>> tm.add_leaf_node(0, 3, 1) + + >>> # Add leaf to the false branch of node 2 that subtracts 1 from the result. + >>> tm.add_leaf_node(0, 4, -1) + + >>> tm.set_default_prediction_value([0, 0]) + + >>> # save the model to a .mlmodel file + >>> model_path = './tree.mlmodel' + >>> coremltools.models.utils.save_spec(tm.spec, model_path) + + >>> # load the .mlmodel + >>> mlmodel = coremltools.models.MLModel(model_path) + + >>> # make predictions + >>> test_input = { + >>> 'a': np.array([0, 1, 2]).astype(np.float32), + >>> "b": 3.0, + >>> } + >>> predictions = mlmodel.predict(test_input) + + """ + + def __init__(self, features, target): + """ + Create a Tree Ensemble regression model that takes one or more input + features and maps them to an output feature. + + Parameters + ---------- + + features: [list of features] + Name(s) of the input features, given as a list of ``('name', datatype)`` + tuples. The features are one of ``models.datatypes.Int64``, + ``datatypes.Double``, or ``models.datatypes.Array``. + Feature indices in the nodes are counted sequentially from 0 through + the features. + + target: (default = None) + Name of the target feature predicted. + """ + super().__init__() + spec = self.spec + spec = set_regressor_interface_params(spec, features, target) + self.tree_spec = spec.treeEnsembleRegressor + self.tree_parameters = self.tree_spec.treeEnsemble + + +class TreeEnsembleClassifier(TreeEnsembleBase): + """ + Tree Ensemble builder class to construct a Tree Ensemble classification model. + + The TreeEnsembleClassifier class constructs a Tree Ensemble model incrementally + using methods to add branch and leaf nodes specifying the behavior of the model. + + + Examples + -------- + + In the following example, the code saves the model to disk, which is a + recommended practice but not required. + + .. sourcecode:: python + + >>> input_features = [("a", datatypes.Array(3)), ("b", datatypes.Double())] + + >>> tm = TreeEnsembleClassifier(features = input_features, class_labels = [0, 1], + output_features = "predicted_class") + + >>> # Split on a[2] <= 3 + >>> tm.add_branch_node(0, 0, 2, 3, "BranchOnValueLessThanEqual", 1, 2) + + >>> # Add leaf to the true branch of node 0 that subtracts 1. + >>> tm.add_leaf_node(0, 1, -1) + + >>> # Add split on b == 0 to the false branch of node 0. + >>> tm.add_branch_node(0, 2, 3, 0, "BranchOnValueEqual", 3, 4) + + >>> # Add leaf to the true branch of node 2 that adds 1 to the result. + >>> tm.add_leaf_node(0, 3, 1) + + >>> # Add leaf to the false branch of node 2 that subtracts 1 from the result. + >>> tm.add_leaf_node(0, 4, -1) + + >>> # Put in a softmax transform to translate these into probabilities. + >>> tm.set_post_evaluation_transform("Classification_SoftMax") + + >>> tm.set_default_prediction_value([0, 0]) + + >>> # save the model to a .mlmodel file + >>> model_path = './tree.mlmodel' + >>> coremltools.models.utils.save_spec(tm.spec, model_path) + + >>> # load the .mlmodel + >>> mlmodel = coremltools.models.MLModel(model_path) + + >>> # make predictions + >>> test_input = { + >>> 'a': np.array([0, 1, 2]).astype(np.float32), + >>> "b": 3.0, + >>> } + >>> predictions = mlmodel.predict(test_input) + + """ + + def __init__(self, features, class_labels, output_features): + """ + Create a tree ensemble classifier model. + + Parameters + ---------- + features: [list of features] + Name(s) of the input features, given as a list of ``('name', datatype)`` + tuples. 
The features are one of ``models.datatypes.Int64``, + ``datatypes.Double``, or ``models.datatypes.Array``. + Feature indices in the nodes are counted sequentially from 0 through + the features. + + class_labels: [list] + A list of string or integer class labels to use in making predictions. + The length of this must match the dimension of the tree model. + + output_features: [list] + A string or a list of two strings specifying the names of the two + output features, the first being a class label corresponding + to the class with the highest predicted score, and the second being + a dictionary mapping each class to its score. If ``output_features`` + is a string, it specifies the predicted class label and the class + scores is set to the default value of ``"classProbability"``. + """ + super().__init__() + spec = self.spec + spec = set_classifier_interface_params( + spec, features, class_labels, "treeEnsembleClassifier", output_features + ) + self.tree_spec = spec.treeEnsembleClassifier + self.tree_parameters = self.tree_spec.treeEnsemble diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py new file mode 100644 index 00000000..fe6c30df --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py @@ -0,0 +1,1097 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Utilities for the entire package. +""" + +from collections.abc import Iterable as _Iterable +from functools import lru_cache as _lru_cache +import math as _math +import os as _os +import shutil as _shutil +import subprocess as _subprocess +import sys as _sys +import tempfile as _tempfile +from typing import Optional as _Optional +import warnings as _warnings + +import numpy as _np + +import coremltools as _ct +from coremltools import ComputeUnit as _ComputeUnit +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer as _NameSanitizer +from coremltools.proto import Model_pb2 as _Model_pb2 +import coremltools.proto.MIL_pb2 as _mil_proto + +from .._deps import _HAS_SCIPY + +_MLMODEL_EXTENSION = ".mlmodel" +_MLPACKAGE_EXTENSION = ".mlpackage" +_MODEL_FILE_NAME = 'model.mlmodel' +_WEIGHTS_FILE_NAME = 'weight.bin' +_WEIGHTS_DIR_NAME = 'weights' +_MLPACKAGE_AUTHOR_NAME = "com.apple.CoreML" + +try: + from ..libmodelpackage import ModelPackage as _ModelPackage +except: + _ModelPackage = None + +if _HAS_SCIPY: + import scipy.sparse as _sp + + +def _to_unicode(x): + if isinstance(x, bytes): + return x.decode() + else: + return x + + +def _remove_invalid_keys(input_dict, model): + # make sure that input_dict does not contain an input name, which + # is not present in the list of model inputs + input_dict_keys = list(input_dict.keys()) + model_input_names = set([inp.name for inp in model._spec.description.input]) + for k in input_dict_keys: + if k not in model_input_names: + del input_dict[k] + + +def _create_mlpackage( + proto_spec: _Model_pb2, + weights_dir: _Optional[str] = None, + package_path: _Optional[str] = None, +) -> str: + """ + Args: + proto_spec: The proto spec of the model. + weights_dir: Copy weights from this path to the mlpackage. + package_path: Place the created mlpackage at this path. Error out if this path is a non-empty directory. 
+
+    Returns:
+        Path to the mlpackage.
+    """
+    if package_path is None:
+        package_path = _tempfile.mkdtemp(suffix=_MLPACKAGE_EXTENSION)
+    if _os.path.exists(package_path):
+        if _os.listdir(package_path):
+            raise FileExistsError(
+                f"The package_path is invalid because it's a non-empty directory: {package_path}"
+            )
+        # If package_path is an empty dir, the ModelPackage load will error out with a `manifest.json not found` issue.
+        _shutil.rmtree(package_path)
+
+    _, ext = _os.path.splitext(package_path)
+    if ext != _MLPACKAGE_EXTENSION:
+        raise Exception(
+            f"For an ML Package, extension must be {_MLPACKAGE_EXTENSION} (not {ext})"
+        )
+
+    package = _ModelPackage(package_path)
+
+    # Save proto to disk as the root model file, and copy into the model package.
+    spec_file = _tempfile.NamedTemporaryFile(suffix=_MLMODEL_EXTENSION)
+    spec_file.write(proto_spec.SerializeToString())
+    spec_file.flush()
+    package.setRootModel(spec_file.name, _MODEL_FILE_NAME, _MLPACKAGE_AUTHOR_NAME,
+                         "CoreML Model Specification")
+    # The spec file is auto-cleaned after close, which is fine because it has already been added to the model package.
+    spec_file.close()
+
+    # Add the weights bundle into the model package.
+    if weights_dir is not None:
+        package.addItem(
+            weights_dir,
+            _WEIGHTS_DIR_NAME,
+            _MLPACKAGE_AUTHOR_NAME,
+            "CoreML Model Weights",
+        )
+
+    return package_path
+
+
+def save_spec(spec, filename, auto_set_specification_version=False, weights_dir=None):
+    """
+    Save a protobuf model specification to file.
+
+    Parameters
+    ----------
+    spec: Model_pb
+        Protobuf representation of the model
+
+    filename: str
+        File path where the spec gets saved.
+
+    auto_set_specification_version: bool
+        If True, always try to set the specification version automatically.
+
+    weights_dir: str
+        Path to the directory containing the weights.bin file. This is required
+        when the spec is of model type mlprogram. If the mlprogram does not contain
+        any weights, this path can be an empty directory.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        coremltools.utils.save_spec(spec, "HousePricer.mlmodel")
+        coremltools.utils.save_spec(spec, "HousePricer.mlpackage")
+        coremltools.utils.save_spec(
+            spec, "mlprogram_model.mlpackage", weights_dir="/path/to/weights/directory"
+        )
+
+    See Also
+    --------
+    load_spec
+    """
+    name, ext = _os.path.splitext(filename)
+
+    is_package = False
+
+    if not ext:
+        filename = "{}{}".format(filename, _MLMODEL_EXTENSION)
+    elif ext == _MLPACKAGE_EXTENSION:
+        is_package = True
+    elif ext == _MLMODEL_EXTENSION:
+        is_package = False
+    else:
+        raise Exception("Extension must be {} or {} (not {})".format(_MLMODEL_EXTENSION, _MLPACKAGE_EXTENSION, ext))
+
+    if auto_set_specification_version:
+        try:
+            # Always try to downgrade the specification version to the
+            # minimal version that supports everything in this mlmodel.
+            from ..libcoremlpython import _MLModelProxy
+
+            spec = _MLModelProxy.auto_set_specification_version(spec)
+        except Exception as e:
+            print(e)
+            _warnings.warn(
+                "Failed to automatically set the specification version for this model.",
+                RuntimeWarning,
+            )
+
+    if is_package:
+        if _ModelPackage is None:
+            raise Exception(
+                "Unable to load libmodelpackage. Cannot save spec."
+            )
+        if spec.WhichOneof('Type') == "mlProgram" and weights_dir is None:
+            raise Exception('spec of type mlProgram cannot be saved without the'
Please provide the path to the weights file as well, ' + 'using the \'weights_dir\' argument.') + _create_mlpackage(spec, weights_dir=weights_dir, package_path=filename) + else: + with open(filename, "wb") as f: + f.write(spec.SerializeToString()) + + +def load_spec(model_path: str) -> _Model_pb2: + """ + Load a protobuf model specification from file (mlmodel) or directory (mlpackage). + + Parameters + ---------- + model_path: Path to the model from which the protobuf spec is loaded. + + Returns + ------- + model_spec: Model_pb + Protobuf representation of the model + + Examples + -------- + .. sourcecode:: python + + spec = coremltools.utils.load_spec("HousePricer.mlmodel") + spec = coremltools.utils.load_spec("HousePricer.mlpackage") + + See Also + -------- + save_spec + """ + if _os.path.isdir(model_path): + if _ModelPackage is None: + raise Exception("Unable to load libmodelpackage. Cannot make save spec.") + specfile = _ModelPackage(model_path).getRootModel().path() + else: + specfile = model_path + + spec = _Model_pb2.Model() + with open(specfile, "rb") as f: + spec.ParseFromString(f.read()) + return spec + + +def _get_nn_layers(spec): + """ + Returns a list of neural network layers if the model contains any. + + Parameters + ---------- + spec: Model_pb + A model protobuf specification. + + Returns + ------- + [NN layer] + list of all layers (including layers from elements of a pipeline + + """ + + layers = [] + if spec.WhichOneof("Type") == "pipeline": + layers = [] + for model_spec in spec.pipeline.models: + if not layers: + return _get_nn_layers(model_spec) + else: + layers.extend(_get_nn_layers(model_spec)) + + elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]: + layers = [] + for model_spec in spec.pipeline.models: + if not layers: + return _get_nn_layers(model_spec) + else: + layers.extend(_get_nn_layers(model_spec)) + + elif spec.neuralNetwork.layers: + layers = spec.neuralNetwork.layers + elif spec.neuralNetworkClassifier.layers: + layers = spec.neuralNetworkClassifier.layers + elif spec.neuralNetworkRegressor.layers: + layers = spec.neuralNetworkRegressor.layers + + return layers + + +def _fp32_to_reversed_fp16_byte_array(fp32_arr): + raw_fp16 = _np.float16(fp32_arr) + x = "" + for fp16 in raw_fp16: + all_bytes = _np.fromstring(fp16.tobytes(), dtype="int8") + x += all_bytes[1].tobytes() + x += all_bytes[0].tobytes() + return x + + +def _fp32_to_fp16_byte_array(fp32_arr): + if _np.amax(fp32_arr) >= 65504 or _np.amin(fp32_arr) <= -65504: + raise Exception( + "Model cannot be converted as " + "it has weights that cannot be represented in " + "half precision.\n" + ) + + if _sys.byteorder == "little": + return _np.float16(fp32_arr).tobytes() + else: + return _fp32_to_reversed_fp16_byte_array(fp32_arr) + + +def _wp_to_fp16wp(wp): + assert wp + # If the float32 field is empty do nothing. + if len(wp.floatValue) == 0: + return + wp.float16Value = _fp32_to_fp16_byte_array(wp.floatValue) + del wp.floatValue[:] + +def _convert_neural_network_spec_weights_to_fp16(fp_spec): + from .neural_network.quantization_utils import ( + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, _quantize_spec_weights) + + qspec = _quantize_spec_weights(fp_spec, 16, _QUANTIZATION_MODE_LINEAR_QUANTIZATION) + return qspec + + +def _convert_neural_network_weights_to_fp16(full_precision_model): + """ + Utility function to convert a full precision (float) MLModel to a + half precision MLModel (float16). 
+ + Parameters + ---------- + full_precision_model: MLModel + Model which will be converted to half precision. Currently conversion + for only neural network models is supported. If a pipeline model is + passed in then all embedded neural network models embedded within + will be converted. + + Returns + ------- + model: MLModel + The converted half precision MLModel + + """ + spec = full_precision_model.get_spec() + return _get_model(_convert_neural_network_spec_weights_to_fp16(spec)) + + +def _get_model(spec, compute_units=_ComputeUnit.ALL): + """ + Utility to get the model and the data. + """ + from . import MLModel + + if isinstance(spec, MLModel): + return spec + else: + return MLModel(spec, compute_units=compute_units) + + +def evaluate_regressor(model, data, target="target", verbose=False): + """ + Evaluate a CoreML regression model and compare against predictions + from the original framework (for testing correctness of conversion). + + Parameters + ---------- + model: MLModel or str + A loaded MLModel or a path to a saved MLModel + + data: Dataframe + Test data on which to evaluate the models + + target: str + Name of the column in the dataframe to be compared against the prediction + + verbose: bool + Set to true for a more verbose output. + + See Also + -------- + evaluate_classifier + + Examples + -------- + .. sourcecode:: python + + metrics = coremltools.utils.evaluate_regressor( + spec, "data_and_predictions.csv", "target" + ) + print(metrics) + {"samples": 10, "rmse": 0.0, max_error: 0.0} + """ + model = _get_model(model) + + if verbose: + print("") + print("Other Framework\t\tPredicted\t\tDelta") + + max_error = 0 + error_squared = 0 + + for _, row in data.iterrows(): + input_dict = dict(row) + _remove_invalid_keys(input_dict, model) + predicted = model.predict(input_dict)[_to_unicode(target)] + other_framework = row[target] + delta = predicted - other_framework + + if verbose: + print("{}\t\t\t\t{}\t\t\t{:0.4f}".format(other_framework, predicted, delta)) + + max_error = max(abs(delta), max_error) + error_squared = error_squared + (delta * delta) + + ret = { + "samples": len(data), + "rmse": _math.sqrt(error_squared / len(data)), + "max_error": max_error, + } + + if verbose: + print("results: {}".format(ret)) + return ret + + +def evaluate_classifier(model, data, target="target", verbose=False): + """ + Evaluate a Core ML classifier model and compare against predictions + from the original framework (for testing correctness of conversion). + Use this evaluation for models that don't deal with probabilities. + + Parameters + ---------- + filename: list of str or list of MLModel + File from where to load the model from (OR) a loaded + version of the MLModel. + + data: list of str or list of Dataframe + Test data on which to evaluate the models (dataframe, + or path to a csv file). + + target: str + Column to interpret as the target column + + verbose: bool + Set to true for a more verbose output. + + See Also + -------- + evaluate_regressor, evaluate_classifier_with_probabilities + + Examples + -------- + .. 
sourcecode:: python + + metrics = coremltools.utils.evaluate_classifier( + spec, "data_and_predictions.csv", "target" + ) + print(metrics) + {"samples": 10, num_errors: 0} + """ + model = _get_model(model) + if verbose: + print("") + print("Other Framework\t\tPredicted") + + num_errors = 0 + + for _, row in data.iterrows(): + input_dict = dict(row) + _remove_invalid_keys(input_dict, model) + predicted = model.predict(input_dict)[_to_unicode(target)] + other_framework = row[target] + if predicted != other_framework: + num_errors += 1 + + if verbose: + print("{}\t\t\t\t{}".format(other_framework, predicted)) + + ret = {"num_samples": len(data), "num_errors": num_errors} + + if verbose: + print("results: {}".format(ret)) + + return ret + + +def evaluate_classifier_with_probabilities( + model, data, probabilities="probabilities", verbose=False +): + """ + Evaluate a classifier specification for testing. + + Parameters + ---------- + filename: [str | Model] + File from where to load the model from (OR) a loaded + version of the MLModel. + + data: [str | Dataframe] + Test data on which to evaluate the models (dataframe, + or path to a csv file). + + probabilities: str + Column to interpret as the probabilities column + + verbose: bool + Verbosity levels of the predictions. + """ + + model = _get_model(model) + if verbose: + print("") + print("Other Framework\t\tPredicted") + + max_probability_error, num_key_mismatch = 0, 0 + + for _, row in data.iterrows(): + input_dict = {k: v for k, v in dict(row).items() if k != probabilities} + _remove_invalid_keys(input_dict, model) + predicted_values = model.predict(input_dict)[_to_unicode(probabilities)] + other_values = row[probabilities] + + if set(predicted_values.keys()) != set(other_values.keys()): + if verbose: + print( + "Different classes: ", + str(predicted_values.keys()), + str(other_values.keys()), + ) + num_key_mismatch += 1 + continue + + for cur_class, cur_predicted_class_values in predicted_values.items(): + delta = cur_predicted_class_values - other_values[cur_class] + if verbose: + print(delta, cur_predicted_class_values, other_values[cur_class]) + + max_probability_error = max(abs(delta), max_probability_error) + + if verbose: + print("") + + ret = { + "num_samples": len(data), + "max_probability_error": max_probability_error, + "num_key_mismatch": num_key_mismatch, + } + + if verbose: + print("results: {}".format(ret)) + + return ret + + +def rename_feature( + spec, current_name, new_name, rename_inputs=True, rename_outputs=True +): + """ + Rename a feature in the specification. + + Parameters + ---------- + spec: Model_pb + The specification containing the feature to rename. + + current_name: str + Current name of the feature. If this feature doesn't exist, the rename + is a no-op. + + new_name: str + New name of the feature. + + rename_inputs: bool + Search for `current_name` only in the input features (i.e ignore output + features) + + rename_outputs: bool + Search for `current_name` only in the output features (i.e ignore input + features) + + Examples + -------- + .. 
+    .. sourcecode:: python
+
+        # In-place rename of spec
+        model = MLModel("model.mlmodel")
+        spec = model.get_spec()
+        coremltools.utils.rename_feature(spec, "old_feature", "new_feature_name")
+        # re-initialize model
+        model = MLModel(spec)
+        model.save("model.mlmodel")
+
+        # Rename a spec when the model is an mlprogram; in that case,
+        # the weights are stored outside of the spec
+        model = coremltools.convert(torch_model, convert_to="mlprogram")
+        spec = model.get_spec()
+        # print info about inputs and outputs
+        print(spec.description)
+        coremltools.utils.rename_feature(spec, "old_feature", "new_feature_name")
+        # re-initialize model
+        model = MLModel(spec, weights_dir=model.weights_dir)
+        model.save("model.mlpackage")
+    """
+
+    if not rename_inputs and not rename_outputs:
+        return
+
+    changed_input = False
+    changed_output = False
+
+    if rename_inputs:
+        for input in spec.description.input:
+            if input.name == current_name:
+                input.name = new_name
+                changed_input = True
+
+    if rename_outputs:
+        for output in spec.description.output:
+            if output.name == current_name:
+                output.name = new_name
+                changed_output = True
+
+        if spec.description.predictedFeatureName == current_name:
+            spec.description.predictedFeatureName = new_name
+
+        if spec.description.predictedProbabilitiesName == current_name:
+            spec.description.predictedProbabilitiesName = new_name
+
+    if not changed_input and not changed_output:
+        return
+
+    # Rename internally in NN model
+    nn = None
+    for nn_type in [
+        "neuralNetwork",
+        "neuralNetworkClassifier",
+        "neuralNetworkRegressor",
+    ]:
+        if spec.HasField(nn_type):
+            nn = getattr(spec, nn_type)
+
+    if nn is not None:
+        for layer in nn.layers:
+            if rename_inputs:
+                for index, name in enumerate(layer.input):
+                    if name == current_name:
+                        layer.input[index] = new_name
+            if rename_outputs:
+                for index, name in enumerate(layer.output):
+                    if name == current_name:
+                        layer.output[index] = new_name
+
+        if rename_inputs:
+            for preprocess_params in nn.preprocessing:
+                if preprocess_params.featureName == current_name:
+                    preprocess_params.featureName = new_name
+
+        if spec.HasField("neuralNetworkClassifier"):
+            if nn.labelProbabilityLayerName == current_name:
+                nn.labelProbabilityLayerName = new_name
+
+    # Rename internally for feature vectorizer
+    if spec.HasField("featureVectorizer") and rename_inputs:
+        for input in spec.featureVectorizer.inputList:
+            if input.inputColumn == current_name:
+                input.inputColumn = new_name
+                changed_input = True
+
+    # Rename for pipeline models
+    pipeline = None
+    if spec.HasField("pipeline"):
+        pipeline = spec.pipeline
+    elif spec.HasField("pipelineClassifier"):
+        pipeline = spec.pipelineClassifier.pipeline
+    elif spec.HasField("pipelineRegressor"):
+        pipeline = spec.pipelineRegressor.pipeline
+
+    if pipeline is not None:
+        for index, model in enumerate(pipeline.models):
+            rename_feature(
+                model,
+                current_name,
+                new_name,
+                rename_inputs or (index != 0),
+                rename_outputs or (index < len(spec.pipeline.models)),
+            )
+
+    # Rename for mlProgram
+    if spec.HasField("mlProgram"):
+        new_name_sanitized = _NameSanitizer().sanitize_name(new_name)
+        if new_name != new_name_sanitized:
+            raise ValueError(
+                "Input/output names for ML Program must be of the format "
+                "[a-zA-Z_][a-zA-Z0-9_]*. That is, it must start with a letter and "
+                "only contain numerals, underscores, or letters. Provided feature "
+                "name, \"{}\" does not satisfy these requirements.".format(new_name)
+            )
+        mil = spec.mlProgram
+        for function in mil.functions.values():
+            for name_value_type in function.inputs:
+                if name_value_type.name == current_name:
+                    name_value_type.name = new_name
+            for block in function.block_specializations.values():
+                for i, out_name in enumerate(block.outputs):
+                    if out_name == current_name:
+                        block.outputs[i] = new_name
+                for op in block.operations:
+                    for argument in op.inputs.values():
+                        for binding in argument.arguments:
+                            if binding.HasField("name"):
+                                if binding.name == current_name:
+                                    binding.name = new_name
+                    for name_value_type in op.outputs:
+                        if name_value_type.name == current_name:
+                            name_value_type.name = new_name
+
+
+def _sanitize_value(x):
+    """
+    Performs cleaning steps on the data so various type comparisons can
+    be performed correctly.
+    """
+    if isinstance(x, (str, int, float,)):
+        return x
+    elif _HAS_SCIPY and _sp.issparse(x):
+        return x.todense()
+    elif isinstance(x, _np.ndarray):
+        return x
+    elif isinstance(x, tuple):
+        return (_sanitize_value(v) for v in x)
+    elif isinstance(x, list):
+        return [_sanitize_value(v) for v in x]
+    elif isinstance(x, dict):
+        return dict((_sanitize_value(k), _sanitize_value(v)) for k, v in x.items())
+    else:
+        assert False, str(x)
+
+
+def _element_equal(x, y):
+    """
+    Performs a robust equality test between elements.
+    """
+    if isinstance(x, _np.ndarray) or isinstance(y, _np.ndarray):
+        try:
+            return (abs(_np.asarray(x) - _np.asarray(y)) < 1e-5).all()
+        except:
+            return False
+    elif isinstance(x, dict):
+        return (
+            isinstance(y, dict)
+            and _element_equal(x.keys(), y.keys())
+            and all(_element_equal(x[k], y[k]) for k in x.keys())
+        )
+    elif isinstance(x, float):
+        return abs(x - y) < 1e-5 * (abs(x) + abs(y))
+    elif isinstance(x, (list, tuple)):
+        return x == y
+    else:
+        return bool(x == y)
+
+
+def evaluate_transformer(model, input_data, reference_output, verbose=False):
+    """
+    Evaluate a transformer specification for testing.
+
+    Parameters
+    ----------
+    model: [str | MLModel]
+        Path from which to load the MLModel, or an MLModel that has
+        already been loaded.
+
+    input_data: list of dict
+        Test data on which to evaluate the model.
+
+    reference_output: list of dict
+        Expected results for the model.
+
+    verbose: bool
+        If True, print each prediction as it is evaluated.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        input_data = [{"input_1": 1, "input_2": 2}, {"input_1": 3, "input_2": 3}]
+        expected_output = [{"input_1": 2.5, "input_2": 2.0}, {"input_1": 1.3, "input_2": 2.3}]
+        metrics = coremltools.utils.evaluate_transformer(
+            scaler_spec, input_data, expected_output
+        )
+
+    See Also
+    --------
+    evaluate_regressor, evaluate_classifier
+    """
+    model = _get_model(model)
+    if verbose:
+        print(model)
+        print("")
+        print("Other Framework\t\tPredicted")
+
+    num_errors = 0
+    for index, row in enumerate(input_data):
+        assert isinstance(row, dict)
+        sanitized_row = _sanitize_value(row)
+        ref_data = _sanitize_value(reference_output[index])
+        if verbose:
+            print("Input:\n\t", str(row))
+            print("Correct output:\n\t", str(ref_data))
+
+        predicted = _sanitize_value(model.predict(sanitized_row))
+
+        assert isinstance(ref_data, dict)
+        assert isinstance(predicted, dict)
+
+        predicted_trimmed = dict((k, predicted[k]) for k in ref_data.keys())
+
+        if verbose:
+            print("Predicted:\n\t", str(predicted_trimmed))
+
+        if not _element_equal(predicted_trimmed, ref_data):
+            num_errors += 1
+
+    ret = {"num_samples": len(input_data), "num_errors": num_errors}
+
+    if verbose:
+        print("results: {}".format(ret))
+    return ret
+
+
+def _has_custom_layer(spec):
+    """
+    Returns True if the given protobuf specification has a custom layer, and False otherwise.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    True if the protobuf specification contains a neural network with a custom layer, False otherwise.
+    """
+    layers = _get_nn_layers(spec)
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            return True
+
+    return False
+
+
+def _get_custom_layer_names(spec):
+    """
+    Returns the set of className fields of the custom layers that appear in the given protobuf spec.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    set(str)
+        A set of unique className fields of custom layers that appear in the model.
+    """
+    layers = _get_nn_layers(spec)
+    layers_out = set()
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            layers_out.add(layer.custom.className)
+
+    return layers_out
+
+
+def _get_custom_layers(spec):
+    """
+    Returns a list of all neural network custom layers in the spec.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    [NN layer]
+        A list of custom layer implementations.
+    """
+    layers = _get_nn_layers(spec)
+    layers_out = []
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            layers_out.append(layer)
+
+    return layers_out
+
+
+def _replace_custom_layer_name(spec, oldname, newname):
+    """
+    Substitutes newname for oldname in the className field of custom layers,
+    modifying the spec in place. If there are no custom layers, or no layers
+    with className=oldname, then the spec is unchanged.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    oldname: str
+        The custom layer className to be replaced.
+
+    newname: str
+        The new className value to replace oldname.
+    """
+    layers = _get_custom_layers(spec)
+    for layer in layers:
+        if layer.custom.className == oldname:
+            layer.custom.className = newname
+
+
+def _is_macos():
+    """Returns True if the current platform is macOS, False otherwise."""
+    return _sys.platform == "darwin"
+
+
+@_lru_cache()
+def _macos_version():
+    """
+    Returns the macOS version as a tuple of integers, making it easy to do
+    proper version comparisons. On non-Macs, it returns an empty tuple.
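+
+    A minimal illustrative sketch of the intended tuple comparison (the
+    gated code path here is hypothetical):
+
+    .. sourcecode:: python
+
+        # Tuples compare lexicographically, so this is True on macOS 12.x
+        # and later, and False on non-Macs (where () < (12, 0)).
+        if _macos_version() >= (12, 0):
+            pass  # e.g. enable a macOS 12+ only code path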
+    """
+    if _is_macos():
+        try:
+            ver_str = _subprocess.run(["sw_vers", "-productVersion"], stdout=_subprocess.PIPE).stdout.decode('utf-8').strip('\n')
+            return tuple([int(v) for v in ver_str.split(".")])
+        except:
+            raise Exception("Unable to determine the macOS version")
+    return ()
+
+
+def _python_version():
+    """
+    Returns the Python version as a tuple of integers.
+    """
+    version = _sys.version.split(" ")[0]
+    version = list(map(int, list(version.split("."))))
+    return tuple(version)
+
+
+def _get_feature(spec, feature_name):
+    for input_feature in spec.description.input:
+        if input_feature.name == feature_name:
+            return input_feature
+
+    for output_feature in spec.description.output:
+        if output_feature.name == feature_name:
+            return output_feature
+
+    raise Exception("Feature with name {} does not exist".format(feature_name))
+
+
+def _get_input_names(spec):
+    """
+    Returns a list of the names of the inputs to this model.
+    :param spec: The model protobuf specification
+    :return: list of str A list of input feature names
+    """
+    retval = [feature.name for feature in spec.description.input]
+    return retval
+
+
+def convert_double_to_float_multiarray_type(spec):
+    """
+    Convert all double multiarray feature descriptions (input, output, and
+    training input) to float multiarrays.
+
+    Parameters
+    ----------
+    spec: Model_pb
+        The specification containing the multiarray types to convert.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # In-place convert multiarray type of spec
+        spec = mlmodel.get_spec()
+        coremltools.utils.convert_double_to_float_multiarray_type(spec)
+        model = coremltools.models.MLModel(spec)
+    """
+
+    def _convert_to_float(feature):
+        if feature.type.HasField("multiArrayType"):
+            if (
+                feature.type.multiArrayType.dataType
+                == _Model_pb2.ArrayFeatureType.DOUBLE
+            ):
+                feature.type.multiArrayType.dataType = (
+                    _Model_pb2.ArrayFeatureType.FLOAT32
+                )
+
+    for feature in spec.description.input:
+        _convert_to_float(feature)
+
+    for feature in spec.description.output:
+        _convert_to_float(feature)
+
+    for feature in spec.description.trainingInput:
+        _convert_to_float(feature)
+
+    if spec.WhichOneof("Type") == "pipeline":
+        for model_spec in spec.pipeline.models:
+            convert_double_to_float_multiarray_type(model_spec)
+
+
+def make_pipeline(*models):
+    """
+    Makes a pipeline with the given models.
+
+    Parameters
+    ----------
+    *models:
+        Two or more instances of ct.models.MLModel.
+
+    Returns
+    -------
+    ct.models.MLModel
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        my_model1 = ct.models.MLModel('/tmp/m1.mlpackage')
+        my_model2 = ct.models.MLModel('/tmp/m2.mlmodel')
+
+        my_pipeline_model = ct.utils.make_pipeline(my_model1, my_model2)
+    """
+
+    def updateBlobFileName(proto_message, new_path):
+        if type(proto_message) == _mil_proto.Value:
+            # Value protobuf message. This is what might need to be updated.
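+            # An mlProgram's weights are not embedded in the proto itself; Value
+            # messages reference the external weight.bin file via
+            # blobFileValue.fileName. When several programs are merged into one
+            # pipeline package, each spec's references are retargeted here to a
+            # uniquely named copy of its weight file ("<i>-weight.bin" below).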
+ if proto_message.WhichOneof('value') == 'blobFileValue': + assert proto_message.blobFileValue.fileName == "@model_path/weights/weight.bin" + proto_message.blobFileValue.fileName = new_path + elif hasattr(proto_message, 'ListFields'): + # Normal protobuf message + for f in proto_message.ListFields(): + updateBlobFileName(f[1], new_path) + elif hasattr(proto_message, 'values'): + # Protobuf map + for v in proto_message.values(): + updateBlobFileName(v, new_path) + elif isinstance(proto_message, _Iterable) and not isinstance(proto_message, str): + # Repeated protobuf message + for e in proto_message: + updateBlobFileName(e, new_path) + + + assert len(models) > 1 + input_specs = list(map(lambda m: m.get_spec(), models)) + + pipeline_spec = _ct.proto.Model_pb2.Model() + pipeline_spec.specificationVersion = max( + map(lambda spec: spec.specificationVersion, input_specs) + ) + + # Set pipeline input + pipeline_spec.description.input.MergeFrom( + input_specs[0].description.input + ) + + # Set pipeline output + pipeline_spec.description.output.MergeFrom( + input_specs[-1].description.output + ) + + # Map input shapes to output shapes + var_name_to_type = {} + for i in range(len(input_specs) - 1): + for j in input_specs[i + 1].description.input: + var_name_to_type[j.name] = j.type + + for j in input_specs[i].description.output: + # If shape is already present, don't override it + if j.type.WhichOneof('Type') == 'multiArrayType' and len(j.type.multiArrayType.shape) != 0: + continue + + if j.name in var_name_to_type: + j.type.CopyFrom(var_name_to_type[j.name]) + + # Update each model's spec to have a unique weight filename + for i, cur_spec in enumerate(input_specs): + if cur_spec.WhichOneof("Type") == "mlProgram": + new_file_path = f"@model_path/weights/{i}-weight.bin" + updateBlobFileName(cur_spec.mlProgram, new_file_path) + pipeline_spec.pipeline.models.append(cur_spec) + + mlpackage_path = _create_mlpackage(pipeline_spec) + dst = mlpackage_path + '/Data/' + _MLPACKAGE_AUTHOR_NAME + '/' + _WEIGHTS_DIR_NAME + _os.mkdir(dst) + + # Copy and rename each model's weight file + for i, cur_model in enumerate(models): + if cur_model.weights_dir is not None: + weight_file_path = cur_model.weights_dir + "/" + _WEIGHTS_FILE_NAME + if _os.path.exists(weight_file_path): + _shutil.copyfile(weight_file_path, dst + f"/{i}-weight.bin") + + return _ct.models.MLModel(pipeline_spec, weights_dir=dst) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py new file mode 100644 index 00000000..a94fcc18 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py @@ -0,0 +1,71 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: ArrayFeatureExtractor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ArrayFeatureExtractor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1b\x41rrayFeatureExtractor.proto\x12\x14\x43oreML.Specification\"-\n\x15\x41rrayFeatureExtractor\x12\x14\n\x0c\x65xtractIndex\x18\x01 \x03(\x04\x42\x02H\x03\x62\x06proto3') +) + + + + +_ARRAYFEATUREEXTRACTOR = _descriptor.Descriptor( + name='ArrayFeatureExtractor', + full_name='CoreML.Specification.ArrayFeatureExtractor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='extractIndex', full_name='CoreML.Specification.ArrayFeatureExtractor.extractIndex', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=53, + serialized_end=98, +) + +DESCRIPTOR.message_types_by_name['ArrayFeatureExtractor'] = _ARRAYFEATUREEXTRACTOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ArrayFeatureExtractor = _reflection.GeneratedProtocolMessageType('ArrayFeatureExtractor', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATUREEXTRACTOR, + __module__ = 'ArrayFeatureExtractor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureExtractor) + )) +_sym_db.RegisterMessage(ArrayFeatureExtractor) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py new file mode 100644 index 00000000..b48e078e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py @@ -0,0 +1,142 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: AudioFeaturePrint.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='AudioFeaturePrint.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x17\x41udioFeaturePrint.proto\x12!CoreML.Specification.CoreMLModels\"\x9d\x02\n\x11\x41udioFeaturePrint\x12K\n\x05sound\x18\x14 \x01(\x0b\x32:.CoreML.Specification.CoreMLModels.AudioFeaturePrint.SoundH\x00\x1a\xa1\x01\n\x05Sound\x12X\n\x07version\x18\x01 \x01(\x0e\x32G.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion\">\n\x0cSoundVersion\x12\x19\n\x15SOUND_VERSION_INVALID\x10\x00\x12\x13\n\x0fSOUND_VERSION_1\x10\x01\x42\x17\n\x15\x41udioFeaturePrintTypeB\x02H\x03\x62\x06proto3') +) + + + +_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION = _descriptor.EnumDescriptor( + name='SoundVersion', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SOUND_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SOUND_VERSION_1', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=261, + serialized_end=323, +) +_sym_db.RegisterEnumDescriptor(_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION) + + +_AUDIOFEATUREPRINT_SOUND = _descriptor.Descriptor( + name='Sound', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _AUDIOFEATUREPRINT_SOUND_SOUNDVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=162, + serialized_end=323, +) + +_AUDIOFEATUREPRINT = _descriptor.Descriptor( + name='AudioFeaturePrint', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sound', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.sound', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_AUDIOFEATUREPRINT_SOUND, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AudioFeaturePrintType', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.AudioFeaturePrintType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=63, + 
serialized_end=348, +) + +_AUDIOFEATUREPRINT_SOUND.fields_by_name['version'].enum_type = _AUDIOFEATUREPRINT_SOUND_SOUNDVERSION +_AUDIOFEATUREPRINT_SOUND.containing_type = _AUDIOFEATUREPRINT +_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION.containing_type = _AUDIOFEATUREPRINT_SOUND +_AUDIOFEATUREPRINT.fields_by_name['sound'].message_type = _AUDIOFEATUREPRINT_SOUND +_AUDIOFEATUREPRINT.oneofs_by_name['AudioFeaturePrintType'].fields.append( + _AUDIOFEATUREPRINT.fields_by_name['sound']) +_AUDIOFEATUREPRINT.fields_by_name['sound'].containing_oneof = _AUDIOFEATUREPRINT.oneofs_by_name['AudioFeaturePrintType'] +DESCRIPTOR.message_types_by_name['AudioFeaturePrint'] = _AUDIOFEATUREPRINT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +AudioFeaturePrint = _reflection.GeneratedProtocolMessageType('AudioFeaturePrint', (_message.Message,), dict( + + Sound = _reflection.GeneratedProtocolMessageType('Sound', (_message.Message,), dict( + DESCRIPTOR = _AUDIOFEATUREPRINT_SOUND, + __module__ = 'AudioFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound) + )) + , + DESCRIPTOR = _AUDIOFEATUREPRINT, + __module__ = 'AudioFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.AudioFeaturePrint) + )) +_sym_db.RegisterMessage(AudioFeaturePrint) +_sym_db.RegisterMessage(AudioFeaturePrint.Sound) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py new file mode 100644 index 00000000..eeaad0a0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py @@ -0,0 +1,283 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: BayesianProbitRegressor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='BayesianProbitRegressor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1d\x42\x61yesianProbitRegressor.proto\x12\x14\x43oreML.Specification\"\xa0\x06\n\x17\x42\x61yesianProbitRegressor\x12\x18\n\x10numberOfFeatures\x18\x01 \x01(\r\x12\x44\n\x04\x62ias\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.BayesianProbitRegressor.Gaussian\x12M\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32;.CoreML.Specification.BayesianProbitRegressor.FeatureWeight\x12\"\n\x1aregressionInputFeatureName\x18\n \x01(\t\x12 \n\x18optimismInputFeatureName\x18\x0b \x01(\t\x12%\n\x1dsamplingScaleInputFeatureName\x18\x0c \x01(\t\x12*\n\"samplingTruncationInputFeatureName\x18\r \x01(\t\x12\x1d\n\x15meanOutputFeatureName\x18\x14 \x01(\t\x12!\n\x19varianceOutputFeatureName\x18\x15 \x01(\t\x12/\n\'pessimisticProbabilityOutputFeatureName\x18\x16 \x01(\t\x12+\n#sampledProbabilityOutputFeatureName\x18\x17 \x01(\t\x1a+\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x11\n\tprecision\x18\x02 \x01(\x01\x1ay\n\x12\x46\x65\x61tureValueWeight\x12\x14\n\x0c\x66\x65\x61tureValue\x18\x01 \x01(\r\x12M\n\rfeatureWeight\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.BayesianProbitRegressor.Gaussian\x1au\n\rFeatureWeight\x12\x11\n\tfeatureId\x18\x01 \x01(\r\x12Q\n\x07weights\x18\x02 \x03(\x0b\x32@.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeightB\x02H\x03\x62\x06proto3') +) + + + + +_BAYESIANPROBITREGRESSOR_GAUSSIAN = _descriptor.Descriptor( + name='Gaussian', + full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian.mean', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='precision', full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian.precision', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=571, + serialized_end=614, +) + +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT = _descriptor.Descriptor( + name='FeatureValueWeight', + full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureValue', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.featureValue', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='featureWeight', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.featureWeight', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=616, + serialized_end=737, +) + +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT = _descriptor.Descriptor( + name='FeatureWeight', + full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureId', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight.featureId', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight.weights', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=739, + serialized_end=856, +) + +_BAYESIANPROBITREGRESSOR = _descriptor.Descriptor( + name='BayesianProbitRegressor', + full_name='CoreML.Specification.BayesianProbitRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numberOfFeatures', full_name='CoreML.Specification.BayesianProbitRegressor.numberOfFeatures', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.BayesianProbitRegressor.bias', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='features', full_name='CoreML.Specification.BayesianProbitRegressor.features', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='regressionInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.regressionInputFeatureName', index=3, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='optimismInputFeatureName', 
full_name='CoreML.Specification.BayesianProbitRegressor.optimismInputFeatureName', index=4, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='samplingScaleInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.samplingScaleInputFeatureName', index=5, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='samplingTruncationInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.samplingTruncationInputFeatureName', index=6, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.meanOutputFeatureName', index=7, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='varianceOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.varianceOutputFeatureName', index=8, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pessimisticProbabilityOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.pessimisticProbabilityOutputFeatureName', index=9, + number=22, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sampledProbabilityOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.sampledProbabilityOutputFeatureName', index=10, + number=23, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_BAYESIANPROBITREGRESSOR_GAUSSIAN, _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT, _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=56, + serialized_end=856, +) + +_BAYESIANPROBITREGRESSOR_GAUSSIAN.containing_type = _BAYESIANPROBITREGRESSOR +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT.fields_by_name['featureWeight'].message_type = _BAYESIANPROBITREGRESSOR_GAUSSIAN +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT.containing_type = _BAYESIANPROBITREGRESSOR +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT.fields_by_name['weights'].message_type = _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT.containing_type = _BAYESIANPROBITREGRESSOR 
+_BAYESIANPROBITREGRESSOR.fields_by_name['bias'].message_type = _BAYESIANPROBITREGRESSOR_GAUSSIAN +_BAYESIANPROBITREGRESSOR.fields_by_name['features'].message_type = _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT +DESCRIPTOR.message_types_by_name['BayesianProbitRegressor'] = _BAYESIANPROBITREGRESSOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +BayesianProbitRegressor = _reflection.GeneratedProtocolMessageType('BayesianProbitRegressor', (_message.Message,), dict( + + Gaussian = _reflection.GeneratedProtocolMessageType('Gaussian', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_GAUSSIAN, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.Gaussian) + )) + , + + FeatureValueWeight = _reflection.GeneratedProtocolMessageType('FeatureValueWeight', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight) + )) + , + + FeatureWeight = _reflection.GeneratedProtocolMessageType('FeatureWeight', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.FeatureWeight) + )) + , + DESCRIPTOR = _BAYESIANPROBITREGRESSOR, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor) + )) +_sym_db.RegisterMessage(BayesianProbitRegressor) +_sym_db.RegisterMessage(BayesianProbitRegressor.Gaussian) +_sym_db.RegisterMessage(BayesianProbitRegressor.FeatureValueWeight) +_sym_db.RegisterMessage(BayesianProbitRegressor.FeatureWeight) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py new file mode 100644 index 00000000..25e4daf8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py @@ -0,0 +1,120 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: CategoricalMapping.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='CategoricalMapping.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x18\x43\x61tegoricalMapping.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xe7\x01\n\x12\x43\x61tegoricalMapping\x12\x42\n\x10stringToInt64Map\x18\x01 \x01(\x0b\x32&.CoreML.Specification.StringToInt64MapH\x00\x12\x42\n\x10int64ToStringMap\x18\x02 \x01(\x0b\x32&.CoreML.Specification.Int64ToStringMapH\x00\x12\x12\n\x08strValue\x18\x65 \x01(\tH\x01\x12\x14\n\nint64Value\x18\x66 \x01(\x03H\x01\x42\r\n\x0bMappingTypeB\x10\n\x0eValueOnUnknownB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_CATEGORICALMAPPING = _descriptor.Descriptor( + name='CategoricalMapping', + full_name='CoreML.Specification.CategoricalMapping', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringToInt64Map', full_name='CoreML.Specification.CategoricalMapping.stringToInt64Map', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ToStringMap', full_name='CoreML.Specification.CategoricalMapping.int64ToStringMap', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strValue', full_name='CoreML.Specification.CategoricalMapping.strValue', index=2, + number=101, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64Value', full_name='CoreML.Specification.CategoricalMapping.int64Value', index=3, + number=102, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='MappingType', full_name='CoreML.Specification.CategoricalMapping.MappingType', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ValueOnUnknown', full_name='CoreML.Specification.CategoricalMapping.ValueOnUnknown', + index=1, containing_type=None, fields=[]), + ], + serialized_start=73, + serialized_end=304, +) + +_CATEGORICALMAPPING.fields_by_name['stringToInt64Map'].message_type = DataStructures__pb2._STRINGTOINT64MAP +_CATEGORICALMAPPING.fields_by_name['int64ToStringMap'].message_type = DataStructures__pb2._INT64TOSTRINGMAP +_CATEGORICALMAPPING.oneofs_by_name['MappingType'].fields.append( + _CATEGORICALMAPPING.fields_by_name['stringToInt64Map']) +_CATEGORICALMAPPING.fields_by_name['stringToInt64Map'].containing_oneof = 
_CATEGORICALMAPPING.oneofs_by_name['MappingType'] +_CATEGORICALMAPPING.oneofs_by_name['MappingType'].fields.append( + _CATEGORICALMAPPING.fields_by_name['int64ToStringMap']) +_CATEGORICALMAPPING.fields_by_name['int64ToStringMap'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['MappingType'] +_CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'].fields.append( + _CATEGORICALMAPPING.fields_by_name['strValue']) +_CATEGORICALMAPPING.fields_by_name['strValue'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'] +_CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'].fields.append( + _CATEGORICALMAPPING.fields_by_name['int64Value']) +_CATEGORICALMAPPING.fields_by_name['int64Value'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'] +DESCRIPTOR.message_types_by_name['CategoricalMapping'] = _CATEGORICALMAPPING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CategoricalMapping = _reflection.GeneratedProtocolMessageType('CategoricalMapping', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALMAPPING, + __module__ = 'CategoricalMapping_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalMapping) + )) +_sym_db.RegisterMessage(CategoricalMapping) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py new file mode 100644 index 00000000..8268f33e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py @@ -0,0 +1,80 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: ClassConfidenceThresholding.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ClassConfidenceThresholding.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n!ClassConfidenceThresholding.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"h\n\x1b\x43lassConfidenceThresholding\x12I\n\x15precisionRecallCurves\x18\x64 \x03(\x0b\x32*.CoreML.Specification.PrecisionRecallCurveB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_CLASSCONFIDENCETHRESHOLDING = _descriptor.Descriptor( + name='ClassConfidenceThresholding', + full_name='CoreML.Specification.ClassConfidenceThresholding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='precisionRecallCurves', full_name='CoreML.Specification.ClassConfidenceThresholding.precisionRecallCurves', index=0, + number=100, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=81, + serialized_end=185, +) + +_CLASSCONFIDENCETHRESHOLDING.fields_by_name['precisionRecallCurves'].message_type = DataStructures__pb2._PRECISIONRECALLCURVE +DESCRIPTOR.message_types_by_name['ClassConfidenceThresholding'] = _CLASSCONFIDENCETHRESHOLDING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ClassConfidenceThresholding = _reflection.GeneratedProtocolMessageType('ClassConfidenceThresholding', (_message.Message,), dict( + DESCRIPTOR = _CLASSCONFIDENCETHRESHOLDING, + __module__ = 'ClassConfidenceThresholding_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClassConfidenceThresholding) + )) +_sym_db.RegisterMessage(ClassConfidenceThresholding) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py new file mode 100644 index 00000000..6dcb25a6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py @@ -0,0 +1,230 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: CustomModel.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='CustomModel.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x11\x43ustomModel.proto\x12\x14\x43oreML.Specification\"\x8d\x03\n\x0b\x43ustomModel\x12\x11\n\tclassName\x18\n \x01(\t\x12\x45\n\nparameters\x18\x1e \x03(\x0b\x32\x31.CoreML.Specification.CustomModel.ParametersEntry\x12\x13\n\x0b\x64\x65scription\x18( \x01(\t\x1a\xa2\x01\n\x15\x43ustomModelParamValue\x12\x15\n\x0b\x64oubleValue\x18\n \x01(\x01H\x00\x12\x15\n\x0bstringValue\x18\x14 \x01(\tH\x00\x12\x12\n\x08intValue\x18\x1e \x01(\x05H\x00\x12\x13\n\tlongValue\x18( \x01(\x03H\x00\x12\x13\n\tboolValue\x18\x32 \x01(\x08H\x00\x12\x14\n\nbytesValue\x18< \x01(\x0cH\x00\x42\x07\n\x05value\x1aj\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x46\n\x05value\x18\x02 \x01(\x0b\x32\x37.CoreML.Specification.CustomModel.CustomModelParamValue:\x02\x38\x01\x42\x02H\x03\x62\x06proto3') +) + + + + +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE = _descriptor.Descriptor( + name='CustomModelParamValue', + full_name='CoreML.Specification.CustomModel.CustomModelParamValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='doubleValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.doubleValue', index=0, + number=10, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.stringValue', index=1, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.intValue', index=2, + number=30, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.longValue', index=3, + number=40, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boolValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.boolValue', index=4, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bytesValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.bytesValue', index=5, + 
number=60, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=171, + serialized_end=333, +) + +_CUSTOMMODEL_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='CoreML.Specification.CustomModel.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.CustomModel.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.CustomModel.ParametersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=441, +) + +_CUSTOMMODEL = _descriptor.Descriptor( + name='CustomModel', + full_name='CoreML.Specification.CustomModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='className', full_name='CoreML.Specification.CustomModel.className', index=0, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parameters', full_name='CoreML.Specification.CustomModel.parameters', index=1, + number=30, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='CoreML.Specification.CustomModel.description', index=2, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CUSTOMMODEL_CUSTOMMODELPARAMVALUE, _CUSTOMMODEL_PARAMETERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=441, +) + +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.containing_type = _CUSTOMMODEL +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['doubleValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['doubleValue'].containing_oneof = 
_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['stringValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['stringValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['intValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['intValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['longValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['longValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['boolValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['boolValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['bytesValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['bytesValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_PARAMETERSENTRY.fields_by_name['value'].message_type = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE +_CUSTOMMODEL_PARAMETERSENTRY.containing_type = _CUSTOMMODEL +_CUSTOMMODEL.fields_by_name['parameters'].message_type = _CUSTOMMODEL_PARAMETERSENTRY +DESCRIPTOR.message_types_by_name['CustomModel'] = _CUSTOMMODEL +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CustomModel = _reflection.GeneratedProtocolMessageType('CustomModel', (_message.Message,), dict( + + CustomModelParamValue = _reflection.GeneratedProtocolMessageType('CustomModelParamValue', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel.CustomModelParamValue) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMMODEL_PARAMETERSENTRY, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel.ParametersEntry) + )) + , + DESCRIPTOR = _CUSTOMMODEL, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel) + )) +_sym_db.RegisterMessage(CustomModel) +_sym_db.RegisterMessage(CustomModel.CustomModelParamValue) +_sym_db.RegisterMessage(CustomModel.ParametersEntry) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_CUSTOMMODEL_PARAMETERSENTRY.has_options = True +_CUSTOMMODEL_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py new file mode 100644 index 00000000..1f4b301c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py @@ -0,0 +1,739 @@ +# Generated by the protocol buffer compiler. 
DO NOT EDIT! +# source: DataStructures.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import FeatureTypes_pb2 as FeatureTypes__pb2 + +from .FeatureTypes_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='DataStructures.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x14\x44\x61taStructures.proto\x12\x14\x43oreML.Specification\x1a\x12\x46\x65\x61tureTypes.proto\"|\n\x10StringToInt64Map\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.StringToInt64Map.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"|\n\x10Int64ToStringMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToStringMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x11StringToDoubleMap\x12=\n\x03map\x18\x01 \x03(\x0b\x32\x30.CoreML.Specification.StringToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"|\n\x10Int64ToDoubleMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"\x1e\n\x0cStringVector\x12\x0e\n\x06vector\x18\x01 \x03(\t\"\x1d\n\x0bInt64Vector\x12\x0e\n\x06vector\x18\x01 \x03(\x03\"\x1d\n\x0b\x46loatVector\x12\x0e\n\x06vector\x18\x01 \x03(\x02\"\x1e\n\x0c\x44oubleVector\x12\x0e\n\x06vector\x18\x01 \x03(\x01\"0\n\nInt64Range\x12\x10\n\x08minValue\x18\x01 \x01(\x03\x12\x10\n\x08maxValue\x18\x02 \x01(\x03\"\x1a\n\x08Int64Set\x12\x0e\n\x06values\x18\x01 \x03(\x03\"1\n\x0b\x44oubleRange\x12\x10\n\x08minValue\x18\x01 \x01(\x01\x12\x10\n\x08maxValue\x18\x02 \x01(\x01\"\x9c\x02\n\x14PrecisionRecallCurve\x12:\n\x0fprecisionValues\x18\x01 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12H\n\x1dprecisionConfidenceThresholds\x18\x02 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12\x37\n\x0crecallValues\x18\x03 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12\x45\n\x1arecallConfidenceThresholds\x18\x04 \x01(\x0b\x32!.CoreML.Specification.FloatVectorB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[FeatureTypes__pb2.DESCRIPTOR,], + public_dependencies=[FeatureTypes__pb2.DESCRIPTOR,]) + + + + +_STRINGTOINT64MAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.StringToInt64Map.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.StringToInt64Map.MapEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.StringToInt64Map.MapEntry.value', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=148, + serialized_end=190, +) + +_STRINGTOINT64MAP = _descriptor.Descriptor( + name='StringToInt64Map', + full_name='CoreML.Specification.StringToInt64Map', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.StringToInt64Map.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_STRINGTOINT64MAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=66, + serialized_end=190, +) + + +_INT64TOSTRINGMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.Int64ToStringMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Int64ToStringMap.MapEntry.key', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Int64ToStringMap.MapEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=274, + serialized_end=316, +) + +_INT64TOSTRINGMAP = _descriptor.Descriptor( + name='Int64ToStringMap', + full_name='CoreML.Specification.Int64ToStringMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.Int64ToStringMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_INT64TOSTRINGMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=192, + serialized_end=316, +) + + +_STRINGTODOUBLEMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.StringToDoubleMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.StringToDoubleMap.MapEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.StringToDoubleMap.MapEntry.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=402, + serialized_end=444, +) + +_STRINGTODOUBLEMAP = _descriptor.Descriptor( + name='StringToDoubleMap', + full_name='CoreML.Specification.StringToDoubleMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.StringToDoubleMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_STRINGTODOUBLEMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=318, + serialized_end=444, +) + + +_INT64TODOUBLEMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry.key', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=528, + serialized_end=570, +) + +_INT64TODOUBLEMAP = _descriptor.Descriptor( + name='Int64ToDoubleMap', + full_name='CoreML.Specification.Int64ToDoubleMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.Int64ToDoubleMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_INT64TODOUBLEMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=446, + serialized_end=570, +) + + +_STRINGVECTOR = _descriptor.Descriptor( + name='StringVector', + full_name='CoreML.Specification.StringVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.StringVector.vector', 
index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=572, + serialized_end=602, +) + + +_INT64VECTOR = _descriptor.Descriptor( + name='Int64Vector', + full_name='CoreML.Specification.Int64Vector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.Int64Vector.vector', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=604, + serialized_end=633, +) + + +_FLOATVECTOR = _descriptor.Descriptor( + name='FloatVector', + full_name='CoreML.Specification.FloatVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.FloatVector.vector', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=635, + serialized_end=664, +) + + +_DOUBLEVECTOR = _descriptor.Descriptor( + name='DoubleVector', + full_name='CoreML.Specification.DoubleVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.DoubleVector.vector', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=666, + serialized_end=696, +) + + +_INT64RANGE = _descriptor.Descriptor( + name='Int64Range', + full_name='CoreML.Specification.Int64Range', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.Int64Range.minValue', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.Int64Range.maxValue', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=698, + serialized_end=746, +) + + +_INT64SET = _descriptor.Descriptor( + name='Int64Set', + full_name='CoreML.Specification.Int64Set', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.Int64Set.values', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=748, + serialized_end=774, +) + + +_DOUBLERANGE = _descriptor.Descriptor( + name='DoubleRange', + full_name='CoreML.Specification.DoubleRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.DoubleRange.minValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.DoubleRange.maxValue', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=776, + serialized_end=825, +) + + +_PRECISIONRECALLCURVE = _descriptor.Descriptor( + name='PrecisionRecallCurve', + full_name='CoreML.Specification.PrecisionRecallCurve', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='precisionValues', full_name='CoreML.Specification.PrecisionRecallCurve.precisionValues', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='precisionConfidenceThresholds', full_name='CoreML.Specification.PrecisionRecallCurve.precisionConfidenceThresholds', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recallValues', full_name='CoreML.Specification.PrecisionRecallCurve.recallValues', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recallConfidenceThresholds', full_name='CoreML.Specification.PrecisionRecallCurve.recallConfidenceThresholds', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=828, + serialized_end=1112, +) + +_STRINGTOINT64MAP_MAPENTRY.containing_type = _STRINGTOINT64MAP +_STRINGTOINT64MAP.fields_by_name['map'].message_type = _STRINGTOINT64MAP_MAPENTRY +_INT64TOSTRINGMAP_MAPENTRY.containing_type = _INT64TOSTRINGMAP +_INT64TOSTRINGMAP.fields_by_name['map'].message_type = _INT64TOSTRINGMAP_MAPENTRY +_STRINGTODOUBLEMAP_MAPENTRY.containing_type = _STRINGTODOUBLEMAP +_STRINGTODOUBLEMAP.fields_by_name['map'].message_type = _STRINGTODOUBLEMAP_MAPENTRY +_INT64TODOUBLEMAP_MAPENTRY.containing_type = _INT64TODOUBLEMAP +_INT64TODOUBLEMAP.fields_by_name['map'].message_type = _INT64TODOUBLEMAP_MAPENTRY +_PRECISIONRECALLCURVE.fields_by_name['precisionValues'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['precisionConfidenceThresholds'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['recallValues'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['recallConfidenceThresholds'].message_type = _FLOATVECTOR +DESCRIPTOR.message_types_by_name['StringToInt64Map'] = _STRINGTOINT64MAP +DESCRIPTOR.message_types_by_name['Int64ToStringMap'] = _INT64TOSTRINGMAP +DESCRIPTOR.message_types_by_name['StringToDoubleMap'] = _STRINGTODOUBLEMAP +DESCRIPTOR.message_types_by_name['Int64ToDoubleMap'] = _INT64TODOUBLEMAP +DESCRIPTOR.message_types_by_name['StringVector'] = _STRINGVECTOR +DESCRIPTOR.message_types_by_name['Int64Vector'] = _INT64VECTOR +DESCRIPTOR.message_types_by_name['FloatVector'] = _FLOATVECTOR +DESCRIPTOR.message_types_by_name['DoubleVector'] = _DOUBLEVECTOR +DESCRIPTOR.message_types_by_name['Int64Range'] = _INT64RANGE +DESCRIPTOR.message_types_by_name['Int64Set'] = _INT64SET +DESCRIPTOR.message_types_by_name['DoubleRange'] = _DOUBLERANGE +DESCRIPTOR.message_types_by_name['PrecisionRecallCurve'] = _PRECISIONRECALLCURVE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +StringToInt64Map = _reflection.GeneratedProtocolMessageType('StringToInt64Map', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _STRINGTOINT64MAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToInt64Map.MapEntry) + )) + , + DESCRIPTOR = _STRINGTOINT64MAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToInt64Map) + )) +_sym_db.RegisterMessage(StringToInt64Map) +_sym_db.RegisterMessage(StringToInt64Map.MapEntry) + +Int64ToStringMap = _reflection.GeneratedProtocolMessageType('Int64ToStringMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _INT64TOSTRINGMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToStringMap.MapEntry) + )) + , + DESCRIPTOR = _INT64TOSTRINGMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToStringMap) + )) +_sym_db.RegisterMessage(Int64ToStringMap) +_sym_db.RegisterMessage(Int64ToStringMap.MapEntry) + +StringToDoubleMap = _reflection.GeneratedProtocolMessageType('StringToDoubleMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _STRINGTODOUBLEMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.StringToDoubleMap.MapEntry) + )) + , + DESCRIPTOR = _STRINGTODOUBLEMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToDoubleMap) + )) +_sym_db.RegisterMessage(StringToDoubleMap) +_sym_db.RegisterMessage(StringToDoubleMap.MapEntry) + +Int64ToDoubleMap = _reflection.GeneratedProtocolMessageType('Int64ToDoubleMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _INT64TODOUBLEMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToDoubleMap.MapEntry) + )) + , + DESCRIPTOR = _INT64TODOUBLEMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToDoubleMap) + )) +_sym_db.RegisterMessage(Int64ToDoubleMap) +_sym_db.RegisterMessage(Int64ToDoubleMap.MapEntry) + +StringVector = _reflection.GeneratedProtocolMessageType('StringVector', (_message.Message,), dict( + DESCRIPTOR = _STRINGVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringVector) + )) +_sym_db.RegisterMessage(StringVector) + +Int64Vector = _reflection.GeneratedProtocolMessageType('Int64Vector', (_message.Message,), dict( + DESCRIPTOR = _INT64VECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Vector) + )) +_sym_db.RegisterMessage(Int64Vector) + +FloatVector = _reflection.GeneratedProtocolMessageType('FloatVector', (_message.Message,), dict( + DESCRIPTOR = _FLOATVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatVector) + )) +_sym_db.RegisterMessage(FloatVector) + +DoubleVector = _reflection.GeneratedProtocolMessageType('DoubleVector', (_message.Message,), dict( + DESCRIPTOR = _DOUBLEVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleVector) + )) +_sym_db.RegisterMessage(DoubleVector) + +Int64Range = _reflection.GeneratedProtocolMessageType('Int64Range', (_message.Message,), dict( + DESCRIPTOR = _INT64RANGE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Range) + )) +_sym_db.RegisterMessage(Int64Range) + +Int64Set = _reflection.GeneratedProtocolMessageType('Int64Set', (_message.Message,), dict( + DESCRIPTOR = _INT64SET, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Set) + )) +_sym_db.RegisterMessage(Int64Set) + +DoubleRange = _reflection.GeneratedProtocolMessageType('DoubleRange', (_message.Message,), dict( + DESCRIPTOR = _DOUBLERANGE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleRange) + )) +_sym_db.RegisterMessage(DoubleRange) + +PrecisionRecallCurve = _reflection.GeneratedProtocolMessageType('PrecisionRecallCurve', (_message.Message,), dict( + DESCRIPTOR = _PRECISIONRECALLCURVE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PrecisionRecallCurve) + )) +_sym_db.RegisterMessage(PrecisionRecallCurve) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_STRINGTOINT64MAP_MAPENTRY.has_options = True +_STRINGTOINT64MAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), 
_b('8\001')) +_INT64TOSTRINGMAP_MAPENTRY.has_options = True +_INT64TOSTRINGMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_STRINGTODOUBLEMAP_MAPENTRY.has_options = True +_STRINGTODOUBLEMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_INT64TODOUBLEMAP_MAPENTRY.has_options = True +_INT64TODOUBLEMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py new file mode 100644 index 00000000..1ba214ca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py @@ -0,0 +1,97 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: DictVectorizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='DictVectorizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x14\x44ictVectorizer.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x8f\x01\n\x0e\x44ictVectorizer\x12;\n\rstringToIndex\x18\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12\x39\n\x0cint64ToIndex\x18\x02 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x42\x05\n\x03MapB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_DICTVECTORIZER = _descriptor.Descriptor( + name='DictVectorizer', + full_name='CoreML.Specification.DictVectorizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringToIndex', full_name='CoreML.Specification.DictVectorizer.stringToIndex', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ToIndex', full_name='CoreML.Specification.DictVectorizer.int64ToIndex', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Map', full_name='CoreML.Specification.DictVectorizer.Map', + index=0, containing_type=None, fields=[]), + ], + serialized_start=69, + serialized_end=212, +) + +_DICTVECTORIZER.fields_by_name['stringToIndex'].message_type = 
DataStructures__pb2._STRINGVECTOR +_DICTVECTORIZER.fields_by_name['int64ToIndex'].message_type = DataStructures__pb2._INT64VECTOR +_DICTVECTORIZER.oneofs_by_name['Map'].fields.append( + _DICTVECTORIZER.fields_by_name['stringToIndex']) +_DICTVECTORIZER.fields_by_name['stringToIndex'].containing_oneof = _DICTVECTORIZER.oneofs_by_name['Map'] +_DICTVECTORIZER.oneofs_by_name['Map'].fields.append( + _DICTVECTORIZER.fields_by_name['int64ToIndex']) +_DICTVECTORIZER.fields_by_name['int64ToIndex'].containing_oneof = _DICTVECTORIZER.oneofs_by_name['Map'] +DESCRIPTOR.message_types_by_name['DictVectorizer'] = _DICTVECTORIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +DictVectorizer = _reflection.GeneratedProtocolMessageType('DictVectorizer', (_message.Message,), dict( + DESCRIPTOR = _DICTVECTORIZER, + __module__ = 'DictVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DictVectorizer) + )) +_sym_db.RegisterMessage(DictVectorizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py new file mode 100644 index 00000000..ef54f112 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py @@ -0,0 +1,924 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: FeatureTypes.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='FeatureTypes.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12\x46\x65\x61tureTypes.proto\x12\x14\x43oreML.Specification\"\x12\n\x10Int64FeatureType\"\x13\n\x11\x44oubleFeatureType\"\x13\n\x11StringFeatureType\"3\n\tSizeRange\x12\x12\n\nlowerBound\x18\x01 \x01(\x04\x12\x12\n\nupperBound\x18\x02 \x01(\x03\"\x95\x05\n\x10ImageFeatureType\x12\r\n\x05width\x18\x01 \x01(\x03\x12\x0e\n\x06height\x18\x02 \x01(\x03\x12V\n\x0f\x65numeratedSizes\x18\x15 \x01(\x0b\x32;.CoreML.Specification.ImageFeatureType.EnumeratedImageSizesH\x00\x12O\n\x0eimageSizeRange\x18\x1f \x01(\x0b\x32\x35.CoreML.Specification.ImageFeatureType.ImageSizeRangeH\x00\x12\x45\n\ncolorSpace\x18\x03 \x01(\x0e\x32\x31.CoreML.Specification.ImageFeatureType.ColorSpace\x1a*\n\tImageSize\x12\r\n\x05width\x18\x01 \x01(\x04\x12\x0e\n\x06height\x18\x02 \x01(\x04\x1aW\n\x14\x45numeratedImageSizes\x12?\n\x05sizes\x18\x01 \x03(\x0b\x32\x30.CoreML.Specification.ImageFeatureType.ImageSize\x1a{\n\x0eImageSizeRange\x12\x33\n\nwidthRange\x18\x01 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRange\x12\x34\n\x0bheightRange\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRange\"]\n\nColorSpace\x12\x17\n\x13INVALID_COLOR_SPACE\x10\x00\x12\r\n\tGRAYSCALE\x10\n\x12\x07\n\x03RGB\x10\x14\x12\x07\n\x03\x42GR\x10\x1e\x12\x15\n\x11GRAYSCALE_FLOAT16\x10(B\x11\n\x0fSizeFlexibility\"\x9d\x05\n\x10\x41rrayFeatureType\x12\r\n\x05shape\x18\x01 \x03(\x03\x12\x46\n\x08\x64\x61taType\x18\x02 
\x01(\x0e\x32\x34.CoreML.Specification.ArrayFeatureType.ArrayDataType\x12S\n\x10\x65numeratedShapes\x18\x15 \x01(\x0b\x32\x37.CoreML.Specification.ArrayFeatureType.EnumeratedShapesH\x00\x12G\n\nshapeRange\x18\x1f \x01(\x0b\x32\x31.CoreML.Specification.ArrayFeatureType.ShapeRangeH\x00\x12\x19\n\x0fintDefaultValue\x18) \x01(\x05H\x01\x12\x1b\n\x11\x66loatDefaultValue\x18\x33 \x01(\x02H\x01\x12\x1c\n\x12\x64oubleDefaultValue\x18= \x01(\x01H\x01\x1a\x16\n\x05Shape\x12\r\n\x05shape\x18\x01 \x03(\x03\x1aP\n\x10\x45numeratedShapes\x12<\n\x06shapes\x18\x01 \x03(\x0b\x32,.CoreML.Specification.ArrayFeatureType.Shape\x1a\x41\n\nShapeRange\x12\x33\n\nsizeRanges\x18\x01 \x03(\x0b\x32\x1f.CoreML.Specification.SizeRange\"e\n\rArrayDataType\x12\x1b\n\x17INVALID_ARRAY_DATA_TYPE\x10\x00\x12\r\n\x07\x46LOAT32\x10\xa0\x80\x04\x12\x0c\n\x06\x44OUBLE\x10\xc0\x80\x04\x12\x0b\n\x05INT32\x10\xa0\x80\x08\x12\r\n\x07\x46LOAT16\x10\x90\x80\x04\x42\x12\n\x10ShapeFlexibilityB\x16\n\x14\x64\x65\x66\x61ultOptionalValue\"\xa4\x01\n\x15\x44ictionaryFeatureType\x12>\n\x0cint64KeyType\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12@\n\rstringKeyType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x42\t\n\x07KeyType\"\xcd\x01\n\x13SequenceFeatureType\x12;\n\tint64Type\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12=\n\nstringType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x12\x32\n\tsizeRange\x18\x65 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRangeB\x06\n\x04Type\"\xee\x03\n\x0b\x46\x65\x61tureType\x12;\n\tint64Type\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12=\n\ndoubleType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.DoubleFeatureTypeH\x00\x12=\n\nstringType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x12;\n\timageType\x18\x04 \x01(\x0b\x32&.CoreML.Specification.ImageFeatureTypeH\x00\x12@\n\x0emultiArrayType\x18\x05 \x01(\x0b\x32&.CoreML.Specification.ArrayFeatureTypeH\x00\x12\x45\n\x0e\x64ictionaryType\x18\x06 \x01(\x0b\x32+.CoreML.Specification.DictionaryFeatureTypeH\x00\x12\x41\n\x0csequenceType\x18\x07 \x01(\x0b\x32).CoreML.Specification.SequenceFeatureTypeH\x00\x12\x13\n\nisOptional\x18\xe8\x07 \x01(\x08\x42\x06\n\x04TypeB\x02H\x03\x62\x06proto3') +) + + + +_IMAGEFEATURETYPE_COLORSPACE = _descriptor.EnumDescriptor( + name='ColorSpace', + full_name='CoreML.Specification.ImageFeatureType.ColorSpace', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='INVALID_COLOR_SPACE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GRAYSCALE', index=1, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RGB', index=2, number=20, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BGR', index=3, number=30, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GRAYSCALE_FLOAT16', index=4, number=40, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=709, + serialized_end=802, +) +_sym_db.RegisterEnumDescriptor(_IMAGEFEATURETYPE_COLORSPACE) + +_ARRAYFEATURETYPE_ARRAYDATATYPE = _descriptor.EnumDescriptor( + name='ArrayDataType', + full_name='CoreML.Specification.ArrayFeatureType.ArrayDataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='INVALID_ARRAY_DATA_TYPE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT32', 
index=1, number=65568, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DOUBLE', index=2, number=65600, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT32', index=3, number=131104, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT16', index=4, number=65552, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1348, + serialized_end=1449, +) +_sym_db.RegisterEnumDescriptor(_ARRAYFEATURETYPE_ARRAYDATATYPE) + + +_INT64FEATURETYPE = _descriptor.Descriptor( + name='Int64FeatureType', + full_name='CoreML.Specification.Int64FeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=62, +) + + +_DOUBLEFEATURETYPE = _descriptor.Descriptor( + name='DoubleFeatureType', + full_name='CoreML.Specification.DoubleFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=64, + serialized_end=83, +) + + +_STRINGFEATURETYPE = _descriptor.Descriptor( + name='StringFeatureType', + full_name='CoreML.Specification.StringFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=85, + serialized_end=104, +) + + +_SIZERANGE = _descriptor.Descriptor( + name='SizeRange', + full_name='CoreML.Specification.SizeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='lowerBound', full_name='CoreML.Specification.SizeRange.lowerBound', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upperBound', full_name='CoreML.Specification.SizeRange.upperBound', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=106, + serialized_end=157, +) + + +_IMAGEFEATURETYPE_IMAGESIZE = _descriptor.Descriptor( + name='ImageSize', + full_name='CoreML.Specification.ImageFeatureType.ImageSize', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='CoreML.Specification.ImageFeatureType.ImageSize.width', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='CoreML.Specification.ImageFeatureType.ImageSize.height', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=451, + serialized_end=493, +) + +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES = _descriptor.Descriptor( + name='EnumeratedImageSizes', + full_name='CoreML.Specification.ImageFeatureType.EnumeratedImageSizes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sizes', full_name='CoreML.Specification.ImageFeatureType.EnumeratedImageSizes.sizes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=495, + serialized_end=582, +) + +_IMAGEFEATURETYPE_IMAGESIZERANGE = _descriptor.Descriptor( + name='ImageSizeRange', + full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='widthRange', full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange.widthRange', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heightRange', full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange.heightRange', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=584, + serialized_end=707, +) + +_IMAGEFEATURETYPE = _descriptor.Descriptor( + name='ImageFeatureType', + full_name='CoreML.Specification.ImageFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='CoreML.Specification.ImageFeatureType.width', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='CoreML.Specification.ImageFeatureType.height', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='enumeratedSizes', full_name='CoreML.Specification.ImageFeatureType.enumeratedSizes', index=2, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageSizeRange', 
full_name='CoreML.Specification.ImageFeatureType.imageSizeRange', index=3, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='colorSpace', full_name='CoreML.Specification.ImageFeatureType.colorSpace', index=4, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_IMAGEFEATURETYPE_IMAGESIZE, _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES, _IMAGEFEATURETYPE_IMAGESIZERANGE, ], + enum_types=[ + _IMAGEFEATURETYPE_COLORSPACE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SizeFlexibility', full_name='CoreML.Specification.ImageFeatureType.SizeFlexibility', + index=0, containing_type=None, fields=[]), + ], + serialized_start=160, + serialized_end=821, +) + + +_ARRAYFEATURETYPE_SHAPE = _descriptor.Descriptor( + name='Shape', + full_name='CoreML.Specification.ArrayFeatureType.Shape', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.ArrayFeatureType.Shape.shape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1175, + serialized_end=1197, +) + +_ARRAYFEATURETYPE_ENUMERATEDSHAPES = _descriptor.Descriptor( + name='EnumeratedShapes', + full_name='CoreML.Specification.ArrayFeatureType.EnumeratedShapes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shapes', full_name='CoreML.Specification.ArrayFeatureType.EnumeratedShapes.shapes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1199, + serialized_end=1279, +) + +_ARRAYFEATURETYPE_SHAPERANGE = _descriptor.Descriptor( + name='ShapeRange', + full_name='CoreML.Specification.ArrayFeatureType.ShapeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sizeRanges', full_name='CoreML.Specification.ArrayFeatureType.ShapeRange.sizeRanges', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1281, + serialized_end=1346, +) + +_ARRAYFEATURETYPE = _descriptor.Descriptor( + name='ArrayFeatureType', + full_name='CoreML.Specification.ArrayFeatureType', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.ArrayFeatureType.shape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dataType', full_name='CoreML.Specification.ArrayFeatureType.dataType', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='enumeratedShapes', full_name='CoreML.Specification.ArrayFeatureType.enumeratedShapes', index=2, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shapeRange', full_name='CoreML.Specification.ArrayFeatureType.shapeRange', index=3, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.intDefaultValue', index=4, + number=41, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.floatDefaultValue', index=5, + number=51, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubleDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.doubleDefaultValue', index=6, + number=61, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ARRAYFEATURETYPE_SHAPE, _ARRAYFEATURETYPE_ENUMERATEDSHAPES, _ARRAYFEATURETYPE_SHAPERANGE, ], + enum_types=[ + _ARRAYFEATURETYPE_ARRAYDATATYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ShapeFlexibility', full_name='CoreML.Specification.ArrayFeatureType.ShapeFlexibility', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='defaultOptionalValue', full_name='CoreML.Specification.ArrayFeatureType.defaultOptionalValue', + index=1, containing_type=None, fields=[]), + ], + serialized_start=824, + serialized_end=1493, +) + + +_DICTIONARYFEATURETYPE = _descriptor.Descriptor( + name='DictionaryFeatureType', + full_name='CoreML.Specification.DictionaryFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64KeyType', full_name='CoreML.Specification.DictionaryFeatureType.int64KeyType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringKeyType', full_name='CoreML.Specification.DictionaryFeatureType.stringKeyType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='KeyType', full_name='CoreML.Specification.DictionaryFeatureType.KeyType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1496, + serialized_end=1660, +) + + +_SEQUENCEFEATURETYPE = _descriptor.Descriptor( + name='SequenceFeatureType', + full_name='CoreML.Specification.SequenceFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64Type', full_name='CoreML.Specification.SequenceFeatureType.int64Type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringType', full_name='CoreML.Specification.SequenceFeatureType.stringType', index=1, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sizeRange', full_name='CoreML.Specification.SequenceFeatureType.sizeRange', index=2, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.SequenceFeatureType.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1663, + serialized_end=1868, +) + + +_FEATURETYPE = _descriptor.Descriptor( + name='FeatureType', + full_name='CoreML.Specification.FeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64Type', full_name='CoreML.Specification.FeatureType.int64Type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubleType', full_name='CoreML.Specification.FeatureType.doubleType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringType', full_name='CoreML.Specification.FeatureType.stringType', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageType', full_name='CoreML.Specification.FeatureType.imageType', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiArrayType', full_name='CoreML.Specification.FeatureType.multiArrayType', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionaryType', full_name='CoreML.Specification.FeatureType.dictionaryType', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceType', full_name='CoreML.Specification.FeatureType.sequenceType', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isOptional', full_name='CoreML.Specification.FeatureType.isOptional', index=7, + number=1000, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.FeatureType.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1871, + serialized_end=2365, +) + +_IMAGEFEATURETYPE_IMAGESIZE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES.fields_by_name['sizes'].message_type = _IMAGEFEATURETYPE_IMAGESIZE +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE_IMAGESIZERANGE.fields_by_name['widthRange'].message_type = _SIZERANGE +_IMAGEFEATURETYPE_IMAGESIZERANGE.fields_by_name['heightRange'].message_type = _SIZERANGE +_IMAGEFEATURETYPE_IMAGESIZERANGE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE.fields_by_name['enumeratedSizes'].message_type = _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES +_IMAGEFEATURETYPE.fields_by_name['imageSizeRange'].message_type = _IMAGEFEATURETYPE_IMAGESIZERANGE +_IMAGEFEATURETYPE.fields_by_name['colorSpace'].enum_type = _IMAGEFEATURETYPE_COLORSPACE +_IMAGEFEATURETYPE_COLORSPACE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'].fields.append( + _IMAGEFEATURETYPE.fields_by_name['enumeratedSizes']) +_IMAGEFEATURETYPE.fields_by_name['enumeratedSizes'].containing_oneof = _IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'] +_IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'].fields.append( + _IMAGEFEATURETYPE.fields_by_name['imageSizeRange']) +_IMAGEFEATURETYPE.fields_by_name['imageSizeRange'].containing_oneof = _IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'] +_ARRAYFEATURETYPE_SHAPE.containing_type = _ARRAYFEATURETYPE 
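# The assignments in this region are protoc's post-construction wiring pass
# (a descriptive note, not part of the generated output): every message-typed
# field gets its message_type pointer, each nested type records its
# containing_type, and every oneof member is appended to its OneofDescriptor
# and given a containing_oneof back-reference. protoc emits this wiring after
# all Descriptor objects exist so that forward references between messages
# resolve correctly.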
+_ARRAYFEATURETYPE_ENUMERATEDSHAPES.fields_by_name['shapes'].message_type = _ARRAYFEATURETYPE_SHAPE +_ARRAYFEATURETYPE_ENUMERATEDSHAPES.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE_SHAPERANGE.fields_by_name['sizeRanges'].message_type = _SIZERANGE +_ARRAYFEATURETYPE_SHAPERANGE.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE.fields_by_name['dataType'].enum_type = _ARRAYFEATURETYPE_ARRAYDATATYPE +_ARRAYFEATURETYPE.fields_by_name['enumeratedShapes'].message_type = _ARRAYFEATURETYPE_ENUMERATEDSHAPES +_ARRAYFEATURETYPE.fields_by_name['shapeRange'].message_type = _ARRAYFEATURETYPE_SHAPERANGE +_ARRAYFEATURETYPE_ARRAYDATATYPE.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['enumeratedShapes']) +_ARRAYFEATURETYPE.fields_by_name['enumeratedShapes'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'] +_ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['shapeRange']) +_ARRAYFEATURETYPE.fields_by_name['shapeRange'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['intDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['intDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['floatDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['floatDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['doubleDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['doubleDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_DICTIONARYFEATURETYPE.fields_by_name['int64KeyType'].message_type = _INT64FEATURETYPE +_DICTIONARYFEATURETYPE.fields_by_name['stringKeyType'].message_type = _STRINGFEATURETYPE +_DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'].fields.append( + _DICTIONARYFEATURETYPE.fields_by_name['int64KeyType']) +_DICTIONARYFEATURETYPE.fields_by_name['int64KeyType'].containing_oneof = _DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'] +_DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'].fields.append( + _DICTIONARYFEATURETYPE.fields_by_name['stringKeyType']) +_DICTIONARYFEATURETYPE.fields_by_name['stringKeyType'].containing_oneof = _DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'] +_SEQUENCEFEATURETYPE.fields_by_name['int64Type'].message_type = _INT64FEATURETYPE +_SEQUENCEFEATURETYPE.fields_by_name['stringType'].message_type = _STRINGFEATURETYPE +_SEQUENCEFEATURETYPE.fields_by_name['sizeRange'].message_type = _SIZERANGE +_SEQUENCEFEATURETYPE.oneofs_by_name['Type'].fields.append( + _SEQUENCEFEATURETYPE.fields_by_name['int64Type']) +_SEQUENCEFEATURETYPE.fields_by_name['int64Type'].containing_oneof = _SEQUENCEFEATURETYPE.oneofs_by_name['Type'] +_SEQUENCEFEATURETYPE.oneofs_by_name['Type'].fields.append( + _SEQUENCEFEATURETYPE.fields_by_name['stringType']) +_SEQUENCEFEATURETYPE.fields_by_name['stringType'].containing_oneof = _SEQUENCEFEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.fields_by_name['int64Type'].message_type = _INT64FEATURETYPE +_FEATURETYPE.fields_by_name['doubleType'].message_type = _DOUBLEFEATURETYPE +_FEATURETYPE.fields_by_name['stringType'].message_type = _STRINGFEATURETYPE 
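# A minimal usage sketch (editorial comment, not part of the generated file;
# assumes the coremltools package is importable) showing how the 'Type' oneof
# being wired in the surrounding statements behaves at runtime -- assigning to
# one member clears the others, and WhichOneof() reports the active member:
#
#   from coremltools.proto import FeatureTypes_pb2
#   ft = FeatureTypes_pb2.FeatureType()
#   ft.multiArrayType.shape.extend([3, 224, 224])
#   ft.multiArrayType.dataType = FeatureTypes_pb2.ArrayFeatureType.FLOAT32
#   assert ft.WhichOneof('Type') == 'multiArrayType'
#   ft.stringType.SetInParent()   # selecting another member clears multiArrayType
#   assert ft.WhichOneof('Type') == 'stringType'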
+_FEATURETYPE.fields_by_name['imageType'].message_type = _IMAGEFEATURETYPE +_FEATURETYPE.fields_by_name['multiArrayType'].message_type = _ARRAYFEATURETYPE +_FEATURETYPE.fields_by_name['dictionaryType'].message_type = _DICTIONARYFEATURETYPE +_FEATURETYPE.fields_by_name['sequenceType'].message_type = _SEQUENCEFEATURETYPE +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['int64Type']) +_FEATURETYPE.fields_by_name['int64Type'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['doubleType']) +_FEATURETYPE.fields_by_name['doubleType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['stringType']) +_FEATURETYPE.fields_by_name['stringType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['imageType']) +_FEATURETYPE.fields_by_name['imageType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['multiArrayType']) +_FEATURETYPE.fields_by_name['multiArrayType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['dictionaryType']) +_FEATURETYPE.fields_by_name['dictionaryType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['sequenceType']) +_FEATURETYPE.fields_by_name['sequenceType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +DESCRIPTOR.message_types_by_name['Int64FeatureType'] = _INT64FEATURETYPE +DESCRIPTOR.message_types_by_name['DoubleFeatureType'] = _DOUBLEFEATURETYPE +DESCRIPTOR.message_types_by_name['StringFeatureType'] = _STRINGFEATURETYPE +DESCRIPTOR.message_types_by_name['SizeRange'] = _SIZERANGE +DESCRIPTOR.message_types_by_name['ImageFeatureType'] = _IMAGEFEATURETYPE +DESCRIPTOR.message_types_by_name['ArrayFeatureType'] = _ARRAYFEATURETYPE +DESCRIPTOR.message_types_by_name['DictionaryFeatureType'] = _DICTIONARYFEATURETYPE +DESCRIPTOR.message_types_by_name['SequenceFeatureType'] = _SEQUENCEFEATURETYPE +DESCRIPTOR.message_types_by_name['FeatureType'] = _FEATURETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int64FeatureType = _reflection.GeneratedProtocolMessageType('Int64FeatureType', (_message.Message,), dict( + DESCRIPTOR = _INT64FEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64FeatureType) + )) +_sym_db.RegisterMessage(Int64FeatureType) + +DoubleFeatureType = _reflection.GeneratedProtocolMessageType('DoubleFeatureType', (_message.Message,), dict( + DESCRIPTOR = _DOUBLEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleFeatureType) + )) +_sym_db.RegisterMessage(DoubleFeatureType) + +StringFeatureType = _reflection.GeneratedProtocolMessageType('StringFeatureType', (_message.Message,), dict( + DESCRIPTOR = _STRINGFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringFeatureType) + )) +_sym_db.RegisterMessage(StringFeatureType) + +SizeRange = _reflection.GeneratedProtocolMessageType('SizeRange', (_message.Message,), dict( + DESCRIPTOR = _SIZERANGE, + __module__ = 'FeatureTypes_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.SizeRange) + )) +_sym_db.RegisterMessage(SizeRange) + +ImageFeatureType = _reflection.GeneratedProtocolMessageType('ImageFeatureType', (_message.Message,), dict( + + ImageSize = _reflection.GeneratedProtocolMessageType('ImageSize', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_IMAGESIZE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.ImageSize) + )) + , + + EnumeratedImageSizes = _reflection.GeneratedProtocolMessageType('EnumeratedImageSizes', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.EnumeratedImageSizes) + )) + , + + ImageSizeRange = _reflection.GeneratedProtocolMessageType('ImageSizeRange', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_IMAGESIZERANGE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.ImageSizeRange) + )) + , + DESCRIPTOR = _IMAGEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType) + )) +_sym_db.RegisterMessage(ImageFeatureType) +_sym_db.RegisterMessage(ImageFeatureType.ImageSize) +_sym_db.RegisterMessage(ImageFeatureType.EnumeratedImageSizes) +_sym_db.RegisterMessage(ImageFeatureType.ImageSizeRange) + +ArrayFeatureType = _reflection.GeneratedProtocolMessageType('ArrayFeatureType', (_message.Message,), dict( + + Shape = _reflection.GeneratedProtocolMessageType('Shape', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_SHAPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.Shape) + )) + , + + EnumeratedShapes = _reflection.GeneratedProtocolMessageType('EnumeratedShapes', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_ENUMERATEDSHAPES, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.EnumeratedShapes) + )) + , + + ShapeRange = _reflection.GeneratedProtocolMessageType('ShapeRange', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_SHAPERANGE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.ShapeRange) + )) + , + DESCRIPTOR = _ARRAYFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType) + )) +_sym_db.RegisterMessage(ArrayFeatureType) +_sym_db.RegisterMessage(ArrayFeatureType.Shape) +_sym_db.RegisterMessage(ArrayFeatureType.EnumeratedShapes) +_sym_db.RegisterMessage(ArrayFeatureType.ShapeRange) + +DictionaryFeatureType = _reflection.GeneratedProtocolMessageType('DictionaryFeatureType', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DictionaryFeatureType) + )) +_sym_db.RegisterMessage(DictionaryFeatureType) + +SequenceFeatureType = _reflection.GeneratedProtocolMessageType('SequenceFeatureType', (_message.Message,), dict( + DESCRIPTOR = _SEQUENCEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SequenceFeatureType) + )) +_sym_db.RegisterMessage(SequenceFeatureType) + +FeatureType = _reflection.GeneratedProtocolMessageType('FeatureType', (_message.Message,), dict( + DESCRIPTOR = 
_FEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureType) + )) +_sym_db.RegisterMessage(FeatureType) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py new file mode 100644 index 00000000..ede75fc4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py @@ -0,0 +1,118 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: FeatureVectorizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='FeatureVectorizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x17\x46\x65\x61tureVectorizer.proto\x12\x14\x43oreML.Specification\"\x98\x01\n\x11\x46\x65\x61tureVectorizer\x12\x46\n\tinputList\x18\x01 \x03(\x0b\x32\x33.CoreML.Specification.FeatureVectorizer.InputColumn\x1a;\n\x0bInputColumn\x12\x13\n\x0binputColumn\x18\x01 \x01(\t\x12\x17\n\x0finputDimensions\x18\x02 \x01(\x04\x42\x02H\x03\x62\x06proto3') +) + + + + +_FEATUREVECTORIZER_INPUTCOLUMN = _descriptor.Descriptor( + name='InputColumn', + full_name='CoreML.Specification.FeatureVectorizer.InputColumn', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputColumn', full_name='CoreML.Specification.FeatureVectorizer.InputColumn.inputColumn', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputDimensions', full_name='CoreML.Specification.FeatureVectorizer.InputColumn.inputDimensions', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=143, + serialized_end=202, +) + +_FEATUREVECTORIZER = _descriptor.Descriptor( + name='FeatureVectorizer', + full_name='CoreML.Specification.FeatureVectorizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputList', full_name='CoreML.Specification.FeatureVectorizer.inputList', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_FEATUREVECTORIZER_INPUTCOLUMN, ], + enum_types=[ + ], + options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=50, + serialized_end=202, +) + +_FEATUREVECTORIZER_INPUTCOLUMN.containing_type = _FEATUREVECTORIZER +_FEATUREVECTORIZER.fields_by_name['inputList'].message_type = _FEATUREVECTORIZER_INPUTCOLUMN +DESCRIPTOR.message_types_by_name['FeatureVectorizer'] = _FEATUREVECTORIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +FeatureVectorizer = _reflection.GeneratedProtocolMessageType('FeatureVectorizer', (_message.Message,), dict( + + InputColumn = _reflection.GeneratedProtocolMessageType('InputColumn', (_message.Message,), dict( + DESCRIPTOR = _FEATUREVECTORIZER_INPUTCOLUMN, + __module__ = 'FeatureVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureVectorizer.InputColumn) + )) + , + DESCRIPTOR = _FEATUREVECTORIZER, + __module__ = 'FeatureVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureVectorizer) + )) +_sym_db.RegisterMessage(FeatureVectorizer) +_sym_db.RegisterMessage(FeatureVectorizer.InputColumn) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py new file mode 100644 index 00000000..134de9b3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py @@ -0,0 +1,215 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: GLMClassifier.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='GLMClassifier.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13GLMClassifier.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x9c\x04\n\rGLMClassifier\x12@\n\x07weights\x18\x01 \x03(\x0b\x32/.CoreML.Specification.GLMClassifier.DoubleArray\x12\x0e\n\x06offset\x18\x02 \x03(\x01\x12\\\n\x17postEvaluationTransform\x18\x03 \x01(\x0e\x32;.CoreML.Specification.GLMClassifier.PostEvaluationTransform\x12H\n\rclassEncoding\x18\x04 \x01(\x0e\x32\x31.CoreML.Specification.GLMClassifier.ClassEncoding\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x1a\x1c\n\x0b\x44oubleArray\x12\r\n\x05value\x18\x01 \x03(\x01\"0\n\x17PostEvaluationTransform\x12\t\n\x05Logit\x10\x00\x12\n\n\x06Probit\x10\x01\"2\n\rClassEncoding\x12\x12\n\x0eReferenceClass\x10\x00\x12\r\n\tOneVsRest\x10\x01\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + +_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='PostEvaluationTransform', + full_name='CoreML.Specification.GLMClassifier.PostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='Logit', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Probit', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=493, + serialized_end=541, +) +_sym_db.RegisterEnumDescriptor(_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM) + +_GLMCLASSIFIER_CLASSENCODING = _descriptor.EnumDescriptor( + name='ClassEncoding', + full_name='CoreML.Specification.GLMClassifier.ClassEncoding', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='ReferenceClass', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OneVsRest', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=543, + serialized_end=593, +) +_sym_db.RegisterEnumDescriptor(_GLMCLASSIFIER_CLASSENCODING) + + +_GLMCLASSIFIER_DOUBLEARRAY = _descriptor.Descriptor( + name='DoubleArray', + full_name='CoreML.Specification.GLMClassifier.DoubleArray', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.GLMClassifier.DoubleArray.value', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=463, + serialized_end=491, +) + +_GLMCLASSIFIER = _descriptor.Descriptor( + name='GLMClassifier', + full_name='CoreML.Specification.GLMClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name='weights', full_name='CoreML.Specification.GLMClassifier.weights', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.GLMClassifier.offset', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.GLMClassifier.postEvaluationTransform', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classEncoding', full_name='CoreML.Specification.GLMClassifier.classEncoding', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.GLMClassifier.stringClassLabels', index=4, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.GLMClassifier.int64ClassLabels', index=5, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GLMCLASSIFIER_DOUBLEARRAY, ], + enum_types=[ + _GLMCLASSIFIER_POSTEVALUATIONTRANSFORM, + _GLMCLASSIFIER_CLASSENCODING, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.GLMClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=68, + serialized_end=608, +) + +_GLMCLASSIFIER_DOUBLEARRAY.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER.fields_by_name['weights'].message_type = _GLMCLASSIFIER_DOUBLEARRAY +_GLMCLASSIFIER.fields_by_name['postEvaluationTransform'].enum_type = _GLMCLASSIFIER_POSTEVALUATIONTRANSFORM +_GLMCLASSIFIER.fields_by_name['classEncoding'].enum_type = _GLMCLASSIFIER_CLASSENCODING +_GLMCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_GLMCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER_CLASSENCODING.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _GLMCLASSIFIER.fields_by_name['stringClassLabels']) +_GLMCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _GLMCLASSIFIER.oneofs_by_name['ClassLabels'] +_GLMCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _GLMCLASSIFIER.fields_by_name['int64ClassLabels']) +_GLMCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = 
_GLMCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['GLMClassifier'] = _GLMCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +GLMClassifier = _reflection.GeneratedProtocolMessageType('GLMClassifier', (_message.Message,), dict( + + DoubleArray = _reflection.GeneratedProtocolMessageType('DoubleArray', (_message.Message,), dict( + DESCRIPTOR = _GLMCLASSIFIER_DOUBLEARRAY, + __module__ = 'GLMClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMClassifier.DoubleArray) + )) + , + DESCRIPTOR = _GLMCLASSIFIER, + __module__ = 'GLMClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMClassifier) + )) +_sym_db.RegisterMessage(GLMClassifier) +_sym_db.RegisterMessage(GLMClassifier.DoubleArray) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py new file mode 100644 index 00000000..cb7491e7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py @@ -0,0 +1,154 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: GLMRegressor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='GLMRegressor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12GLMRegressor.proto\x12\x14\x43oreML.Specification\"\x9d\x02\n\x0cGLMRegressor\x12?\n\x07weights\x18\x01 \x03(\x0b\x32..CoreML.Specification.GLMRegressor.DoubleArray\x12\x0e\n\x06offset\x18\x02 \x03(\x01\x12[\n\x17postEvaluationTransform\x18\x03 \x01(\x0e\x32:.CoreML.Specification.GLMRegressor.PostEvaluationTransform\x1a\x1c\n\x0b\x44oubleArray\x12\r\n\x05value\x18\x01 \x03(\x01\"A\n\x17PostEvaluationTransform\x12\x0f\n\x0bNoTransform\x10\x00\x12\t\n\x05Logit\x10\x01\x12\n\n\x06Probit\x10\x02\x42\x02H\x03\x62\x06proto3') +) + + + +_GLMREGRESSOR_POSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='PostEvaluationTransform', + full_name='CoreML.Specification.GLMRegressor.PostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NoTransform', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Logit', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Probit', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=265, + serialized_end=330, +) +_sym_db.RegisterEnumDescriptor(_GLMREGRESSOR_POSTEVALUATIONTRANSFORM) + + +_GLMREGRESSOR_DOUBLEARRAY = _descriptor.Descriptor( + name='DoubleArray', + full_name='CoreML.Specification.GLMRegressor.DoubleArray', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.GLMRegressor.DoubleArray.value', index=0, + number=1, type=1, 
cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=235, + serialized_end=263, +) + +_GLMREGRESSOR = _descriptor.Descriptor( + name='GLMRegressor', + full_name='CoreML.Specification.GLMRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.GLMRegressor.weights', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.GLMRegressor.offset', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.GLMRegressor.postEvaluationTransform', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GLMREGRESSOR_DOUBLEARRAY, ], + enum_types=[ + _GLMREGRESSOR_POSTEVALUATIONTRANSFORM, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=45, + serialized_end=330, +) + +_GLMREGRESSOR_DOUBLEARRAY.containing_type = _GLMREGRESSOR +_GLMREGRESSOR.fields_by_name['weights'].message_type = _GLMREGRESSOR_DOUBLEARRAY +_GLMREGRESSOR.fields_by_name['postEvaluationTransform'].enum_type = _GLMREGRESSOR_POSTEVALUATIONTRANSFORM +_GLMREGRESSOR_POSTEVALUATIONTRANSFORM.containing_type = _GLMREGRESSOR +DESCRIPTOR.message_types_by_name['GLMRegressor'] = _GLMREGRESSOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +GLMRegressor = _reflection.GeneratedProtocolMessageType('GLMRegressor', (_message.Message,), dict( + + DoubleArray = _reflection.GeneratedProtocolMessageType('DoubleArray', (_message.Message,), dict( + DESCRIPTOR = _GLMREGRESSOR_DOUBLEARRAY, + __module__ = 'GLMRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMRegressor.DoubleArray) + )) + , + DESCRIPTOR = _GLMREGRESSOR, + __module__ = 'GLMRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMRegressor) + )) +_sym_db.RegisterMessage(GLMRegressor) +_sym_db.RegisterMessage(GLMRegressor.DoubleArray) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py new file mode 100644 index 00000000..4c917353 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py @@ -0,0 +1,107 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
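The GLMRegressor module that just closed above follows the same shape as most of these files: a repeated nested message (DoubleArray) holding weight vectors, plus a nested enum (PostEvaluationTransform). A minimal usage sketch of the generated class, assuming the modules are importable under the coremltools.proto package as laid out in this patch:

    from coremltools.proto import GLMRegressor_pb2

    reg = GLMRegressor_pb2.GLMRegressor()
    arr = reg.weights.add()            # repeated nested message: add() returns a fresh DoubleArray
    arr.value.extend([0.25, -0.75])
    reg.offset.append(1.0)
    # Nested enum values are exposed as attributes of the containing class.
    reg.postEvaluationTransform = GLMRegressor_pb2.GLMRegressor.Logit

    data = reg.SerializeToString()     # round-trips through the wire format
    assert GLMRegressor_pb2.GLMRegressor.FromString(data) == reg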
+# source: Gazetteer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Gazetteer.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x0fGazetteer.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\x9c\x01\n\tGazetteer\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_GAZETTEER = _descriptor.Descriptor( + name='Gazetteer', + full_name='CoreML.Specification.CoreMLModels.Gazetteer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.Gazetteer.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.Gazetteer.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.Gazetteer.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.stringClassLabels', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=77, + serialized_end=233, +) + +_GAZETTEER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_GAZETTEER.oneofs_by_name['ClassLabels'].fields.append( + _GAZETTEER.fields_by_name['stringClassLabels']) 
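The append/containing_oneof wiring around this point is what turns ClassLabels into a real oneof on the generated Gazetteer class: the runtime tracks which member is set and reports it through WhichOneof. A short sketch of that behavior, assuming the same coremltools.proto import path and that StringVector keeps its entries in a repeated string field named vector (its layout in DataStructures.proto):

    from coremltools.proto import Gazetteer_pb2

    g = Gazetteer_pb2.Gazetteer()
    assert g.WhichOneof('ClassLabels') is None     # nothing set yet

    # Mutating the submessage marks stringClassLabels as the active member;
    # 'vector' is the assumed repeated-string field of StringVector.
    g.stringClassLabels.vector.extend(['negative', 'positive'])
    assert g.WhichOneof('ClassLabels') == 'stringClassLabels'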
+_GAZETTEER.fields_by_name['stringClassLabels'].containing_oneof = _GAZETTEER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['Gazetteer'] = _GAZETTEER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Gazetteer = _reflection.GeneratedProtocolMessageType('Gazetteer', (_message.Message,), dict( + DESCRIPTOR = _GAZETTEER, + __module__ = 'Gazetteer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.Gazetteer) + )) +_sym_db.RegisterMessage(Gazetteer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py new file mode 100644 index 00000000..c6411dd5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py @@ -0,0 +1,64 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Identity.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Identity.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0eIdentity.proto\x12\x14\x43oreML.Specification\"\n\n\x08IdentityB\x02H\x03\x62\x06proto3') +) + + + + +_IDENTITY = _descriptor.Descriptor( + name='Identity', + full_name='CoreML.Specification.Identity', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=40, + serialized_end=50, +) + +DESCRIPTOR.message_types_by_name['Identity'] = _IDENTITY +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Identity = _reflection.GeneratedProtocolMessageType('Identity', (_message.Message,), dict( + DESCRIPTOR = _IDENTITY, + __module__ = 'Identity_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Identity) + )) +_sym_db.RegisterMessage(Identity) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py new file mode 100644 index 00000000..e18f0f3d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py @@ -0,0 +1,182 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Imputer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Imputer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\rImputer.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xf3\x03\n\x07Imputer\x12\x1c\n\x12imputedDoubleValue\x18\x01 \x01(\x01H\x00\x12\x1b\n\x11imputedInt64Value\x18\x02 \x01(\x03H\x00\x12\x1c\n\x12imputedStringValue\x18\x03 \x01(\tH\x00\x12@\n\x12imputedDoubleArray\x18\x04 \x01(\x0b\x32\".CoreML.Specification.DoubleVectorH\x00\x12>\n\x11imputedInt64Array\x18\x05 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12J\n\x17imputedStringDictionary\x18\x06 \x01(\x0b\x32\'.CoreML.Specification.StringToDoubleMapH\x00\x12H\n\x16imputedInt64Dictionary\x18\x07 \x01(\x0b\x32&.CoreML.Specification.Int64ToDoubleMapH\x00\x12\x1c\n\x12replaceDoubleValue\x18\x0b \x01(\x01H\x01\x12\x1b\n\x11replaceInt64Value\x18\x0c \x01(\x03H\x01\x12\x1c\n\x12replaceStringValue\x18\r \x01(\tH\x01\x42\x0e\n\x0cImputedValueB\x0e\n\x0cReplaceValueB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_IMPUTER = _descriptor.Descriptor( + name='Imputer', + full_name='CoreML.Specification.Imputer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='imputedDoubleValue', full_name='CoreML.Specification.Imputer.imputedDoubleValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Value', full_name='CoreML.Specification.Imputer.imputedInt64Value', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedStringValue', full_name='CoreML.Specification.Imputer.imputedStringValue', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedDoubleArray', full_name='CoreML.Specification.Imputer.imputedDoubleArray', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Array', full_name='CoreML.Specification.Imputer.imputedInt64Array', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedStringDictionary', full_name='CoreML.Specification.Imputer.imputedStringDictionary', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Dictionary', full_name='CoreML.Specification.Imputer.imputedInt64Dictionary', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceDoubleValue', full_name='CoreML.Specification.Imputer.replaceDoubleValue', index=7, + number=11, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceInt64Value', full_name='CoreML.Specification.Imputer.replaceInt64Value', index=8, + number=12, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceStringValue', full_name='CoreML.Specification.Imputer.replaceStringValue', index=9, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ImputedValue', full_name='CoreML.Specification.Imputer.ImputedValue', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ReplaceValue', full_name='CoreML.Specification.Imputer.ReplaceValue', + index=1, containing_type=None, fields=[]), + ], + serialized_start=62, + serialized_end=561, +) + +_IMPUTER.fields_by_name['imputedDoubleArray'].message_type = DataStructures__pb2._DOUBLEVECTOR +_IMPUTER.fields_by_name['imputedInt64Array'].message_type = DataStructures__pb2._INT64VECTOR +_IMPUTER.fields_by_name['imputedStringDictionary'].message_type = DataStructures__pb2._STRINGTODOUBLEMAP +_IMPUTER.fields_by_name['imputedInt64Dictionary'].message_type = DataStructures__pb2._INT64TODOUBLEMAP +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedDoubleValue']) +_IMPUTER.fields_by_name['imputedDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Value']) +_IMPUTER.fields_by_name['imputedInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedStringValue']) +_IMPUTER.fields_by_name['imputedStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedDoubleArray']) +_IMPUTER.fields_by_name['imputedDoubleArray'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Array']) +_IMPUTER.fields_by_name['imputedInt64Array'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedStringDictionary']) 
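This run of append/containing_oneof statements gives Imputer two independent oneof groups, ImputedValue and ReplaceValue. At runtime, setting one member of a group clears whichever sibling was previously set, while the other group is unaffected. A sketch, under the same import-path assumption:

    from coremltools.proto import Imputer_pb2

    imp = Imputer_pb2.Imputer()
    imp.imputedDoubleValue = 0.0       # oneof scalars track presence even at the default value
    assert imp.WhichOneof('ImputedValue') == 'imputedDoubleValue'

    imp.imputedInt64Value = 7          # setting a sibling clears imputedDoubleValue
    assert imp.WhichOneof('ImputedValue') == 'imputedInt64Value'

    imp.replaceDoubleValue = -1.0      # ReplaceValue is a separate group; ImputedValue keeps its member
    assert imp.WhichOneof('ReplaceValue') == 'replaceDoubleValue'
    assert imp.WhichOneof('ImputedValue') == 'imputedInt64Value'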
+_IMPUTER.fields_by_name['imputedStringDictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Dictionary']) +_IMPUTER.fields_by_name['imputedInt64Dictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceDoubleValue']) +_IMPUTER.fields_by_name['replaceDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceInt64Value']) +_IMPUTER.fields_by_name['replaceInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceStringValue']) +_IMPUTER.fields_by_name['replaceStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +DESCRIPTOR.message_types_by_name['Imputer'] = _IMPUTER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Imputer = _reflection.GeneratedProtocolMessageType('Imputer', (_message.Message,), dict( + DESCRIPTOR = _IMPUTER, + __module__ = 'Imputer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Imputer) + )) +_sym_db.RegisterMessage(Imputer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py new file mode 100644 index 00000000..b70f3c10 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py @@ -0,0 +1,238 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: ItemSimilarityRecommender.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ItemSimilarityRecommender.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1fItemSimilarityRecommender.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xb2\x05\n\x19ItemSimilarityRecommender\x12Z\n\x14itemItemSimilarities\x18\x01 \x03(\x0b\x32<.CoreML.Specification.ItemSimilarityRecommender.SimilarItems\x12\x39\n\ritemStringIds\x18\x02 \x01(\x0b\x32\".CoreML.Specification.StringVector\x12\x37\n\x0citemInt64Ids\x18\x03 \x01(\x0b\x32!.CoreML.Specification.Int64Vector\x12\x1c\n\x14itemInputFeatureName\x18\n \x01(\t\x12*\n\"numRecommendationsInputFeatureName\x18\x0b \x01(\t\x12\'\n\x1fitemRestrictionInputFeatureName\x18\x0c \x01(\t\x12%\n\x1ditemExclusionInputFeatureName\x18\r \x01(\t\x12,\n$recommendedItemListOutputFeatureName\x18\x14 \x01(\t\x12-\n%recommendedItemScoreOutputFeatureName\x18\x15 \x01(\t\x1a\x38\n\rConnectedItem\x12\x0e\n\x06itemId\x18\x01 \x01(\x04\x12\x17\n\x0fsimilarityScore\x18\x02 \x01(\x01\x1a\x93\x01\n\x0cSimilarItems\x12\x0e\n\x06itemId\x18\x01 \x01(\x04\x12V\n\x0fsimilarItemList\x18\x02 \x03(\x0b\x32=.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem\x12\x1b\n\x13itemScoreAdjustment\x18\x03 \x01(\x01\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM = _descriptor.Descriptor( + name='ConnectedItem', + full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemId', full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.itemId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='similarityScore', full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.similarityScore', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=564, + serialized_end=620, +) + +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS = _descriptor.Descriptor( + name='SimilarItems', + full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemId', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.itemId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='similarItemList', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.similarItemList', index=1, + 
number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemScoreAdjustment', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.itemScoreAdjustment', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=623, + serialized_end=770, +) + +_ITEMSIMILARITYRECOMMENDER = _descriptor.Descriptor( + name='ItemSimilarityRecommender', + full_name='CoreML.Specification.ItemSimilarityRecommender', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemItemSimilarities', full_name='CoreML.Specification.ItemSimilarityRecommender.itemItemSimilarities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemStringIds', full_name='CoreML.Specification.ItemSimilarityRecommender.itemStringIds', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemInt64Ids', full_name='CoreML.Specification.ItemSimilarityRecommender.itemInt64Ids', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemInputFeatureName', index=3, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numRecommendationsInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.numRecommendationsInputFeatureName', index=4, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemRestrictionInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemRestrictionInputFeatureName', index=5, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemExclusionInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemExclusionInputFeatureName', index=6, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recommendedItemListOutputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.recommendedItemListOutputFeatureName', index=7, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recommendedItemScoreOutputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.recommendedItemScoreOutputFeatureName', index=8, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM, _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=80, + serialized_end=770, +) + +_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM.containing_type = _ITEMSIMILARITYRECOMMENDER +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS.fields_by_name['similarItemList'].message_type = _ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS.containing_type = _ITEMSIMILARITYRECOMMENDER +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemItemSimilarities'].message_type = _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemStringIds'].message_type = DataStructures__pb2._STRINGVECTOR +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemInt64Ids'].message_type = DataStructures__pb2._INT64VECTOR +DESCRIPTOR.message_types_by_name['ItemSimilarityRecommender'] = _ITEMSIMILARITYRECOMMENDER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ItemSimilarityRecommender = _reflection.GeneratedProtocolMessageType('ItemSimilarityRecommender', (_message.Message,), dict( + + ConnectedItem = _reflection.GeneratedProtocolMessageType('ConnectedItem', (_message.Message,), dict( + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender.ConnectedItem) + )) + , + + SimilarItems = _reflection.GeneratedProtocolMessageType('SimilarItems', (_message.Message,), dict( + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender.SimilarItems) + )) + , + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender) + )) +_sym_db.RegisterMessage(ItemSimilarityRecommender) +_sym_db.RegisterMessage(ItemSimilarityRecommender.ConnectedItem) +_sym_db.RegisterMessage(ItemSimilarityRecommender.SimilarItems) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py new file mode 100644 index 00000000..325492d1 --- /dev/null +++ 
b/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py @@ -0,0 +1,138 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: LinkedModel.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='LinkedModel.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x11LinkedModel.proto\x12\x14\x43oreML.Specification\x1a\x10Parameters.proto\"[\n\x0bLinkedModel\x12@\n\x0flinkedModelFile\x18\x01 \x01(\x0b\x32%.CoreML.Specification.LinkedModelFileH\x00\x42\n\n\x08LinkType\"\x9b\x01\n\x0fLinkedModelFile\x12\x42\n\x13linkedModelFileName\x18\x01 \x01(\x0b\x32%.CoreML.Specification.StringParameter\x12\x44\n\x15linkedModelSearchPath\x18\x02 \x01(\x0b\x32%.CoreML.Specification.StringParameterB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[Parameters__pb2.DESCRIPTOR,], + public_dependencies=[Parameters__pb2.DESCRIPTOR,]) + + + + +_LINKEDMODEL = _descriptor.Descriptor( + name='LinkedModel', + full_name='CoreML.Specification.LinkedModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linkedModelFile', full_name='CoreML.Specification.LinkedModel.linkedModelFile', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='LinkType', full_name='CoreML.Specification.LinkedModel.LinkType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=61, + serialized_end=152, +) + + +_LINKEDMODELFILE = _descriptor.Descriptor( + name='LinkedModelFile', + full_name='CoreML.Specification.LinkedModelFile', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linkedModelFileName', full_name='CoreML.Specification.LinkedModelFile.linkedModelFileName', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linkedModelSearchPath', full_name='CoreML.Specification.LinkedModelFile.linkedModelSearchPath', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + 
], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=155, + serialized_end=310, +) + +_LINKEDMODEL.fields_by_name['linkedModelFile'].message_type = _LINKEDMODELFILE +_LINKEDMODEL.oneofs_by_name['LinkType'].fields.append( + _LINKEDMODEL.fields_by_name['linkedModelFile']) +_LINKEDMODEL.fields_by_name['linkedModelFile'].containing_oneof = _LINKEDMODEL.oneofs_by_name['LinkType'] +_LINKEDMODELFILE.fields_by_name['linkedModelFileName'].message_type = Parameters__pb2._STRINGPARAMETER +_LINKEDMODELFILE.fields_by_name['linkedModelSearchPath'].message_type = Parameters__pb2._STRINGPARAMETER +DESCRIPTOR.message_types_by_name['LinkedModel'] = _LINKEDMODEL +DESCRIPTOR.message_types_by_name['LinkedModelFile'] = _LINKEDMODELFILE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +LinkedModel = _reflection.GeneratedProtocolMessageType('LinkedModel', (_message.Message,), dict( + DESCRIPTOR = _LINKEDMODEL, + __module__ = 'LinkedModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinkedModel) + )) +_sym_db.RegisterMessage(LinkedModel) + +LinkedModelFile = _reflection.GeneratedProtocolMessageType('LinkedModelFile', (_message.Message,), dict( + DESCRIPTOR = _LINKEDMODELFILE, + __module__ = 'LinkedModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinkedModelFile) + )) +_sym_db.RegisterMessage(LinkedModelFile) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py new file mode 100644 index 00000000..0e9bf64f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py @@ -0,0 +1,2086 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
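Every module in this patch ends with the same _sym_db.RegisterFileDescriptor / _sym_db.RegisterMessage pair seen above for LinkedModel. The practical effect is that each generated class also becomes retrievable from protobuf's default symbol database by its fully qualified proto name, so tooling can resolve message types without importing each module explicitly. A minimal sketch, assuming the older protobuf runtime this style of generated code targets:

    from google.protobuf import symbol_database
    from coremltools.proto import LinkedModel_pb2   # importing runs the Register* calls

    cls = symbol_database.Default().GetSymbol('CoreML.Specification.LinkedModel')
    assert cls is LinkedModel_pb2.LinkedModel       # the very class the module exposes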
+# source: MIL.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='MIL.proto', + package='CoreML.Specification.MILSpec', + syntax='proto3', + serialized_pb=_b('\n\tMIL.proto\x12\x1c\x43oreML.Specification.MILSpec\"\xf3\x02\n\x07Program\x12\x0f\n\x07version\x18\x01 \x01(\x03\x12G\n\tfunctions\x18\x02 \x03(\x0b\x32\x34.CoreML.Specification.MILSpec.Program.FunctionsEntry\x12\x11\n\tdocString\x18\x03 \x01(\t\x12I\n\nattributes\x18\x04 \x03(\x0b\x32\x35.CoreML.Specification.MILSpec.Program.AttributesEntry\x1aX\n\x0e\x46unctionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.Function:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xbe\x03\n\x08\x46unction\x12<\n\x06inputs\x18\x01 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\r\n\x05opset\x18\x02 \x01(\t\x12_\n\x15\x62lock_specializations\x18\x03 \x03(\x0b\x32@.CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry\x12J\n\nattributes\x18\x04 \x03(\x0b\x32\x36.CoreML.Specification.MILSpec.Function.AttributesEntry\x1a`\n\x19\x42lockSpecializationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Block:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xb4\x02\n\x05\x42lock\x12<\n\x06inputs\x18\x01 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\x0f\n\x07outputs\x18\x02 \x03(\t\x12;\n\noperations\x18\x03 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.Operation\x12G\n\nattributes\x18\x04 \x03(\x0b\x32\x33.CoreML.Specification.MILSpec.Block.AttributesEntry\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xa9\x01\n\x08\x41rgument\x12\x41\n\targuments\x18\x01 \x03(\x0b\x32..CoreML.Specification.MILSpec.Argument.Binding\x1aZ\n\x07\x42inding\x12\x0e\n\x04name\x18\x01 \x01(\tH\x00\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.ValueH\x00\x42\t\n\x07\x62inding\"\xce\x03\n\tOperation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x43\n\x06inputs\x18\x02 \x03(\x0b\x32\x33.CoreML.Specification.MILSpec.Operation.InputsEntry\x12=\n\x07outputs\x18\x03 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\x33\n\x06\x62locks\x18\x04 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Block\x12K\n\nattributes\x18\x05 \x03(\x0b\x32\x37.CoreML.Specification.MILSpec.Operation.AttributesEntry\x1aU\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.Argument:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"U\n\x0eNamedValueType\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x04type\x18\x02 
\x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"\x95\x02\n\tValueType\x12>\n\ntensorType\x18\x01 \x01(\x0b\x32(.CoreML.Specification.MILSpec.TensorTypeH\x00\x12:\n\x08listType\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.ListTypeH\x00\x12<\n\ttupleType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.TupleTypeH\x00\x12\x46\n\x0e\x64ictionaryType\x18\x04 \x01(\x0b\x32,.CoreML.Specification.MILSpec.DictionaryTypeH\x00\x42\x06\n\x04type\"\xb7\x02\n\nTensorType\x12\x38\n\x08\x64\x61taType\x18\x01 \x01(\x0e\x32&.CoreML.Specification.MILSpec.DataType\x12\x0c\n\x04rank\x18\x02 \x01(\x03\x12;\n\ndimensions\x18\x03 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.Dimension\x12L\n\nattributes\x18\x04 \x03(\x0b\x32\x38.CoreML.Specification.MILSpec.TensorType.AttributesEntry\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"C\n\tTupleType\x12\x36\n\x05types\x18\x01 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"z\n\x08ListType\x12\x35\n\x04type\x18\x01 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12\x37\n\x06length\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.Dimension\"\x86\x01\n\x0e\x44ictionaryType\x12\x38\n\x07keyType\x18\x01 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12:\n\tvalueType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"\xfd\x01\n\tDimension\x12M\n\x08\x63onstant\x18\x01 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.Dimension.ConstantDimensionH\x00\x12K\n\x07unknown\x18\x02 \x01(\x0b\x32\x38.CoreML.Specification.MILSpec.Dimension.UnknownDimensionH\x00\x1a!\n\x11\x43onstantDimension\x12\x0c\n\x04size\x18\x01 \x01(\x04\x1a$\n\x10UnknownDimension\x12\x10\n\x08variadic\x18\x01 \x01(\x08\x42\x0b\n\tdimension\"\xb9\x04\n\x05Value\x12\x11\n\tdocString\x18\x01 \x01(\t\x12\x35\n\x04type\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12L\n\x0eimmediateValue\x18\x03 \x01(\x0b\x32\x32.CoreML.Specification.MILSpec.Value.ImmediateValueH\x00\x12J\n\rblobFileValue\x18\x05 \x01(\x0b\x32\x31.CoreML.Specification.MILSpec.Value.BlobFileValueH\x00\x1a\x8f\x02\n\x0eImmediateValue\x12;\n\x06tensor\x18\x01 \x01(\x0b\x32).CoreML.Specification.MILSpec.TensorValueH\x00\x12\x39\n\x05tuple\x18\x02 \x01(\x0b\x32(.CoreML.Specification.MILSpec.TupleValueH\x00\x12\x37\n\x04list\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ListValueH\x00\x12\x43\n\ndictionary\x18\x04 \x01(\x0b\x32-.CoreML.Specification.MILSpec.DictionaryValueH\x00\x42\x07\n\x05value\x1a\x31\n\rBlobFileValue\x12\x10\n\x08\x66ileName\x18\x01 \x01(\t\x12\x0e\n\x06offset\x18\x02 \x01(\x04\x42\x07\n\x05value\"\xac\x06\n\x0bTensorValue\x12J\n\x06\x66loats\x18\x01 \x01(\x0b\x32\x38.CoreML.Specification.MILSpec.TensorValue.RepeatedFloatsH\x00\x12\x46\n\x04ints\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.MILSpec.TensorValue.RepeatedIntsH\x00\x12H\n\x05\x62ools\x18\x03 \x01(\x0b\x32\x37.CoreML.Specification.MILSpec.TensorValue.RepeatedBoolsH\x00\x12L\n\x07strings\x18\x04 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.TensorValue.RepeatedStringsH\x00\x12N\n\x08longInts\x18\x05 \x01(\x0b\x32:.CoreML.Specification.MILSpec.TensorValue.RepeatedLongIntsH\x00\x12L\n\x07\x64oubles\x18\x06 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.TensorValue.RepeatedDoublesH\x00\x12H\n\x05\x62ytes\x18\x07 \x01(\x0b\x32\x37.CoreML.Specification.MILSpec.TensorValue.RepeatedBytesH\x00\x1a$\n\x0eRepeatedFloats\x12\x12\n\x06values\x18\x01 
\x03(\x02\x42\x02\x10\x01\x1a%\n\x0fRepeatedDoubles\x12\x12\n\x06values\x18\x01 \x03(\x01\x42\x02\x10\x01\x1a\"\n\x0cRepeatedInts\x12\x12\n\x06values\x18\x01 \x03(\x05\x42\x02\x10\x01\x1a&\n\x10RepeatedLongInts\x12\x12\n\x06values\x18\x01 \x03(\x03\x42\x02\x10\x01\x1a#\n\rRepeatedBools\x12\x12\n\x06values\x18\x01 \x03(\x08\x42\x02\x10\x01\x1a!\n\x0fRepeatedStrings\x12\x0e\n\x06values\x18\x01 \x03(\t\x1a\x1f\n\rRepeatedBytes\x12\x0e\n\x06values\x18\x01 \x01(\x0c\x42\x07\n\x05value\"A\n\nTupleValue\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Value\"@\n\tListValue\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Value\"\xd3\x01\n\x0f\x44ictionaryValue\x12J\n\x06values\x18\x01 \x03(\x0b\x32:.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair\x1at\n\x0cKeyValuePair\x12\x30\n\x03key\x18\x01 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value*\xb2\x01\n\x08\x44\x61taType\x12\x0f\n\x0bUNUSED_TYPE\x10\x00\x12\x08\n\x04\x42OOL\x10\x01\x12\n\n\x06STRING\x10\x02\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT32\x10\x0b\x12\x0b\n\x07\x46LOAT64\x10\x0c\x12\x08\n\x04INT8\x10\x15\x12\t\n\x05INT16\x10\x16\x12\t\n\x05INT32\x10\x17\x12\t\n\x05INT64\x10\x18\x12\t\n\x05UINT8\x10\x1f\x12\n\n\x06UINT16\x10 \x12\n\n\x06UINT32\x10!\x12\n\n\x06UINT64\x10\"B\x02H\x03\x62\x06proto3') +) + +_DATATYPE = _descriptor.EnumDescriptor( + name='DataType', + full_name='CoreML.Specification.MILSpec.DataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNUSED_TYPE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BOOL', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='STRING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT16', index=3, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT32', index=4, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT64', index=5, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT8', index=6, number=21, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT16', index=7, number=22, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT32', index=8, number=23, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT64', index=9, number=24, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT8', index=10, number=31, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT16', index=11, number=32, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT32', index=12, number=33, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT64', index=13, number=34, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=4816, + serialized_end=4994, +) +_sym_db.RegisterEnumDescriptor(_DATATYPE) + +DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE) +UNUSED_TYPE = 0 +BOOL = 1 +STRING = 2 +FLOAT16 = 10 +FLOAT32 = 11 +FLOAT64 = 12 +INT8 = 21 +INT16 = 22 +INT32 = 23 +INT64 = 24 +UINT8 = 31 +UINT16 = 32 +UINT32 = 33 +UINT64 = 34 + + + +_PROGRAM_FUNCTIONSENTRY = _descriptor.Descriptor( + name='FunctionsEntry', + full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=239, + serialized_end=327, +) + +_PROGRAM_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Program.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Program.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Program.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_PROGRAM = _descriptor.Descriptor( + name='Program', + full_name='CoreML.Specification.MILSpec.Program', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.MILSpec.Program.version', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='functions', full_name='CoreML.Specification.MILSpec.Program.functions', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='docString', full_name='CoreML.Specification.MILSpec.Program.docString', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Program.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_PROGRAM_FUNCTIONSENTRY, _PROGRAM_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=415, +) + + +_FUNCTION_BLOCKSPECIALIZATIONSENTRY = _descriptor.Descriptor( + name='BlockSpecializationsEntry', + full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=680, + serialized_end=776, +) + +_FUNCTION_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Function.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Function.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Function.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_FUNCTION = _descriptor.Descriptor( + name='Function', + full_name='CoreML.Specification.MILSpec.Function', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Function.inputs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='opset', full_name='CoreML.Specification.MILSpec.Function.opset', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='block_specializations', full_name='CoreML.Specification.MILSpec.Function.block_specializations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Function.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_FUNCTION_BLOCKSPECIALIZATIONSENTRY, _FUNCTION_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=418, + serialized_end=864, +) + + +_BLOCK_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Block.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Block.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Block.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_BLOCK = _descriptor.Descriptor( + name='Block', + full_name='CoreML.Specification.MILSpec.Block', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Block.inputs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputs', full_name='CoreML.Specification.MILSpec.Block.outputs', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='operations', full_name='CoreML.Specification.MILSpec.Block.operations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Block.attributes', index=3, 
+ number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_BLOCK_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=867, + serialized_end=1175, +) + + +_ARGUMENT_BINDING = _descriptor.Descriptor( + name='Binding', + full_name='CoreML.Specification.MILSpec.Argument.Binding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.MILSpec.Argument.Binding.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Argument.Binding.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='binding', full_name='CoreML.Specification.MILSpec.Argument.Binding.binding', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1257, + serialized_end=1347, +) + +_ARGUMENT = _descriptor.Descriptor( + name='Argument', + full_name='CoreML.Specification.MILSpec.Argument', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='arguments', full_name='CoreML.Specification.MILSpec.Argument.arguments', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ARGUMENT_BINDING, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1178, + serialized_end=1347, +) + + +_OPERATION_INPUTSENTRY = _descriptor.Descriptor( + name='InputsEntry', + full_name='CoreML.Specification.MILSpec.Operation.InputsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Operation.InputsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Operation.InputsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + 
], + serialized_start=1639, + serialized_end=1724, +) + +_OPERATION_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_OPERATION = _descriptor.Descriptor( + name='Operation', + full_name='CoreML.Specification.MILSpec.Operation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.Operation.type', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Operation.inputs', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputs', full_name='CoreML.Specification.MILSpec.Operation.outputs', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blocks', full_name='CoreML.Specification.MILSpec.Operation.blocks', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Operation.attributes', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_OPERATION_INPUTSENTRY, _OPERATION_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1350, + serialized_end=1812, +) + + +_NAMEDVALUETYPE = _descriptor.Descriptor( + name='NamedValueType', + full_name='CoreML.Specification.MILSpec.NamedValueType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.MILSpec.NamedValueType.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.NamedValueType.type', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1814, + serialized_end=1899, +) + + +_VALUETYPE = _descriptor.Descriptor( + name='ValueType', + full_name='CoreML.Specification.MILSpec.ValueType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tensorType', full_name='CoreML.Specification.MILSpec.ValueType.tensorType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='listType', full_name='CoreML.Specification.MILSpec.ValueType.listType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tupleType', full_name='CoreML.Specification.MILSpec.ValueType.tupleType', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionaryType', full_name='CoreML.Specification.MILSpec.ValueType.dictionaryType', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.ValueType.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1902, + serialized_end=2179, +) + + +_TENSORTYPE_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_TENSORTYPE = _descriptor.Descriptor( + name='TensorType', + full_name='CoreML.Specification.MILSpec.TensorType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='dataType', full_name='CoreML.Specification.MILSpec.TensorType.dataType', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rank', full_name='CoreML.Specification.MILSpec.TensorType.rank', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dimensions', full_name='CoreML.Specification.MILSpec.TensorType.dimensions', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.TensorType.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TENSORTYPE_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2182, + serialized_end=2493, +) + + +_TUPLETYPE = _descriptor.Descriptor( + name='TupleType', + full_name='CoreML.Specification.MILSpec.TupleType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='types', full_name='CoreML.Specification.MILSpec.TupleType.types', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2495, + serialized_end=2562, +) + + +_LISTTYPE = _descriptor.Descriptor( + name='ListType', + full_name='CoreML.Specification.MILSpec.ListType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.ListType.type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='length', full_name='CoreML.Specification.MILSpec.ListType.length', index=1, + number=2, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2564, + serialized_end=2686, +) + + +_DICTIONARYTYPE = _descriptor.Descriptor( + name='DictionaryType', + full_name='CoreML.Specification.MILSpec.DictionaryType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keyType', full_name='CoreML.Specification.MILSpec.DictionaryType.keyType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='valueType', full_name='CoreML.Specification.MILSpec.DictionaryType.valueType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2689, + serialized_end=2823, +) + + +_DIMENSION_CONSTANTDIMENSION = _descriptor.Descriptor( + name='ConstantDimension', + full_name='CoreML.Specification.MILSpec.Dimension.ConstantDimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', full_name='CoreML.Specification.MILSpec.Dimension.ConstantDimension.size', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2995, + serialized_end=3028, +) + +_DIMENSION_UNKNOWNDIMENSION = _descriptor.Descriptor( + name='UnknownDimension', + full_name='CoreML.Specification.MILSpec.Dimension.UnknownDimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='variadic', full_name='CoreML.Specification.MILSpec.Dimension.UnknownDimension.variadic', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3030, + serialized_end=3066, +) + +_DIMENSION = _descriptor.Descriptor( + name='Dimension', + full_name='CoreML.Specification.MILSpec.Dimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='constant', full_name='CoreML.Specification.MILSpec.Dimension.constant', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='unknown', full_name='CoreML.Specification.MILSpec.Dimension.unknown', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_DIMENSION_CONSTANTDIMENSION, _DIMENSION_UNKNOWNDIMENSION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='dimension', full_name='CoreML.Specification.MILSpec.Dimension.dimension', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2826, + serialized_end=3079, +) + + +_VALUE_IMMEDIATEVALUE = _descriptor.Descriptor( + name='ImmediateValue', + full_name='CoreML.Specification.MILSpec.Value.ImmediateValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tensor', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.tensor', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tuple', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.tuple', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='list', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.list', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionary', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.dictionary', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3320, + serialized_end=3591, +) + +_VALUE_BLOBFILEVALUE = _descriptor.Descriptor( + name='BlobFileValue', + full_name='CoreML.Specification.MILSpec.Value.BlobFileValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fileName', full_name='CoreML.Specification.MILSpec.Value.BlobFileValue.fileName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.MILSpec.Value.BlobFileValue.offset', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3593, + serialized_end=3642, +) + +_VALUE = _descriptor.Descriptor( + name='Value', + full_name='CoreML.Specification.MILSpec.Value', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='docString', full_name='CoreML.Specification.MILSpec.Value.docString', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.Value.type', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='immediateValue', full_name='CoreML.Specification.MILSpec.Value.immediateValue', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blobFileValue', full_name='CoreML.Specification.MILSpec.Value.blobFileValue', index=3, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_VALUE_IMMEDIATEVALUE, _VALUE_BLOBFILEVALUE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Value.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3082, + serialized_end=3651, +) + + +_TENSORVALUE_REPEATEDFLOATS = _descriptor.Descriptor( + name='RepeatedFloats', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedFloats', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedFloats.values', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4201, + serialized_end=4237, +) + +_TENSORVALUE_REPEATEDDOUBLES = _descriptor.Descriptor( + name='RepeatedDoubles', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles.values', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4239, + serialized_end=4276, +) + +_TENSORVALUE_REPEATEDINTS = _descriptor.Descriptor( + name='RepeatedInts', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedInts', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedInts.values', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4278, + serialized_end=4312, +) + +_TENSORVALUE_REPEATEDLONGINTS = _descriptor.Descriptor( + name='RepeatedLongInts', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts.values', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4314, + serialized_end=4352, +) + +_TENSORVALUE_REPEATEDBOOLS = _descriptor.Descriptor( + name='RepeatedBools', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBools', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBools.values', index=0, + number=1, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4354, + serialized_end=4389, +) + +_TENSORVALUE_REPEATEDSTRINGS = _descriptor.Descriptor( + name='RepeatedStrings', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedStrings', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedStrings.values', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=4391, + serialized_end=4424, +) + +_TENSORVALUE_REPEATEDBYTES = _descriptor.Descriptor( + name='RepeatedBytes', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBytes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBytes.values', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4426, + serialized_end=4457, +) + +_TENSORVALUE = _descriptor.Descriptor( + name='TensorValue', + full_name='CoreML.Specification.MILSpec.TensorValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='floats', full_name='CoreML.Specification.MILSpec.TensorValue.floats', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ints', full_name='CoreML.Specification.MILSpec.TensorValue.ints', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bools', full_name='CoreML.Specification.MILSpec.TensorValue.bools', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strings', full_name='CoreML.Specification.MILSpec.TensorValue.strings', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longInts', full_name='CoreML.Specification.MILSpec.TensorValue.longInts', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubles', full_name='CoreML.Specification.MILSpec.TensorValue.doubles', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bytes', full_name='CoreML.Specification.MILSpec.TensorValue.bytes', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TENSORVALUE_REPEATEDFLOATS, _TENSORVALUE_REPEATEDDOUBLES, _TENSORVALUE_REPEATEDINTS, _TENSORVALUE_REPEATEDLONGINTS, _TENSORVALUE_REPEATEDBOOLS, 
_TENSORVALUE_REPEATEDSTRINGS, _TENSORVALUE_REPEATEDBYTES, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.TensorValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3654, + serialized_end=4466, +) + + +_TUPLEVALUE = _descriptor.Descriptor( + name='TupleValue', + full_name='CoreML.Specification.MILSpec.TupleValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TupleValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4468, + serialized_end=4533, +) + + +_LISTVALUE = _descriptor.Descriptor( + name='ListValue', + full_name='CoreML.Specification.MILSpec.ListValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.ListValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4535, + serialized_end=4599, +) + + +_DICTIONARYVALUE_KEYVALUEPAIR = _descriptor.Descriptor( + name='KeyValuePair', + full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.key', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4697, + serialized_end=4813, +) + +_DICTIONARYVALUE = _descriptor.Descriptor( + name='DictionaryValue', + full_name='CoreML.Specification.MILSpec.DictionaryValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.DictionaryValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + 
nested_types=[_DICTIONARYVALUE_KEYVALUEPAIR, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4602, + serialized_end=4813, +) + +_PROGRAM_FUNCTIONSENTRY.fields_by_name['value'].message_type = _FUNCTION +_PROGRAM_FUNCTIONSENTRY.containing_type = _PROGRAM +_PROGRAM_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_PROGRAM_ATTRIBUTESENTRY.containing_type = _PROGRAM +_PROGRAM.fields_by_name['functions'].message_type = _PROGRAM_FUNCTIONSENTRY +_PROGRAM.fields_by_name['attributes'].message_type = _PROGRAM_ATTRIBUTESENTRY +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.fields_by_name['value'].message_type = _BLOCK +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.containing_type = _FUNCTION +_FUNCTION_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_FUNCTION_ATTRIBUTESENTRY.containing_type = _FUNCTION +_FUNCTION.fields_by_name['inputs'].message_type = _NAMEDVALUETYPE +_FUNCTION.fields_by_name['block_specializations'].message_type = _FUNCTION_BLOCKSPECIALIZATIONSENTRY +_FUNCTION.fields_by_name['attributes'].message_type = _FUNCTION_ATTRIBUTESENTRY +_BLOCK_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_BLOCK_ATTRIBUTESENTRY.containing_type = _BLOCK +_BLOCK.fields_by_name['inputs'].message_type = _NAMEDVALUETYPE +_BLOCK.fields_by_name['operations'].message_type = _OPERATION +_BLOCK.fields_by_name['attributes'].message_type = _BLOCK_ATTRIBUTESENTRY +_ARGUMENT_BINDING.fields_by_name['value'].message_type = _VALUE +_ARGUMENT_BINDING.containing_type = _ARGUMENT +_ARGUMENT_BINDING.oneofs_by_name['binding'].fields.append( + _ARGUMENT_BINDING.fields_by_name['name']) +_ARGUMENT_BINDING.fields_by_name['name'].containing_oneof = _ARGUMENT_BINDING.oneofs_by_name['binding'] +_ARGUMENT_BINDING.oneofs_by_name['binding'].fields.append( + _ARGUMENT_BINDING.fields_by_name['value']) +_ARGUMENT_BINDING.fields_by_name['value'].containing_oneof = _ARGUMENT_BINDING.oneofs_by_name['binding'] +_ARGUMENT.fields_by_name['arguments'].message_type = _ARGUMENT_BINDING +_OPERATION_INPUTSENTRY.fields_by_name['value'].message_type = _ARGUMENT +_OPERATION_INPUTSENTRY.containing_type = _OPERATION +_OPERATION_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_OPERATION_ATTRIBUTESENTRY.containing_type = _OPERATION +_OPERATION.fields_by_name['inputs'].message_type = _OPERATION_INPUTSENTRY +_OPERATION.fields_by_name['outputs'].message_type = _NAMEDVALUETYPE +_OPERATION.fields_by_name['blocks'].message_type = _BLOCK +_OPERATION.fields_by_name['attributes'].message_type = _OPERATION_ATTRIBUTESENTRY +_NAMEDVALUETYPE.fields_by_name['type'].message_type = _VALUETYPE +_VALUETYPE.fields_by_name['tensorType'].message_type = _TENSORTYPE +_VALUETYPE.fields_by_name['listType'].message_type = _LISTTYPE +_VALUETYPE.fields_by_name['tupleType'].message_type = _TUPLETYPE +_VALUETYPE.fields_by_name['dictionaryType'].message_type = _DICTIONARYTYPE +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['tensorType']) +_VALUETYPE.fields_by_name['tensorType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['listType']) +_VALUETYPE.fields_by_name['listType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['tupleType']) +_VALUETYPE.fields_by_name['tupleType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] 
+_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['dictionaryType']) +_VALUETYPE.fields_by_name['dictionaryType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_TENSORTYPE_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_TENSORTYPE_ATTRIBUTESENTRY.containing_type = _TENSORTYPE +_TENSORTYPE.fields_by_name['dataType'].enum_type = _DATATYPE +_TENSORTYPE.fields_by_name['dimensions'].message_type = _DIMENSION +_TENSORTYPE.fields_by_name['attributes'].message_type = _TENSORTYPE_ATTRIBUTESENTRY +_TUPLETYPE.fields_by_name['types'].message_type = _VALUETYPE +_LISTTYPE.fields_by_name['type'].message_type = _VALUETYPE +_LISTTYPE.fields_by_name['length'].message_type = _DIMENSION +_DICTIONARYTYPE.fields_by_name['keyType'].message_type = _VALUETYPE +_DICTIONARYTYPE.fields_by_name['valueType'].message_type = _VALUETYPE +_DIMENSION_CONSTANTDIMENSION.containing_type = _DIMENSION +_DIMENSION_UNKNOWNDIMENSION.containing_type = _DIMENSION +_DIMENSION.fields_by_name['constant'].message_type = _DIMENSION_CONSTANTDIMENSION +_DIMENSION.fields_by_name['unknown'].message_type = _DIMENSION_UNKNOWNDIMENSION +_DIMENSION.oneofs_by_name['dimension'].fields.append( + _DIMENSION.fields_by_name['constant']) +_DIMENSION.fields_by_name['constant'].containing_oneof = _DIMENSION.oneofs_by_name['dimension'] +_DIMENSION.oneofs_by_name['dimension'].fields.append( + _DIMENSION.fields_by_name['unknown']) +_DIMENSION.fields_by_name['unknown'].containing_oneof = _DIMENSION.oneofs_by_name['dimension'] +_VALUE_IMMEDIATEVALUE.fields_by_name['tensor'].message_type = _TENSORVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['tuple'].message_type = _TUPLEVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['list'].message_type = _LISTVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['dictionary'].message_type = _DICTIONARYVALUE +_VALUE_IMMEDIATEVALUE.containing_type = _VALUE +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['tensor']) +_VALUE_IMMEDIATEVALUE.fields_by_name['tensor'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['tuple']) +_VALUE_IMMEDIATEVALUE.fields_by_name['tuple'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['list']) +_VALUE_IMMEDIATEVALUE.fields_by_name['list'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['dictionary']) +_VALUE_IMMEDIATEVALUE.fields_by_name['dictionary'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_BLOBFILEVALUE.containing_type = _VALUE +_VALUE.fields_by_name['type'].message_type = _VALUETYPE +_VALUE.fields_by_name['immediateValue'].message_type = _VALUE_IMMEDIATEVALUE +_VALUE.fields_by_name['blobFileValue'].message_type = _VALUE_BLOBFILEVALUE +_VALUE.oneofs_by_name['value'].fields.append( + _VALUE.fields_by_name['immediateValue']) +_VALUE.fields_by_name['immediateValue'].containing_oneof = _VALUE.oneofs_by_name['value'] +_VALUE.oneofs_by_name['value'].fields.append( + _VALUE.fields_by_name['blobFileValue']) +_VALUE.fields_by_name['blobFileValue'].containing_oneof = _VALUE.oneofs_by_name['value'] +_TENSORVALUE_REPEATEDFLOATS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDDOUBLES.containing_type = _TENSORVALUE 
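+# (hand-written annotation, not protoc output) The statements below attach the
+# remaining RepeatedX storage messages to TensorValue, wire each oneof member
+# to its containing oneof, and register every top-level message and the
+# DataType enum on the file descriptor.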
+_TENSORVALUE_REPEATEDINTS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDLONGINTS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDBOOLS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDSTRINGS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDBYTES.containing_type = _TENSORVALUE +_TENSORVALUE.fields_by_name['floats'].message_type = _TENSORVALUE_REPEATEDFLOATS +_TENSORVALUE.fields_by_name['ints'].message_type = _TENSORVALUE_REPEATEDINTS +_TENSORVALUE.fields_by_name['bools'].message_type = _TENSORVALUE_REPEATEDBOOLS +_TENSORVALUE.fields_by_name['strings'].message_type = _TENSORVALUE_REPEATEDSTRINGS +_TENSORVALUE.fields_by_name['longInts'].message_type = _TENSORVALUE_REPEATEDLONGINTS +_TENSORVALUE.fields_by_name['doubles'].message_type = _TENSORVALUE_REPEATEDDOUBLES +_TENSORVALUE.fields_by_name['bytes'].message_type = _TENSORVALUE_REPEATEDBYTES +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['floats']) +_TENSORVALUE.fields_by_name['floats'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['ints']) +_TENSORVALUE.fields_by_name['ints'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['bools']) +_TENSORVALUE.fields_by_name['bools'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['strings']) +_TENSORVALUE.fields_by_name['strings'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['longInts']) +_TENSORVALUE.fields_by_name['longInts'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['doubles']) +_TENSORVALUE.fields_by_name['doubles'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['bytes']) +_TENSORVALUE.fields_by_name['bytes'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TUPLEVALUE.fields_by_name['values'].message_type = _VALUE +_LISTVALUE.fields_by_name['values'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.fields_by_name['key'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.fields_by_name['value'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.containing_type = _DICTIONARYVALUE +_DICTIONARYVALUE.fields_by_name['values'].message_type = _DICTIONARYVALUE_KEYVALUEPAIR +DESCRIPTOR.message_types_by_name['Program'] = _PROGRAM +DESCRIPTOR.message_types_by_name['Function'] = _FUNCTION +DESCRIPTOR.message_types_by_name['Block'] = _BLOCK +DESCRIPTOR.message_types_by_name['Argument'] = _ARGUMENT +DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION +DESCRIPTOR.message_types_by_name['NamedValueType'] = _NAMEDVALUETYPE +DESCRIPTOR.message_types_by_name['ValueType'] = _VALUETYPE +DESCRIPTOR.message_types_by_name['TensorType'] = _TENSORTYPE +DESCRIPTOR.message_types_by_name['TupleType'] = _TUPLETYPE +DESCRIPTOR.message_types_by_name['ListType'] = _LISTTYPE +DESCRIPTOR.message_types_by_name['DictionaryType'] = _DICTIONARYTYPE +DESCRIPTOR.message_types_by_name['Dimension'] = _DIMENSION +DESCRIPTOR.message_types_by_name['Value'] = _VALUE +DESCRIPTOR.message_types_by_name['TensorValue'] = _TENSORVALUE +DESCRIPTOR.message_types_by_name['TupleValue'] = _TUPLEVALUE 
+DESCRIPTOR.message_types_by_name['ListValue'] = _LISTVALUE +DESCRIPTOR.message_types_by_name['DictionaryValue'] = _DICTIONARYVALUE +DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Program = _reflection.GeneratedProtocolMessageType('Program', (_message.Message,), dict( + + FunctionsEntry = _reflection.GeneratedProtocolMessageType('FunctionsEntry', (_message.Message,), dict( + DESCRIPTOR = _PROGRAM_FUNCTIONSENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program.FunctionsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _PROGRAM_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program.AttributesEntry) + )) + , + DESCRIPTOR = _PROGRAM, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program) + )) +_sym_db.RegisterMessage(Program) +_sym_db.RegisterMessage(Program.FunctionsEntry) +_sym_db.RegisterMessage(Program.AttributesEntry) + +Function = _reflection.GeneratedProtocolMessageType('Function', (_message.Message,), dict( + + BlockSpecializationsEntry = _reflection.GeneratedProtocolMessageType('BlockSpecializationsEntry', (_message.Message,), dict( + DESCRIPTOR = _FUNCTION_BLOCKSPECIALIZATIONSENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _FUNCTION_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function.AttributesEntry) + )) + , + DESCRIPTOR = _FUNCTION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function) + )) +_sym_db.RegisterMessage(Function) +_sym_db.RegisterMessage(Function.BlockSpecializationsEntry) +_sym_db.RegisterMessage(Function.AttributesEntry) + +Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), dict( + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _BLOCK_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Block.AttributesEntry) + )) + , + DESCRIPTOR = _BLOCK, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Block) + )) +_sym_db.RegisterMessage(Block) +_sym_db.RegisterMessage(Block.AttributesEntry) + +Argument = _reflection.GeneratedProtocolMessageType('Argument', (_message.Message,), dict( + + Binding = _reflection.GeneratedProtocolMessageType('Binding', (_message.Message,), dict( + DESCRIPTOR = _ARGUMENT_BINDING, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Argument.Binding) + )) + , + DESCRIPTOR = _ARGUMENT, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Argument) + )) +_sym_db.RegisterMessage(Argument) +_sym_db.RegisterMessage(Argument.Binding) + +Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict( + + InputsEntry = _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict( + DESCRIPTOR = _OPERATION_INPUTSENTRY, + __module__ = 'MIL_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation.InputsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _OPERATION_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation.AttributesEntry) + )) + , + DESCRIPTOR = _OPERATION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation) + )) +_sym_db.RegisterMessage(Operation) +_sym_db.RegisterMessage(Operation.InputsEntry) +_sym_db.RegisterMessage(Operation.AttributesEntry) + +NamedValueType = _reflection.GeneratedProtocolMessageType('NamedValueType', (_message.Message,), dict( + DESCRIPTOR = _NAMEDVALUETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.NamedValueType) + )) +_sym_db.RegisterMessage(NamedValueType) + +ValueType = _reflection.GeneratedProtocolMessageType('ValueType', (_message.Message,), dict( + DESCRIPTOR = _VALUETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ValueType) + )) +_sym_db.RegisterMessage(ValueType) + +TensorType = _reflection.GeneratedProtocolMessageType('TensorType', (_message.Message,), dict( + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _TENSORTYPE_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorType.AttributesEntry) + )) + , + DESCRIPTOR = _TENSORTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorType) + )) +_sym_db.RegisterMessage(TensorType) +_sym_db.RegisterMessage(TensorType.AttributesEntry) + +TupleType = _reflection.GeneratedProtocolMessageType('TupleType', (_message.Message,), dict( + DESCRIPTOR = _TUPLETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TupleType) + )) +_sym_db.RegisterMessage(TupleType) + +ListType = _reflection.GeneratedProtocolMessageType('ListType', (_message.Message,), dict( + DESCRIPTOR = _LISTTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ListType) + )) +_sym_db.RegisterMessage(ListType) + +DictionaryType = _reflection.GeneratedProtocolMessageType('DictionaryType', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryType) + )) +_sym_db.RegisterMessage(DictionaryType) + +Dimension = _reflection.GeneratedProtocolMessageType('Dimension', (_message.Message,), dict( + + ConstantDimension = _reflection.GeneratedProtocolMessageType('ConstantDimension', (_message.Message,), dict( + DESCRIPTOR = _DIMENSION_CONSTANTDIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension.ConstantDimension) + )) + , + + UnknownDimension = _reflection.GeneratedProtocolMessageType('UnknownDimension', (_message.Message,), dict( + DESCRIPTOR = _DIMENSION_UNKNOWNDIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension.UnknownDimension) + )) + , + DESCRIPTOR = _DIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension) + )) +_sym_db.RegisterMessage(Dimension) 
+_sym_db.RegisterMessage(Dimension.ConstantDimension) +_sym_db.RegisterMessage(Dimension.UnknownDimension) + +Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict( + + ImmediateValue = _reflection.GeneratedProtocolMessageType('ImmediateValue', (_message.Message,), dict( + DESCRIPTOR = _VALUE_IMMEDIATEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value.ImmediateValue) + )) + , + + BlobFileValue = _reflection.GeneratedProtocolMessageType('BlobFileValue', (_message.Message,), dict( + DESCRIPTOR = _VALUE_BLOBFILEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value.BlobFileValue) + )) + , + DESCRIPTOR = _VALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value) + )) +_sym_db.RegisterMessage(Value) +_sym_db.RegisterMessage(Value.ImmediateValue) +_sym_db.RegisterMessage(Value.BlobFileValue) + +TensorValue = _reflection.GeneratedProtocolMessageType('TensorValue', (_message.Message,), dict( + + RepeatedFloats = _reflection.GeneratedProtocolMessageType('RepeatedFloats', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDFLOATS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedFloats) + )) + , + + RepeatedDoubles = _reflection.GeneratedProtocolMessageType('RepeatedDoubles', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDDOUBLES, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles) + )) + , + + RepeatedInts = _reflection.GeneratedProtocolMessageType('RepeatedInts', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDINTS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedInts) + )) + , + + RepeatedLongInts = _reflection.GeneratedProtocolMessageType('RepeatedLongInts', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDLONGINTS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts) + )) + , + + RepeatedBools = _reflection.GeneratedProtocolMessageType('RepeatedBools', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDBOOLS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedBools) + )) + , + + RepeatedStrings = _reflection.GeneratedProtocolMessageType('RepeatedStrings', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDSTRINGS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedStrings) + )) + , + + RepeatedBytes = _reflection.GeneratedProtocolMessageType('RepeatedBytes', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDBYTES, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedBytes) + )) + , + DESCRIPTOR = _TENSORVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue) + )) +_sym_db.RegisterMessage(TensorValue) +_sym_db.RegisterMessage(TensorValue.RepeatedFloats) +_sym_db.RegisterMessage(TensorValue.RepeatedDoubles) +_sym_db.RegisterMessage(TensorValue.RepeatedInts) +_sym_db.RegisterMessage(TensorValue.RepeatedLongInts) +_sym_db.RegisterMessage(TensorValue.RepeatedBools) 
+_sym_db.RegisterMessage(TensorValue.RepeatedStrings) +_sym_db.RegisterMessage(TensorValue.RepeatedBytes) + +TupleValue = _reflection.GeneratedProtocolMessageType('TupleValue', (_message.Message,), dict( + DESCRIPTOR = _TUPLEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TupleValue) + )) +_sym_db.RegisterMessage(TupleValue) + +ListValue = _reflection.GeneratedProtocolMessageType('ListValue', (_message.Message,), dict( + DESCRIPTOR = _LISTVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ListValue) + )) +_sym_db.RegisterMessage(ListValue) + +DictionaryValue = _reflection.GeneratedProtocolMessageType('DictionaryValue', (_message.Message,), dict( + + KeyValuePair = _reflection.GeneratedProtocolMessageType('KeyValuePair', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYVALUE_KEYVALUEPAIR, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair) + )) + , + DESCRIPTOR = _DICTIONARYVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryValue) + )) +_sym_db.RegisterMessage(DictionaryValue) +_sym_db.RegisterMessage(DictionaryValue.KeyValuePair) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_PROGRAM_FUNCTIONSENTRY.has_options = True +_PROGRAM_FUNCTIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_PROGRAM_ATTRIBUTESENTRY.has_options = True +_PROGRAM_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.has_options = True +_FUNCTION_BLOCKSPECIALIZATIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_FUNCTION_ATTRIBUTESENTRY.has_options = True +_FUNCTION_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_BLOCK_ATTRIBUTESENTRY.has_options = True +_BLOCK_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_OPERATION_INPUTSENTRY.has_options = True +_OPERATION_INPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_OPERATION_ATTRIBUTESENTRY.has_options = True +_OPERATION_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TENSORTYPE_ATTRIBUTESENTRY.has_options = True +_TENSORTYPE_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TENSORVALUE_REPEATEDFLOATS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDFLOATS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDDOUBLES.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDDOUBLES.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDINTS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDINTS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDLONGINTS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDLONGINTS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) 
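+# NOTE (editorial annotation, not protoc output): the serialized option bytes
+# in this block are tiny encoded option messages: _b('8\001') sets
+# MessageOptions.map_entry=True on the generated map-entry messages, and
+# _b('\020\001') sets FieldOptions.packed=True on the repeated numeric and
+# bool 'values' fields (strings and bytes are not packable, so they get no
+# such option).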
+_TENSORVALUE_REPEATEDBOOLS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDBOOLS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py new file mode 100644 index 00000000..86743064 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py @@ -0,0 +1,1153 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Model.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import VisionFeaturePrint_pb2 as VisionFeaturePrint__pb2 +from . import AudioFeaturePrint_pb2 as AudioFeaturePrint__pb2 +from . import TextClassifier_pb2 as TextClassifier__pb2 +try: + DataStructures__pb2 = TextClassifier__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = TextClassifier__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = TextClassifier__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = TextClassifier__pb2.FeatureTypes_pb2 +from . import WordTagger_pb2 as WordTagger__pb2 +try: + DataStructures__pb2 = WordTagger__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = WordTagger__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = WordTagger__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = WordTagger__pb2.FeatureTypes_pb2 +from . import Gazetteer_pb2 as Gazetteer__pb2 +try: + DataStructures__pb2 = Gazetteer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Gazetteer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Gazetteer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Gazetteer__pb2.FeatureTypes_pb2 +from . import WordEmbedding_pb2 as WordEmbedding__pb2 +try: + DataStructures__pb2 = WordEmbedding__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = WordEmbedding__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = WordEmbedding__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = WordEmbedding__pb2.FeatureTypes_pb2 +from . import ArrayFeatureExtractor_pb2 as ArrayFeatureExtractor__pb2 +from . import BayesianProbitRegressor_pb2 as BayesianProbitRegressor__pb2 +from . import CategoricalMapping_pb2 as CategoricalMapping__pb2 +try: + DataStructures__pb2 = CategoricalMapping__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = CategoricalMapping__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = CategoricalMapping__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = CategoricalMapping__pb2.FeatureTypes_pb2 +from . import CustomModel_pb2 as CustomModel__pb2 +from . 
import DictVectorizer_pb2 as DictVectorizer__pb2 +try: + DataStructures__pb2 = DictVectorizer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = DictVectorizer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = DictVectorizer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DictVectorizer__pb2.FeatureTypes_pb2 +from . import FeatureTypes_pb2 as FeatureTypes__pb2 +from . import FeatureVectorizer_pb2 as FeatureVectorizer__pb2 +from . import GLMRegressor_pb2 as GLMRegressor__pb2 +from . import GLMClassifier_pb2 as GLMClassifier__pb2 +try: + DataStructures__pb2 = GLMClassifier__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = GLMClassifier__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = GLMClassifier__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = GLMClassifier__pb2.FeatureTypes_pb2 +from . import NearestNeighbors_pb2 as NearestNeighbors__pb2 +try: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes_pb2 +try: + Parameters__pb2 = NearestNeighbors__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = NearestNeighbors__pb2.Parameters_pb2 +try: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes_pb2 +from . import Identity_pb2 as Identity__pb2 +from . import Imputer_pb2 as Imputer__pb2 +try: + DataStructures__pb2 = Imputer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Imputer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Imputer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Imputer__pb2.FeatureTypes_pb2 +from . import MIL_pb2 as MIL__pb2 +from . import NeuralNetwork_pb2 as NeuralNetwork__pb2 +try: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes_pb2 +try: + Parameters__pb2 = NeuralNetwork__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = NeuralNetwork__pb2.Parameters_pb2 +try: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes_pb2 +from . import Normalizer_pb2 as Normalizer__pb2 +from . import OneHotEncoder_pb2 as OneHotEncoder__pb2 +try: + DataStructures__pb2 = OneHotEncoder__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = OneHotEncoder__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = OneHotEncoder__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = OneHotEncoder__pb2.FeatureTypes_pb2 +from . import Scaler_pb2 as Scaler__pb2 +from . 
import NonMaximumSuppression_pb2 as NonMaximumSuppression__pb2 +try: + DataStructures__pb2 = NonMaximumSuppression__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NonMaximumSuppression__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NonMaximumSuppression__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NonMaximumSuppression__pb2.FeatureTypes_pb2 +from . import SVM_pb2 as SVM__pb2 +try: + DataStructures__pb2 = SVM__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = SVM__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = SVM__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = SVM__pb2.FeatureTypes_pb2 +from . import TreeEnsemble_pb2 as TreeEnsemble__pb2 +try: + DataStructures__pb2 = TreeEnsemble__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = TreeEnsemble__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = TreeEnsemble__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = TreeEnsemble__pb2.FeatureTypes_pb2 +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 +from . import ItemSimilarityRecommender_pb2 as ItemSimilarityRecommender__pb2 +try: + DataStructures__pb2 = ItemSimilarityRecommender__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = ItemSimilarityRecommender__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = ItemSimilarityRecommender__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = ItemSimilarityRecommender__pb2.FeatureTypes_pb2 +from . import SoundAnalysisPreprocessing_pb2 as SoundAnalysisPreprocessing__pb2 +from . import LinkedModel_pb2 as LinkedModel__pb2 +try: + Parameters__pb2 = LinkedModel__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = LinkedModel__pb2.Parameters_pb2 +try: + DataStructures__pb2 = LinkedModel__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = LinkedModel__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = LinkedModel__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = LinkedModel__pb2.FeatureTypes_pb2 +from . 
import ClassConfidenceThresholding_pb2 as ClassConfidenceThresholding__pb2 +try: + DataStructures__pb2 = ClassConfidenceThresholding__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = ClassConfidenceThresholding__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = ClassConfidenceThresholding__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = ClassConfidenceThresholding__pb2.FeatureTypes_pb2 + +from .VisionFeaturePrint_pb2 import * +from .AudioFeaturePrint_pb2 import * +from .TextClassifier_pb2 import * +from .WordTagger_pb2 import * +from .Gazetteer_pb2 import * +from .WordEmbedding_pb2 import * +from .ArrayFeatureExtractor_pb2 import * +from .BayesianProbitRegressor_pb2 import * +from .CategoricalMapping_pb2 import * +from .CustomModel_pb2 import * +from .DictVectorizer_pb2 import * +from .FeatureTypes_pb2 import * +from .FeatureVectorizer_pb2 import * +from .GLMRegressor_pb2 import * +from .GLMClassifier_pb2 import * +from .NearestNeighbors_pb2 import * +from .Identity_pb2 import * +from .Imputer_pb2 import * +from .MIL_pb2 import * +from .NeuralNetwork_pb2 import * +from .Normalizer_pb2 import * +from .OneHotEncoder_pb2 import * +from .Scaler_pb2 import * +from .NonMaximumSuppression_pb2 import * +from .SVM_pb2 import * +from .TreeEnsemble_pb2 import * +from .Parameters_pb2 import * +from .ItemSimilarityRecommender_pb2 import * +from .SoundAnalysisPreprocessing_pb2 import * +from .LinkedModel_pb2 import * +from .ClassConfidenceThresholding_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Model.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0bModel.proto\x12\x14\x43oreML.Specification\x1a\x18VisionFeaturePrint.proto\x1a\x17\x41udioFeaturePrint.proto\x1a\x14TextClassifier.proto\x1a\x10WordTagger.proto\x1a\x0fGazetteer.proto\x1a\x13WordEmbedding.proto\x1a\x1b\x41rrayFeatureExtractor.proto\x1a\x1d\x42\x61yesianProbitRegressor.proto\x1a\x18\x43\x61tegoricalMapping.proto\x1a\x11\x43ustomModel.proto\x1a\x14\x44ictVectorizer.proto\x1a\x12\x46\x65\x61tureTypes.proto\x1a\x17\x46\x65\x61tureVectorizer.proto\x1a\x12GLMRegressor.proto\x1a\x13GLMClassifier.proto\x1a\x16NearestNeighbors.proto\x1a\x0eIdentity.proto\x1a\rImputer.proto\x1a\tMIL.proto\x1a\x13NeuralNetwork.proto\x1a\x10Normalizer.proto\x1a\x13OneHotEncoder.proto\x1a\x0cScaler.proto\x1a\x1bNonMaximumSuppression.proto\x1a\tSVM.proto\x1a\x12TreeEnsemble.proto\x1a\x10Parameters.proto\x1a\x1fItemSimilarityRecommender.proto\x1a SoundAnalysisPreprocessing.proto\x1a\x11LinkedModel.proto\x1a!ClassConfidenceThresholding.proto\"F\n\x08Pipeline\x12+\n\x06models\x18\x01 \x03(\x0b\x32\x1b.CoreML.Specification.Model\x12\r\n\x05names\x18\x02 \x03(\t\"F\n\x12PipelineClassifier\x12\x30\n\x08pipeline\x18\x01 \x01(\x0b\x32\x1e.CoreML.Specification.Pipeline\"E\n\x11PipelineRegressor\x12\x30\n\x08pipeline\x18\x01 \x01(\x0b\x32\x1e.CoreML.Specification.Pipeline\"m\n\x12\x46\x65\x61tureDescription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10shortDescription\x18\x02 \x01(\t\x12/\n\x04type\x18\x03 \x01(\x0b\x32!.CoreML.Specification.FeatureType\"\xd6\x01\n\x08Metadata\x12\x18\n\x10shortDescription\x18\x01 \x01(\t\x12\x15\n\rversionString\x18\x02 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x0f\n\x07license\x18\x04 \x01(\t\x12\x44\n\x0buserDefined\x18\x64 \x03(\x0b\x32/.CoreML.Specification.Metadata.UserDefinedEntry\x1a\x32\n\x10UserDefinedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\xba\x02\n\x10ModelDescription\x12\x37\n\x05input\x18\x01 \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x38\n\x06output\x18\n \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x1c\n\x14predictedFeatureName\x18\x0b \x01(\t\x12\"\n\x1apredictedProbabilitiesName\x18\x0c \x01(\t\x12?\n\rtrainingInput\x18\x32 \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x30\n\x08metadata\x18\x64 \x01(\x0b\x32\x1e.CoreML.Specification.Metadata\"4\n\x0fSerializedModel\x12\x12\n\nidentifier\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\x0c\"\xf1\x15\n\x05Model\x12\x1c\n\x14specificationVersion\x18\x01 \x01(\x05\x12;\n\x0b\x64\x65scription\x18\x02 \x01(\x0b\x32&.CoreML.Specification.ModelDescription\x12\x13\n\x0bisUpdatable\x18\n \x01(\x08\x12G\n\x12pipelineClassifier\x18\xc8\x01 \x01(\x0b\x32(.CoreML.Specification.PipelineClassifierH\x00\x12\x45\n\x11pipelineRegressor\x18\xc9\x01 \x01(\x0b\x32\'.CoreML.Specification.PipelineRegressorH\x00\x12\x33\n\x08pipeline\x18\xca\x01 \x01(\x0b\x32\x1e.CoreML.Specification.PipelineH\x00\x12;\n\x0cglmRegressor\x18\xac\x02 \x01(\x0b\x32\".CoreML.Specification.GLMRegressorH\x00\x12O\n\x16supportVectorRegressor\x18\xad\x02 \x01(\x0b\x32,.CoreML.Specification.SupportVectorRegressorH\x00\x12M\n\x15treeEnsembleRegressor\x18\xae\x02 \x01(\x0b\x32+.CoreML.Specification.TreeEnsembleRegressorH\x00\x12O\n\x16neuralNetworkRegressor\x18\xaf\x02 \x01(\x0b\x32,.CoreML.Specification.NeuralNetworkRegressorH\x00\x12Q\n\x17\x62\x61yesianProbitRegressor\x18\xb0\x02 \x01(\x0b\x32-.CoreML.Specification.BayesianProbitRegressorH\x00\x12=\n\rglmClassifier\x18\x90\x03 \x01(\x0b\x32#.CoreML.Specification.GLMClassifierH\x00\x12Q\n\x17supportVectorClassifier\x18\x91\x03 \x01(\x0b\x32-.CoreML.Specification.SupportVectorClassifierH\x00\x12O\n\x16treeEnsembleClassifier\x18\x92\x03 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleClassifierH\x00\x12Q\n\x17neuralNetworkClassifier\x18\x93\x03 \x01(\x0b\x32-.CoreML.Specification.NeuralNetworkClassifierH\x00\x12Y\n\x1bkNearestNeighborsClassifier\x18\x94\x03 \x01(\x0b\x32\x31.CoreML.Specification.KNearestNeighborsClassifierH\x00\x12=\n\rneuralNetwork\x18\xf4\x03 \x01(\x0b\x32#.CoreML.Specification.NeuralNetworkH\x00\x12U\n\x19itemSimilarityRecommender\x18\xf5\x03 \x01(\x0b\x32/.CoreML.Specification.ItemSimilarityRecommenderH\x00\x12;\n\tmlProgram\x18\xf6\x03 \x01(\x0b\x32%.CoreML.Specification.MILSpec.ProgramH\x00\x12\x39\n\x0b\x63ustomModel\x18\xab\x04 \x01(\x0b\x32!.CoreML.Specification.CustomModelH\x00\x12\x39\n\x0blinkedModel\x18\xac\x04 \x01(\x0b\x32!.CoreML.Specification.LinkedModelH\x00\x12Y\n\x1b\x63lassConfidenceThresholding\x18\xb0\x04 \x01(\x0b\x32\x31.CoreML.Specification.ClassConfidenceThresholdingH\x00\x12=\n\roneHotEncoder\x18\xd8\x04 \x01(\x0b\x32#.CoreML.Specification.OneHotEncoderH\x00\x12\x31\n\x07imputer\x18\xd9\x04 \x01(\x0b\x32\x1d.CoreML.Specification.ImputerH\x00\x12\x45\n\x11\x66\x65\x61tureVectorizer\x18\xda\x04 \x01(\x0b\x32\'.CoreML.Specification.FeatureVectorizerH\x00\x12?\n\x0e\x64ictVectorizer\x18\xdb\x04 \x01(\x0b\x32$.CoreML.Specification.DictVectorizerH\x00\x12/\n\x06scaler\x18\xdc\x04 \x01(\x0b\x32\x1c.CoreML.Specification.ScalerH\x00\x12G\n\x12\x63\x61tegoricalMapping\x18\xde\x04 \x01(\x0b\x32(.CoreML.Specification.CategoricalMappingH\x00\x12\x37\n\nnormalizer\x18\xdf\x04 \x01(\x0b\x32 .CoreML.Specification.NormalizerH\x00\x12M\n\x15\x61rrayFeatureExtractor\x18\xe1\x04 
\x01(\x0b\x32+.CoreML.Specification.ArrayFeatureExtractorH\x00\x12M\n\x15nonMaximumSuppression\x18\xe2\x04 \x01(\x0b\x32+.CoreML.Specification.NonMaximumSuppressionH\x00\x12\x33\n\x08identity\x18\x84\x07 \x01(\x0b\x32\x1e.CoreML.Specification.IdentityH\x00\x12L\n\x0etextClassifier\x18\xd0\x0f \x01(\x0b\x32\x31.CoreML.Specification.CoreMLModels.TextClassifierH\x00\x12\x44\n\nwordTagger\x18\xd1\x0f \x01(\x0b\x32-.CoreML.Specification.CoreMLModels.WordTaggerH\x00\x12T\n\x12visionFeaturePrint\x18\xd2\x0f \x01(\x0b\x32\x35.CoreML.Specification.CoreMLModels.VisionFeaturePrintH\x00\x12\x64\n\x1asoundAnalysisPreprocessing\x18\xd3\x0f \x01(\x0b\x32=.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessingH\x00\x12\x42\n\tgazetteer\x18\xd4\x0f \x01(\x0b\x32,.CoreML.Specification.CoreMLModels.GazetteerH\x00\x12J\n\rwordEmbedding\x18\xd5\x0f \x01(\x0b\x32\x30.CoreML.Specification.CoreMLModels.WordEmbeddingH\x00\x12R\n\x11\x61udioFeaturePrint\x18\xd6\x0f \x01(\x0b\x32\x34.CoreML.Specification.CoreMLModels.AudioFeaturePrintH\x00\x12\x41\n\x0fserializedModel\x18\xb8\x17 \x01(\x0b\x32%.CoreML.Specification.SerializedModelH\x00\x42\x06\n\x04TypeB\x02H\x03P\x00P\x01P\x02P\x03P\x04P\x05P\x06P\x07P\x08P\tP\nP\x0bP\x0cP\rP\x0eP\x0fP\x10P\x11P\x12P\x13P\x14P\x15P\x16P\x17P\x18P\x19P\x1aP\x1bP\x1cP\x1dP\x1e\x62\x06proto3') + , + dependencies=[VisionFeaturePrint__pb2.DESCRIPTOR,AudioFeaturePrint__pb2.DESCRIPTOR,TextClassifier__pb2.DESCRIPTOR,WordTagger__pb2.DESCRIPTOR,Gazetteer__pb2.DESCRIPTOR,WordEmbedding__pb2.DESCRIPTOR,ArrayFeatureExtractor__pb2.DESCRIPTOR,BayesianProbitRegressor__pb2.DESCRIPTOR,CategoricalMapping__pb2.DESCRIPTOR,CustomModel__pb2.DESCRIPTOR,DictVectorizer__pb2.DESCRIPTOR,FeatureTypes__pb2.DESCRIPTOR,FeatureVectorizer__pb2.DESCRIPTOR,GLMRegressor__pb2.DESCRIPTOR,GLMClassifier__pb2.DESCRIPTOR,NearestNeighbors__pb2.DESCRIPTOR,Identity__pb2.DESCRIPTOR,Imputer__pb2.DESCRIPTOR,MIL__pb2.DESCRIPTOR,NeuralNetwork__pb2.DESCRIPTOR,Normalizer__pb2.DESCRIPTOR,OneHotEncoder__pb2.DESCRIPTOR,Scaler__pb2.DESCRIPTOR,NonMaximumSuppression__pb2.DESCRIPTOR,SVM__pb2.DESCRIPTOR,TreeEnsemble__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,ItemSimilarityRecommender__pb2.DESCRIPTOR,SoundAnalysisPreprocessing__pb2.DESCRIPTOR,LinkedModel__pb2.DESCRIPTOR,ClassConfidenceThresholding__pb2.DESCRIPTOR,], + public_dependencies=[VisionFeaturePrint__pb2.DESCRIPTOR,AudioFeaturePrint__pb2.DESCRIPTOR,TextClassifier__pb2.DESCRIPTOR,WordTagger__pb2.DESCRIPTOR,Gazetteer__pb2.DESCRIPTOR,WordEmbedding__pb2.DESCRIPTOR,ArrayFeatureExtractor__pb2.DESCRIPTOR,BayesianProbitRegressor__pb2.DESCRIPTOR,CategoricalMapping__pb2.DESCRIPTOR,CustomModel__pb2.DESCRIPTOR,DictVectorizer__pb2.DESCRIPTOR,FeatureTypes__pb2.DESCRIPTOR,FeatureVectorizer__pb2.DESCRIPTOR,GLMRegressor__pb2.DESCRIPTOR,GLMClassifier__pb2.DESCRIPTOR,NearestNeighbors__pb2.DESCRIPTOR,Identity__pb2.DESCRIPTOR,Imputer__pb2.DESCRIPTOR,MIL__pb2.DESCRIPTOR,NeuralNetwork__pb2.DESCRIPTOR,Normalizer__pb2.DESCRIPTOR,OneHotEncoder__pb2.DESCRIPTOR,Scaler__pb2.DESCRIPTOR,NonMaximumSuppression__pb2.DESCRIPTOR,SVM__pb2.DESCRIPTOR,TreeEnsemble__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,ItemSimilarityRecommender__pb2.DESCRIPTOR,SoundAnalysisPreprocessing__pb2.DESCRIPTOR,LinkedModel__pb2.DESCRIPTOR,ClassConfidenceThresholding__pb2.DESCRIPTOR,]) + + + + +_PIPELINE = _descriptor.Descriptor( + name='Pipeline', + full_name='CoreML.Specification.Pipeline', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='models', 
full_name='CoreML.Specification.Pipeline.models', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='names', full_name='CoreML.Specification.Pipeline.names', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=718, + serialized_end=788, +) + + +_PIPELINECLASSIFIER = _descriptor.Descriptor( + name='PipelineClassifier', + full_name='CoreML.Specification.PipelineClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.PipelineClassifier.pipeline', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=790, + serialized_end=860, +) + + +_PIPELINEREGRESSOR = _descriptor.Descriptor( + name='PipelineRegressor', + full_name='CoreML.Specification.PipelineRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.PipelineRegressor.pipeline', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=862, + serialized_end=931, +) + + +_FEATUREDESCRIPTION = _descriptor.Descriptor( + name='FeatureDescription', + full_name='CoreML.Specification.FeatureDescription', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.FeatureDescription.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.FeatureDescription.shortDescription', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.FeatureDescription.type', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=933, + serialized_end=1042, +) + + +_METADATA_USERDEFINEDENTRY = _descriptor.Descriptor( + name='UserDefinedEntry', + full_name='CoreML.Specification.Metadata.UserDefinedEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Metadata.UserDefinedEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Metadata.UserDefinedEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1209, + serialized_end=1259, +) + +_METADATA = _descriptor.Descriptor( + name='Metadata', + full_name='CoreML.Specification.Metadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.Metadata.shortDescription', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='versionString', full_name='CoreML.Specification.Metadata.versionString', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='author', full_name='CoreML.Specification.Metadata.author', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='license', full_name='CoreML.Specification.Metadata.license', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='userDefined', full_name='CoreML.Specification.Metadata.userDefined', index=4, + number=100, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_METADATA_USERDEFINEDENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1045, + serialized_end=1259, +) + + +_MODELDESCRIPTION = _descriptor.Descriptor( + name='ModelDescription', + 
full_name='CoreML.Specification.ModelDescription', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.ModelDescription.input', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.ModelDescription.output', index=1, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predictedFeatureName', full_name='CoreML.Specification.ModelDescription.predictedFeatureName', index=2, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predictedProbabilitiesName', full_name='CoreML.Specification.ModelDescription.predictedProbabilitiesName', index=3, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='trainingInput', full_name='CoreML.Specification.ModelDescription.trainingInput', index=4, + number=50, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='metadata', full_name='CoreML.Specification.ModelDescription.metadata', index=5, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1262, + serialized_end=1576, +) + + +_SERIALIZEDMODEL = _descriptor.Descriptor( + name='SerializedModel', + full_name='CoreML.Specification.SerializedModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='identifier', full_name='CoreML.Specification.SerializedModel.identifier', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='model', full_name='CoreML.Specification.SerializedModel.model', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1578, + serialized_end=1630, +) + + +_MODEL = _descriptor.Descriptor( + name='Model', + 
full_name='CoreML.Specification.Model', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='specificationVersion', full_name='CoreML.Specification.Model.specificationVersion', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='CoreML.Specification.Model.description', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isUpdatable', full_name='CoreML.Specification.Model.isUpdatable', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipelineClassifier', full_name='CoreML.Specification.Model.pipelineClassifier', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipelineRegressor', full_name='CoreML.Specification.Model.pipelineRegressor', index=4, + number=201, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.Model.pipeline', index=5, + number=202, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='glmRegressor', full_name='CoreML.Specification.Model.glmRegressor', index=6, + number=300, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='supportVectorRegressor', full_name='CoreML.Specification.Model.supportVectorRegressor', index=7, + number=301, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='treeEnsembleRegressor', full_name='CoreML.Specification.Model.treeEnsembleRegressor', index=8, + number=302, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetworkRegressor', full_name='CoreML.Specification.Model.neuralNetworkRegressor', index=9, + number=303, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bayesianProbitRegressor', 
full_name='CoreML.Specification.Model.bayesianProbitRegressor', index=10, + number=304, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='glmClassifier', full_name='CoreML.Specification.Model.glmClassifier', index=11, + number=400, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='supportVectorClassifier', full_name='CoreML.Specification.Model.supportVectorClassifier', index=12, + number=401, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='treeEnsembleClassifier', full_name='CoreML.Specification.Model.treeEnsembleClassifier', index=13, + number=402, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetworkClassifier', full_name='CoreML.Specification.Model.neuralNetworkClassifier', index=14, + number=403, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kNearestNeighborsClassifier', full_name='CoreML.Specification.Model.kNearestNeighborsClassifier', index=15, + number=404, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetwork', full_name='CoreML.Specification.Model.neuralNetwork', index=16, + number=500, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemSimilarityRecommender', full_name='CoreML.Specification.Model.itemSimilarityRecommender', index=17, + number=501, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mlProgram', full_name='CoreML.Specification.Model.mlProgram', index=18, + number=502, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customModel', full_name='CoreML.Specification.Model.customModel', index=19, + number=555, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linkedModel', full_name='CoreML.Specification.Model.linkedModel', index=20, + number=556, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classConfidenceThresholding', full_name='CoreML.Specification.Model.classConfidenceThresholding', index=21, + number=560, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='oneHotEncoder', full_name='CoreML.Specification.Model.oneHotEncoder', index=22, + number=600, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputer', full_name='CoreML.Specification.Model.imputer', index=23, + number=601, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='featureVectorizer', full_name='CoreML.Specification.Model.featureVectorizer', index=24, + number=602, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictVectorizer', full_name='CoreML.Specification.Model.dictVectorizer', index=25, + number=603, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaler', full_name='CoreML.Specification.Model.scaler', index=26, + number=604, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='categoricalMapping', full_name='CoreML.Specification.Model.categoricalMapping', index=27, + number=606, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='normalizer', full_name='CoreML.Specification.Model.normalizer', index=28, + number=607, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayFeatureExtractor', full_name='CoreML.Specification.Model.arrayFeatureExtractor', index=29, + number=609, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nonMaximumSuppression', full_name='CoreML.Specification.Model.nonMaximumSuppression', index=30, + number=610, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='identity', 
full_name='CoreML.Specification.Model.identity', index=31, + number=900, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='textClassifier', full_name='CoreML.Specification.Model.textClassifier', index=32, + number=2000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='wordTagger', full_name='CoreML.Specification.Model.wordTagger', index=33, + number=2001, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='visionFeaturePrint', full_name='CoreML.Specification.Model.visionFeaturePrint', index=34, + number=2002, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='soundAnalysisPreprocessing', full_name='CoreML.Specification.Model.soundAnalysisPreprocessing', index=35, + number=2003, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gazetteer', full_name='CoreML.Specification.Model.gazetteer', index=36, + number=2004, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='wordEmbedding', full_name='CoreML.Specification.Model.wordEmbedding', index=37, + number=2005, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='audioFeaturePrint', full_name='CoreML.Specification.Model.audioFeaturePrint', index=38, + number=2006, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='serializedModel', full_name='CoreML.Specification.Model.serializedModel', index=39, + number=3000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.Model.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1633, + serialized_end=4434, +) + +_PIPELINE.fields_by_name['models'].message_type = _MODEL +_PIPELINECLASSIFIER.fields_by_name['pipeline'].message_type = _PIPELINE +_PIPELINEREGRESSOR.fields_by_name['pipeline'].message_type = _PIPELINE 
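+# NOTE (editorial annotation, not protoc output): from here protoc links each
+# Model field to the message type defined in the corresponding imported *_pb2
+# module (GLMRegressor__pb2, SVM__pb2, NeuralNetwork__pb2, ...), and then
+# appends every model-kind field to the 'Type' oneof so that exactly one model
+# payload can be set on a CoreML.Specification.Model.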
+_FEATUREDESCRIPTION.fields_by_name['type'].message_type = FeatureTypes__pb2._FEATURETYPE +_METADATA_USERDEFINEDENTRY.containing_type = _METADATA +_METADATA.fields_by_name['userDefined'].message_type = _METADATA_USERDEFINEDENTRY +_MODELDESCRIPTION.fields_by_name['input'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['output'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['trainingInput'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['metadata'].message_type = _METADATA +_MODEL.fields_by_name['description'].message_type = _MODELDESCRIPTION +_MODEL.fields_by_name['pipelineClassifier'].message_type = _PIPELINECLASSIFIER +_MODEL.fields_by_name['pipelineRegressor'].message_type = _PIPELINEREGRESSOR +_MODEL.fields_by_name['pipeline'].message_type = _PIPELINE +_MODEL.fields_by_name['glmRegressor'].message_type = GLMRegressor__pb2._GLMREGRESSOR +_MODEL.fields_by_name['supportVectorRegressor'].message_type = SVM__pb2._SUPPORTVECTORREGRESSOR +_MODEL.fields_by_name['treeEnsembleRegressor'].message_type = TreeEnsemble__pb2._TREEENSEMBLEREGRESSOR +_MODEL.fields_by_name['neuralNetworkRegressor'].message_type = NeuralNetwork__pb2._NEURALNETWORKREGRESSOR +_MODEL.fields_by_name['bayesianProbitRegressor'].message_type = BayesianProbitRegressor__pb2._BAYESIANPROBITREGRESSOR +_MODEL.fields_by_name['glmClassifier'].message_type = GLMClassifier__pb2._GLMCLASSIFIER +_MODEL.fields_by_name['supportVectorClassifier'].message_type = SVM__pb2._SUPPORTVECTORCLASSIFIER +_MODEL.fields_by_name['treeEnsembleClassifier'].message_type = TreeEnsemble__pb2._TREEENSEMBLECLASSIFIER +_MODEL.fields_by_name['neuralNetworkClassifier'].message_type = NeuralNetwork__pb2._NEURALNETWORKCLASSIFIER +_MODEL.fields_by_name['kNearestNeighborsClassifier'].message_type = NearestNeighbors__pb2._KNEARESTNEIGHBORSCLASSIFIER +_MODEL.fields_by_name['neuralNetwork'].message_type = NeuralNetwork__pb2._NEURALNETWORK +_MODEL.fields_by_name['itemSimilarityRecommender'].message_type = ItemSimilarityRecommender__pb2._ITEMSIMILARITYRECOMMENDER +_MODEL.fields_by_name['mlProgram'].message_type = MIL__pb2._PROGRAM +_MODEL.fields_by_name['customModel'].message_type = CustomModel__pb2._CUSTOMMODEL +_MODEL.fields_by_name['linkedModel'].message_type = LinkedModel__pb2._LINKEDMODEL +_MODEL.fields_by_name['classConfidenceThresholding'].message_type = ClassConfidenceThresholding__pb2._CLASSCONFIDENCETHRESHOLDING +_MODEL.fields_by_name['oneHotEncoder'].message_type = OneHotEncoder__pb2._ONEHOTENCODER +_MODEL.fields_by_name['imputer'].message_type = Imputer__pb2._IMPUTER +_MODEL.fields_by_name['featureVectorizer'].message_type = FeatureVectorizer__pb2._FEATUREVECTORIZER +_MODEL.fields_by_name['dictVectorizer'].message_type = DictVectorizer__pb2._DICTVECTORIZER +_MODEL.fields_by_name['scaler'].message_type = Scaler__pb2._SCALER +_MODEL.fields_by_name['categoricalMapping'].message_type = CategoricalMapping__pb2._CATEGORICALMAPPING +_MODEL.fields_by_name['normalizer'].message_type = Normalizer__pb2._NORMALIZER +_MODEL.fields_by_name['arrayFeatureExtractor'].message_type = ArrayFeatureExtractor__pb2._ARRAYFEATUREEXTRACTOR +_MODEL.fields_by_name['nonMaximumSuppression'].message_type = NonMaximumSuppression__pb2._NONMAXIMUMSUPPRESSION +_MODEL.fields_by_name['identity'].message_type = Identity__pb2._IDENTITY +_MODEL.fields_by_name['textClassifier'].message_type = TextClassifier__pb2._TEXTCLASSIFIER +_MODEL.fields_by_name['wordTagger'].message_type = WordTagger__pb2._WORDTAGGER 
+_MODEL.fields_by_name['visionFeaturePrint'].message_type = VisionFeaturePrint__pb2._VISIONFEATUREPRINT +_MODEL.fields_by_name['soundAnalysisPreprocessing'].message_type = SoundAnalysisPreprocessing__pb2._SOUNDANALYSISPREPROCESSING +_MODEL.fields_by_name['gazetteer'].message_type = Gazetteer__pb2._GAZETTEER +_MODEL.fields_by_name['wordEmbedding'].message_type = WordEmbedding__pb2._WORDEMBEDDING +_MODEL.fields_by_name['audioFeaturePrint'].message_type = AudioFeaturePrint__pb2._AUDIOFEATUREPRINT +_MODEL.fields_by_name['serializedModel'].message_type = _SERIALIZEDMODEL +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipelineClassifier']) +_MODEL.fields_by_name['pipelineClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipelineRegressor']) +_MODEL.fields_by_name['pipelineRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipeline']) +_MODEL.fields_by_name['pipeline'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['glmRegressor']) +_MODEL.fields_by_name['glmRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['supportVectorRegressor']) +_MODEL.fields_by_name['supportVectorRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['treeEnsembleRegressor']) +_MODEL.fields_by_name['treeEnsembleRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetworkRegressor']) +_MODEL.fields_by_name['neuralNetworkRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['bayesianProbitRegressor']) +_MODEL.fields_by_name['bayesianProbitRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['glmClassifier']) +_MODEL.fields_by_name['glmClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['supportVectorClassifier']) +_MODEL.fields_by_name['supportVectorClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['treeEnsembleClassifier']) +_MODEL.fields_by_name['treeEnsembleClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetworkClassifier']) +_MODEL.fields_by_name['neuralNetworkClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['kNearestNeighborsClassifier']) +_MODEL.fields_by_name['kNearestNeighborsClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetwork']) +_MODEL.fields_by_name['neuralNetwork'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['itemSimilarityRecommender']) +_MODEL.fields_by_name['itemSimilarityRecommender'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['mlProgram']) +_MODEL.fields_by_name['mlProgram'].containing_oneof = 
_MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['customModel']) +_MODEL.fields_by_name['customModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['linkedModel']) +_MODEL.fields_by_name['linkedModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['classConfidenceThresholding']) +_MODEL.fields_by_name['classConfidenceThresholding'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['oneHotEncoder']) +_MODEL.fields_by_name['oneHotEncoder'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['imputer']) +_MODEL.fields_by_name['imputer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['featureVectorizer']) +_MODEL.fields_by_name['featureVectorizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['dictVectorizer']) +_MODEL.fields_by_name['dictVectorizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['scaler']) +_MODEL.fields_by_name['scaler'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['categoricalMapping']) +_MODEL.fields_by_name['categoricalMapping'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['normalizer']) +_MODEL.fields_by_name['normalizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['arrayFeatureExtractor']) +_MODEL.fields_by_name['arrayFeatureExtractor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['nonMaximumSuppression']) +_MODEL.fields_by_name['nonMaximumSuppression'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['identity']) +_MODEL.fields_by_name['identity'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['textClassifier']) +_MODEL.fields_by_name['textClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['wordTagger']) +_MODEL.fields_by_name['wordTagger'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['visionFeaturePrint']) +_MODEL.fields_by_name['visionFeaturePrint'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['soundAnalysisPreprocessing']) +_MODEL.fields_by_name['soundAnalysisPreprocessing'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['gazetteer']) +_MODEL.fields_by_name['gazetteer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['wordEmbedding']) +_MODEL.fields_by_name['wordEmbedding'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['audioFeaturePrint']) 
+_MODEL.fields_by_name['audioFeaturePrint'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['serializedModel']) +_MODEL.fields_by_name['serializedModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +DESCRIPTOR.message_types_by_name['Pipeline'] = _PIPELINE +DESCRIPTOR.message_types_by_name['PipelineClassifier'] = _PIPELINECLASSIFIER +DESCRIPTOR.message_types_by_name['PipelineRegressor'] = _PIPELINEREGRESSOR +DESCRIPTOR.message_types_by_name['FeatureDescription'] = _FEATUREDESCRIPTION +DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA +DESCRIPTOR.message_types_by_name['ModelDescription'] = _MODELDESCRIPTION +DESCRIPTOR.message_types_by_name['SerializedModel'] = _SERIALIZEDMODEL +DESCRIPTOR.message_types_by_name['Model'] = _MODEL +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Pipeline = _reflection.GeneratedProtocolMessageType('Pipeline', (_message.Message,), dict( + DESCRIPTOR = _PIPELINE, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Pipeline) + )) +_sym_db.RegisterMessage(Pipeline) + +PipelineClassifier = _reflection.GeneratedProtocolMessageType('PipelineClassifier', (_message.Message,), dict( + DESCRIPTOR = _PIPELINECLASSIFIER, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PipelineClassifier) + )) +_sym_db.RegisterMessage(PipelineClassifier) + +PipelineRegressor = _reflection.GeneratedProtocolMessageType('PipelineRegressor', (_message.Message,), dict( + DESCRIPTOR = _PIPELINEREGRESSOR, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PipelineRegressor) + )) +_sym_db.RegisterMessage(PipelineRegressor) + +FeatureDescription = _reflection.GeneratedProtocolMessageType('FeatureDescription', (_message.Message,), dict( + DESCRIPTOR = _FEATUREDESCRIPTION, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureDescription) + )) +_sym_db.RegisterMessage(FeatureDescription) + +Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), dict( + + UserDefinedEntry = _reflection.GeneratedProtocolMessageType('UserDefinedEntry', (_message.Message,), dict( + DESCRIPTOR = _METADATA_USERDEFINEDENTRY, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Metadata.UserDefinedEntry) + )) + , + DESCRIPTOR = _METADATA, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Metadata) + )) +_sym_db.RegisterMessage(Metadata) +_sym_db.RegisterMessage(Metadata.UserDefinedEntry) + +ModelDescription = _reflection.GeneratedProtocolMessageType('ModelDescription', (_message.Message,), dict( + DESCRIPTOR = _MODELDESCRIPTION, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ModelDescription) + )) +_sym_db.RegisterMessage(ModelDescription) + +SerializedModel = _reflection.GeneratedProtocolMessageType('SerializedModel', (_message.Message,), dict( + DESCRIPTOR = _SERIALIZEDMODEL, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SerializedModel) + )) +_sym_db.RegisterMessage(SerializedModel) + +Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict( + DESCRIPTOR = _MODEL, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Model) + )) +_sym_db.RegisterMessage(Model) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = 
_descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_METADATA_USERDEFINEDENTRY.has_options = True +_METADATA_USERDEFINEDENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py new file mode 100644 index 00000000..3d47504d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py @@ -0,0 +1,393 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NamedParameters.proto + +import sys + +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pb2 +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NamedParameters.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x15NamedParameters.proto\x12\x14\x43oreML.Specification\"0\n\nInt32Range\x12\x10\n\x08minValue\x18\x01 \x01(\x05\x12\x10\n\x08maxValue\x18\x02 \x01(\x05\"\x1a\n\x08Int32Set\x12\x0e\n\x06values\x18\x01 \x03(\x05\"0\n\nFloatRange\x12\x10\n\x08minValue\x18\x01 \x01(\x02\x12\x10\n\x08maxValue\x18\x02 \x01(\x02\"\x99\x01\n\x0eInt32Parameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x05\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.Int32RangeH\x00\x12-\n\x03set\x18\x0b \x01(\x0b\x32\x1e.CoreML.Specification.Int32SetH\x00\x42\x0f\n\rAllowedValues\"j\n\x0e\x46loatParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x02\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.FloatRangeH\x00\x42\x0f\n\rAllowedValues\"\x93\x01\n\tParameter\x12>\n\x0eint32Parameter\x18\x01 \x01(\x0b\x32$.CoreML.Specification.Int32ParameterH\x00\x12>\n\x0e\x66loatParameter\x18\x02 \x01(\x0b\x32$.CoreML.Specification.FloatParameterH\x00\x42\x06\n\x04Type\"l\n\x0eNamedParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10shortDescription\x18\x02 \x01(\t\x12\x32\n\tparameter\x18\x03 \x01(\x0b\x32\x1f.CoreML.Specification.ParameterB\x02H\x03\x62\x06proto3') +) + + + + +_INT32RANGE = _descriptor.Descriptor( + name='Int32Range', + full_name='CoreML.Specification.Int32Range', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.Int32Range.minValue', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.Int32Range.maxValue', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=47, + serialized_end=95, +) + + +_INT32SET = 
_descriptor.Descriptor( + name='Int32Set', + full_name='CoreML.Specification.Int32Set', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.Int32Set.values', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=97, + serialized_end=123, +) + + +_FLOATRANGE = _descriptor.Descriptor( + name='FloatRange', + full_name='CoreML.Specification.FloatRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.FloatRange.minValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.FloatRange.maxValue', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=125, + serialized_end=173, +) + + +_INT32PARAMETER = _descriptor.Descriptor( + name='Int32Parameter', + full_name='CoreML.Specification.Int32Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.Int32Parameter.defaultValue', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.Int32Parameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set', full_name='CoreML.Specification.Int32Parameter.set', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.Int32Parameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=176, + serialized_end=329, +) + + +_FLOATPARAMETER = _descriptor.Descriptor( + name='FloatParameter', + full_name='CoreML.Specification.FloatParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', 
full_name='CoreML.Specification.FloatParameter.defaultValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.FloatParameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.FloatParameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=331, + serialized_end=437, +) + + +_PARAMETER = _descriptor.Descriptor( + name='Parameter', + full_name='CoreML.Specification.Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int32Parameter', full_name='CoreML.Specification.Parameter.int32Parameter', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatParameter', full_name='CoreML.Specification.Parameter.floatParameter', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.Parameter.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=440, + serialized_end=587, +) + + +_NAMEDPARAMETER = _descriptor.Descriptor( + name='NamedParameter', + full_name='CoreML.Specification.NamedParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.NamedParameter.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.NamedParameter.shortDescription', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parameter', full_name='CoreML.Specification.NamedParameter.parameter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=589, + serialized_end=697, +) + +_INT32PARAMETER.fields_by_name['range'].message_type = _INT32RANGE +_INT32PARAMETER.fields_by_name['set'].message_type = _INT32SET +_INT32PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT32PARAMETER.fields_by_name['range']) +_INT32PARAMETER.fields_by_name['range'].containing_oneof = _INT32PARAMETER.oneofs_by_name['AllowedValues'] +_INT32PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT32PARAMETER.fields_by_name['set']) +_INT32PARAMETER.fields_by_name['set'].containing_oneof = _INT32PARAMETER.oneofs_by_name['AllowedValues'] +_FLOATPARAMETER.fields_by_name['range'].message_type = _FLOATRANGE +_FLOATPARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _FLOATPARAMETER.fields_by_name['range']) +_FLOATPARAMETER.fields_by_name['range'].containing_oneof = _FLOATPARAMETER.oneofs_by_name['AllowedValues'] +_PARAMETER.fields_by_name['int32Parameter'].message_type = _INT32PARAMETER +_PARAMETER.fields_by_name['floatParameter'].message_type = _FLOATPARAMETER +_PARAMETER.oneofs_by_name['Type'].fields.append( + _PARAMETER.fields_by_name['int32Parameter']) +_PARAMETER.fields_by_name['int32Parameter'].containing_oneof = _PARAMETER.oneofs_by_name['Type'] +_PARAMETER.oneofs_by_name['Type'].fields.append( + _PARAMETER.fields_by_name['floatParameter']) +_PARAMETER.fields_by_name['floatParameter'].containing_oneof = _PARAMETER.oneofs_by_name['Type'] +_NAMEDPARAMETER.fields_by_name['parameter'].message_type = _PARAMETER +DESCRIPTOR.message_types_by_name['Int32Range'] = _INT32RANGE +DESCRIPTOR.message_types_by_name['Int32Set'] = _INT32SET +DESCRIPTOR.message_types_by_name['FloatRange'] = _FLOATRANGE +DESCRIPTOR.message_types_by_name['Int32Parameter'] = _INT32PARAMETER +DESCRIPTOR.message_types_by_name['FloatParameter'] = _FLOATPARAMETER +DESCRIPTOR.message_types_by_name['Parameter'] = _PARAMETER +DESCRIPTOR.message_types_by_name['NamedParameter'] = _NAMEDPARAMETER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int32Range = _reflection.GeneratedProtocolMessageType('Int32Range', (_message.Message,), dict( + DESCRIPTOR = _INT32RANGE, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Range) + )) +_sym_db.RegisterMessage(Int32Range) + +Int32Set = _reflection.GeneratedProtocolMessageType('Int32Set', (_message.Message,), dict( + DESCRIPTOR = _INT32SET, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Set) + )) +_sym_db.RegisterMessage(Int32Set) + +FloatRange = _reflection.GeneratedProtocolMessageType('FloatRange', (_message.Message,), dict( + DESCRIPTOR = _FLOATRANGE, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatRange) + )) +_sym_db.RegisterMessage(FloatRange) + +Int32Parameter = _reflection.GeneratedProtocolMessageType('Int32Parameter', (_message.Message,), dict( + DESCRIPTOR = _INT32PARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Parameter) + )) +_sym_db.RegisterMessage(Int32Parameter) + +FloatParameter = _reflection.GeneratedProtocolMessageType('FloatParameter', (_message.Message,), dict( + DESCRIPTOR = _FLOATPARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatParameter) + )) +_sym_db.RegisterMessage(FloatParameter) + +Parameter = _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), dict( + DESCRIPTOR = _PARAMETER, + 
__module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Parameter) + )) +_sym_db.RegisterMessage(Parameter) + +NamedParameter = _reflection.GeneratedProtocolMessageType('NamedParameter', (_message.Message,), dict( + DESCRIPTOR = _NAMEDPARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NamedParameter) + )) +_sym_db.RegisterMessage(NamedParameter) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py new file mode 100644 index 00000000..68ebea13 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py @@ -0,0 +1,424 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NearestNeighbors.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NearestNeighbors.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x16NearestNeighbors.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\x1a\x10Parameters.proto\"\xb6\x04\n\x1bKNearestNeighborsClassifier\x12J\n\x15nearestNeighborsIndex\x18\x01 \x01(\x0b\x32+.CoreML.Specification.NearestNeighborsIndex\x12?\n\x11numberOfNeighbors\x18\x03 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\x1c\n\x12\x64\x65\x66\x61ultStringLabel\x18n \x01(\tH\x01\x12\x1b\n\x11\x64\x65\x66\x61ultInt64Label\x18o \x01(\x03H\x01\x12\x43\n\x10uniformWeighting\x18\xc8\x01 \x01(\x0b\x32&.CoreML.Specification.UniformWeightingH\x02\x12S\n\x18inverseDistanceWeighting\x18\xd2\x01 \x01(\x0b\x32..CoreML.Specification.InverseDistanceWeightingH\x02\x42\r\n\x0b\x43lassLabelsB\x13\n\x11\x44\x65\x66\x61ultClassLabelB\x11\n\x0fWeightingScheme\"\xe2\x02\n\x15NearestNeighborsIndex\x12\x1a\n\x12numberOfDimensions\x18\x01 \x01(\x05\x12\x37\n\x0c\x66loatSamples\x18\x02 \x03(\x0b\x32!.CoreML.Specification.FloatVector\x12\x38\n\x0blinearIndex\x18\x64 \x01(\x0b\x32!.CoreML.Specification.LinearIndexH\x00\x12\x44\n\x11singleKdTreeIndex\x18n 
\x01(\x0b\x32\'.CoreML.Specification.SingleKdTreeIndexH\x00\x12S\n\x18squaredEuclideanDistance\x18\xc8\x01 \x01(\x0b\x32..CoreML.Specification.SquaredEuclideanDistanceH\x01\x42\x0b\n\tIndexTypeB\x12\n\x10\x44istanceFunction\"\x12\n\x10UniformWeighting\"\x1a\n\x18InverseDistanceWeighting\"\r\n\x0bLinearIndex\"%\n\x11SingleKdTreeIndex\x12\x10\n\x08leafSize\x18\x01 \x01(\x05\"\x1a\n\x18SquaredEuclideanDistanceB\x02H\x03P\x00P\x01\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,]) + + + + +_KNEARESTNEIGHBORSCLASSIFIER = _descriptor.Descriptor( + name='KNearestNeighborsClassifier', + full_name='CoreML.Specification.KNearestNeighborsClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nearestNeighborsIndex', full_name='CoreML.Specification.KNearestNeighborsClassifier.nearestNeighborsIndex', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numberOfNeighbors', full_name='CoreML.Specification.KNearestNeighborsClassifier.numberOfNeighbors', index=1, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.stringClassLabels', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.int64ClassLabels', index=3, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='defaultStringLabel', full_name='CoreML.Specification.KNearestNeighborsClassifier.defaultStringLabel', index=4, + number=110, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='defaultInt64Label', full_name='CoreML.Specification.KNearestNeighborsClassifier.defaultInt64Label', index=5, + number=111, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='uniformWeighting', full_name='CoreML.Specification.KNearestNeighborsClassifier.uniformWeighting', index=6, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inverseDistanceWeighting', full_name='CoreML.Specification.KNearestNeighborsClassifier.inverseDistanceWeighting', index=7, + 
number=210, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='DefaultClassLabel', full_name='CoreML.Specification.KNearestNeighborsClassifier.DefaultClassLabel', + index=1, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='WeightingScheme', full_name='CoreML.Specification.KNearestNeighborsClassifier.WeightingScheme', + index=2, containing_type=None, fields=[]), + ], + serialized_start=89, + serialized_end=655, +) + + +_NEARESTNEIGHBORSINDEX = _descriptor.Descriptor( + name='NearestNeighborsIndex', + full_name='CoreML.Specification.NearestNeighborsIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numberOfDimensions', full_name='CoreML.Specification.NearestNeighborsIndex.numberOfDimensions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatSamples', full_name='CoreML.Specification.NearestNeighborsIndex.floatSamples', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linearIndex', full_name='CoreML.Specification.NearestNeighborsIndex.linearIndex', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='singleKdTreeIndex', full_name='CoreML.Specification.NearestNeighborsIndex.singleKdTreeIndex', index=3, + number=110, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squaredEuclideanDistance', full_name='CoreML.Specification.NearestNeighborsIndex.squaredEuclideanDistance', index=4, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='IndexType', full_name='CoreML.Specification.NearestNeighborsIndex.IndexType', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='DistanceFunction', full_name='CoreML.Specification.NearestNeighborsIndex.DistanceFunction', + index=1, containing_type=None, fields=[]), + ], + serialized_start=658, + serialized_end=1012, +) + + +_UNIFORMWEIGHTING = _descriptor.Descriptor( + name='UniformWeighting', + 
full_name='CoreML.Specification.UniformWeighting', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1014, + serialized_end=1032, +) + + +_INVERSEDISTANCEWEIGHTING = _descriptor.Descriptor( + name='InverseDistanceWeighting', + full_name='CoreML.Specification.InverseDistanceWeighting', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1034, + serialized_end=1060, +) + + +_LINEARINDEX = _descriptor.Descriptor( + name='LinearIndex', + full_name='CoreML.Specification.LinearIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1062, + serialized_end=1075, +) + + +_SINGLEKDTREEINDEX = _descriptor.Descriptor( + name='SingleKdTreeIndex', + full_name='CoreML.Specification.SingleKdTreeIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='leafSize', full_name='CoreML.Specification.SingleKdTreeIndex.leafSize', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1077, + serialized_end=1114, +) + + +_SQUAREDEUCLIDEANDISTANCE = _descriptor.Descriptor( + name='SquaredEuclideanDistance', + full_name='CoreML.Specification.SquaredEuclideanDistance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1116, + serialized_end=1142, +) + +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['nearestNeighborsIndex'].message_type = _NEARESTNEIGHBORSINDEX +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['numberOfNeighbors'].message_type = Parameters__pb2._INT64PARAMETER +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting'].message_type = _UNIFORMWEIGHTING +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting'].message_type = _INVERSEDISTANCEWEIGHTING +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = 
_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultStringLabel']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultStringLabel'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultInt64Label']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultInt64Label'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'] +_NEARESTNEIGHBORSINDEX.fields_by_name['floatSamples'].message_type = DataStructures__pb2._FLOATVECTOR +_NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex'].message_type = _LINEARINDEX +_NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex'].message_type = _SINGLEKDTREEINDEX +_NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance'].message_type = _SQUAREDEUCLIDEANDISTANCE +_NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex']) +_NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'] +_NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex']) +_NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'] +_NEARESTNEIGHBORSINDEX.oneofs_by_name['DistanceFunction'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance']) +_NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['DistanceFunction'] +DESCRIPTOR.message_types_by_name['KNearestNeighborsClassifier'] = _KNEARESTNEIGHBORSCLASSIFIER +DESCRIPTOR.message_types_by_name['NearestNeighborsIndex'] = _NEARESTNEIGHBORSINDEX +DESCRIPTOR.message_types_by_name['UniformWeighting'] = _UNIFORMWEIGHTING +DESCRIPTOR.message_types_by_name['InverseDistanceWeighting'] = _INVERSEDISTANCEWEIGHTING +DESCRIPTOR.message_types_by_name['LinearIndex'] = _LINEARINDEX +DESCRIPTOR.message_types_by_name['SingleKdTreeIndex'] = _SINGLEKDTREEINDEX +DESCRIPTOR.message_types_by_name['SquaredEuclideanDistance'] = _SQUAREDEUCLIDEANDISTANCE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +KNearestNeighborsClassifier = _reflection.GeneratedProtocolMessageType('KNearestNeighborsClassifier', (_message.Message,), dict( + DESCRIPTOR = _KNEARESTNEIGHBORSCLASSIFIER, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.KNearestNeighborsClassifier) + )) +_sym_db.RegisterMessage(KNearestNeighborsClassifier) + +NearestNeighborsIndex = _reflection.GeneratedProtocolMessageType('NearestNeighborsIndex', (_message.Message,), dict( + 
DESCRIPTOR = _NEARESTNEIGHBORSINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NearestNeighborsIndex) + )) +_sym_db.RegisterMessage(NearestNeighborsIndex) + +UniformWeighting = _reflection.GeneratedProtocolMessageType('UniformWeighting', (_message.Message,), dict( + DESCRIPTOR = _UNIFORMWEIGHTING, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UniformWeighting) + )) +_sym_db.RegisterMessage(UniformWeighting) + +InverseDistanceWeighting = _reflection.GeneratedProtocolMessageType('InverseDistanceWeighting', (_message.Message,), dict( + DESCRIPTOR = _INVERSEDISTANCEWEIGHTING, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.InverseDistanceWeighting) + )) +_sym_db.RegisterMessage(InverseDistanceWeighting) + +LinearIndex = _reflection.GeneratedProtocolMessageType('LinearIndex', (_message.Message,), dict( + DESCRIPTOR = _LINEARINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearIndex) + )) +_sym_db.RegisterMessage(LinearIndex) + +SingleKdTreeIndex = _reflection.GeneratedProtocolMessageType('SingleKdTreeIndex', (_message.Message,), dict( + DESCRIPTOR = _SINGLEKDTREEINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SingleKdTreeIndex) + )) +_sym_db.RegisterMessage(SingleKdTreeIndex) + +SquaredEuclideanDistance = _reflection.GeneratedProtocolMessageType('SquaredEuclideanDistance', (_message.Message,), dict( + DESCRIPTOR = _SQUAREDEUCLIDEANDISTANCE, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SquaredEuclideanDistance) + )) +_sym_db.RegisterMessage(SquaredEuclideanDistance) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py new file mode 100644 index 00000000..dc1a8150 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py @@ -0,0 +1,12661 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NeuralNetwork.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 +from . 
import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NeuralNetwork.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13NeuralNetwork.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\x1a\x10Parameters.proto\"\x88\x03\n\rNeuralNetwork\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\"x\n\x18NeuralNetworkImageScaler\x12\x14\n\x0c\x63hannelScale\x18\n \x01(\x02\x12\x10\n\x08\x62lueBias\x18\x14 \x01(\x02\x12\x11\n\tgreenBias\x18\x15 \x01(\x02\x12\x0f\n\x07redBias\x18\x16 \x01(\x02\x12\x10\n\x08grayBias\x18\x1e \x01(\x02\"+\n\x16NeuralNetworkMeanImage\x12\x11\n\tmeanImage\x18\x01 \x03(\x02\"\xc6\x01\n\x1aNeuralNetworkPreprocessing\x12\x13\n\x0b\x66\x65\x61tureName\x18\x01 \x01(\t\x12@\n\x06scaler\x18\n \x01(\x0b\x32..CoreML.Specification.NeuralNetworkImageScalerH\x00\x12\x41\n\tmeanImage\x18\x0b \x01(\x0b\x32,.CoreML.Specification.NeuralNetworkMeanImageH\x00\x42\x0e\n\x0cpreprocessor\"\x10\n\x0e\x41\x63tivationReLU\"$\n\x13\x41\x63tivationLeakyReLU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x10\n\x0e\x41\x63tivationTanh\"3\n\x14\x41\x63tivationScaledTanh\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"\x13\n\x11\x41\x63tivationSigmoid\"/\n\x10\x41\x63tivationLinear\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"4\n\x15\x41\x63tivationSigmoidHard\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"D\n\x0f\x41\x63tivationPReLU\x12\x31\n\x05\x61lpha\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x1e\n\rActivationELU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"*\n\x19\x41\x63tivationThresholdedReLU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x14\n\x12\x41\x63tivationSoftsign\"\x14\n\x12\x41\x63tivationSoftplus\"\x83\x01\n\x1c\x41\x63tivationParametricSoftplus\x12\x31\n\x05\x61lpha\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xd4\x06\n\x10\x41\x63tivationParams\x12\x38\n\x06linear\x18\x05 \x01(\x0b\x32&.CoreML.Specification.ActivationLinearH\x00\x12\x34\n\x04ReLU\x18\n \x01(\x0b\x32$.CoreML.Specification.ActivationReLUH\x00\x12>\n\tleakyReLU\x18\x0f \x01(\x0b\x32).CoreML.Specification.ActivationLeakyReLUH\x00\x12J\n\x0fthresholdedReLU\x18\x14 \x01(\x0b\x32/.CoreML.Specification.ActivationThresholdedReLUH\x00\x12\x36\n\x05PReLU\x18\x19 \x01(\x0b\x32%.CoreML.Specification.ActivationPReLUH\x00\x12\x34\n\x04tanh\x18\x1e \x01(\x0b\x32$.CoreML.Specification.ActivationTanhH\x00\x12@\n\nscaledTanh\x18\x1f \x01(\x0b\x32*.CoreML.Specification.ActivationScaledTanhH\x00\x12:\n\x07sigmoid\x18( 
\x01(\x0b\x32\'.CoreML.Specification.ActivationSigmoidH\x00\x12\x42\n\x0bsigmoidHard\x18) \x01(\x0b\x32+.CoreML.Specification.ActivationSigmoidHardH\x00\x12\x32\n\x03\x45LU\x18\x32 \x01(\x0b\x32#.CoreML.Specification.ActivationELUH\x00\x12<\n\x08softsign\x18< \x01(\x0b\x32(.CoreML.Specification.ActivationSoftsignH\x00\x12<\n\x08softplus\x18\x46 \x01(\x0b\x32(.CoreML.Specification.ActivationSoftplusH\x00\x12P\n\x12parametricSoftplus\x18G \x01(\x0b\x32\x32.CoreML.Specification.ActivationParametricSoftplusH\x00\x42\x12\n\x10NonlinearityType\"(\n\x06Tensor\x12\x0c\n\x04rank\x18\x01 \x01(\r\x12\x10\n\x08\x64imValue\x18\x02 \x03(\x03\"\xeaU\n\x12NeuralNetworkLayer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x03(\t\x12\x0e\n\x06output\x18\x03 \x03(\t\x12\x31\n\x0binputTensor\x18\x04 \x03(\x0b\x32\x1c.CoreML.Specification.Tensor\x12\x32\n\x0coutputTensor\x18\x05 \x03(\x0b\x32\x1c.CoreML.Specification.Tensor\x12\x13\n\x0bisUpdatable\x18\n \x01(\x08\x12\x43\n\x0b\x63onvolution\x18\x64 \x01(\x0b\x32,.CoreML.Specification.ConvolutionLayerParamsH\x00\x12;\n\x07pooling\x18x \x01(\x0b\x32(.CoreML.Specification.PoolingLayerParamsH\x00\x12=\n\nactivation\x18\x82\x01 \x01(\x0b\x32&.CoreML.Specification.ActivationParamsH\x00\x12\x46\n\x0cinnerProduct\x18\x8c\x01 \x01(\x0b\x32-.CoreML.Specification.InnerProductLayerParamsH\x00\x12@\n\tembedding\x18\x96\x01 \x01(\x0b\x32*.CoreML.Specification.EmbeddingLayerParamsH\x00\x12@\n\tbatchnorm\x18\xa0\x01 \x01(\x0b\x32*.CoreML.Specification.BatchnormLayerParamsH\x00\x12\x46\n\x03mvn\x18\xa5\x01 \x01(\x0b\x32\x36.CoreML.Specification.MeanVarianceNormalizeLayerParamsH\x00\x12\x44\n\x0bl2normalize\x18\xaa\x01 \x01(\x0b\x32,.CoreML.Specification.L2NormalizeLayerParamsH\x00\x12<\n\x07softmax\x18\xaf\x01 \x01(\x0b\x32(.CoreML.Specification.SoftmaxLayerParamsH\x00\x12\x34\n\x03lrn\x18\xb4\x01 \x01(\x0b\x32$.CoreML.Specification.LRNLayerParamsH\x00\x12\x36\n\x04\x63rop\x18\xbe\x01 \x01(\x0b\x32%.CoreML.Specification.CropLayerParamsH\x00\x12<\n\x07padding\x18\xc8\x01 \x01(\x0b\x32(.CoreML.Specification.PaddingLayerParamsH\x00\x12>\n\x08upsample\x18\xd2\x01 \x01(\x0b\x32).CoreML.Specification.UpsampleLayerParamsH\x00\x12J\n\x0eresizeBilinear\x18\xd3\x01 \x01(\x0b\x32/.CoreML.Specification.ResizeBilinearLayerParamsH\x00\x12\x42\n\ncropResize\x18\xd4\x01 \x01(\x0b\x32+.CoreML.Specification.CropResizeLayerParamsH\x00\x12@\n\x05unary\x18\xdc\x01 \x01(\x0b\x32..CoreML.Specification.UnaryFunctionLayerParamsH\x00\x12\x34\n\x03\x61\x64\x64\x18\xe6\x01 \x01(\x0b\x32$.CoreML.Specification.AddLayerParamsH\x00\x12>\n\x08multiply\x18\xe7\x01 \x01(\x0b\x32).CoreML.Specification.MultiplyLayerParamsH\x00\x12<\n\x07\x61verage\x18\xf0\x01 \x01(\x0b\x32(.CoreML.Specification.AverageLayerParamsH\x00\x12\x38\n\x05scale\x18\xf5\x01 \x01(\x0b\x32&.CoreML.Specification.ScaleLayerParamsH\x00\x12\x36\n\x04\x62ias\x18\xfa\x01 \x01(\x0b\x32%.CoreML.Specification.BiasLayerParamsH\x00\x12\x34\n\x03max\x18\x84\x02 \x01(\x0b\x32$.CoreML.Specification.MaxLayerParamsH\x00\x12\x34\n\x03min\x18\x85\x02 \x01(\x0b\x32$.CoreML.Specification.MinLayerParamsH\x00\x12;\n\x03\x64ot\x18\x8e\x02 \x01(\x0b\x32+.CoreML.Specification.DotProductLayerParamsH\x00\x12:\n\x06reduce\x18\x98\x02 \x01(\x0b\x32\'.CoreML.Specification.ReduceLayerParamsH\x00\x12\x46\n\x0cloadConstant\x18\xa2\x02 \x01(\x0b\x32-.CoreML.Specification.LoadConstantLayerParamsH\x00\x12<\n\x07reshape\x18\xac\x02 \x01(\x0b\x32(.CoreML.Specification.ReshapeLayerParamsH\x00\x12<\n\x07\x66latten\x18\xad\x02 
\x01(\x0b\x32(.CoreML.Specification.FlattenLayerParamsH\x00\x12<\n\x07permute\x18\xb6\x02 \x01(\x0b\x32(.CoreML.Specification.PermuteLayerParamsH\x00\x12:\n\x06\x63oncat\x18\xc0\x02 \x01(\x0b\x32\'.CoreML.Specification.ConcatLayerParamsH\x00\x12\x38\n\x05split\x18\xca\x02 \x01(\x0b\x32&.CoreML.Specification.SplitLayerParamsH\x00\x12J\n\x0esequenceRepeat\x18\xd4\x02 \x01(\x0b\x32/.CoreML.Specification.SequenceRepeatLayerParamsH\x00\x12J\n\x0ereorganizeData\x18\xd9\x02 \x01(\x0b\x32/.CoreML.Specification.ReorganizeDataLayerParamsH\x00\x12\x38\n\x05slice\x18\xde\x02 \x01(\x0b\x32&.CoreML.Specification.SliceLayerParamsH\x00\x12L\n\x0fsimpleRecurrent\x18\x90\x03 \x01(\x0b\x32\x30.CoreML.Specification.SimpleRecurrentLayerParamsH\x00\x12\x34\n\x03gru\x18\x9a\x03 \x01(\x0b\x32$.CoreML.Specification.GRULayerParamsH\x00\x12R\n\x12uniDirectionalLSTM\x18\xa4\x03 \x01(\x0b\x32\x33.CoreML.Specification.UniDirectionalLSTMLayerParamsH\x00\x12P\n\x11\x62iDirectionalLSTM\x18\xae\x03 \x01(\x0b\x32\x32.CoreML.Specification.BiDirectionalLSTMLayerParamsH\x00\x12:\n\x06\x63ustom\x18\xf4\x03 \x01(\x0b\x32\'.CoreML.Specification.CustomLayerParamsH\x00\x12\x36\n\x04\x63opy\x18\xd8\x04 \x01(\x0b\x32%.CoreML.Specification.CopyLayerParamsH\x00\x12:\n\x06\x62ranch\x18\xdd\x04 \x01(\x0b\x32\'.CoreML.Specification.BranchLayerParamsH\x00\x12\x36\n\x04loop\x18\xe7\x04 \x01(\x0b\x32%.CoreML.Specification.LoopLayerParamsH\x00\x12@\n\tloopBreak\x18\xec\x04 \x01(\x0b\x32*.CoreML.Specification.LoopBreakLayerParamsH\x00\x12\x46\n\x0cloopContinue\x18\xf1\x04 \x01(\x0b\x32-.CoreML.Specification.LoopContinueLayerParamsH\x00\x12\x44\n\x0brangeStatic\x18\xfb\x04 \x01(\x0b\x32,.CoreML.Specification.RangeStaticLayerParamsH\x00\x12\x46\n\x0crangeDynamic\x18\x80\x05 \x01(\x0b\x32-.CoreML.Specification.RangeDynamicLayerParamsH\x00\x12\x36\n\x04\x63lip\x18\x94\x05 \x01(\x0b\x32%.CoreML.Specification.ClipLayerParamsH\x00\x12\x36\n\x04\x63\x65il\x18\x99\x05 \x01(\x0b\x32%.CoreML.Specification.CeilLayerParamsH\x00\x12\x38\n\x05\x66loor\x18\x9e\x05 \x01(\x0b\x32&.CoreML.Specification.FloorLayerParamsH\x00\x12\x36\n\x04sign\x18\xa8\x05 \x01(\x0b\x32%.CoreML.Specification.SignLayerParamsH\x00\x12\x38\n\x05round\x18\xad\x05 \x01(\x0b\x32&.CoreML.Specification.RoundLayerParamsH\x00\x12\x36\n\x04\x65xp2\x18\xbc\x05 \x01(\x0b\x32%.CoreML.Specification.Exp2LayerParamsH\x00\x12\x34\n\x03sin\x18\xc6\x05 \x01(\x0b\x32$.CoreML.Specification.SinLayerParamsH\x00\x12\x34\n\x03\x63os\x18\xcb\x05 \x01(\x0b\x32$.CoreML.Specification.CosLayerParamsH\x00\x12\x34\n\x03tan\x18\xd0\x05 \x01(\x0b\x32$.CoreML.Specification.TanLayerParamsH\x00\x12\x36\n\x04\x61sin\x18\xda\x05 \x01(\x0b\x32%.CoreML.Specification.AsinLayerParamsH\x00\x12\x36\n\x04\x61\x63os\x18\xdf\x05 \x01(\x0b\x32%.CoreML.Specification.AcosLayerParamsH\x00\x12\x36\n\x04\x61tan\x18\xe4\x05 \x01(\x0b\x32%.CoreML.Specification.AtanLayerParamsH\x00\x12\x36\n\x04sinh\x18\xee\x05 \x01(\x0b\x32%.CoreML.Specification.SinhLayerParamsH\x00\x12\x36\n\x04\x63osh\x18\xf3\x05 \x01(\x0b\x32%.CoreML.Specification.CoshLayerParamsH\x00\x12\x36\n\x04tanh\x18\xf8\x05 \x01(\x0b\x32%.CoreML.Specification.TanhLayerParamsH\x00\x12\x38\n\x05\x61sinh\x18\x82\x06 \x01(\x0b\x32&.CoreML.Specification.AsinhLayerParamsH\x00\x12\x38\n\x05\x61\x63osh\x18\x87\x06 \x01(\x0b\x32&.CoreML.Specification.AcoshLayerParamsH\x00\x12\x38\n\x05\x61tanh\x18\x8c\x06 \x01(\x0b\x32&.CoreML.Specification.AtanhLayerParamsH\x00\x12\x34\n\x03\x65rf\x18\x96\x06 \x01(\x0b\x32$.CoreML.Specification.ErfLayerParamsH\x00\x12\x36\n\x04gelu\x18\x9b\x06 
\x01(\x0b\x32%.CoreML.Specification.GeluLayerParamsH\x00\x12\x38\n\x05\x65qual\x18\xaf\x06 \x01(\x0b\x32&.CoreML.Specification.EqualLayerParamsH\x00\x12>\n\x08notEqual\x18\xb4\x06 \x01(\x0b\x32).CoreML.Specification.NotEqualLayerParamsH\x00\x12>\n\x08lessThan\x18\xb9\x06 \x01(\x0b\x32).CoreML.Specification.LessThanLayerParamsH\x00\x12@\n\tlessEqual\x18\xbb\x06 \x01(\x0b\x32*.CoreML.Specification.LessEqualLayerParamsH\x00\x12\x44\n\x0bgreaterThan\x18\xbe\x06 \x01(\x0b\x32,.CoreML.Specification.GreaterThanLayerParamsH\x00\x12\x46\n\x0cgreaterEqual\x18\xc0\x06 \x01(\x0b\x32-.CoreML.Specification.GreaterEqualLayerParamsH\x00\x12@\n\tlogicalOr\x18\xc8\x06 \x01(\x0b\x32*.CoreML.Specification.LogicalOrLayerParamsH\x00\x12\x42\n\nlogicalXor\x18\xcd\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalXorLayerParamsH\x00\x12\x42\n\nlogicalNot\x18\xd2\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalNotLayerParamsH\x00\x12\x42\n\nlogicalAnd\x18\xd7\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalAndLayerParamsH\x00\x12N\n\x10modBroadcastable\x18\xe1\x06 \x01(\x0b\x32\x31.CoreML.Specification.ModBroadcastableLayerParamsH\x00\x12N\n\x10minBroadcastable\x18\xe6\x06 \x01(\x0b\x32\x31.CoreML.Specification.MinBroadcastableLayerParamsH\x00\x12N\n\x10maxBroadcastable\x18\xeb\x06 \x01(\x0b\x32\x31.CoreML.Specification.MaxBroadcastableLayerParamsH\x00\x12N\n\x10\x61\x64\x64\x42roadcastable\x18\xf0\x06 \x01(\x0b\x32\x31.CoreML.Specification.AddBroadcastableLayerParamsH\x00\x12N\n\x10powBroadcastable\x18\xf5\x06 \x01(\x0b\x32\x31.CoreML.Specification.PowBroadcastableLayerParamsH\x00\x12T\n\x13\x64ivideBroadcastable\x18\xfa\x06 \x01(\x0b\x32\x34.CoreML.Specification.DivideBroadcastableLayerParamsH\x00\x12X\n\x15\x66loorDivBroadcastable\x18\xff\x06 \x01(\x0b\x32\x36.CoreML.Specification.FloorDivBroadcastableLayerParamsH\x00\x12X\n\x15multiplyBroadcastable\x18\x84\x07 \x01(\x0b\x32\x36.CoreML.Specification.MultiplyBroadcastableLayerParamsH\x00\x12X\n\x15subtractBroadcastable\x18\x89\x07 \x01(\x0b\x32\x36.CoreML.Specification.SubtractBroadcastableLayerParamsH\x00\x12\x36\n\x04tile\x18\x98\x07 \x01(\x0b\x32%.CoreML.Specification.TileLayerParamsH\x00\x12\x38\n\x05stack\x18\x9d\x07 \x01(\x0b\x32&.CoreML.Specification.StackLayerParamsH\x00\x12:\n\x06gather\x18\xa2\x07 \x01(\x0b\x32\'.CoreML.Specification.GatherLayerParamsH\x00\x12<\n\x07scatter\x18\xa7\x07 \x01(\x0b\x32(.CoreML.Specification.ScatterLayerParamsH\x00\x12>\n\x08gatherND\x18\xac\x07 \x01(\x0b\x32).CoreML.Specification.GatherNDLayerParamsH\x00\x12@\n\tscatterND\x18\xb1\x07 \x01(\x0b\x32*.CoreML.Specification.ScatterNDLayerParamsH\x00\x12@\n\tsoftmaxND\x18\xb6\x07 \x01(\x0b\x32*.CoreML.Specification.SoftmaxNDLayerParamsH\x00\x12L\n\x0fgatherAlongAxis\x18\xb8\x07 \x01(\x0b\x32\x30.CoreML.Specification.GatherAlongAxisLayerParamsH\x00\x12N\n\x10scatterAlongAxis\x18\xba\x07 \x01(\x0b\x32\x31.CoreML.Specification.ScatterAlongAxisLayerParamsH\x00\x12<\n\x07reverse\x18\xc0\x07 \x01(\x0b\x32(.CoreML.Specification.ReverseLayerParamsH\x00\x12\x42\n\nreverseSeq\x18\xc5\x07 \x01(\x0b\x32+.CoreML.Specification.ReverseSeqLayerParamsH\x00\x12<\n\x07splitND\x18\xcf\x07 \x01(\x0b\x32(.CoreML.Specification.SplitNDLayerParamsH\x00\x12>\n\x08\x63oncatND\x18\xd4\x07 \x01(\x0b\x32).CoreML.Specification.ConcatNDLayerParamsH\x00\x12@\n\ttranspose\x18\xd9\x07 \x01(\x0b\x32*.CoreML.Specification.TransposeLayerParamsH\x00\x12\x44\n\x0bsliceStatic\x18\xe3\x07 \x01(\x0b\x32,.CoreML.Specification.SliceStaticLayerParamsH\x00\x12\x46\n\x0csliceDynamic\x18\xe8\x07 
\x01(\x0b\x32-.CoreML.Specification.SliceDynamicLayerParamsH\x00\x12J\n\x0eslidingWindows\x18\xed\x07 \x01(\x0b\x32/.CoreML.Specification.SlidingWindowsLayerParamsH\x00\x12\x36\n\x04topK\x18\xf7\x07 \x01(\x0b\x32%.CoreML.Specification.TopKLayerParamsH\x00\x12:\n\x06\x61rgMin\x18\xfc\x07 \x01(\x0b\x32\'.CoreML.Specification.ArgMinLayerParamsH\x00\x12:\n\x06\x61rgMax\x18\x81\x08 \x01(\x0b\x32\'.CoreML.Specification.ArgMaxLayerParamsH\x00\x12\x44\n\x0b\x65mbeddingND\x18\x90\x08 \x01(\x0b\x32,.CoreML.Specification.EmbeddingNDLayerParamsH\x00\x12H\n\rbatchedMatmul\x18\x95\x08 \x01(\x0b\x32..CoreML.Specification.BatchedMatMulLayerParamsH\x00\x12>\n\x08getShape\x18\xa9\x08 \x01(\x0b\x32).CoreML.Specification.GetShapeLayerParamsH\x00\x12J\n\x0eloadConstantND\x18\xae\x08 \x01(\x0b\x32/.CoreML.Specification.LoadConstantNDLayerParamsH\x00\x12>\n\x08\x66illLike\x18\xb8\x08 \x01(\x0b\x32).CoreML.Specification.FillLikeLayerParamsH\x00\x12\x42\n\nfillStatic\x18\xbd\x08 \x01(\x0b\x32+.CoreML.Specification.FillStaticLayerParamsH\x00\x12\x44\n\x0b\x66illDynamic\x18\xc2\x08 \x01(\x0b\x32,.CoreML.Specification.FillDynamicLayerParamsH\x00\x12L\n\x0f\x62roadcastToLike\x18\xcc\x08 \x01(\x0b\x32\x30.CoreML.Specification.BroadcastToLikeLayerParamsH\x00\x12P\n\x11\x62roadcastToStatic\x18\xd1\x08 \x01(\x0b\x32\x32.CoreML.Specification.BroadcastToStaticLayerParamsH\x00\x12R\n\x12\x62roadcastToDynamic\x18\xd6\x08 \x01(\x0b\x32\x33.CoreML.Specification.BroadcastToDynamicLayerParamsH\x00\x12<\n\x07squeeze\x18\xe0\x08 \x01(\x0b\x32(.CoreML.Specification.SqueezeLayerParamsH\x00\x12\x42\n\nexpandDims\x18\xe5\x08 \x01(\x0b\x32+.CoreML.Specification.ExpandDimsLayerParamsH\x00\x12\x44\n\x0b\x66lattenTo2D\x18\xea\x08 \x01(\x0b\x32,.CoreML.Specification.FlattenTo2DLayerParamsH\x00\x12\x44\n\x0breshapeLike\x18\xef\x08 \x01(\x0b\x32,.CoreML.Specification.ReshapeLikeLayerParamsH\x00\x12H\n\rreshapeStatic\x18\xf4\x08 \x01(\x0b\x32..CoreML.Specification.ReshapeStaticLayerParamsH\x00\x12J\n\x0ereshapeDynamic\x18\xf9\x08 \x01(\x0b\x32/.CoreML.Specification.ReshapeDynamicLayerParamsH\x00\x12X\n\x15rankPreservingReshape\x18\xfe\x08 \x01(\x0b\x32\x36.CoreML.Specification.RankPreservingReshapeLayerParamsH\x00\x12H\n\x0b\x63onstantPad\x18\x83\t \x01(\x0b\x32\x30.CoreML.Specification.ConstantPaddingLayerParamsH\x00\x12N\n\x10randomNormalLike\x18\x92\t \x01(\x0b\x32\x31.CoreML.Specification.RandomNormalLikeLayerParamsH\x00\x12R\n\x12randomNormalStatic\x18\x97\t \x01(\x0b\x32\x33.CoreML.Specification.RandomNormalStaticLayerParamsH\x00\x12T\n\x13randomNormalDynamic\x18\x9c\t \x01(\x0b\x32\x34.CoreML.Specification.RandomNormalDynamicLayerParamsH\x00\x12P\n\x11randomUniformLike\x18\xa6\t \x01(\x0b\x32\x32.CoreML.Specification.RandomUniformLikeLayerParamsH\x00\x12T\n\x13randomUniformStatic\x18\xab\t \x01(\x0b\x32\x34.CoreML.Specification.RandomUniformStaticLayerParamsH\x00\x12V\n\x14randomUniformDynamic\x18\xb0\t \x01(\x0b\x32\x35.CoreML.Specification.RandomUniformDynamicLayerParamsH\x00\x12T\n\x13randomBernoulliLike\x18\xba\t \x01(\x0b\x32\x34.CoreML.Specification.RandomBernoulliLikeLayerParamsH\x00\x12X\n\x15randomBernoulliStatic\x18\xbf\t \x01(\x0b\x32\x36.CoreML.Specification.RandomBernoulliStaticLayerParamsH\x00\x12Z\n\x16randomBernoulliDynamic\x18\xc4\t \x01(\x0b\x32\x37.CoreML.Specification.RandomBernoulliDynamicLayerParamsH\x00\x12\\\n\x17\x63\x61tegoricalDistribution\x18\xce\t \x01(\x0b\x32\x38.CoreML.Specification.CategoricalDistributionLayerParamsH\x00\x12>\n\x08reduceL1\x18\xe2\t 
\x01(\x0b\x32).CoreML.Specification.ReduceL1LayerParamsH\x00\x12>\n\x08reduceL2\x18\xe7\t \x01(\x0b\x32).CoreML.Specification.ReduceL2LayerParamsH\x00\x12@\n\treduceMax\x18\xec\t \x01(\x0b\x32*.CoreML.Specification.ReduceMaxLayerParamsH\x00\x12@\n\treduceMin\x18\xf1\t \x01(\x0b\x32*.CoreML.Specification.ReduceMinLayerParamsH\x00\x12@\n\treduceSum\x18\xf6\t \x01(\x0b\x32*.CoreML.Specification.ReduceSumLayerParamsH\x00\x12\x42\n\nreduceProd\x18\xfb\t \x01(\x0b\x32+.CoreML.Specification.ReduceProdLayerParamsH\x00\x12\x42\n\nreduceMean\x18\x80\n \x01(\x0b\x32+.CoreML.Specification.ReduceMeanLayerParamsH\x00\x12\x46\n\x0creduceLogSum\x18\x85\n \x01(\x0b\x32-.CoreML.Specification.ReduceLogSumLayerParamsH\x00\x12L\n\x0freduceSumSquare\x18\x8a\n \x01(\x0b\x32\x30.CoreML.Specification.ReduceSumSquareLayerParamsH\x00\x12L\n\x0freduceLogSumExp\x18\x8f\n \x01(\x0b\x32\x30.CoreML.Specification.ReduceLogSumExpLayerParamsH\x00\x12\x46\n\x0cwhereNonZero\x18\xa1\n \x01(\x0b\x32-.CoreML.Specification.WhereNonZeroLayerParamsH\x00\x12J\n\x0ematrixBandPart\x18\xa3\n \x01(\x0b\x32/.CoreML.Specification.MatrixBandPartLayerParamsH\x00\x12L\n\x0flowerTriangular\x18\xa8\n \x01(\x0b\x32\x30.CoreML.Specification.LowerTriangularLayerParamsH\x00\x12L\n\x0fupperTriangular\x18\xad\n \x01(\x0b\x32\x30.CoreML.Specification.UpperTriangularLayerParamsH\x00\x12R\n\x12whereBroadcastable\x18\xb2\n \x01(\x0b\x32\x33.CoreML.Specification.WhereBroadcastableLayerParamsH\x00\x12R\n\x12layerNormalization\x18\xc6\n \x01(\x0b\x32\x33.CoreML.Specification.LayerNormalizationLayerParamsH\x00\x12X\n\x15NonMaximumSuppression\x18\xf8\n \x01(\x0b\x32\x36.CoreML.Specification.NonMaximumSuppressionLayerParamsH\x00\x12:\n\x06oneHot\x18\xaa\x0b \x01(\x0b\x32\'.CoreML.Specification.OneHotLayerParamsH\x00\x12:\n\x06\x63umSum\x18\xaf\x0b \x01(\x0b\x32\'.CoreML.Specification.CumSumLayerParamsH\x00\x12\x44\n\x0b\x63lampedReLU\x18\xb4\x0b \x01(\x0b\x32,.CoreML.Specification.ClampedReLULayerParamsH\x00\x12<\n\x07\x61rgSort\x18\xb5\x0b \x01(\x0b\x32(.CoreML.Specification.ArgSortLayerParamsH\x00\x12@\n\tpooling3d\x18\xb9\x0b \x01(\x0b\x32*.CoreML.Specification.Pooling3DLayerParamsH\x00\x12L\n\x0fglobalPooling3d\x18\xba\x0b \x01(\x0b\x32\x30.CoreML.Specification.GlobalPooling3DLayerParamsH\x00\x12\x44\n\x0bsliceBySize\x18\xbe\x0b \x01(\x0b\x32,.CoreML.Specification.SliceBySizeLayerParamsH\x00\x12H\n\rconvolution3d\x18\xbf\x0b \x01(\x0b\x32..CoreML.Specification.Convolution3DLayerParamsH\x00\x42\x07\n\x05layer\"\x83\x01\n\x11\x42ranchLayerParams\x12\x35\n\x08ifBranch\x18\x01 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\x12\x37\n\nelseBranch\x18\x02 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\"\xbb\x01\n\x0fLoopLayerParams\x12\x19\n\x11maxLoopIterations\x18\x01 \x01(\x04\x12\x14\n\x0c\x63onditionVar\x18\x02 \x01(\t\x12=\n\x10\x63onditionNetwork\x18\x03 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\x12\x38\n\x0b\x62odyNetwork\x18\x04 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\"\x16\n\x14LoopBreakLayerParams\"\x19\n\x17LoopContinueLayerParams\"\x11\n\x0f\x43opyLayerParams\"\'\n\x16GreaterThanLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"(\n\x17GreaterEqualLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"$\n\x13LessThanLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"%\n\x14LessEqualLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"!\n\x10\x45qualLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"$\n\x13NotEqualLayerParams\x12\r\n\x05\x61lpha\x18\x01 
\x01(\x02\"\x17\n\x15LogicalAndLayerParams\"\x16\n\x14LogicalOrLayerParams\"\x17\n\x15LogicalXorLayerParams\"\x17\n\x15LogicalNotLayerParams\"\x8e\x01\n\rBorderAmounts\x12\x44\n\rborderAmounts\x18\n \x03(\x0b\x32-.CoreML.Specification.BorderAmounts.EdgeSizes\x1a\x37\n\tEdgeSizes\x12\x15\n\rstartEdgeSize\x18\x01 \x01(\x04\x12\x13\n\x0b\x65ndEdgeSize\x18\x02 \x01(\x04\"K\n\x0cValidPadding\x12;\n\x0epaddingAmounts\x18\x01 \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\"\x96\x01\n\x0bSamePadding\x12H\n\rasymmetryMode\x18\x01 \x01(\x0e\x32\x31.CoreML.Specification.SamePadding.SamePaddingMode\"=\n\x0fSamePaddingMode\x12\x16\n\x12\x42OTTOM_RIGHT_HEAVY\x10\x00\x12\x12\n\x0eTOP_LEFT_HEAVY\x10\x01\"\xbd\x01\n\x0cSamplingMode\x12\x41\n\x0esamplingMethod\x18\x01 \x01(\x0e\x32).CoreML.Specification.SamplingMode.Method\"j\n\x06Method\x12\x1f\n\x1bSTRICT_ALIGN_ENDPOINTS_MODE\x10\x00\x12\x18\n\x14\x41LIGN_ENDPOINTS_MODE\x10\x01\x12\x11\n\rUPSAMPLE_MODE\x10\x02\x12\x12\n\x0eROI_ALIGN_MODE\x10\x03\"\xd8\x01\n\x12\x42oxCoordinatesMode\x12\x45\n\x07\x62oxMode\x18\x01 \x01(\x0e\x32\x34.CoreML.Specification.BoxCoordinatesMode.Coordinates\"{\n\x0b\x43oordinates\x12\x18\n\x14\x43ORNERS_HEIGHT_FIRST\x10\x00\x12\x17\n\x13\x43ORNERS_WIDTH_FIRST\x10\x01\x12\x1c\n\x18\x43\x45NTER_SIZE_HEIGHT_FIRST\x10\x02\x12\x1b\n\x17\x43\x45NTER_SIZE_WIDTH_FIRST\x10\x03\"\xb5\x01\n\x0cWeightParams\x12\x12\n\nfloatValue\x18\x01 \x03(\x02\x12\x14\n\x0c\x66loat16Value\x18\x02 \x01(\x0c\x12\x10\n\x08rawValue\x18\x1e \x01(\x0c\x12\x14\n\x0cint8RawValue\x18\x1f \x01(\x0c\x12>\n\x0cquantization\x18( \x01(\x0b\x32(.CoreML.Specification.QuantizationParams\x12\x13\n\x0bisUpdatable\x18\x32 \x01(\x08\"\xe4\x01\n\x12QuantizationParams\x12\x14\n\x0cnumberOfBits\x18\x01 \x01(\x04\x12L\n\x12linearQuantization\x18\x65 \x01(\x0b\x32..CoreML.Specification.LinearQuantizationParamsH\x00\x12V\n\x17lookupTableQuantization\x18\x66 \x01(\x0b\x32\x33.CoreML.Specification.LookUpTableQuantizationParamsH\x00\x42\x12\n\x10QuantizationType\"7\n\x18LinearQuantizationParams\x12\r\n\x05scale\x18\x01 \x03(\x02\x12\x0c\n\x04\x62ias\x18\x02 \x03(\x02\"3\n\x1dLookUpTableQuantizationParams\x12\x12\n\nfloatValue\x18\x01 \x03(\x02\"\xbd\x03\n\x16\x43onvolutionLayerParams\x12\x16\n\x0eoutputChannels\x18\x01 \x01(\x04\x12\x16\n\x0ekernelChannels\x18\x02 \x01(\x04\x12\x0f\n\x07nGroups\x18\n \x01(\x04\x12\x12\n\nkernelSize\x18\x14 \x03(\x04\x12\x0e\n\x06stride\x18\x1e \x03(\x04\x12\x16\n\x0e\x64ilationFactor\x18( \x03(\x04\x12\x33\n\x05valid\x18\x32 \x01(\x0b\x32\".CoreML.Specification.ValidPaddingH\x00\x12\x31\n\x04same\x18\x33 \x01(\x0b\x32!.CoreML.Specification.SamePaddingH\x00\x12\x17\n\x0fisDeconvolution\x18< \x01(\x08\x12\x0f\n\x07hasBias\x18\x46 \x01(\x08\x12\x33\n\x07weights\x18Z \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18[ \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x13\n\x0boutputShape\x18\x64 \x03(\x04\x42\x18\n\x16\x43onvolutionPaddingType\"\xec\x05\n\x18\x43onvolution3DLayerParams\x12\x16\n\x0eoutputChannels\x18\x01 \x01(\x05\x12\x15\n\rinputChannels\x18\x02 \x01(\x05\x12\x0f\n\x07nGroups\x18\n \x01(\x05\x12\x13\n\x0bkernelDepth\x18\x14 \x01(\x05\x12\x14\n\x0ckernelHeight\x18\x15 \x01(\x05\x12\x13\n\x0bkernelWidth\x18\x16 \x01(\x05\x12\x13\n\x0bstrideDepth\x18\x1f \x01(\x05\x12\x14\n\x0cstrideHeight\x18 \x01(\x05\x12\x13\n\x0bstrideWidth\x18! 
\x01(\x05\x12\x15\n\rdilationDepth\x18( \x01(\x05\x12\x16\n\x0e\x64ilationHeight\x18) \x01(\x05\x12\x15\n\rdilationWidth\x18* \x01(\x05\x12\x0f\n\x07hasBias\x18\x32 \x01(\x08\x12\x33\n\x07weights\x18< \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18= \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12O\n\x0bpaddingType\x18\x46 \x01(\x0e\x32:.CoreML.Specification.Convolution3DLayerParams.PaddingType\x12\x1a\n\x12\x63ustomPaddingFront\x18P \x01(\x05\x12\x19\n\x11\x63ustomPaddingBack\x18Q \x01(\x05\x12\x18\n\x10\x63ustomPaddingTop\x18R \x01(\x05\x12\x1b\n\x13\x63ustomPaddingBottom\x18S \x01(\x05\x12\x19\n\x11\x63ustomPaddingLeft\x18T \x01(\x05\x12\x1a\n\x12\x63ustomPaddingRight\x18U \x01(\x05\x12\x17\n\x0fisDeconvolution\x18V \x01(\x08\x12\x13\n\x0boutputShape\x18W \x03(\x04\".\n\x0bPaddingType\x12\n\n\x06\x43USTOM\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x08\n\x04SAME\x10\x02\"\xdd\x01\n\x17InnerProductLayerParams\x12\x15\n\rinputChannels\x18\x01 \x01(\x04\x12\x16\n\x0eoutputChannels\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\n \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x1b\n\x13int8DynamicQuantize\x18\x16 \x01(\x08\"\xb8\x01\n\x14\x45mbeddingLayerParams\x12\x10\n\x08inputDim\x18\x01 \x01(\x04\x12\x16\n\x0eoutputChannels\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\n \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xba\x01\n\x16\x45mbeddingNDLayerParams\x12\x11\n\tvocabSize\x18\x01 \x01(\x04\x12\x15\n\rembeddingSize\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\x03 \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xbd\x02\n\x14\x42\x61tchnormLayerParams\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x04\x12\x16\n\x0e\x63omputeMeanVar\x18\x05 \x01(\x08\x12\x1d\n\x15instanceNormalization\x18\x06 \x01(\x08\x12\x0f\n\x07\x65psilon\x18\n \x01(\x02\x12\x31\n\x05gamma\x18\x0f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x10 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04mean\x18\x11 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x34\n\x08variance\x18\x12 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xe8\x03\n\x12PoolingLayerParams\x12\x42\n\x04type\x18\x01 \x01(\x0e\x32\x34.CoreML.Specification.PoolingLayerParams.PoolingType\x12\x12\n\nkernelSize\x18\n \x03(\x04\x12\x0e\n\x06stride\x18\x14 \x03(\x04\x12\x33\n\x05valid\x18\x1e \x01(\x0b\x32\".CoreML.Specification.ValidPaddingH\x00\x12\x31\n\x04same\x18\x1f \x01(\x0b\x32!.CoreML.Specification.SamePaddingH\x00\x12Y\n\x10includeLastPixel\x18 \x01(\x0b\x32=.CoreML.Specification.PoolingLayerParams.ValidCompletePaddingH\x00\x12\x1d\n\x15\x61vgPoolExcludePadding\x18\x32 \x01(\x08\x12\x15\n\rglobalPooling\x18< \x01(\x08\x1a.\n\x14ValidCompletePadding\x12\x16\n\x0epaddingAmounts\x18\n \x03(\x04\"+\n\x0bPoolingType\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\x12\x06\n\x02L2\x10\x02\x42\x14\n\x12PoolingPaddingType\"\xd6\x04\n\x14Pooling3DLayerParams\x12\x46\n\x04type\x18\x01 \x01(\x0e\x32\x38.CoreML.Specification.Pooling3DLayerParams.PoolingType3D\x12\x13\n\x0bkernelDepth\x18\x02 \x01(\x05\x12\x14\n\x0ckernelHeight\x18\x03 \x01(\x05\x12\x13\n\x0bkernelWidth\x18\x04 \x01(\x05\x12\x13\n\x0bstrideDepth\x18\x05 
\x01(\x05\x12\x14\n\x0cstrideHeight\x18\x06 \x01(\x05\x12\x13\n\x0bstrideWidth\x18\x07 \x01(\x05\x12T\n\x0bpaddingType\x18\x0f \x01(\x0e\x32?.CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType\x12\x1a\n\x12\x63ustomPaddingFront\x18\x08 \x01(\x05\x12\x19\n\x11\x63ustomPaddingBack\x18\t \x01(\x05\x12\x18\n\x10\x63ustomPaddingTop\x18\n \x01(\x05\x12\x1b\n\x13\x63ustomPaddingBottom\x18\x0b \x01(\x05\x12\x19\n\x11\x63ustomPaddingLeft\x18\x0c \x01(\x05\x12\x1a\n\x12\x63ustomPaddingRight\x18\r \x01(\x05\x12\x1b\n\x13\x63ountExcludePadding\x18\x0e \x01(\x08\"%\n\rPoolingType3D\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\"7\n\x14Pooling3DPaddingType\x12\n\n\x06\x43USTOM\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x08\n\x04SAME\x10\x02\"\x9d\x01\n\x1aGlobalPooling3DLayerParams\x12R\n\x04type\x18\x01 \x01(\x0e\x32\x44.CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D\"+\n\x13GlobalPoolingType3D\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\"\xa1\x03\n\x12PaddingLayerParams\x12L\n\x08\x63onstant\x18\x01 \x01(\x0b\x32\x38.CoreML.Specification.PaddingLayerParams.PaddingConstantH\x00\x12P\n\nreflection\x18\x02 \x01(\x0b\x32:.CoreML.Specification.PaddingLayerParams.PaddingReflectionH\x00\x12R\n\x0breplication\x18\x03 \x01(\x0b\x32;.CoreML.Specification.PaddingLayerParams.PaddingReplicationH\x00\x12;\n\x0epaddingAmounts\x18\n \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\x1a \n\x0fPaddingConstant\x12\r\n\x05value\x18\x01 \x01(\x02\x1a\x13\n\x11PaddingReflection\x1a\x14\n\x12PaddingReplicationB\r\n\x0bPaddingType\"+\n\x11\x43oncatLayerParams\x12\x16\n\x0esequenceConcat\x18\x64 \x01(\x08\"K\n\x0eLRNLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\x12\x11\n\tlocalSize\x18\x03 \x01(\x04\x12\t\n\x01k\x18\x04 \x01(\x02\"\x14\n\x12SoftmaxLayerParams\"$\n\x10SplitLayerParams\x12\x10\n\x08nOutputs\x18\x01 \x01(\x04\"\x1f\n\x0e\x41\x64\x64LayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"$\n\x13MultiplyLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x84\x02\n\x18UnaryFunctionLayerParams\x12\x46\n\x04type\x18\x01 \x01(\x0e\x32\x38.CoreML.Specification.UnaryFunctionLayerParams.Operation\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\x12\r\n\x05shift\x18\x04 \x01(\x02\x12\r\n\x05scale\x18\x05 \x01(\x02\"b\n\tOperation\x12\x08\n\x04SQRT\x10\x00\x12\t\n\x05RSQRT\x10\x01\x12\x0b\n\x07INVERSE\x10\x02\x12\t\n\x05POWER\x10\x03\x12\x07\n\x03\x45XP\x10\x04\x12\x07\n\x03LOG\x10\x05\x12\x07\n\x03\x41\x42S\x10\x06\x12\r\n\tTHRESHOLD\x10\x07\"\xf1\x02\n\x13UpsampleLayerParams\x12\x15\n\rscalingFactor\x18\x01 \x03(\x04\x12\x1f\n\x17\x66ractionalScalingFactor\x18\x07 \x03(\x02\x12I\n\x04mode\x18\x05 \x01(\x0e\x32;.CoreML.Specification.UpsampleLayerParams.InterpolationMode\x12X\n\x12linearUpsampleMode\x18\x06 \x01(\x0e\x32<.CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode\")\n\x11InterpolationMode\x12\x06\n\x02NN\x10\x00\x12\x0c\n\x08\x42ILINEAR\x10\x01\"R\n\x12LinearUpsampleMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x16\n\x12\x41LIGN_CORNERS_TRUE\x10\x01\x12\x17\n\x13\x41LIGN_CORNERS_FALSE\x10\x02\"a\n\x19ResizeBilinearLayerParams\x12\x12\n\ntargetSize\x18\x01 \x03(\x04\x12\x30\n\x04mode\x18\x02 \x01(\x0b\x32\".CoreML.Specification.SamplingMode\"\xd4\x01\n\x15\x43ropResizeLayerParams\x12\x12\n\ntargetSize\x18\x01 \x03(\x04\x12\x1d\n\x15normalizedCoordinates\x18\x02 \x01(\x08\x12\x30\n\x04mode\x18\x03 \x01(\x0b\x32\".CoreML.Specification.SamplingMode\x12@\n\x0e\x62oxIndicesMode\x18\x04 
\x01(\x0b\x32(.CoreML.Specification.BoxCoordinatesMode\x12\x14\n\x0cspatialScale\x18\x05 \x01(\x02\"R\n\x0f\x42iasLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x62ias\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xaf\x01\n\x10ScaleLayerParams\x12\x12\n\nshapeScale\x18\x01 \x03(\x04\x12\x31\n\x05scale\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x0f\n\x07hasBias\x18\x03 \x01(\x08\x12\x11\n\tshapeBias\x18\x04 \x03(\x04\x12\x30\n\x04\x62ias\x18\x05 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"Z\n\x17LoadConstantLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\")\n\x16L2NormalizeLayerParams\x12\x0f\n\x07\x65psilon\x18\x01 \x01(\x02\"\x8e\x01\n\x12\x46lattenLayerParams\x12\x43\n\x04mode\x18\x01 \x01(\x0e\x32\x35.CoreML.Specification.FlattenLayerParams.FlattenOrder\"3\n\x0c\x46lattenOrder\x12\x11\n\rCHANNEL_FIRST\x10\x00\x12\x10\n\x0c\x43HANNEL_LAST\x10\x01\"\xa3\x01\n\x12ReshapeLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\x12\x43\n\x04mode\x18\x02 \x01(\x0e\x32\x35.CoreML.Specification.ReshapeLayerParams.ReshapeOrder\"3\n\x0cReshapeOrder\x12\x11\n\rCHANNEL_FIRST\x10\x00\x12\x10\n\x0c\x43HANNEL_LAST\x10\x01\"\"\n\x12PermuteLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x03(\x04\"\xd1\x01\n\x19ReorganizeDataLayerParams\x12P\n\x04mode\x18\x01 \x01(\x0e\x32\x42.CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType\x12\x11\n\tblockSize\x18\x02 \x01(\x04\"O\n\x12ReorganizationType\x12\x12\n\x0eSPACE_TO_DEPTH\x10\x00\x12\x12\n\x0e\x44\x45PTH_TO_SPACE\x10\x01\x12\x11\n\rPIXEL_SHUFFLE\x10\x02\"\xc8\x01\n\x10SliceLayerParams\x12\x12\n\nstartIndex\x18\x01 \x01(\x03\x12\x10\n\x08\x65ndIndex\x18\x02 \x01(\x03\x12\x0e\n\x06stride\x18\x03 \x01(\x04\x12>\n\x04\x61xis\x18\x04 \x01(\x0e\x32\x30.CoreML.Specification.SliceLayerParams.SliceAxis\">\n\tSliceAxis\x12\x10\n\x0c\x43HANNEL_AXIS\x10\x00\x12\x0f\n\x0bHEIGHT_AXIS\x10\x01\x12\x0e\n\nWIDTH_AXIS\x10\x02\"\xd9\x02\n\x11ReduceLayerParams\x12\x45\n\x04mode\x18\x01 \x01(\x0e\x32\x37.CoreML.Specification.ReduceLayerParams.ReduceOperation\x12\x0f\n\x07\x65psilon\x18\x02 \x01(\x02\x12@\n\x04\x61xis\x18\x03 \x01(\x0e\x32\x32.CoreML.Specification.ReduceLayerParams.ReduceAxis\"v\n\x0fReduceOperation\x12\x07\n\x03SUM\x10\x00\x12\x07\n\x03\x41VG\x10\x01\x12\x08\n\x04PROD\x10\x02\x12\n\n\x06LOGSUM\x10\x03\x12\r\n\tSUMSQUARE\x10\x04\x12\x06\n\x02L1\x10\x05\x12\x06\n\x02L2\x10\x06\x12\x07\n\x03MAX\x10\x07\x12\x07\n\x03MIN\x10\x08\x12\n\n\x06\x41RGMAX\x10\t\"2\n\nReduceAxis\x12\x07\n\x03\x43HW\x10\x00\x12\x06\n\x02HW\x10\x01\x12\x05\n\x01\x43\x10\x02\x12\x05\n\x01H\x10\x03\x12\x05\n\x01W\x10\x04\"[\n\x0f\x43ropLayerParams\x12\x38\n\x0b\x63ropAmounts\x18\x01 \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\x12\x0e\n\x06offset\x18\x05 \x03(\x04\"\x14\n\x12\x41verageLayerParams\"\x10\n\x0eMaxLayerParams\"\x10\n\x0eMinLayerParams\"1\n\x15\x44otProductLayerParams\x12\x18\n\x10\x63osineSimilarity\x18\x01 \x01(\x08\"f\n MeanVarianceNormalizeLayerParams\x12\x16\n\x0e\x61\x63rossChannels\x18\x01 \x01(\x08\x12\x19\n\x11normalizeVariance\x18\x02 \x01(\x08\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\"1\n\x19SequenceRepeatLayerParams\x12\x14\n\x0cnRepetitions\x18\x01 \x01(\x04\"\xff\x02\n\x1aSimpleRecurrentLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12:\n\nactivation\x18\n \x01(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x16\n\x0esequenceOutput\x18\x0f 
\x01(\x08\x12\x15\n\rhasBiasVector\x18\x14 \x01(\x08\x12\x38\n\x0cweightMatrix\x18\x1e \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12;\n\x0frecursionMatrix\x18\x1f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x36\n\nbiasVector\x18 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xaa\x06\n\x0eGRULayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12;\n\x0b\x61\x63tivations\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x16\n\x0esequenceOutput\x18\x0f \x01(\x08\x12\x16\n\x0ehasBiasVectors\x18\x14 \x01(\x08\x12\x42\n\x16updateGateWeightMatrix\x18\x1e \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x41\n\x15resetGateWeightMatrix\x18\x1f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16outputGateWeightMatrix\x18 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19updateGateRecursionMatrix\x18\x32 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18resetGateRecursionMatrix\x18\x33 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19outputGateRecursionMatrix\x18\x34 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14updateGateBiasVector\x18\x46 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12?\n\x13resetGateBiasVector\x18G \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14outputGateBiasVector\x18H \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xaa\x01\n\nLSTMParams\x12\x16\n\x0esequenceOutput\x18\n \x01(\x08\x12\x16\n\x0ehasBiasVectors\x18\x14 \x01(\x08\x12\x12\n\nforgetBias\x18\x1e \x01(\x08\x12\x1a\n\x12hasPeepholeVectors\x18( \x01(\x08\x12!\n\x19\x63oupledInputAndForgetGate\x18\x32 \x01(\x08\x12\x19\n\x11\x63\x65llClipThreshold\x18< \x01(\x02\"\x94\x08\n\x10LSTMWeightParams\x12\x41\n\x15inputGateWeightMatrix\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16\x66orgetGateWeightMatrix\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16\x62lockInputWeightMatrix\x18\x03 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16outputGateWeightMatrix\x18\x04 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18inputGateRecursionMatrix\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19\x66orgetGateRecursionMatrix\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19\x62lockInputRecursionMatrix\x18\x16 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19outputGateRecursionMatrix\x18\x17 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12?\n\x13inputGateBiasVector\x18( \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14\x66orgetGateBiasVector\x18) \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14\x62lockInputBiasVector\x18* \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14outputGateBiasVector\x18+ \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x43\n\x17inputGatePeepholeVector\x18< \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18\x66orgetGatePeepholeVector\x18= \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18outputGatePeepholeVector\x18> \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x95\x02\n\x1dUniDirectionalLSTMLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12;\n\x0b\x61\x63tivations\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x30\n\x06params\x18\x0f \x01(\x0b\x32 
.CoreML.Specification.LSTMParams\x12<\n\x0cweightParams\x18\x14 \x01(\x0b\x32&.CoreML.Specification.LSTMWeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xd2\x02\n\x1c\x42iDirectionalLSTMLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12\x46\n\x16\x61\x63tivationsForwardLSTM\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12G\n\x17\x61\x63tivationsBackwardLSTM\x18\x0b \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x30\n\x06params\x18\x0f \x01(\x0b\x32 .CoreML.Specification.LSTMParams\x12<\n\x0cweightParams\x18\x14 \x03(\x0b\x32&.CoreML.Specification.LSTMWeightParams\"\xbe\x03\n\x11\x43ustomLayerParams\x12\x11\n\tclassName\x18\n \x01(\t\x12\x33\n\x07weights\x18\x14 \x03(\x0b\x32\".CoreML.Specification.WeightParams\x12K\n\nparameters\x18\x1e \x03(\x0b\x32\x37.CoreML.Specification.CustomLayerParams.ParametersEntry\x12\x13\n\x0b\x64\x65scription\x18( \x01(\t\x1a\x8c\x01\n\x15\x43ustomLayerParamValue\x12\x15\n\x0b\x64oubleValue\x18\n \x01(\x01H\x00\x12\x15\n\x0bstringValue\x18\x14 \x01(\tH\x00\x12\x12\n\x08intValue\x18\x1e \x01(\x05H\x00\x12\x13\n\tlongValue\x18( \x01(\x03H\x00\x12\x13\n\tboolValue\x18\x32 \x01(\x08H\x00\x42\x07\n\x05value\x1ap\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12L\n\x05value\x18\x02 \x01(\x0b\x32=.CoreML.Specification.CustomLayerParams.CustomLayerParamValue:\x02\x38\x01\"$\n\x14TransposeLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x04\"\xa0\x02\n\x18\x42\x61tchedMatMulLayerParams\x12\x12\n\ntransposeA\x18\x01 \x01(\x08\x12\x12\n\ntransposeB\x18\x02 \x01(\x08\x12\"\n\x1aweightMatrixFirstDimension\x18\x05 \x01(\x04\x12#\n\x1bweightMatrixSecondDimension\x18\x06 \x01(\x04\x12\x0f\n\x07hasBias\x18\x07 \x01(\x08\x12\x33\n\x07weights\x18\x08 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\t \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x1b\n\x13int8DynamicQuantize\x18\n \x01(\x08\"7\n\x13\x43oncatNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\ninterleave\x18\x02 \x01(\x08\"$\n\x14SoftmaxNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"(\n\x12ReverseLayerParams\x12\x12\n\nreverseDim\x18\x01 \x03(\x08\"@\n\x15ReverseSeqLayerParams\x12\x11\n\tbatchAxis\x18\x01 \x01(\x03\x12\x14\n\x0csequenceAxis\x18\x02 \x01(\x03\"\\\n\x19LoadConstantNDLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"$\n\x13\x46illLikeLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\";\n\x15\x46illStaticLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\x12\x13\n\x0btargetShape\x18\x02 \x03(\x04\"\'\n\x16\x46illDynamicLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1f\n\x1dWhereBroadcastableLayerParams\"\x10\n\x0eSinLayerParams\"\x10\n\x0e\x43osLayerParams\"\x10\n\x0eTanLayerParams\"\x11\n\x0f\x41sinLayerParams\"\x11\n\x0f\x41\x63osLayerParams\"\x11\n\x0f\x41tanLayerParams\"\x11\n\x0fSinhLayerParams\"\x11\n\x0f\x43oshLayerParams\"\x11\n\x0fTanhLayerParams\"\x12\n\x10\x41sinhLayerParams\"\x12\n\x10\x41\x63oshLayerParams\"\x12\n\x10\x41tanhLayerParams\"\x1d\n\x1bPowBroadcastableLayerParams\"\x11\n\x0f\x45xp2LayerParams\"\x19\n\x17WhereNonZeroLayerParams\"?\n\x19MatrixBandPartLayerParams\x12\x10\n\x08numLower\x18\x01 \x01(\x03\x12\x10\n\x08numUpper\x18\x02 \x01(\x03\"\'\n\x1aUpperTriangularLayerParams\x12\t\n\x01k\x18\x01 \x01(\x03\"\'\n\x1aLowerTriangularLayerParams\x12\t\n\x01k\x18\x01 
\x01(\x03\"\x1c\n\x1a\x42roadcastToLikeLayerParams\"3\n\x1c\x42roadcastToStaticLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x04\"\x1f\n\x1d\x42roadcastToDynamicLayerParams\"\x1d\n\x1b\x41\x64\x64\x42roadcastableLayerParams\"\x1d\n\x1bMaxBroadcastableLayerParams\"\x1d\n\x1bMinBroadcastableLayerParams\"\x1d\n\x1bModBroadcastableLayerParams\"\"\n FloorDivBroadcastableLayerParams\"\"\n SubtractBroadcastableLayerParams\"\"\n MultiplyBroadcastableLayerParams\" \n\x1e\x44ivideBroadcastableLayerParams\"!\n\x11GatherLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"S\n\x12ScatterLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12/\n\x04mode\x18\x02 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\"\x15\n\x13GatherNDLayerParams\"G\n\x14ScatterNDLayerParams\x12/\n\x04mode\x18\x01 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\"*\n\x1aGatherAlongAxisLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"\\\n\x1bScatterAlongAxisLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12/\n\x04mode\x18\x02 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\" \n\x10StackLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"7\n RankPreservingReshapeLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\"a\n\x1a\x43onstantPaddingLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\x12\x12\n\npadAmounts\x18\x02 \x03(\x04\x12 \n\x18padToGivenOutputSizeMode\x18\x03 \x01(\x08\"I\n\x1bRandomNormalLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\"`\n\x1dRandomNormalStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\x12\x13\n\x0boutputShape\x18\x04 \x03(\x04\"L\n\x1eRandomNormalDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\"L\n\x1cRandomUniformLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\"c\n\x1eRandomUniformStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\x12\x13\n\x0boutputShape\x18\x04 \x03(\x04\"O\n\x1fRandomUniformDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\"<\n\x1eRandomBernoulliLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\"S\n RandomBernoulliStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\x12\x13\n\x0boutputShape\x18\x03 \x03(\x04\"?\n!RandomBernoulliDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\"z\n\"CategoricalDistributionLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x12\n\nnumSamples\x18\x02 \x01(\x03\x12\x10\n\x08isLogits\x18\x03 \x01(\x08\x12\x0b\n\x03\x65ps\x18\x04 \x01(\x02\x12\x13\n\x0btemperature\x18\x05 \x01(\x02\"H\n\x13ReduceL1LayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"H\n\x13ReduceL2LayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"I\n\x14ReduceMaxLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"I\n\x14ReduceMinLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 
\x01(\x08\"I\n\x14ReduceSumLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"J\n\x15ReduceProdLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"J\n\x15ReduceMeanLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"L\n\x17ReduceLogSumLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"O\n\x1aReduceSumSquareLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"O\n\x1aReduceLogSumExpLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"%\n\x15\x45xpandDimsLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\"&\n\x16\x46lattenTo2DLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"/\n\x18ReshapeStaticLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\"\x18\n\x16ReshapeLikeLayerParams\"\x1b\n\x19ReshapeDynamicLayerParams\"6\n\x12SqueezeLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x12\n\nsqueezeAll\x18\x02 \x01(\x08\">\n\x0fTopKLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\t\n\x01K\x18\x02 \x01(\x04\x12\x12\n\nuseBottomK\x18\x03 \x01(\x08\"4\n\x11\x41rgMaxLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tremoveDim\x18\x02 \x01(\x08\"4\n\x11\x41rgMinLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tremoveDim\x18\x02 \x01(\x08\"I\n\x12SplitNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tnumSplits\x18\x02 \x01(\x04\x12\x12\n\nsplitSizes\x18\x03 \x03(\x04\"\x11\n\x0f\x43\x65ilLayerParams\"\x12\n\x10RoundLayerParams\"\x12\n\x10\x46loorLayerParams\"\x11\n\x0fSignLayerParams\"1\n\x0f\x43lipLayerParams\x12\x0e\n\x06minVal\x18\x01 \x01(\x02\x12\x0e\n\x06maxVal\x18\x02 \x01(\x02\"\x87\x01\n\x16SliceStaticLayerParams\x12\x10\n\x08\x62\x65ginIds\x18\x01 \x03(\x03\x12\x12\n\nbeginMasks\x18\x02 \x03(\x08\x12\x0e\n\x06\x65ndIds\x18\x03 \x03(\x03\x12\x10\n\x08\x65ndMasks\x18\x04 \x03(\x08\x12\x0f\n\x07strides\x18\x05 \x03(\x03\x12\x14\n\x0csqueezeMasks\x18\x06 \x03(\x08\"v\n\x17SliceDynamicLayerParams\x12\x12\n\nbeginMasks\x18\x02 \x03(\x08\x12\x0e\n\x06\x65ndIds\x18\x03 \x03(\x03\x12\x10\n\x08\x65ndMasks\x18\x04 \x03(\x08\x12\x0f\n\x07strides\x18\x05 \x03(\x03\x12\x14\n\x0csqueezeMasks\x18\x06 \x03(\x08\"\x1f\n\x0fTileLayerParams\x12\x0c\n\x04reps\x18\x01 \x03(\x04\"\x15\n\x13GetShapeLayerParams\"\x10\n\x0e\x45rfLayerParams\"\x99\x01\n\x0fGeluLayerParams\x12<\n\x04mode\x18\x01 \x01(\x0e\x32..CoreML.Specification.GeluLayerParams.GeluMode\"H\n\x08GeluMode\x12\t\n\x05\x45XACT\x10\x00\x12\x16\n\x12TANH_APPROXIMATION\x10\x01\x12\x19\n\x15SIGMOID_APPROXIMATION\x10\x02\"U\n\x16RangeStaticLayerParams\x12\x10\n\x08\x65ndValue\x18\x01 \x01(\x02\x12\x12\n\nstartValue\x18\x02 \x01(\x02\x12\x15\n\rstepSizeValue\x18\x03 \x01(\x02\"D\n\x17RangeDynamicLayerParams\x12\x12\n\nstartValue\x18\x02 \x01(\x02\x12\x15\n\rstepSizeValue\x18\x03 \x01(\x02\"K\n\x19SlidingWindowsLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\nwindowSize\x18\x02 \x01(\x04\x12\x0c\n\x04step\x18\x03 \x01(\x04\"\xaa\x01\n\x1dLayerNormalizationLayerParams\x12\x17\n\x0fnormalizedShape\x18\x01 \x03(\x03\x12\x0b\n\x03\x65ps\x18\x02 \x01(\x02\x12\x31\n\x05gamma\x18\x03 
\x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x04 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x7f\n NonMaximumSuppressionLayerParams\x12\x14\n\x0ciouThreshold\x18\x01 \x01(\x02\x12\x16\n\x0escoreThreshold\x18\x02 \x01(\x02\x12\x10\n\x08maxBoxes\x18\x03 \x01(\x04\x12\x1b\n\x13perClassSuppression\x18\x04 \x01(\x08\"5\n\x16\x43lampedReLULayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"6\n\x12\x41rgSortLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\ndescending\x18\x02 \x01(\x08\"4\n\x16SliceBySizeLayerParams\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x03\"\xc5\x04\n\x17NeuralNetworkClassifier\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\"\n\x19labelProbabilityLayerName\x18\xc8\x01 \x01(\tB\r\n\x0b\x43lassLabels\"^\n\x11OneHotLayerParams\x12\x18\n\x10oneHotVectorSize\x18\x01 \x01(\x04\x12\x0c\n\x04\x61xis\x18\x02 \x01(\x03\x12\x0f\n\x07onValue\x18\x03 \x01(\x02\x12\x10\n\x08offValue\x18\x04 \x01(\x02\"K\n\x11\x43umSumLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x17\n\x0f\x65xcludeFinalSum\x18\x02 \x01(\x08\x12\x0f\n\x07reverse\x18\x03 \x01(\x08\"\x91\x03\n\x16NeuralNetworkRegressor\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\"\xa2\x02\n\x17NetworkUpdateParameters\x12\x33\n\nlossLayers\x18\x01 \x03(\x0b\x32\x1f.CoreML.Specification.LossLayer\x12\x32\n\toptimizer\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.Optimizer\x12\x34\n\x06\x65pochs\x18\x03 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x34\n\x07shuffle\x18\n \x01(\x0b\x32#.CoreML.Specification.BoolParameter\x12\x32\n\x04seed\x18\x14 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\"\xe4\x01\n\tLossLayer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x62\n categoricalCrossEntropyLossLayer\x18\n \x01(\x0b\x32\x36.CoreML.Specification.CategoricalCrossEntropyLossLayerH\x00\x12T\n\x19meanSquaredErrorLossLayer\x18\x0b \x01(\x0b\x32/.CoreML.Specification.MeanSquaredErrorLossLayerH\x00\x42\x0f\n\rLossLayerType\"A\n CategoricalCrossEntropyLossLayer\x12\r\n\x05input\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\":\n\x19MeanSquaredErrorLossLayer\x12\r\n\x05input\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"\x96\x01\n\tOptimizer\x12:\n\x0csgdOptimizer\x18\n \x01(\x0b\x32\".CoreML.Specification.SGDOptimizerH\x00\x12<\n\radamOptimizer\x18\x0b 
\x01(\x0b\x32#.CoreML.Specification.AdamOptimizerH\x00\x42\x0f\n\rOptimizerType\"\xc1\x01\n\x0cSGDOptimizer\x12;\n\x0clearningRate\x18\x01 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12;\n\rminiBatchSize\x18\x02 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x37\n\x08momentum\x18\x03 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\"\xa9\x02\n\rAdamOptimizer\x12;\n\x0clearningRate\x18\x01 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12;\n\rminiBatchSize\x18\x02 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x34\n\x05\x62\x65ta1\x18\x03 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12\x34\n\x05\x62\x65ta2\x18\x04 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12\x32\n\x03\x65ps\x18\x05 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter*W\n#NeuralNetworkMultiArrayShapeMapping\x12\x17\n\x13RANK5_ARRAY_MAPPING\x10\x00\x12\x17\n\x13\x45XACT_ARRAY_MAPPING\x10\x01*R\n\x1eNeuralNetworkImageShapeMapping\x12\x17\n\x13RANK5_IMAGE_MAPPING\x10\x00\x12\x17\n\x13RANK4_IMAGE_MAPPING\x10\x01*\x87\x01\n\x0bScatterMode\x12\x12\n\x0eSCATTER_UPDATE\x10\x00\x12\x0f\n\x0bSCATTER_ADD\x10\x01\x12\x0f\n\x0bSCATTER_SUB\x10\x02\x12\x0f\n\x0bSCATTER_MUL\x10\x03\x12\x0f\n\x0bSCATTER_DIV\x10\x04\x12\x0f\n\x0bSCATTER_MAX\x10\x05\x12\x0f\n\x0bSCATTER_MIN\x10\x06\x42\x02H\x03P\x00P\x01\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,]) + +_NEURALNETWORKMULTIARRAYSHAPEMAPPING = _descriptor.EnumDescriptor( + name='NeuralNetworkMultiArrayShapeMapping', + full_name='CoreML.Specification.NeuralNetworkMultiArrayShapeMapping', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='RANK5_ARRAY_MAPPING', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXACT_ARRAY_MAPPING', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33746, + serialized_end=33833, +) +_sym_db.RegisterEnumDescriptor(_NEURALNETWORKMULTIARRAYSHAPEMAPPING) + +NeuralNetworkMultiArrayShapeMapping = enum_type_wrapper.EnumTypeWrapper(_NEURALNETWORKMULTIARRAYSHAPEMAPPING) +_NEURALNETWORKIMAGESHAPEMAPPING = _descriptor.EnumDescriptor( + name='NeuralNetworkImageShapeMapping', + full_name='CoreML.Specification.NeuralNetworkImageShapeMapping', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='RANK5_IMAGE_MAPPING', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RANK4_IMAGE_MAPPING', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33835, + serialized_end=33917, +) +_sym_db.RegisterEnumDescriptor(_NEURALNETWORKIMAGESHAPEMAPPING) + +NeuralNetworkImageShapeMapping = enum_type_wrapper.EnumTypeWrapper(_NEURALNETWORKIMAGESHAPEMAPPING) +_SCATTERMODE = _descriptor.EnumDescriptor( + name='ScatterMode', + full_name='CoreML.Specification.ScatterMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SCATTER_UPDATE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_ADD', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_SUB', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MUL', index=3, number=3, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='SCATTER_DIV', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MAX', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MIN', index=6, number=6, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33920, + serialized_end=34055, +) +_sym_db.RegisterEnumDescriptor(_SCATTERMODE) + +ScatterMode = enum_type_wrapper.EnumTypeWrapper(_SCATTERMODE) +RANK5_ARRAY_MAPPING = 0 +EXACT_ARRAY_MAPPING = 1 +RANK5_IMAGE_MAPPING = 0 +RANK4_IMAGE_MAPPING = 1 +SCATTER_UPDATE = 0 +SCATTER_ADD = 1 +SCATTER_SUB = 2 +SCATTER_MUL = 3 +SCATTER_DIV = 4 +SCATTER_MAX = 5 +SCATTER_MIN = 6 + + +_SAMEPADDING_SAMEPADDINGMODE = _descriptor.EnumDescriptor( + name='SamePaddingMode', + full_name='CoreML.Specification.SamePadding.SamePaddingMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='BOTTOM_RIGHT_HEAVY', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TOP_LEFT_HEAVY', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14347, + serialized_end=14408, +) +_sym_db.RegisterEnumDescriptor(_SAMEPADDING_SAMEPADDINGMODE) + +_SAMPLINGMODE_METHOD = _descriptor.EnumDescriptor( + name='Method', + full_name='CoreML.Specification.SamplingMode.Method', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STRICT_ALIGN_ENDPOINTS_MODE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_ENDPOINTS_MODE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UPSAMPLE_MODE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ROI_ALIGN_MODE', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14494, + serialized_end=14600, +) +_sym_db.RegisterEnumDescriptor(_SAMPLINGMODE_METHOD) + +_BOXCOORDINATESMODE_COORDINATES = _descriptor.EnumDescriptor( + name='Coordinates', + full_name='CoreML.Specification.BoxCoordinatesMode.Coordinates', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CORNERS_HEIGHT_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CORNERS_WIDTH_FIRST', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CENTER_SIZE_HEIGHT_FIRST', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CENTER_SIZE_WIDTH_FIRST', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14696, + serialized_end=14819, +) +_sym_db.RegisterEnumDescriptor(_BOXCOORDINATESMODE_COORDINATES) + +_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE = _descriptor.EnumDescriptor( + name='PaddingType', + full_name='CoreML.Specification.Convolution3DLayerParams.PaddingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CUSTOM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VALID', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SAME', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=16497, + serialized_end=16543, +) 
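+# Each nested enum in this generated module follows the same two-step
+# pattern: construct an EnumDescriptor, then register it with the default
+# symbol database so it can be resolved by its full name at runtime. A
+# minimal usage sketch (the import path is an assumption; the message and
+# enum names come from the descriptors in this file, and nested enum
+# values are exposed as attributes of the containing message class):
+#
+#   from coremltools.proto import NeuralNetwork_pb2 as nn_pb2
+#   conv = nn_pb2.Convolution3DLayerParams()
+#   conv.paddingType = nn_pb2.Convolution3DLayerParams.SAME  # numeric value 2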
+_sym_db.RegisterEnumDescriptor(_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE) + +_POOLINGLAYERPARAMS_POOLINGTYPE = _descriptor.EnumDescriptor( + name='PoolingType', + full_name='CoreML.Specification.PoolingLayerParams.PoolingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=17889, + serialized_end=17932, +) +_sym_db.RegisterEnumDescriptor(_POOLINGLAYERPARAMS_POOLINGTYPE) + +_POOLING3DLAYERPARAMS_POOLINGTYPE3D = _descriptor.EnumDescriptor( + name='PoolingType3D', + full_name='CoreML.Specification.Pooling3DLayerParams.PoolingType3D', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18461, + serialized_end=18498, +) +_sym_db.RegisterEnumDescriptor(_POOLING3DLAYERPARAMS_POOLINGTYPE3D) + +_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE = _descriptor.EnumDescriptor( + name='Pooling3DPaddingType', + full_name='CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CUSTOM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VALID', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SAME', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18500, + serialized_end=18555, +) +_sym_db.RegisterEnumDescriptor(_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE) + +_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D = _descriptor.EnumDescriptor( + name='GlobalPoolingType3D', + full_name='CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18672, + serialized_end=18715, +) +_sym_db.RegisterEnumDescriptor(_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D) + +_UNARYFUNCTIONLAYERPARAMS_OPERATION = _descriptor.EnumDescriptor( + name='Operation', + full_name='CoreML.Specification.UnaryFunctionLayerParams.Operation', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SQRT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RSQRT', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INVERSE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='POWER', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXP', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LOG', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ABS', index=6, number=6, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='THRESHOLD', index=7, number=7, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19553, + serialized_end=19651, +) +_sym_db.RegisterEnumDescriptor(_UNARYFUNCTIONLAYERPARAMS_OPERATION) + +_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE = _descriptor.EnumDescriptor( + name='InterpolationMode', + full_name='CoreML.Specification.UpsampleLayerParams.InterpolationMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BILINEAR', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19898, + serialized_end=19939, +) +_sym_db.RegisterEnumDescriptor(_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE) + +_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE = _descriptor.EnumDescriptor( + name='LinearUpsampleMode', + full_name='CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='DEFAULT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_CORNERS_TRUE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_CORNERS_FALSE', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19941, + serialized_end=20023, +) +_sym_db.RegisterEnumDescriptor(_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE) + +_FLATTENLAYERPARAMS_FLATTENORDER = _descriptor.EnumDescriptor( + name='FlattenOrder', + full_name='CoreML.Specification.FlattenLayerParams.FlattenOrder', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CHANNEL_LAST', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=20828, + serialized_end=20879, +) +_sym_db.RegisterEnumDescriptor(_FLATTENLAYERPARAMS_FLATTENORDER) + +_RESHAPELAYERPARAMS_RESHAPEORDER = _descriptor.EnumDescriptor( + name='ReshapeOrder', + full_name='CoreML.Specification.ReshapeLayerParams.ReshapeOrder', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CHANNEL_LAST', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=20994, + serialized_end=21045, +) +_sym_db.RegisterEnumDescriptor(_RESHAPELAYERPARAMS_RESHAPEORDER) + +_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE = _descriptor.EnumDescriptor( + name='ReorganizationType', + full_name='CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SPACE_TO_DEPTH', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DEPTH_TO_SPACE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PIXEL_SHUFFLE', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21214, + serialized_end=21293, +) +_sym_db.RegisterEnumDescriptor(_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE) + +_SLICELAYERPARAMS_SLICEAXIS = _descriptor.EnumDescriptor( + 
name='SliceAxis', + full_name='CoreML.Specification.SliceLayerParams.SliceAxis', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_AXIS', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HEIGHT_AXIS', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='WIDTH_AXIS', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21434, + serialized_end=21496, +) +_sym_db.RegisterEnumDescriptor(_SLICELAYERPARAMS_SLICEAXIS) + +_REDUCELAYERPARAMS_REDUCEOPERATION = _descriptor.EnumDescriptor( + name='ReduceOperation', + full_name='CoreML.Specification.ReduceLayerParams.ReduceOperation', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SUM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVG', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PROD', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LOGSUM', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SUMSQUARE', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L1', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MAX', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MIN', index=8, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ARGMAX', index=9, number=9, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21674, + serialized_end=21792, +) +_sym_db.RegisterEnumDescriptor(_REDUCELAYERPARAMS_REDUCEOPERATION) + +_REDUCELAYERPARAMS_REDUCEAXIS = _descriptor.EnumDescriptor( + name='ReduceAxis', + full_name='CoreML.Specification.ReduceLayerParams.ReduceAxis', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHW', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HW', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='C', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='H', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='W', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21794, + serialized_end=21844, +) +_sym_db.RegisterEnumDescriptor(_REDUCELAYERPARAMS_REDUCEAXIS) + +_GELULAYERPARAMS_GELUMODE = _descriptor.EnumDescriptor( + name='GeluMode', + full_name='CoreML.Specification.GeluLayerParams.GeluMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='EXACT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TANH_APPROXIMATION', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SIGMOID_APPROXIMATION', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=30510, + serialized_end=30582, +) +_sym_db.RegisterEnumDescriptor(_GELULAYERPARAMS_GELUMODE) + + +_NEURALNETWORK = _descriptor.Descriptor( + name='NeuralNetwork', 
+ full_name='CoreML.Specification.NeuralNetwork', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetwork.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetwork.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetwork.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetwork.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetwork.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=86, + serialized_end=478, +) + + +_NEURALNETWORKIMAGESCALER = _descriptor.Descriptor( + name='NeuralNetworkImageScaler', + full_name='CoreML.Specification.NeuralNetworkImageScaler', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='channelScale', full_name='CoreML.Specification.NeuralNetworkImageScaler.channelScale', index=0, + number=10, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blueBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.blueBias', index=1, + number=20, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greenBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.greenBias', index=2, + number=21, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='redBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.redBias', index=3, + number=22, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='grayBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.grayBias', index=4, + number=30, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=480, + serialized_end=600, +) + + +_NEURALNETWORKMEANIMAGE = _descriptor.Descriptor( + name='NeuralNetworkMeanImage', + full_name='CoreML.Specification.NeuralNetworkMeanImage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='meanImage', full_name='CoreML.Specification.NeuralNetworkMeanImage.meanImage', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=602, + serialized_end=645, +) + + +_NEURALNETWORKPREPROCESSING = _descriptor.Descriptor( + name='NeuralNetworkPreprocessing', + full_name='CoreML.Specification.NeuralNetworkPreprocessing', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureName', full_name='CoreML.Specification.NeuralNetworkPreprocessing.featureName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaler', full_name='CoreML.Specification.NeuralNetworkPreprocessing.scaler', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanImage', full_name='CoreML.Specification.NeuralNetworkPreprocessing.meanImage', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='preprocessor', full_name='CoreML.Specification.NeuralNetworkPreprocessing.preprocessor', + index=0, containing_type=None, fields=[]), + ], + serialized_start=648, + serialized_end=846, +) + + +_ACTIVATIONRELU = _descriptor.Descriptor( + name='ActivationReLU', + full_name='CoreML.Specification.ActivationReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=848, + serialized_end=864, +) + + +_ACTIVATIONLEAKYRELU = _descriptor.Descriptor( + name='ActivationLeakyReLU', + 
full_name='CoreML.Specification.ActivationLeakyReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationLeakyReLU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=866, + serialized_end=902, +) + + +_ACTIVATIONTANH = _descriptor.Descriptor( + name='ActivationTanh', + full_name='CoreML.Specification.ActivationTanh', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=904, + serialized_end=920, +) + + +_ACTIVATIONSCALEDTANH = _descriptor.Descriptor( + name='ActivationScaledTanh', + full_name='CoreML.Specification.ActivationScaledTanh', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationScaledTanh.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationScaledTanh.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=922, + serialized_end=973, +) + + +_ACTIVATIONSIGMOID = _descriptor.Descriptor( + name='ActivationSigmoid', + full_name='CoreML.Specification.ActivationSigmoid', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=975, + serialized_end=994, +) + + +_ACTIVATIONLINEAR = _descriptor.Descriptor( + name='ActivationLinear', + full_name='CoreML.Specification.ActivationLinear', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationLinear.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationLinear.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + 
oneofs=[ + ], + serialized_start=996, + serialized_end=1043, +) + + +_ACTIVATIONSIGMOIDHARD = _descriptor.Descriptor( + name='ActivationSigmoidHard', + full_name='CoreML.Specification.ActivationSigmoidHard', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationSigmoidHard.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationSigmoidHard.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1045, + serialized_end=1097, +) + + +_ACTIVATIONPRELU = _descriptor.Descriptor( + name='ActivationPReLU', + full_name='CoreML.Specification.ActivationPReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationPReLU.alpha', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1099, + serialized_end=1167, +) + + +_ACTIVATIONELU = _descriptor.Descriptor( + name='ActivationELU', + full_name='CoreML.Specification.ActivationELU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationELU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1169, + serialized_end=1199, +) + + +_ACTIVATIONTHRESHOLDEDRELU = _descriptor.Descriptor( + name='ActivationThresholdedReLU', + full_name='CoreML.Specification.ActivationThresholdedReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationThresholdedReLU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1201, + serialized_end=1243, +) + + +_ACTIVATIONSOFTSIGN = _descriptor.Descriptor( + name='ActivationSoftsign', + full_name='CoreML.Specification.ActivationSoftsign', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1245, + serialized_end=1265, +) + + +_ACTIVATIONSOFTPLUS = _descriptor.Descriptor( + name='ActivationSoftplus', + full_name='CoreML.Specification.ActivationSoftplus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1267, + serialized_end=1287, +) + + +_ACTIVATIONPARAMETRICSOFTPLUS = _descriptor.Descriptor( + name='ActivationParametricSoftplus', + full_name='CoreML.Specification.ActivationParametricSoftplus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationParametricSoftplus.alpha', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationParametricSoftplus.beta', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1290, + serialized_end=1421, +) + + +_ACTIVATIONPARAMS = _descriptor.Descriptor( + name='ActivationParams', + full_name='CoreML.Specification.ActivationParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linear', full_name='CoreML.Specification.ActivationParams.linear', index=0, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ReLU', full_name='CoreML.Specification.ActivationParams.ReLU', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='leakyReLU', full_name='CoreML.Specification.ActivationParams.leakyReLU', index=2, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='thresholdedReLU', full_name='CoreML.Specification.ActivationParams.thresholdedReLU', index=3, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='PReLU', full_name='CoreML.Specification.ActivationParams.PReLU', index=4, + number=25, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tanh', full_name='CoreML.Specification.ActivationParams.tanh', index=5, + number=30, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaledTanh', full_name='CoreML.Specification.ActivationParams.scaledTanh', index=6, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoid', full_name='CoreML.Specification.ActivationParams.sigmoid', index=7, + number=40, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoidHard', full_name='CoreML.Specification.ActivationParams.sigmoidHard', index=8, + number=41, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ELU', full_name='CoreML.Specification.ActivationParams.ELU', index=9, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softsign', full_name='CoreML.Specification.ActivationParams.softsign', index=10, + number=60, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softplus', full_name='CoreML.Specification.ActivationParams.softplus', index=11, + number=70, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parametricSoftplus', full_name='CoreML.Specification.ActivationParams.parametricSoftplus', index=12, + number=71, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='NonlinearityType', full_name='CoreML.Specification.ActivationParams.NonlinearityType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1424, + serialized_end=2276, +) + + +_TENSOR = _descriptor.Descriptor( + name='Tensor', + full_name='CoreML.Specification.Tensor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rank', full_name='CoreML.Specification.Tensor.rank', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dimValue', full_name='CoreML.Specification.Tensor.dimValue', index=1, + number=2, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2278, + serialized_end=2318, +) + + +_NEURALNETWORKLAYER = _descriptor.Descriptor( + name='NeuralNetworkLayer', + full_name='CoreML.Specification.NeuralNetworkLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.NeuralNetworkLayer.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.NeuralNetworkLayer.input', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.NeuralNetworkLayer.output', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputTensor', full_name='CoreML.Specification.NeuralNetworkLayer.inputTensor', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputTensor', full_name='CoreML.Specification.NeuralNetworkLayer.outputTensor', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isUpdatable', full_name='CoreML.Specification.NeuralNetworkLayer.isUpdatable', index=5, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='convolution', full_name='CoreML.Specification.NeuralNetworkLayer.convolution', index=6, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pooling', full_name='CoreML.Specification.NeuralNetworkLayer.pooling', index=7, + number=120, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activation', 
full_name='CoreML.Specification.NeuralNetworkLayer.activation', index=8, + number=130, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='innerProduct', full_name='CoreML.Specification.NeuralNetworkLayer.innerProduct', index=9, + number=140, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='embedding', full_name='CoreML.Specification.NeuralNetworkLayer.embedding', index=10, + number=150, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='batchnorm', full_name='CoreML.Specification.NeuralNetworkLayer.batchnorm', index=11, + number=160, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mvn', full_name='CoreML.Specification.NeuralNetworkLayer.mvn', index=12, + number=165, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='l2normalize', full_name='CoreML.Specification.NeuralNetworkLayer.l2normalize', index=13, + number=170, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softmax', full_name='CoreML.Specification.NeuralNetworkLayer.softmax', index=14, + number=175, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lrn', full_name='CoreML.Specification.NeuralNetworkLayer.lrn', index=15, + number=180, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='crop', full_name='CoreML.Specification.NeuralNetworkLayer.crop', index=16, + number=190, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padding', full_name='CoreML.Specification.NeuralNetworkLayer.padding', index=17, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upsample', full_name='CoreML.Specification.NeuralNetworkLayer.upsample', index=18, + number=210, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='resizeBilinear', full_name='CoreML.Specification.NeuralNetworkLayer.resizeBilinear', index=19, + number=211, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cropResize', full_name='CoreML.Specification.NeuralNetworkLayer.cropResize', index=20, + number=212, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unary', full_name='CoreML.Specification.NeuralNetworkLayer.unary', index=21, + number=220, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='add', full_name='CoreML.Specification.NeuralNetworkLayer.add', index=22, + number=230, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiply', full_name='CoreML.Specification.NeuralNetworkLayer.multiply', index=23, + number=231, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='average', full_name='CoreML.Specification.NeuralNetworkLayer.average', index=24, + number=240, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.NeuralNetworkLayer.scale', index=25, + number=245, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.NeuralNetworkLayer.bias', index=26, + number=250, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max', full_name='CoreML.Specification.NeuralNetworkLayer.max', index=27, + number=260, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min', full_name='CoreML.Specification.NeuralNetworkLayer.min', index=28, + number=261, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dot', full_name='CoreML.Specification.NeuralNetworkLayer.dot', index=29, + number=270, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduce', full_name='CoreML.Specification.NeuralNetworkLayer.reduce', index=30, + number=280, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loadConstant', full_name='CoreML.Specification.NeuralNetworkLayer.loadConstant', index=31, + number=290, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reshape', full_name='CoreML.Specification.NeuralNetworkLayer.reshape', index=32, + number=300, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='flatten', full_name='CoreML.Specification.NeuralNetworkLayer.flatten', index=33, + number=301, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='permute', full_name='CoreML.Specification.NeuralNetworkLayer.permute', index=34, + number=310, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='concat', full_name='CoreML.Specification.NeuralNetworkLayer.concat', index=35, + number=320, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='split', full_name='CoreML.Specification.NeuralNetworkLayer.split', index=36, + number=330, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceRepeat', full_name='CoreML.Specification.NeuralNetworkLayer.sequenceRepeat', index=37, + number=340, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reorganizeData', full_name='CoreML.Specification.NeuralNetworkLayer.reorganizeData', index=38, + number=345, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='slice', full_name='CoreML.Specification.NeuralNetworkLayer.slice', index=39, + number=350, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='simpleRecurrent', 
full_name='CoreML.Specification.NeuralNetworkLayer.simpleRecurrent', index=40, + number=400, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gru', full_name='CoreML.Specification.NeuralNetworkLayer.gru', index=41, + number=410, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='uniDirectionalLSTM', full_name='CoreML.Specification.NeuralNetworkLayer.uniDirectionalLSTM', index=42, + number=420, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='biDirectionalLSTM', full_name='CoreML.Specification.NeuralNetworkLayer.biDirectionalLSTM', index=43, + number=430, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='custom', full_name='CoreML.Specification.NeuralNetworkLayer.custom', index=44, + number=500, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='copy', full_name='CoreML.Specification.NeuralNetworkLayer.copy', index=45, + number=600, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branch', full_name='CoreML.Specification.NeuralNetworkLayer.branch', index=46, + number=605, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loop', full_name='CoreML.Specification.NeuralNetworkLayer.loop', index=47, + number=615, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loopBreak', full_name='CoreML.Specification.NeuralNetworkLayer.loopBreak', index=48, + number=620, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loopContinue', full_name='CoreML.Specification.NeuralNetworkLayer.loopContinue', index=49, + number=625, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rangeStatic', full_name='CoreML.Specification.NeuralNetworkLayer.rangeStatic', index=50, + number=635, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rangeDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.rangeDynamic', index=51, + number=640, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clip', full_name='CoreML.Specification.NeuralNetworkLayer.clip', index=52, + number=660, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ceil', full_name='CoreML.Specification.NeuralNetworkLayer.ceil', index=53, + number=665, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floor', full_name='CoreML.Specification.NeuralNetworkLayer.floor', index=54, + number=670, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sign', full_name='CoreML.Specification.NeuralNetworkLayer.sign', index=55, + number=680, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='round', full_name='CoreML.Specification.NeuralNetworkLayer.round', index=56, + number=685, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='exp2', full_name='CoreML.Specification.NeuralNetworkLayer.exp2', index=57, + number=700, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sin', full_name='CoreML.Specification.NeuralNetworkLayer.sin', index=58, + number=710, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cos', full_name='CoreML.Specification.NeuralNetworkLayer.cos', index=59, + number=715, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tan', full_name='CoreML.Specification.NeuralNetworkLayer.tan', index=60, + number=720, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='asin', full_name='CoreML.Specification.NeuralNetworkLayer.asin', index=61, + number=730, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='acos', full_name='CoreML.Specification.NeuralNetworkLayer.acos', index=62, + number=735, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='atan', full_name='CoreML.Specification.NeuralNetworkLayer.atan', index=63, + number=740, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sinh', full_name='CoreML.Specification.NeuralNetworkLayer.sinh', index=64, + number=750, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cosh', full_name='CoreML.Specification.NeuralNetworkLayer.cosh', index=65, + number=755, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tanh', full_name='CoreML.Specification.NeuralNetworkLayer.tanh', index=66, + number=760, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='asinh', full_name='CoreML.Specification.NeuralNetworkLayer.asinh', index=67, + number=770, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='acosh', full_name='CoreML.Specification.NeuralNetworkLayer.acosh', index=68, + number=775, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='atanh', full_name='CoreML.Specification.NeuralNetworkLayer.atanh', index=69, + number=780, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='erf', full_name='CoreML.Specification.NeuralNetworkLayer.erf', index=70, + number=790, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gelu', full_name='CoreML.Specification.NeuralNetworkLayer.gelu', index=71, + number=795, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='equal', full_name='CoreML.Specification.NeuralNetworkLayer.equal', index=72, + 
number=815, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notEqual', full_name='CoreML.Specification.NeuralNetworkLayer.notEqual', index=73, + number=820, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lessThan', full_name='CoreML.Specification.NeuralNetworkLayer.lessThan', index=74, + number=825, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lessEqual', full_name='CoreML.Specification.NeuralNetworkLayer.lessEqual', index=75, + number=827, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greaterThan', full_name='CoreML.Specification.NeuralNetworkLayer.greaterThan', index=76, + number=830, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greaterEqual', full_name='CoreML.Specification.NeuralNetworkLayer.greaterEqual', index=77, + number=832, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalOr', full_name='CoreML.Specification.NeuralNetworkLayer.logicalOr', index=78, + number=840, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalXor', full_name='CoreML.Specification.NeuralNetworkLayer.logicalXor', index=79, + number=845, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalNot', full_name='CoreML.Specification.NeuralNetworkLayer.logicalNot', index=80, + number=850, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalAnd', full_name='CoreML.Specification.NeuralNetworkLayer.logicalAnd', index=81, + number=855, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.modBroadcastable', index=82, + number=865, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.minBroadcastable', index=83, + number=870, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.maxBroadcastable', index=84, + number=875, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='addBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.addBroadcastable', index=85, + number=880, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='powBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.powBroadcastable', index=86, + number=885, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='divideBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.divideBroadcastable', index=87, + number=890, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floorDivBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.floorDivBroadcastable', index=88, + number=895, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiplyBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.multiplyBroadcastable', index=89, + number=900, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='subtractBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.subtractBroadcastable', index=90, + number=905, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tile', full_name='CoreML.Specification.NeuralNetworkLayer.tile', index=91, + number=920, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stack', full_name='CoreML.Specification.NeuralNetworkLayer.stack', index=92, + number=925, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
+    _descriptor.FieldDescriptor(
+      name='gather', full_name='CoreML.Specification.NeuralNetworkLayer.gather', index=93,
+      number=930, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='scatter', full_name='CoreML.Specification.NeuralNetworkLayer.scatter', index=94,
+      number=935, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='gatherND', full_name='CoreML.Specification.NeuralNetworkLayer.gatherND', index=95,
+      number=940, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='scatterND', full_name='CoreML.Specification.NeuralNetworkLayer.scatterND', index=96,
+      number=945, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='softmaxND', full_name='CoreML.Specification.NeuralNetworkLayer.softmaxND', index=97,
+      number=950, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='gatherAlongAxis', full_name='CoreML.Specification.NeuralNetworkLayer.gatherAlongAxis', index=98,
+      number=952, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='scatterAlongAxis', full_name='CoreML.Specification.NeuralNetworkLayer.scatterAlongAxis', index=99,
+      number=954, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reverse', full_name='CoreML.Specification.NeuralNetworkLayer.reverse', index=100,
+      number=960, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reverseSeq', full_name='CoreML.Specification.NeuralNetworkLayer.reverseSeq', index=101,
+      number=965, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='splitND', full_name='CoreML.Specification.NeuralNetworkLayer.splitND', index=102,
+      number=975, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='concatND', full_name='CoreML.Specification.NeuralNetworkLayer.concatND', index=103,
+      number=980, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='transpose', full_name='CoreML.Specification.NeuralNetworkLayer.transpose', index=104,
+      number=985, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sliceStatic', full_name='CoreML.Specification.NeuralNetworkLayer.sliceStatic', index=105,
+      number=995, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sliceDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.sliceDynamic', index=106,
+      number=1000, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='slidingWindows', full_name='CoreML.Specification.NeuralNetworkLayer.slidingWindows', index=107,
+      number=1005, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='topK', full_name='CoreML.Specification.NeuralNetworkLayer.topK', index=108,
+      number=1015, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='argMin', full_name='CoreML.Specification.NeuralNetworkLayer.argMin', index=109,
+      number=1020, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='argMax', full_name='CoreML.Specification.NeuralNetworkLayer.argMax', index=110,
+      number=1025, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='embeddingND', full_name='CoreML.Specification.NeuralNetworkLayer.embeddingND', index=111,
+      number=1040, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='batchedMatmul', full_name='CoreML.Specification.NeuralNetworkLayer.batchedMatmul', index=112,
+      number=1045, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='getShape', full_name='CoreML.Specification.NeuralNetworkLayer.getShape', index=113,
+      number=1065, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='loadConstantND', full_name='CoreML.Specification.NeuralNetworkLayer.loadConstantND', index=114,
+      number=1070, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fillLike', full_name='CoreML.Specification.NeuralNetworkLayer.fillLike', index=115,
+      number=1080, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fillStatic', full_name='CoreML.Specification.NeuralNetworkLayer.fillStatic', index=116,
+      number=1085, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fillDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.fillDynamic', index=117,
+      number=1090, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='broadcastToLike', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToLike', index=118,
+      number=1100, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='broadcastToStatic', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToStatic', index=119,
+      number=1105, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='broadcastToDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToDynamic', index=120,
+      number=1110, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='squeeze', full_name='CoreML.Specification.NeuralNetworkLayer.squeeze', index=121,
+      number=1120, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='expandDims', full_name='CoreML.Specification.NeuralNetworkLayer.expandDims', index=122,
+      number=1125, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='flattenTo2D', full_name='CoreML.Specification.NeuralNetworkLayer.flattenTo2D', index=123,
+      number=1130, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reshapeLike', full_name='CoreML.Specification.NeuralNetworkLayer.reshapeLike', index=124,
+      number=1135, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reshapeStatic', full_name='CoreML.Specification.NeuralNetworkLayer.reshapeStatic', index=125,
+      number=1140, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reshapeDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.reshapeDynamic', index=126,
+      number=1145, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='rankPreservingReshape', full_name='CoreML.Specification.NeuralNetworkLayer.rankPreservingReshape', index=127,
+      number=1150, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='constantPad', full_name='CoreML.Specification.NeuralNetworkLayer.constantPad', index=128,
+      number=1155, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomNormalLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalLike', index=129,
+      number=1170, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomNormalStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalStatic', index=130,
+      number=1175, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomNormalDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalDynamic', index=131,
+      number=1180, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomUniformLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformLike', index=132,
+      number=1190, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomUniformStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformStatic', index=133,
+      number=1195, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomUniformDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformDynamic', index=134,
+      number=1200, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomBernoulliLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliLike', index=135,
+      number=1210, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomBernoulliStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliStatic', index=136,
+      number=1215, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='randomBernoulliDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliDynamic', index=137,
+      number=1220, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='categoricalDistribution', full_name='CoreML.Specification.NeuralNetworkLayer.categoricalDistribution', index=138,
+      number=1230, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceL1', full_name='CoreML.Specification.NeuralNetworkLayer.reduceL1', index=139,
+      number=1250, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceL2', full_name='CoreML.Specification.NeuralNetworkLayer.reduceL2', index=140,
+      number=1255, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceMax', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMax', index=141,
+      number=1260, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceMin', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMin', index=142,
+      number=1265, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceSum', full_name='CoreML.Specification.NeuralNetworkLayer.reduceSum', index=143,
+      number=1270, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceProd', full_name='CoreML.Specification.NeuralNetworkLayer.reduceProd', index=144,
+      number=1275, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceMean', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMean', index=145,
+      number=1280, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceLogSum', full_name='CoreML.Specification.NeuralNetworkLayer.reduceLogSum', index=146,
+      number=1285, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceSumSquare', full_name='CoreML.Specification.NeuralNetworkLayer.reduceSumSquare', index=147,
+      number=1290, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reduceLogSumExp', full_name='CoreML.Specification.NeuralNetworkLayer.reduceLogSumExp', index=148,
+      number=1295, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='whereNonZero', full_name='CoreML.Specification.NeuralNetworkLayer.whereNonZero', index=149,
+      number=1313, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='matrixBandPart', full_name='CoreML.Specification.NeuralNetworkLayer.matrixBandPart', index=150,
+      number=1315, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='lowerTriangular', full_name='CoreML.Specification.NeuralNetworkLayer.lowerTriangular', index=151,
+      number=1320, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='upperTriangular', full_name='CoreML.Specification.NeuralNetworkLayer.upperTriangular', index=152,
+      number=1325, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='whereBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.whereBroadcastable', index=153,
+      number=1330, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='layerNormalization', full_name='CoreML.Specification.NeuralNetworkLayer.layerNormalization', index=154,
+      number=1350, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='NonMaximumSuppression', full_name='CoreML.Specification.NeuralNetworkLayer.NonMaximumSuppression', index=155,
+      number=1400, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='oneHot', full_name='CoreML.Specification.NeuralNetworkLayer.oneHot', index=156,
+      number=1450, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='cumSum', full_name='CoreML.Specification.NeuralNetworkLayer.cumSum', index=157,
+      number=1455, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='clampedReLU', full_name='CoreML.Specification.NeuralNetworkLayer.clampedReLU', index=158,
+      number=1460, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='argSort', full_name='CoreML.Specification.NeuralNetworkLayer.argSort', index=159,
+      number=1461, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='pooling3d', full_name='CoreML.Specification.NeuralNetworkLayer.pooling3d', index=160,
+      number=1465, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='globalPooling3d', full_name='CoreML.Specification.NeuralNetworkLayer.globalPooling3d', index=161,
+      number=1466, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sliceBySize', full_name='CoreML.Specification.NeuralNetworkLayer.sliceBySize', index=162,
+      number=1470, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='convolution3d', full_name='CoreML.Specification.NeuralNetworkLayer.convolution3d', index=163,
+      number=1471, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='layer', full_name='CoreML.Specification.NeuralNetworkLayer.layer',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=2321,
+  serialized_end=13307,
+)
+
+
+_BRANCHLAYERPARAMS = _descriptor.Descriptor(
+  name='BranchLayerParams',
+  full_name='CoreML.Specification.BranchLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ifBranch', full_name='CoreML.Specification.BranchLayerParams.ifBranch', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='elseBranch', full_name='CoreML.Specification.BranchLayerParams.elseBranch', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13310,
+  serialized_end=13441,
+)
+
+
+_LOOPLAYERPARAMS = _descriptor.Descriptor(
+  name='LoopLayerParams',
+  full_name='CoreML.Specification.LoopLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='maxLoopIterations', full_name='CoreML.Specification.LoopLayerParams.maxLoopIterations', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='conditionVar', full_name='CoreML.Specification.LoopLayerParams.conditionVar', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='conditionNetwork', full_name='CoreML.Specification.LoopLayerParams.conditionNetwork', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bodyNetwork', full_name='CoreML.Specification.LoopLayerParams.bodyNetwork', index=3,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13444,
+  serialized_end=13631,
+)
+
+
+_LOOPBREAKLAYERPARAMS = _descriptor.Descriptor(
+  name='LoopBreakLayerParams',
+  full_name='CoreML.Specification.LoopBreakLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13633,
+  serialized_end=13655,
+)
+
+
+_LOOPCONTINUELAYERPARAMS = _descriptor.Descriptor(
+  name='LoopContinueLayerParams',
+  full_name='CoreML.Specification.LoopContinueLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13657,
+  serialized_end=13682,
+)
+
+
+_COPYLAYERPARAMS = _descriptor.Descriptor(
+  name='CopyLayerParams',
+  full_name='CoreML.Specification.CopyLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13684,
+  serialized_end=13701,
+)
+
+
+_GREATERTHANLAYERPARAMS = _descriptor.Descriptor(
+  name='GreaterThanLayerParams',
+  full_name='CoreML.Specification.GreaterThanLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.GreaterThanLayerParams.alpha', index=0,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13703,
+  serialized_end=13742,
+)
+
+
+_GREATEREQUALLAYERPARAMS = _descriptor.Descriptor(
+  name='GreaterEqualLayerParams',
+  full_name='CoreML.Specification.GreaterEqualLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.GreaterEqualLayerParams.alpha', index=0,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13744,
+  serialized_end=13784,
+)
+
+
+_LESSTHANLAYERPARAMS = _descriptor.Descriptor(
+  name='LessThanLayerParams',
+  full_name='CoreML.Specification.LessThanLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.LessThanLayerParams.alpha', index=0,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13786,
+  serialized_end=13822,
+)
+
+
+_LESSEQUALLAYERPARAMS = _descriptor.Descriptor(
+  name='LessEqualLayerParams',
+  full_name='CoreML.Specification.LessEqualLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.LessEqualLayerParams.alpha', index=0,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13824,
+  serialized_end=13861,
+)
+
+
+_EQUALLAYERPARAMS = _descriptor.Descriptor(
+  name='EqualLayerParams',
+  full_name='CoreML.Specification.EqualLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.EqualLayerParams.alpha', index=0,
+      number=1, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13863,
+  serialized_end=13896,
+)
+
+
+_NOTEQUALLAYERPARAMS = _descriptor.Descriptor(
+  name='NotEqualLayerParams',
+  full_name='CoreML.Specification.NotEqualLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='alpha', full_name='CoreML.Specification.NotEqualLayerParams.alpha', index=0,
+      number=1, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13898,
+  serialized_end=13934,
+)
+
+
+_LOGICALANDLAYERPARAMS = _descriptor.Descriptor(
+  name='LogicalAndLayerParams',
+  full_name='CoreML.Specification.LogicalAndLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13936,
+  serialized_end=13959,
+)
+
+
+_LOGICALORLAYERPARAMS = _descriptor.Descriptor(
+  name='LogicalOrLayerParams',
+  full_name='CoreML.Specification.LogicalOrLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13961,
+  serialized_end=13983,
+)
+
+
+_LOGICALXORLAYERPARAMS = _descriptor.Descriptor(
+  name='LogicalXorLayerParams',
+  full_name='CoreML.Specification.LogicalXorLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=13985,
+  serialized_end=14008,
+)
+
+
+_LOGICALNOTLAYERPARAMS = _descriptor.Descriptor(
+  name='LogicalNotLayerParams',
+  full_name='CoreML.Specification.LogicalNotLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14010,
+  serialized_end=14033,
+)
+
+
+_BORDERAMOUNTS_EDGESIZES = _descriptor.Descriptor(
+  name='EdgeSizes',
+  full_name='CoreML.Specification.BorderAmounts.EdgeSizes',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='startEdgeSize', full_name='CoreML.Specification.BorderAmounts.EdgeSizes.startEdgeSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='endEdgeSize', full_name='CoreML.Specification.BorderAmounts.EdgeSizes.endEdgeSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14123,
+  serialized_end=14178,
+)
+
+_BORDERAMOUNTS = _descriptor.Descriptor(
+  name='BorderAmounts',
+  full_name='CoreML.Specification.BorderAmounts',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='borderAmounts', full_name='CoreML.Specification.BorderAmounts.borderAmounts', index=0,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_BORDERAMOUNTS_EDGESIZES, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14036,
+  serialized_end=14178,
+)
+
+
+_VALIDPADDING = _descriptor.Descriptor(
+  name='ValidPadding',
+  full_name='CoreML.Specification.ValidPadding',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='paddingAmounts', full_name='CoreML.Specification.ValidPadding.paddingAmounts', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14180,
+  serialized_end=14255,
+)
+
+
+_SAMEPADDING = _descriptor.Descriptor(
+  name='SamePadding',
+  full_name='CoreML.Specification.SamePadding',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='asymmetryMode', full_name='CoreML.Specification.SamePadding.asymmetryMode', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _SAMEPADDING_SAMEPADDINGMODE,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14258,
+  serialized_end=14408,
+)
+
+
+_SAMPLINGMODE = _descriptor.Descriptor(
+  name='SamplingMode',
+  full_name='CoreML.Specification.SamplingMode',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='samplingMethod', full_name='CoreML.Specification.SamplingMode.samplingMethod', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _SAMPLINGMODE_METHOD,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14411,
+  serialized_end=14600,
+)
+
+
+_BOXCOORDINATESMODE = _descriptor.Descriptor(
+  name='BoxCoordinatesMode',
+  full_name='CoreML.Specification.BoxCoordinatesMode',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='boxMode', full_name='CoreML.Specification.BoxCoordinatesMode.boxMode', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _BOXCOORDINATESMODE_COORDINATES,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14603,
+  serialized_end=14819,
+)
+
+
+_WEIGHTPARAMS = _descriptor.Descriptor(
+  name='WeightParams',
+  full_name='CoreML.Specification.WeightParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='floatValue', full_name='CoreML.Specification.WeightParams.floatValue', index=0,
+      number=1, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='float16Value', full_name='CoreML.Specification.WeightParams.float16Value', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='rawValue', full_name='CoreML.Specification.WeightParams.rawValue', index=2,
+      number=30, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='int8RawValue', full_name='CoreML.Specification.WeightParams.int8RawValue', index=3,
+      number=31, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='quantization', full_name='CoreML.Specification.WeightParams.quantization', index=4,
+      number=40, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='isUpdatable', full_name='CoreML.Specification.WeightParams.isUpdatable', index=5,
+      number=50, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=14822,
+  serialized_end=15003,
+)
+
+
+_QUANTIZATIONPARAMS = _descriptor.Descriptor(
+  name='QuantizationParams',
+  full_name='CoreML.Specification.QuantizationParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='numberOfBits', full_name='CoreML.Specification.QuantizationParams.numberOfBits', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='linearQuantization', full_name='CoreML.Specification.QuantizationParams.linearQuantization', index=1,
+      number=101, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='lookupTableQuantization', full_name='CoreML.Specification.QuantizationParams.lookupTableQuantization', index=2,
+      number=102, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='QuantizationType', full_name='CoreML.Specification.QuantizationParams.QuantizationType',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=15006,
+  serialized_end=15234,
+)
+
+
+_LINEARQUANTIZATIONPARAMS = _descriptor.Descriptor(
+  name='LinearQuantizationParams',
+  full_name='CoreML.Specification.LinearQuantizationParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='scale', full_name='CoreML.Specification.LinearQuantizationParams.scale', index=0,
+      number=1, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.LinearQuantizationParams.bias', index=1,
+      number=2, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=15236,
+  serialized_end=15291,
+)
+
+
+_LOOKUPTABLEQUANTIZATIONPARAMS = _descriptor.Descriptor(
+  name='LookUpTableQuantizationParams',
+  full_name='CoreML.Specification.LookUpTableQuantizationParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='floatValue', full_name='CoreML.Specification.LookUpTableQuantizationParams.floatValue', index=0,
+      number=1, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=15293,
+  serialized_end=15344,
+)
+
+
+_CONVOLUTIONLAYERPARAMS = _descriptor.Descriptor(
+  name='ConvolutionLayerParams',
+  full_name='CoreML.Specification.ConvolutionLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='outputChannels', full_name='CoreML.Specification.ConvolutionLayerParams.outputChannels', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelChannels', full_name='CoreML.Specification.ConvolutionLayerParams.kernelChannels', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='nGroups', full_name='CoreML.Specification.ConvolutionLayerParams.nGroups', index=2,
+      number=10, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelSize', full_name='CoreML.Specification.ConvolutionLayerParams.kernelSize', index=3,
+      number=20, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='stride', full_name='CoreML.Specification.ConvolutionLayerParams.stride', index=4,
+      number=30, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dilationFactor', full_name='CoreML.Specification.ConvolutionLayerParams.dilationFactor', index=5,
+      number=40, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='valid', full_name='CoreML.Specification.ConvolutionLayerParams.valid', index=6,
+      number=50, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='same', full_name='CoreML.Specification.ConvolutionLayerParams.same', index=7,
+      number=51, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='isDeconvolution', full_name='CoreML.Specification.ConvolutionLayerParams.isDeconvolution', index=8,
+      number=60, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.ConvolutionLayerParams.hasBias', index=9,
+      number=70, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.ConvolutionLayerParams.weights', index=10,
+      number=90, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.ConvolutionLayerParams.bias', index=11,
+      number=91, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputShape', full_name='CoreML.Specification.ConvolutionLayerParams.outputShape', index=12,
+      number=100, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='ConvolutionPaddingType', full_name='CoreML.Specification.ConvolutionLayerParams.ConvolutionPaddingType',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=15347,
+  serialized_end=15792,
+)
+
+
+_CONVOLUTION3DLAYERPARAMS = _descriptor.Descriptor(
+  name='Convolution3DLayerParams',
+  full_name='CoreML.Specification.Convolution3DLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='outputChannels', full_name='CoreML.Specification.Convolution3DLayerParams.outputChannels', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='inputChannels', full_name='CoreML.Specification.Convolution3DLayerParams.inputChannels', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='nGroups', full_name='CoreML.Specification.Convolution3DLayerParams.nGroups', index=2,
+      number=10, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelDepth', full_name='CoreML.Specification.Convolution3DLayerParams.kernelDepth', index=3,
+      number=20, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelHeight', full_name='CoreML.Specification.Convolution3DLayerParams.kernelHeight', index=4,
+      number=21, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelWidth', full_name='CoreML.Specification.Convolution3DLayerParams.kernelWidth', index=5,
+      number=22, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='strideDepth', full_name='CoreML.Specification.Convolution3DLayerParams.strideDepth', index=6,
+      number=31, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='strideHeight', full_name='CoreML.Specification.Convolution3DLayerParams.strideHeight', index=7,
+      number=32, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='strideWidth', full_name='CoreML.Specification.Convolution3DLayerParams.strideWidth', index=8,
+      number=33, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dilationDepth', full_name='CoreML.Specification.Convolution3DLayerParams.dilationDepth', index=9,
+      number=40, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dilationHeight', full_name='CoreML.Specification.Convolution3DLayerParams.dilationHeight', index=10,
+      number=41, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dilationWidth', full_name='CoreML.Specification.Convolution3DLayerParams.dilationWidth', index=11,
+      number=42, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.Convolution3DLayerParams.hasBias', index=12,
+      number=50, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.Convolution3DLayerParams.weights', index=13,
+      number=60, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.Convolution3DLayerParams.bias', index=14,
+      number=61, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='paddingType', full_name='CoreML.Specification.Convolution3DLayerParams.paddingType', index=15,
+      number=70, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingFront', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingFront', index=16,
+      number=80, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingBack', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingBack', index=17,
+      number=81, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingTop', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingTop', index=18,
+      number=82, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingBottom', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingBottom', index=19,
+      number=83, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingLeft', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingLeft', index=20,
+      number=84, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='customPaddingRight', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingRight', index=21,
+      number=85, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='isDeconvolution', full_name='CoreML.Specification.Convolution3DLayerParams.isDeconvolution', index=22,
+      number=86, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputShape', full_name='CoreML.Specification.Convolution3DLayerParams.outputShape', index=23,
+      number=87, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _CONVOLUTION3DLAYERPARAMS_PADDINGTYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=15795,
+  serialized_end=16543,
+)
+
+
+_INNERPRODUCTLAYERPARAMS = _descriptor.Descriptor(
+  name='InnerProductLayerParams',
+  full_name='CoreML.Specification.InnerProductLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputChannels', full_name='CoreML.Specification.InnerProductLayerParams.inputChannels', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputChannels', full_name='CoreML.Specification.InnerProductLayerParams.outputChannels', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.InnerProductLayerParams.hasBias', index=2,
+      number=10, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.InnerProductLayerParams.weights', index=3,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.InnerProductLayerParams.bias', index=4,
+      number=21, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='int8DynamicQuantize', full_name='CoreML.Specification.InnerProductLayerParams.int8DynamicQuantize', index=5,
+      number=22, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=16546,
+  serialized_end=16767,
+)
+
+
+_EMBEDDINGLAYERPARAMS = _descriptor.Descriptor(
+  name='EmbeddingLayerParams',
+  full_name='CoreML.Specification.EmbeddingLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputDim', full_name='CoreML.Specification.EmbeddingLayerParams.inputDim', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputChannels', full_name='CoreML.Specification.EmbeddingLayerParams.outputChannels', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.EmbeddingLayerParams.hasBias', index=2,
+      number=10, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.EmbeddingLayerParams.weights', index=3,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.EmbeddingLayerParams.bias', index=4,
+      number=21, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=16770,
+  serialized_end=16954,
+)
+
+
+_EMBEDDINGNDLAYERPARAMS = _descriptor.Descriptor(
+  name='EmbeddingNDLayerParams',
+  full_name='CoreML.Specification.EmbeddingNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='vocabSize', full_name='CoreML.Specification.EmbeddingNDLayerParams.vocabSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='embeddingSize', full_name='CoreML.Specification.EmbeddingNDLayerParams.embeddingSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.EmbeddingNDLayerParams.hasBias', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.EmbeddingNDLayerParams.weights', index=3,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.EmbeddingNDLayerParams.bias', index=4,
+      number=21, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=16957,
+  serialized_end=17143,
+)
+
+
+_BATCHNORMLAYERPARAMS = _descriptor.Descriptor(
+  name='BatchnormLayerParams',
+  full_name='CoreML.Specification.BatchnormLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='channels', full_name='CoreML.Specification.BatchnormLayerParams.channels', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='computeMeanVar', full_name='CoreML.Specification.BatchnormLayerParams.computeMeanVar', index=1,
+      number=5, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='instanceNormalization', full_name='CoreML.Specification.BatchnormLayerParams.instanceNormalization', index=2,
+      number=6, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='epsilon', full_name='CoreML.Specification.BatchnormLayerParams.epsilon', index=3,
+      number=10, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='gamma', full_name='CoreML.Specification.BatchnormLayerParams.gamma', index=4,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='beta', full_name='CoreML.Specification.BatchnormLayerParams.beta', index=5,
+      number=16, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='mean', full_name='CoreML.Specification.BatchnormLayerParams.mean', index=6,
+      number=17, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='variance', full_name='CoreML.Specification.BatchnormLayerParams.variance', index=7,
+      number=18, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=17146,
+  serialized_end=17463,
+)
+
+
+_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING = _descriptor.Descriptor(
+  name='ValidCompletePadding',
+  full_name='CoreML.Specification.PoolingLayerParams.ValidCompletePadding',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='paddingAmounts', full_name='CoreML.Specification.PoolingLayerParams.ValidCompletePadding.paddingAmounts', index=0,
+      number=10, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=17841,
+  serialized_end=17887,
+)
+
+_POOLINGLAYERPARAMS = _descriptor.Descriptor(
+  name='PoolingLayerParams',
+  full_name='CoreML.Specification.PoolingLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='CoreML.Specification.PoolingLayerParams.type', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelSize', full_name='CoreML.Specification.PoolingLayerParams.kernelSize', index=1,
+      number=10, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='stride', full_name='CoreML.Specification.PoolingLayerParams.stride', index=2,
+      number=20, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='valid', full_name='CoreML.Specification.PoolingLayerParams.valid', index=3,
+      number=30, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='same', full_name='CoreML.Specification.PoolingLayerParams.same', index=4,
+      number=31, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='includeLastPixel', full_name='CoreML.Specification.PoolingLayerParams.includeLastPixel', index=5,
+      number=32, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='avgPoolExcludePadding', full_name='CoreML.Specification.PoolingLayerParams.avgPoolExcludePadding', index=6,
+      number=50, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='globalPooling', full_name='CoreML.Specification.PoolingLayerParams.globalPooling', index=7,
+      number=60, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING, ],
+  enum_types=[
+    _POOLINGLAYERPARAMS_POOLINGTYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='PoolingPaddingType', full_name='CoreML.Specification.PoolingLayerParams.PoolingPaddingType',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=17466,
+  serialized_end=17954,
+)
+
+
+_POOLING3DLAYERPARAMS = _descriptor.Descriptor(
+  name='Pooling3DLayerParams',
+  full_name='CoreML.Specification.Pooling3DLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='CoreML.Specification.Pooling3DLayerParams.type', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernelDepth', full_name='CoreML.Specification.Pooling3DLayerParams.kernelDepth', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None,
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelHeight', full_name='CoreML.Specification.Pooling3DLayerParams.kernelHeight', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelWidth', full_name='CoreML.Specification.Pooling3DLayerParams.kernelWidth', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideDepth', full_name='CoreML.Specification.Pooling3DLayerParams.strideDepth', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideHeight', full_name='CoreML.Specification.Pooling3DLayerParams.strideHeight', index=5, + number=6, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideWidth', full_name='CoreML.Specification.Pooling3DLayerParams.strideWidth', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='paddingType', full_name='CoreML.Specification.Pooling3DLayerParams.paddingType', index=7, + number=15, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingFront', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingFront', index=8, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBack', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingBack', index=9, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingTop', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingTop', index=10, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBottom', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingBottom', index=11, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='customPaddingLeft', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingLeft', index=12, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingRight', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingRight', index=13, + number=13, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='countExcludePadding', full_name='CoreML.Specification.Pooling3DLayerParams.countExcludePadding', index=14, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _POOLING3DLAYERPARAMS_POOLINGTYPE3D, + _POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=17957, + serialized_end=18555, +) + + +_GLOBALPOOLING3DLAYERPARAMS = _descriptor.Descriptor( + name='GlobalPooling3DLayerParams', + full_name='CoreML.Specification.GlobalPooling3DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.GlobalPooling3DLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=18558, + serialized_end=18715, +) + + +_PADDINGLAYERPARAMS_PADDINGCONSTANT = _descriptor.Descriptor( + name='PaddingConstant', + full_name='CoreML.Specification.PaddingLayerParams.PaddingConstant', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.PaddingLayerParams.PaddingConstant.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19045, + serialized_end=19077, +) + +_PADDINGLAYERPARAMS_PADDINGREFLECTION = _descriptor.Descriptor( + name='PaddingReflection', + full_name='CoreML.Specification.PaddingLayerParams.PaddingReflection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19079, + serialized_end=19098, +) + +_PADDINGLAYERPARAMS_PADDINGREPLICATION = _descriptor.Descriptor( + name='PaddingReplication', + 
full_name='CoreML.Specification.PaddingLayerParams.PaddingReplication', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19100, + serialized_end=19120, +) + +_PADDINGLAYERPARAMS = _descriptor.Descriptor( + name='PaddingLayerParams', + full_name='CoreML.Specification.PaddingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='constant', full_name='CoreML.Specification.PaddingLayerParams.constant', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reflection', full_name='CoreML.Specification.PaddingLayerParams.reflection', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replication', full_name='CoreML.Specification.PaddingLayerParams.replication', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='paddingAmounts', full_name='CoreML.Specification.PaddingLayerParams.paddingAmounts', index=3, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_PADDINGLAYERPARAMS_PADDINGCONSTANT, _PADDINGLAYERPARAMS_PADDINGREFLECTION, _PADDINGLAYERPARAMS_PADDINGREPLICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='PaddingType', full_name='CoreML.Specification.PaddingLayerParams.PaddingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=18718, + serialized_end=19135, +) + + +_CONCATLAYERPARAMS = _descriptor.Descriptor( + name='ConcatLayerParams', + full_name='CoreML.Specification.ConcatLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sequenceConcat', full_name='CoreML.Specification.ConcatLayerParams.sequenceConcat', index=0, + number=100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19137, + serialized_end=19180, +) + + +_LRNLAYERPARAMS = _descriptor.Descriptor( + name='LRNLayerParams', + full_name='CoreML.Specification.LRNLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.LRNLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, 
default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.LRNLayerParams.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='localSize', full_name='CoreML.Specification.LRNLayerParams.localSize', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='k', full_name='CoreML.Specification.LRNLayerParams.k', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19182, + serialized_end=19257, +) + + +_SOFTMAXLAYERPARAMS = _descriptor.Descriptor( + name='SoftmaxLayerParams', + full_name='CoreML.Specification.SoftmaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19259, + serialized_end=19279, +) + + +_SPLITLAYERPARAMS = _descriptor.Descriptor( + name='SplitLayerParams', + full_name='CoreML.Specification.SplitLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nOutputs', full_name='CoreML.Specification.SplitLayerParams.nOutputs', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19281, + serialized_end=19317, +) + + +_ADDLAYERPARAMS = _descriptor.Descriptor( + name='AddLayerParams', + full_name='CoreML.Specification.AddLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.AddLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19319, + serialized_end=19350, +) + + +_MULTIPLYLAYERPARAMS = _descriptor.Descriptor( + name='MultiplyLayerParams', + full_name='CoreML.Specification.MultiplyLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.MultiplyLayerParams.alpha', index=0, + number=1, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19352, + serialized_end=19388, +) + + +_UNARYFUNCTIONLAYERPARAMS = _descriptor.Descriptor( + name='UnaryFunctionLayerParams', + full_name='CoreML.Specification.UnaryFunctionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.UnaryFunctionLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.UnaryFunctionLayerParams.alpha', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.UnaryFunctionLayerParams.epsilon', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shift', full_name='CoreML.Specification.UnaryFunctionLayerParams.shift', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.UnaryFunctionLayerParams.scale', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _UNARYFUNCTIONLAYERPARAMS_OPERATION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19391, + serialized_end=19651, +) + + +_UPSAMPLELAYERPARAMS = _descriptor.Descriptor( + name='UpsampleLayerParams', + full_name='CoreML.Specification.UpsampleLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scalingFactor', full_name='CoreML.Specification.UpsampleLayerParams.scalingFactor', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fractionalScalingFactor', full_name='CoreML.Specification.UpsampleLayerParams.fractionalScalingFactor', index=1, + number=7, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.UpsampleLayerParams.mode', 
index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linearUpsampleMode', full_name='CoreML.Specification.UpsampleLayerParams.linearUpsampleMode', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _UPSAMPLELAYERPARAMS_INTERPOLATIONMODE, + _UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19654, + serialized_end=20023, +) + + +_RESIZEBILINEARLAYERPARAMS = _descriptor.Descriptor( + name='ResizeBilinearLayerParams', + full_name='CoreML.Specification.ResizeBilinearLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetSize', full_name='CoreML.Specification.ResizeBilinearLayerParams.targetSize', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ResizeBilinearLayerParams.mode', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20025, + serialized_end=20122, +) + + +_CROPRESIZELAYERPARAMS = _descriptor.Descriptor( + name='CropResizeLayerParams', + full_name='CoreML.Specification.CropResizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetSize', full_name='CoreML.Specification.CropResizeLayerParams.targetSize', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='normalizedCoordinates', full_name='CoreML.Specification.CropResizeLayerParams.normalizedCoordinates', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.CropResizeLayerParams.mode', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boxIndicesMode', full_name='CoreML.Specification.CropResizeLayerParams.boxIndicesMode', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='spatialScale', full_name='CoreML.Specification.CropResizeLayerParams.spatialScale', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20125, + serialized_end=20337, +) + + +_BIASLAYERPARAMS = _descriptor.Descriptor( + name='BiasLayerParams', + full_name='CoreML.Specification.BiasLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.BiasLayerParams.shape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.BiasLayerParams.bias', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20339, + serialized_end=20421, +) + + +_SCALELAYERPARAMS = _descriptor.Descriptor( + name='ScaleLayerParams', + full_name='CoreML.Specification.ScaleLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shapeScale', full_name='CoreML.Specification.ScaleLayerParams.shapeScale', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.ScaleLayerParams.scale', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.ScaleLayerParams.hasBias', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shapeBias', full_name='CoreML.Specification.ScaleLayerParams.shapeBias', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.ScaleLayerParams.bias', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=20424, + serialized_end=20599, +) + + +_LOADCONSTANTLAYERPARAMS = _descriptor.Descriptor( + name='LoadConstantLayerParams', + full_name='CoreML.Specification.LoadConstantLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.LoadConstantLayerParams.shape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='data', full_name='CoreML.Specification.LoadConstantLayerParams.data', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20601, + serialized_end=20691, +) + + +_L2NORMALIZELAYERPARAMS = _descriptor.Descriptor( + name='L2NormalizeLayerParams', + full_name='CoreML.Specification.L2NormalizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.L2NormalizeLayerParams.epsilon', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20693, + serialized_end=20734, +) + + +_FLATTENLAYERPARAMS = _descriptor.Descriptor( + name='FlattenLayerParams', + full_name='CoreML.Specification.FlattenLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.FlattenLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FLATTENLAYERPARAMS_FLATTENORDER, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20737, + serialized_end=20879, +) + + +_RESHAPELAYERPARAMS = _descriptor.Descriptor( + name='ReshapeLayerParams', + full_name='CoreML.Specification.ReshapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.ReshapeLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReshapeLayerParams.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _RESHAPELAYERPARAMS_RESHAPEORDER, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20882, + serialized_end=21045, +) + + +_PERMUTELAYERPARAMS = _descriptor.Descriptor( + name='PermuteLayerParams', + full_name='CoreML.Specification.PermuteLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.PermuteLayerParams.axis', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21047, + serialized_end=21081, +) + + +_REORGANIZEDATALAYERPARAMS = _descriptor.Descriptor( + name='ReorganizeDataLayerParams', + full_name='CoreML.Specification.ReorganizeDataLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReorganizeDataLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blockSize', full_name='CoreML.Specification.ReorganizeDataLayerParams.blockSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21084, + serialized_end=21293, +) + + +_SLICELAYERPARAMS = _descriptor.Descriptor( + name='SliceLayerParams', + full_name='CoreML.Specification.SliceLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='startIndex', full_name='CoreML.Specification.SliceLayerParams.startIndex', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIndex', full_name='CoreML.Specification.SliceLayerParams.endIndex', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stride', full_name='CoreML.Specification.SliceLayerParams.stride', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SliceLayerParams.axis', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SLICELAYERPARAMS_SLICEAXIS, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21296, + serialized_end=21496, +) + + +_REDUCELAYERPARAMS = _descriptor.Descriptor( + name='ReduceLayerParams', + full_name='CoreML.Specification.ReduceLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReduceLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.ReduceLayerParams.epsilon', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ReduceLayerParams.axis', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _REDUCELAYERPARAMS_REDUCEOPERATION, + _REDUCELAYERPARAMS_REDUCEAXIS, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21499, + serialized_end=21844, +) + + +_CROPLAYERPARAMS = _descriptor.Descriptor( + name='CropLayerParams', + full_name='CoreML.Specification.CropLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cropAmounts', full_name='CoreML.Specification.CropLayerParams.cropAmounts', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.CropLayerParams.offset', index=1, + number=5, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21846, + serialized_end=21937, +) + + +_AVERAGELAYERPARAMS = _descriptor.Descriptor( + name='AverageLayerParams', + full_name='CoreML.Specification.AverageLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21939, + serialized_end=21959, +) + + +_MAXLAYERPARAMS = _descriptor.Descriptor( + name='MaxLayerParams', + full_name='CoreML.Specification.MaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ 
+ ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21961, + serialized_end=21977, +) + + +_MINLAYERPARAMS = _descriptor.Descriptor( + name='MinLayerParams', + full_name='CoreML.Specification.MinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21979, + serialized_end=21995, +) + + +_DOTPRODUCTLAYERPARAMS = _descriptor.Descriptor( + name='DotProductLayerParams', + full_name='CoreML.Specification.DotProductLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cosineSimilarity', full_name='CoreML.Specification.DotProductLayerParams.cosineSimilarity', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21997, + serialized_end=22046, +) + + +_MEANVARIANCENORMALIZELAYERPARAMS = _descriptor.Descriptor( + name='MeanVarianceNormalizeLayerParams', + full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='acrossChannels', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.acrossChannels', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='normalizeVariance', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.normalizeVariance', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.epsilon', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22048, + serialized_end=22150, +) + + +_SEQUENCEREPEATLAYERPARAMS = _descriptor.Descriptor( + name='SequenceRepeatLayerParams', + full_name='CoreML.Specification.SequenceRepeatLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nRepetitions', full_name='CoreML.Specification.SequenceRepeatLayerParams.nRepetitions', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22152, + serialized_end=22201, +) + + +_SIMPLERECURRENTLAYERPARAMS = _descriptor.Descriptor( + name='SimpleRecurrentLayerParams', + full_name='CoreML.Specification.SimpleRecurrentLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputVectorSize', full_name='CoreML.Specification.SimpleRecurrentLayerParams.inputVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputVectorSize', full_name='CoreML.Specification.SimpleRecurrentLayerParams.outputVectorSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activation', full_name='CoreML.Specification.SimpleRecurrentLayerParams.activation', index=2, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceOutput', full_name='CoreML.Specification.SimpleRecurrentLayerParams.sequenceOutput', index=3, + number=15, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBiasVector', full_name='CoreML.Specification.SimpleRecurrentLayerParams.hasBiasVector', index=4, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weightMatrix', full_name='CoreML.Specification.SimpleRecurrentLayerParams.weightMatrix', index=5, + number=30, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recursionMatrix', full_name='CoreML.Specification.SimpleRecurrentLayerParams.recursionMatrix', index=6, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='biasVector', full_name='CoreML.Specification.SimpleRecurrentLayerParams.biasVector', index=7, + number=32, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverseInput', full_name='CoreML.Specification.SimpleRecurrentLayerParams.reverseInput', index=8, + number=100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, 
+ is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22204, + serialized_end=22587, +) + + +_GRULAYERPARAMS = _descriptor.Descriptor( + name='GRULayerParams', + full_name='CoreML.Specification.GRULayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputVectorSize', full_name='CoreML.Specification.GRULayerParams.inputVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputVectorSize', full_name='CoreML.Specification.GRULayerParams.outputVectorSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activations', full_name='CoreML.Specification.GRULayerParams.activations', index=2, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceOutput', full_name='CoreML.Specification.GRULayerParams.sequenceOutput', index=3, + number=15, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBiasVectors', full_name='CoreML.Specification.GRULayerParams.hasBiasVectors', index=4, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.updateGateWeightMatrix', index=5, + number=30, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='resetGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.resetGateWeightMatrix', index=6, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.outputGateWeightMatrix', index=7, + number=32, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateGateRecursionMatrix', full_name='CoreML.Specification.GRULayerParams.updateGateRecursionMatrix', index=8, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='resetGateRecursionMatrix', 
full_name='CoreML.Specification.GRULayerParams.resetGateRecursionMatrix', index=9, + number=51, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateRecursionMatrix', full_name='CoreML.Specification.GRULayerParams.outputGateRecursionMatrix', index=10, + number=52, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateGateBiasVector', full_name='CoreML.Specification.GRULayerParams.updateGateBiasVector', index=11, + number=70, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='resetGateBiasVector', full_name='CoreML.Specification.GRULayerParams.resetGateBiasVector', index=12, + number=71, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateBiasVector', full_name='CoreML.Specification.GRULayerParams.outputGateBiasVector', index=13, + number=72, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverseInput', full_name='CoreML.Specification.GRULayerParams.reverseInput', index=14, + number=100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22590, + serialized_end=23400, +) + + +_LSTMPARAMS = _descriptor.Descriptor( + name='LSTMParams', + full_name='CoreML.Specification.LSTMParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sequenceOutput', full_name='CoreML.Specification.LSTMParams.sequenceOutput', index=0, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBiasVectors', full_name='CoreML.Specification.LSTMParams.hasBiasVectors', index=1, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='forgetBias', full_name='CoreML.Specification.LSTMParams.forgetBias', index=2, + number=30, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasPeepholeVectors', 
full_name='CoreML.Specification.LSTMParams.hasPeepholeVectors', index=3, + number=40, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coupledInputAndForgetGate', full_name='CoreML.Specification.LSTMParams.coupledInputAndForgetGate', index=4, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cellClipThreshold', full_name='CoreML.Specification.LSTMParams.cellClipThreshold', index=5, + number=60, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=23403, + serialized_end=23573, +) + + +_LSTMWEIGHTPARAMS = _descriptor.Descriptor( + name='LSTMWeightParams', + full_name='CoreML.Specification.LSTMWeightParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.inputGateWeightMatrix', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='forgetGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.forgetGateWeightMatrix', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blockInputWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.blockInputWeightMatrix', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.outputGateWeightMatrix', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.inputGateRecursionMatrix', index=4, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='forgetGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.forgetGateRecursionMatrix', index=5, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='blockInputRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.blockInputRecursionMatrix', index=6, + number=22, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.outputGateRecursionMatrix', index=7, + number=23, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.inputGateBiasVector', index=8, + number=40, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='forgetGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.forgetGateBiasVector', index=9, + number=41, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blockInputBiasVector', full_name='CoreML.Specification.LSTMWeightParams.blockInputBiasVector', index=10, + number=42, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.outputGateBiasVector', index=11, + number=43, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.inputGatePeepholeVector', index=12, + number=60, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='forgetGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.forgetGatePeepholeVector', index=13, + number=61, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.outputGatePeepholeVector', index=14, + number=62, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=23576, + serialized_end=24620, +) + + +_UNIDIRECTIONALLSTMLAYERPARAMS = _descriptor.Descriptor( + name='UniDirectionalLSTMLayerParams', + 
full_name='CoreML.Specification.UniDirectionalLSTMLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputVectorSize', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.inputVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputVectorSize', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.outputVectorSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activations', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.activations', index=2, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='params', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.params', index=3, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weightParams', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.weightParams', index=4, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverseInput', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.reverseInput', index=5, + number=100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=24623, + serialized_end=24900, +) + + +_BIDIRECTIONALLSTMLAYERPARAMS = _descriptor.Descriptor( + name='BiDirectionalLSTMLayerParams', + full_name='CoreML.Specification.BiDirectionalLSTMLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputVectorSize', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.inputVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputVectorSize', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.outputVectorSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activationsForwardLSTM', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.activationsForwardLSTM', index=2, + number=10, 
type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activationsBackwardLSTM', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.activationsBackwardLSTM', index=3, + number=11, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='params', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.params', index=4, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weightParams', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.weightParams', index=5, + number=20, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=24903, + serialized_end=25241, +) + + +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE = _descriptor.Descriptor( + name='CustomLayerParamValue', + full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='doubleValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.doubleValue', index=0, + number=10, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.stringValue', index=1, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.intValue', index=2, + number=30, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.longValue', index=3, + number=40, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boolValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.boolValue', index=4, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=25436, + serialized_end=25576, +) + +_CUSTOMLAYERPARAMS_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='CoreML.Specification.CustomLayerParams.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.CustomLayerParams.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.CustomLayerParams.ParametersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=25578, + serialized_end=25690, +) + +_CUSTOMLAYERPARAMS = _descriptor.Descriptor( + name='CustomLayerParams', + full_name='CoreML.Specification.CustomLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='className', full_name='CoreML.Specification.CustomLayerParams.className', index=0, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.CustomLayerParams.weights', index=1, + number=20, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parameters', full_name='CoreML.Specification.CustomLayerParams.parameters', index=2, + number=30, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='CoreML.Specification.CustomLayerParams.description', index=3, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE, _CUSTOMLAYERPARAMS_PARAMETERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=25244, + serialized_end=25690, +) + + +_TRANSPOSELAYERPARAMS = _descriptor.Descriptor( + name='TransposeLayerParams', + full_name='CoreML.Specification.TransposeLayerParams', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.TransposeLayerParams.axes', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=25692, + serialized_end=25728, +) + + +_BATCHEDMATMULLAYERPARAMS = _descriptor.Descriptor( + name='BatchedMatMulLayerParams', + full_name='CoreML.Specification.BatchedMatMulLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='transposeA', full_name='CoreML.Specification.BatchedMatMulLayerParams.transposeA', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='transposeB', full_name='CoreML.Specification.BatchedMatMulLayerParams.transposeB', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weightMatrixFirstDimension', full_name='CoreML.Specification.BatchedMatMulLayerParams.weightMatrixFirstDimension', index=2, + number=5, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weightMatrixSecondDimension', full_name='CoreML.Specification.BatchedMatMulLayerParams.weightMatrixSecondDimension', index=3, + number=6, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.BatchedMatMulLayerParams.hasBias', index=4, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.BatchedMatMulLayerParams.weights', index=5, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.BatchedMatMulLayerParams.bias', index=6, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int8DynamicQuantize', full_name='CoreML.Specification.BatchedMatMulLayerParams.int8DynamicQuantize', index=7, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=25731, + serialized_end=26019, +) + + +_CONCATNDLAYERPARAMS = _descriptor.Descriptor( + name='ConcatNDLayerParams', + full_name='CoreML.Specification.ConcatNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ConcatNDLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interleave', full_name='CoreML.Specification.ConcatNDLayerParams.interleave', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26021, + serialized_end=26076, +) + + +_SOFTMAXNDLAYERPARAMS = _descriptor.Descriptor( + name='SoftmaxNDLayerParams', + full_name='CoreML.Specification.SoftmaxNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SoftmaxNDLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26078, + serialized_end=26114, +) + + +_REVERSELAYERPARAMS = _descriptor.Descriptor( + name='ReverseLayerParams', + full_name='CoreML.Specification.ReverseLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='reverseDim', full_name='CoreML.Specification.ReverseLayerParams.reverseDim', index=0, + number=1, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26116, + serialized_end=26156, +) + + +_REVERSESEQLAYERPARAMS = _descriptor.Descriptor( + name='ReverseSeqLayerParams', + full_name='CoreML.Specification.ReverseSeqLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='batchAxis', full_name='CoreML.Specification.ReverseSeqLayerParams.batchAxis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceAxis', full_name='CoreML.Specification.ReverseSeqLayerParams.sequenceAxis', index=1, + number=2, type=3, 
cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26158, + serialized_end=26222, +) + + +_LOADCONSTANTNDLAYERPARAMS = _descriptor.Descriptor( + name='LoadConstantNDLayerParams', + full_name='CoreML.Specification.LoadConstantNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.LoadConstantNDLayerParams.shape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='data', full_name='CoreML.Specification.LoadConstantNDLayerParams.data', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26224, + serialized_end=26316, +) + + +_FILLLIKELAYERPARAMS = _descriptor.Descriptor( + name='FillLikeLayerParams', + full_name='CoreML.Specification.FillLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.FillLikeLayerParams.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26318, + serialized_end=26354, +) + + +_FILLSTATICLAYERPARAMS = _descriptor.Descriptor( + name='FillStaticLayerParams', + full_name='CoreML.Specification.FillStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.FillStaticLayerParams.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.FillStaticLayerParams.targetShape', index=1, + number=2, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26356, + serialized_end=26415, +) + + +_FILLDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='FillDynamicLayerParams', + full_name='CoreML.Specification.FillDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.FillDynamicLayerParams.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26417, + serialized_end=26456, +) + + +_WHEREBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='WhereBroadcastableLayerParams', + full_name='CoreML.Specification.WhereBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26458, + serialized_end=26489, +) + + +_SINLAYERPARAMS = _descriptor.Descriptor( + name='SinLayerParams', + full_name='CoreML.Specification.SinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26491, + serialized_end=26507, +) + + +_COSLAYERPARAMS = _descriptor.Descriptor( + name='CosLayerParams', + full_name='CoreML.Specification.CosLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26509, + serialized_end=26525, +) + + +_TANLAYERPARAMS = _descriptor.Descriptor( + name='TanLayerParams', + full_name='CoreML.Specification.TanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26527, + serialized_end=26543, +) + + +_ASINLAYERPARAMS = _descriptor.Descriptor( + name='AsinLayerParams', + full_name='CoreML.Specification.AsinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26545, + serialized_end=26562, +) + + +_ACOSLAYERPARAMS = _descriptor.Descriptor( + name='AcosLayerParams', + full_name='CoreML.Specification.AcosLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26564, + serialized_end=26581, +) + + +_ATANLAYERPARAMS = _descriptor.Descriptor( + name='AtanLayerParams', + full_name='CoreML.Specification.AtanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26583, + serialized_end=26600, +) + + +_SINHLAYERPARAMS = _descriptor.Descriptor( + name='SinhLayerParams', + 
full_name='CoreML.Specification.SinhLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26602, + serialized_end=26619, +) + + +_COSHLAYERPARAMS = _descriptor.Descriptor( + name='CoshLayerParams', + full_name='CoreML.Specification.CoshLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26621, + serialized_end=26638, +) + + +_TANHLAYERPARAMS = _descriptor.Descriptor( + name='TanhLayerParams', + full_name='CoreML.Specification.TanhLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26640, + serialized_end=26657, +) + + +_ASINHLAYERPARAMS = _descriptor.Descriptor( + name='AsinhLayerParams', + full_name='CoreML.Specification.AsinhLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26659, + serialized_end=26677, +) + + +_ACOSHLAYERPARAMS = _descriptor.Descriptor( + name='AcoshLayerParams', + full_name='CoreML.Specification.AcoshLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26679, + serialized_end=26697, +) + + +_ATANHLAYERPARAMS = _descriptor.Descriptor( + name='AtanhLayerParams', + full_name='CoreML.Specification.AtanhLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26699, + serialized_end=26717, +) + + +_POWBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='PowBroadcastableLayerParams', + full_name='CoreML.Specification.PowBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26719, + serialized_end=26748, +) + + +_EXP2LAYERPARAMS = _descriptor.Descriptor( + name='Exp2LayerParams', + full_name='CoreML.Specification.Exp2LayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26750, + serialized_end=26767, +) + + +_WHERENONZEROLAYERPARAMS = _descriptor.Descriptor( + name='WhereNonZeroLayerParams', + full_name='CoreML.Specification.WhereNonZeroLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + 
options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26769, + serialized_end=26794, +) + + +_MATRIXBANDPARTLAYERPARAMS = _descriptor.Descriptor( + name='MatrixBandPartLayerParams', + full_name='CoreML.Specification.MatrixBandPartLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numLower', full_name='CoreML.Specification.MatrixBandPartLayerParams.numLower', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numUpper', full_name='CoreML.Specification.MatrixBandPartLayerParams.numUpper', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26796, + serialized_end=26859, +) + + +_UPPERTRIANGULARLAYERPARAMS = _descriptor.Descriptor( + name='UpperTriangularLayerParams', + full_name='CoreML.Specification.UpperTriangularLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='k', full_name='CoreML.Specification.UpperTriangularLayerParams.k', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26861, + serialized_end=26900, +) + + +_LOWERTRIANGULARLAYERPARAMS = _descriptor.Descriptor( + name='LowerTriangularLayerParams', + full_name='CoreML.Specification.LowerTriangularLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='k', full_name='CoreML.Specification.LowerTriangularLayerParams.k', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26902, + serialized_end=26941, +) + + +_BROADCASTTOLIKELAYERPARAMS = _descriptor.Descriptor( + name='BroadcastToLikeLayerParams', + full_name='CoreML.Specification.BroadcastToLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26943, + serialized_end=26971, +) + + +_BROADCASTTOSTATICLAYERPARAMS = _descriptor.Descriptor( + name='BroadcastToStaticLayerParams', + full_name='CoreML.Specification.BroadcastToStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', 
full_name='CoreML.Specification.BroadcastToStaticLayerParams.targetShape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=26973, + serialized_end=27024, +) + + +_BROADCASTTODYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='BroadcastToDynamicLayerParams', + full_name='CoreML.Specification.BroadcastToDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27026, + serialized_end=27057, +) + + +_ADDBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='AddBroadcastableLayerParams', + full_name='CoreML.Specification.AddBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27059, + serialized_end=27088, +) + + +_MAXBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='MaxBroadcastableLayerParams', + full_name='CoreML.Specification.MaxBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27090, + serialized_end=27119, +) + + +_MINBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='MinBroadcastableLayerParams', + full_name='CoreML.Specification.MinBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27121, + serialized_end=27150, +) + + +_MODBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='ModBroadcastableLayerParams', + full_name='CoreML.Specification.ModBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27152, + serialized_end=27181, +) + + +_FLOORDIVBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='FloorDivBroadcastableLayerParams', + full_name='CoreML.Specification.FloorDivBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27183, + serialized_end=27217, +) + + +_SUBTRACTBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='SubtractBroadcastableLayerParams', + full_name='CoreML.Specification.SubtractBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27219, + serialized_end=27253, +) + + +_MULTIPLYBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='MultiplyBroadcastableLayerParams', + full_name='CoreML.Specification.MultiplyBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27255, + serialized_end=27289, +) + + +_DIVIDEBROADCASTABLELAYERPARAMS = _descriptor.Descriptor( + name='DivideBroadcastableLayerParams', + full_name='CoreML.Specification.DivideBroadcastableLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27291, + serialized_end=27323, +) + + +_GATHERLAYERPARAMS = _descriptor.Descriptor( + name='GatherLayerParams', + full_name='CoreML.Specification.GatherLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.GatherLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27325, + serialized_end=27358, +) + + +_SCATTERLAYERPARAMS = _descriptor.Descriptor( + name='ScatterLayerParams', + full_name='CoreML.Specification.ScatterLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ScatterLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ScatterLayerParams.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27360, + serialized_end=27443, +) + + +_GATHERNDLAYERPARAMS = _descriptor.Descriptor( + name='GatherNDLayerParams', + full_name='CoreML.Specification.GatherNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27445, + serialized_end=27466, +) + + +_SCATTERNDLAYERPARAMS = _descriptor.Descriptor( + name='ScatterNDLayerParams', + full_name='CoreML.Specification.ScatterNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', 
full_name='CoreML.Specification.ScatterNDLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27468, + serialized_end=27539, +) + + +_GATHERALONGAXISLAYERPARAMS = _descriptor.Descriptor( + name='GatherAlongAxisLayerParams', + full_name='CoreML.Specification.GatherAlongAxisLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.GatherAlongAxisLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27541, + serialized_end=27583, +) + + +_SCATTERALONGAXISLAYERPARAMS = _descriptor.Descriptor( + name='ScatterAlongAxisLayerParams', + full_name='CoreML.Specification.ScatterAlongAxisLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ScatterAlongAxisLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ScatterAlongAxisLayerParams.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27585, + serialized_end=27677, +) + + +_STACKLAYERPARAMS = _descriptor.Descriptor( + name='StackLayerParams', + full_name='CoreML.Specification.StackLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.StackLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27679, + serialized_end=27711, +) + + +_RANKPRESERVINGRESHAPELAYERPARAMS = _descriptor.Descriptor( + name='RankPreservingReshapeLayerParams', + full_name='CoreML.Specification.RankPreservingReshapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.RankPreservingReshapeLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, 
default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27713, + serialized_end=27768, +) + + +_CONSTANTPADDINGLAYERPARAMS = _descriptor.Descriptor( + name='ConstantPaddingLayerParams', + full_name='CoreML.Specification.ConstantPaddingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.ConstantPaddingLayerParams.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padAmounts', full_name='CoreML.Specification.ConstantPaddingLayerParams.padAmounts', index=1, + number=2, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padToGivenOutputSizeMode', full_name='CoreML.Specification.ConstantPaddingLayerParams.padToGivenOutputSizeMode', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27770, + serialized_end=27867, +) + + +_RANDOMNORMALLIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalLikeLayerParams', + full_name='CoreML.Specification.RandomNormalLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalLikeLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalLikeLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27869, + serialized_end=27942, +) + + +_RANDOMNORMALSTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalStaticLayerParams', + full_name='CoreML.Specification.RandomNormalStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalStaticLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalStaticLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomNormalStaticLayerParams.outputShape', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27944, + serialized_end=28040, +) + + +_RANDOMNORMALDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalDynamicLayerParams', + full_name='CoreML.Specification.RandomNormalDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28042, + serialized_end=28118, +) + + +_RANDOMUNIFORMLIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformLikeLayerParams', + full_name='CoreML.Specification.RandomUniformLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformLikeLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformLikeLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28120, + serialized_end=28196, +) + + +_RANDOMUNIFORMSTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformStaticLayerParams', + full_name='CoreML.Specification.RandomUniformStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformStaticLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformStaticLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomUniformStaticLayerParams.outputShape', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28198, + serialized_end=28297, +) + + +_RANDOMUNIFORMDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformDynamicLayerParams', + full_name='CoreML.Specification.RandomUniformDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28299, + serialized_end=28378, +) + + +_RANDOMBERNOULLILIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliLikeLayerParams', + full_name='CoreML.Specification.RandomBernoulliLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliLikeLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28380, + serialized_end=28440, +) + + +_RANDOMBERNOULLISTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliStaticLayerParams', + full_name='CoreML.Specification.RandomBernoulliStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.outputShape', index=2, + number=3, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28442, + serialized_end=28525, +) + + +_RANDOMBERNOULLIDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliDynamicLayerParams', + full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28527, + serialized_end=28590, +) + + +_CATEGORICALDISTRIBUTIONLAYERPARAMS = _descriptor.Descriptor( + name='CategoricalDistributionLayerParams', + full_name='CoreML.Specification.CategoricalDistributionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.CategoricalDistributionLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numSamples', full_name='CoreML.Specification.CategoricalDistributionLayerParams.numSamples', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isLogits', full_name='CoreML.Specification.CategoricalDistributionLayerParams.isLogits', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.CategoricalDistributionLayerParams.eps', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='temperature', full_name='CoreML.Specification.CategoricalDistributionLayerParams.temperature', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28592, + serialized_end=28714, +) + + +_REDUCEL1LAYERPARAMS = _descriptor.Descriptor( + name='ReduceL1LayerParams', + full_name='CoreML.Specification.ReduceL1LayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceL1LayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceL1LayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceL1LayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28716, + serialized_end=28788, +) + + +_REDUCEL2LAYERPARAMS = _descriptor.Descriptor( + name='ReduceL2LayerParams', + full_name='CoreML.Specification.ReduceL2LayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceL2LayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceL2LayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceL2LayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28790, + serialized_end=28862, +) + + +_REDUCEMAXLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMaxLayerParams', + full_name='CoreML.Specification.ReduceMaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMaxLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMaxLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMaxLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + 
extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28864, + serialized_end=28937, +) + + +_REDUCEMINLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMinLayerParams', + full_name='CoreML.Specification.ReduceMinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMinLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMinLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMinLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28939, + serialized_end=29012, +) + + +_REDUCESUMLAYERPARAMS = _descriptor.Descriptor( + name='ReduceSumLayerParams', + full_name='CoreML.Specification.ReduceSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceSumLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceSumLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceSumLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29014, + serialized_end=29087, +) + + +_REDUCEPRODLAYERPARAMS = _descriptor.Descriptor( + name='ReduceProdLayerParams', + full_name='CoreML.Specification.ReduceProdLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceProdLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceProdLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceProdLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29089, + serialized_end=29163, +) + + +_REDUCEMEANLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMeanLayerParams', + full_name='CoreML.Specification.ReduceMeanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMeanLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMeanLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMeanLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29165, + serialized_end=29239, +) + + +_REDUCELOGSUMLAYERPARAMS = _descriptor.Descriptor( + name='ReduceLogSumLayerParams', + full_name='CoreML.Specification.ReduceLogSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceLogSumLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceLogSumLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceLogSumLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + 
], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29241, + serialized_end=29317, +) + + +_REDUCESUMSQUARELAYERPARAMS = _descriptor.Descriptor( + name='ReduceSumSquareLayerParams', + full_name='CoreML.Specification.ReduceSumSquareLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceSumSquareLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceSumSquareLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceSumSquareLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29319, + serialized_end=29398, +) + + +_REDUCELOGSUMEXPLAYERPARAMS = _descriptor.Descriptor( + name='ReduceLogSumExpLayerParams', + full_name='CoreML.Specification.ReduceLogSumExpLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29400, + serialized_end=29479, +) + + +_EXPANDDIMSLAYERPARAMS = _descriptor.Descriptor( + name='ExpandDimsLayerParams', + full_name='CoreML.Specification.ExpandDimsLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ExpandDimsLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29481, + serialized_end=29518, +) + + +_FLATTENTO2DLAYERPARAMS = _descriptor.Descriptor( + name='FlattenTo2DLayerParams', + full_name='CoreML.Specification.FlattenTo2DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.FlattenTo2DLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29520, + serialized_end=29558, +) + + +_RESHAPESTATICLAYERPARAMS = _descriptor.Descriptor( + name='ReshapeStaticLayerParams', + full_name='CoreML.Specification.ReshapeStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.ReshapeStaticLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29560, + serialized_end=29607, +) + + +_RESHAPELIKELAYERPARAMS = _descriptor.Descriptor( + name='ReshapeLikeLayerParams', + full_name='CoreML.Specification.ReshapeLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29609, + serialized_end=29633, +) + + +_RESHAPEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='ReshapeDynamicLayerParams', + full_name='CoreML.Specification.ReshapeDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29635, + serialized_end=29662, +) + + +_SQUEEZELAYERPARAMS = _descriptor.Descriptor( + name='SqueezeLayerParams', + full_name='CoreML.Specification.SqueezeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.SqueezeLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeAll', full_name='CoreML.Specification.SqueezeLayerParams.squeezeAll', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29664, + serialized_end=29718, +) + + +_TOPKLAYERPARAMS = _descriptor.Descriptor( + name='TopKLayerParams', + full_name='CoreML.Specification.TopKLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.TopKLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='K', full_name='CoreML.Specification.TopKLayerParams.K', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='useBottomK', full_name='CoreML.Specification.TopKLayerParams.useBottomK', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29720, + serialized_end=29782, +) + + +_ARGMAXLAYERPARAMS = _descriptor.Descriptor( + name='ArgMaxLayerParams', + full_name='CoreML.Specification.ArgMaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgMaxLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='removeDim', full_name='CoreML.Specification.ArgMaxLayerParams.removeDim', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29784, + serialized_end=29836, +) + + +_ARGMINLAYERPARAMS = _descriptor.Descriptor( + name='ArgMinLayerParams', + full_name='CoreML.Specification.ArgMinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgMinLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='removeDim', full_name='CoreML.Specification.ArgMinLayerParams.removeDim', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], 
+ enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29838, + serialized_end=29890, +) + + +_SPLITNDLAYERPARAMS = _descriptor.Descriptor( + name='SplitNDLayerParams', + full_name='CoreML.Specification.SplitNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SplitNDLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numSplits', full_name='CoreML.Specification.SplitNDLayerParams.numSplits', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='splitSizes', full_name='CoreML.Specification.SplitNDLayerParams.splitSizes', index=2, + number=3, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29892, + serialized_end=29965, +) + + +_CEILLAYERPARAMS = _descriptor.Descriptor( + name='CeilLayerParams', + full_name='CoreML.Specification.CeilLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29967, + serialized_end=29984, +) + + +_ROUNDLAYERPARAMS = _descriptor.Descriptor( + name='RoundLayerParams', + full_name='CoreML.Specification.RoundLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29986, + serialized_end=30004, +) + + +_FLOORLAYERPARAMS = _descriptor.Descriptor( + name='FloorLayerParams', + full_name='CoreML.Specification.FloorLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30006, + serialized_end=30024, +) + + +_SIGNLAYERPARAMS = _descriptor.Descriptor( + name='SignLayerParams', + full_name='CoreML.Specification.SignLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30026, + serialized_end=30043, +) + + +_CLIPLAYERPARAMS = _descriptor.Descriptor( + name='ClipLayerParams', + full_name='CoreML.Specification.ClipLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.ClipLayerParams.minVal', index=0, + number=1, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.ClipLayerParams.maxVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30045, + serialized_end=30094, +) + + +_SLICESTATICLAYERPARAMS = _descriptor.Descriptor( + name='SliceStaticLayerParams', + full_name='CoreML.Specification.SliceStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='beginIds', full_name='CoreML.Specification.SliceStaticLayerParams.beginIds', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beginMasks', full_name='CoreML.Specification.SliceStaticLayerParams.beginMasks', index=1, + number=2, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIds', full_name='CoreML.Specification.SliceStaticLayerParams.endIds', index=2, + number=3, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endMasks', full_name='CoreML.Specification.SliceStaticLayerParams.endMasks', index=3, + number=4, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strides', full_name='CoreML.Specification.SliceStaticLayerParams.strides', index=4, + number=5, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeMasks', full_name='CoreML.Specification.SliceStaticLayerParams.squeezeMasks', index=5, + number=6, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30097, + serialized_end=30232, +) + + +_SLICEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='SliceDynamicLayerParams', + full_name='CoreML.Specification.SliceDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='beginMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.beginMasks', index=0, + number=2, type=8, cpp_type=7, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIds', full_name='CoreML.Specification.SliceDynamicLayerParams.endIds', index=1, + number=3, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.endMasks', index=2, + number=4, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strides', full_name='CoreML.Specification.SliceDynamicLayerParams.strides', index=3, + number=5, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.squeezeMasks', index=4, + number=6, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30234, + serialized_end=30352, +) + + +_TILELAYERPARAMS = _descriptor.Descriptor( + name='TileLayerParams', + full_name='CoreML.Specification.TileLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='reps', full_name='CoreML.Specification.TileLayerParams.reps', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30354, + serialized_end=30385, +) + + +_GETSHAPELAYERPARAMS = _descriptor.Descriptor( + name='GetShapeLayerParams', + full_name='CoreML.Specification.GetShapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30387, + serialized_end=30408, +) + + +_ERFLAYERPARAMS = _descriptor.Descriptor( + name='ErfLayerParams', + full_name='CoreML.Specification.ErfLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30410, + serialized_end=30426, +) + + +_GELULAYERPARAMS = _descriptor.Descriptor( + name='GeluLayerParams', + full_name='CoreML.Specification.GeluLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', 
full_name='CoreML.Specification.GeluLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _GELULAYERPARAMS_GELUMODE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30429, + serialized_end=30582, +) + + +_RANGESTATICLAYERPARAMS = _descriptor.Descriptor( + name='RangeStaticLayerParams', + full_name='CoreML.Specification.RangeStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='endValue', full_name='CoreML.Specification.RangeStaticLayerParams.endValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='startValue', full_name='CoreML.Specification.RangeStaticLayerParams.startValue', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stepSizeValue', full_name='CoreML.Specification.RangeStaticLayerParams.stepSizeValue', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30584, + serialized_end=30669, +) + + +_RANGEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RangeDynamicLayerParams', + full_name='CoreML.Specification.RangeDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='startValue', full_name='CoreML.Specification.RangeDynamicLayerParams.startValue', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stepSizeValue', full_name='CoreML.Specification.RangeDynamicLayerParams.stepSizeValue', index=1, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30671, + serialized_end=30739, +) + + +_SLIDINGWINDOWSLAYERPARAMS = _descriptor.Descriptor( + name='SlidingWindowsLayerParams', + full_name='CoreML.Specification.SlidingWindowsLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SlidingWindowsLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='windowSize', full_name='CoreML.Specification.SlidingWindowsLayerParams.windowSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='step', full_name='CoreML.Specification.SlidingWindowsLayerParams.step', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30741, + serialized_end=30816, +) + + +_LAYERNORMALIZATIONLAYERPARAMS = _descriptor.Descriptor( + name='LayerNormalizationLayerParams', + full_name='CoreML.Specification.LayerNormalizationLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='normalizedShape', full_name='CoreML.Specification.LayerNormalizationLayerParams.normalizedShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.LayerNormalizationLayerParams.eps', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.LayerNormalizationLayerParams.gamma', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.LayerNormalizationLayerParams.beta', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30819, + serialized_end=30989, +) + + +_NONMAXIMUMSUPPRESSIONLAYERPARAMS = _descriptor.Descriptor( + name='NonMaximumSuppressionLayerParams', + full_name='CoreML.Specification.NonMaximumSuppressionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='iouThreshold', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.iouThreshold', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scoreThreshold', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.scoreThreshold', index=1, + number=2, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxBoxes', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.maxBoxes', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='perClassSuppression', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.perClassSuppression', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30991, + serialized_end=31118, +) + + +_CLAMPEDRELULAYERPARAMS = _descriptor.Descriptor( + name='ClampedReLULayerParams', + full_name='CoreML.Specification.ClampedReLULayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ClampedReLULayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ClampedReLULayerParams.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31120, + serialized_end=31173, +) + + +_ARGSORTLAYERPARAMS = _descriptor.Descriptor( + name='ArgSortLayerParams', + full_name='CoreML.Specification.ArgSortLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgSortLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='descending', full_name='CoreML.Specification.ArgSortLayerParams.descending', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31175, + serialized_end=31229, +) + + +_SLICEBYSIZELAYERPARAMS = _descriptor.Descriptor( + name='SliceBySizeLayerParams', + full_name='CoreML.Specification.SliceBySizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', 
full_name='CoreML.Specification.SliceBySizeLayerParams.size', index=0, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SliceBySizeLayerParams.axis', index=1, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31231, + serialized_end=31283, +) + + +_NEURALNETWORKCLASSIFIER = _descriptor.Descriptor( + name='NeuralNetworkClassifier', + full_name='CoreML.Specification.NeuralNetworkClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetworkClassifier.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetworkClassifier.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkClassifier.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkClassifier.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetworkClassifier.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.stringClassLabels', index=5, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.int64ClassLabels', index=6, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labelProbabilityLayerName', 
full_name='CoreML.Specification.NeuralNetworkClassifier.labelProbabilityLayerName', index=7, + number=200, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=31286, + serialized_end=31867, +) + + +_ONEHOTLAYERPARAMS = _descriptor.Descriptor( + name='OneHotLayerParams', + full_name='CoreML.Specification.OneHotLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='oneHotVectorSize', full_name='CoreML.Specification.OneHotLayerParams.oneHotVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.OneHotLayerParams.axis', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='onValue', full_name='CoreML.Specification.OneHotLayerParams.onValue', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offValue', full_name='CoreML.Specification.OneHotLayerParams.offValue', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31869, + serialized_end=31963, +) + + +_CUMSUMLAYERPARAMS = _descriptor.Descriptor( + name='CumSumLayerParams', + full_name='CoreML.Specification.CumSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.CumSumLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='excludeFinalSum', full_name='CoreML.Specification.CumSumLayerParams.excludeFinalSum', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverse', full_name='CoreML.Specification.CumSumLayerParams.reverse', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31965, + serialized_end=32040, +) + + +_NEURALNETWORKREGRESSOR = _descriptor.Descriptor( + name='NeuralNetworkRegressor', + full_name='CoreML.Specification.NeuralNetworkRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetworkRegressor.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetworkRegressor.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkRegressor.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkRegressor.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetworkRegressor.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32043, + serialized_end=32444, +) + + +_NETWORKUPDATEPARAMETERS = _descriptor.Descriptor( + name='NetworkUpdateParameters', + full_name='CoreML.Specification.NetworkUpdateParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='lossLayers', full_name='CoreML.Specification.NetworkUpdateParameters.lossLayers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='optimizer', full_name='CoreML.Specification.NetworkUpdateParameters.optimizer', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epochs', full_name='CoreML.Specification.NetworkUpdateParameters.epochs', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shuffle', full_name='CoreML.Specification.NetworkUpdateParameters.shuffle', index=3, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.NetworkUpdateParameters.seed', index=4, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32447, + serialized_end=32737, +) + + +_LOSSLAYER = _descriptor.Descriptor( + name='LossLayer', + full_name='CoreML.Specification.LossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.LossLayer.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='categoricalCrossEntropyLossLayer', full_name='CoreML.Specification.LossLayer.categoricalCrossEntropyLossLayer', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanSquaredErrorLossLayer', full_name='CoreML.Specification.LossLayer.meanSquaredErrorLossLayer', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='LossLayerType', full_name='CoreML.Specification.LossLayer.LossLayerType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=32740, + serialized_end=32968, +) + + +_CATEGORICALCROSSENTROPYLOSSLAYER = _descriptor.Descriptor( + name='CategoricalCrossEntropyLossLayer', + full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer.input', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='target', full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer.target', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32970, + serialized_end=33035, +) + + +_MEANSQUAREDERRORLOSSLAYER = _descriptor.Descriptor( + name='MeanSquaredErrorLossLayer', + full_name='CoreML.Specification.MeanSquaredErrorLossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.MeanSquaredErrorLossLayer.input', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='target', full_name='CoreML.Specification.MeanSquaredErrorLossLayer.target', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33037, + serialized_end=33095, +) + + +_OPTIMIZER = _descriptor.Descriptor( + name='Optimizer', + full_name='CoreML.Specification.Optimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sgdOptimizer', full_name='CoreML.Specification.Optimizer.sgdOptimizer', index=0, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='adamOptimizer', full_name='CoreML.Specification.Optimizer.adamOptimizer', index=1, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='OptimizerType', full_name='CoreML.Specification.Optimizer.OptimizerType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=33098, + serialized_end=33248, +) + + +_SGDOPTIMIZER = _descriptor.Descriptor( + name='SGDOptimizer', + full_name='CoreML.Specification.SGDOptimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='learningRate', full_name='CoreML.Specification.SGDOptimizer.learningRate', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='miniBatchSize', full_name='CoreML.Specification.SGDOptimizer.miniBatchSize', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='momentum', 
full_name='CoreML.Specification.SGDOptimizer.momentum', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33251, + serialized_end=33444, +) + + +_ADAMOPTIMIZER = _descriptor.Descriptor( + name='AdamOptimizer', + full_name='CoreML.Specification.AdamOptimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='learningRate', full_name='CoreML.Specification.AdamOptimizer.learningRate', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='miniBatchSize', full_name='CoreML.Specification.AdamOptimizer.miniBatchSize', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta1', full_name='CoreML.Specification.AdamOptimizer.beta1', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta2', full_name='CoreML.Specification.AdamOptimizer.beta2', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.AdamOptimizer.eps', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33447, + serialized_end=33744, +) + +_NEURALNETWORK.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORK.fields_by_name['preprocessing'].message_type = _NEURALNETWORKPREPROCESSING +_NEURALNETWORK.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORK.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORK.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NEURALNETWORKPREPROCESSING.fields_by_name['scaler'].message_type = _NEURALNETWORKIMAGESCALER +_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage'].message_type = _NEURALNETWORKMEANIMAGE +_NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'].fields.append( + _NEURALNETWORKPREPROCESSING.fields_by_name['scaler']) +_NEURALNETWORKPREPROCESSING.fields_by_name['scaler'].containing_oneof = _NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'] +_NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'].fields.append( + 
_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage']) +_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage'].containing_oneof = _NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'] +_ACTIVATIONPRELU.fields_by_name['alpha'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMETRICSOFTPLUS.fields_by_name['alpha'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMETRICSOFTPLUS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMS.fields_by_name['linear'].message_type = _ACTIVATIONLINEAR +_ACTIVATIONPARAMS.fields_by_name['ReLU'].message_type = _ACTIVATIONRELU +_ACTIVATIONPARAMS.fields_by_name['leakyReLU'].message_type = _ACTIVATIONLEAKYRELU +_ACTIVATIONPARAMS.fields_by_name['thresholdedReLU'].message_type = _ACTIVATIONTHRESHOLDEDRELU +_ACTIVATIONPARAMS.fields_by_name['PReLU'].message_type = _ACTIVATIONPRELU +_ACTIVATIONPARAMS.fields_by_name['tanh'].message_type = _ACTIVATIONTANH +_ACTIVATIONPARAMS.fields_by_name['scaledTanh'].message_type = _ACTIVATIONSCALEDTANH +_ACTIVATIONPARAMS.fields_by_name['sigmoid'].message_type = _ACTIVATIONSIGMOID +_ACTIVATIONPARAMS.fields_by_name['sigmoidHard'].message_type = _ACTIVATIONSIGMOIDHARD +_ACTIVATIONPARAMS.fields_by_name['ELU'].message_type = _ACTIVATIONELU +_ACTIVATIONPARAMS.fields_by_name['softsign'].message_type = _ACTIVATIONSOFTSIGN +_ACTIVATIONPARAMS.fields_by_name['softplus'].message_type = _ACTIVATIONSOFTPLUS +_ACTIVATIONPARAMS.fields_by_name['parametricSoftplus'].message_type = _ACTIVATIONPARAMETRICSOFTPLUS +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['linear']) +_ACTIVATIONPARAMS.fields_by_name['linear'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['ReLU']) +_ACTIVATIONPARAMS.fields_by_name['ReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['leakyReLU']) +_ACTIVATIONPARAMS.fields_by_name['leakyReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['thresholdedReLU']) +_ACTIVATIONPARAMS.fields_by_name['thresholdedReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['PReLU']) +_ACTIVATIONPARAMS.fields_by_name['PReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['tanh']) +_ACTIVATIONPARAMS.fields_by_name['tanh'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['scaledTanh']) +_ACTIVATIONPARAMS.fields_by_name['scaledTanh'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['sigmoid']) +_ACTIVATIONPARAMS.fields_by_name['sigmoid'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['sigmoidHard']) +_ACTIVATIONPARAMS.fields_by_name['sigmoidHard'].containing_oneof = 
_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['ELU']) +_ACTIVATIONPARAMS.fields_by_name['ELU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['softsign']) +_ACTIVATIONPARAMS.fields_by_name['softsign'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['softplus']) +_ACTIVATIONPARAMS.fields_by_name['softplus'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['parametricSoftplus']) +_ACTIVATIONPARAMS.fields_by_name['parametricSoftplus'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_NEURALNETWORKLAYER.fields_by_name['inputTensor'].message_type = _TENSOR +_NEURALNETWORKLAYER.fields_by_name['outputTensor'].message_type = _TENSOR +_NEURALNETWORKLAYER.fields_by_name['convolution'].message_type = _CONVOLUTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['pooling'].message_type = _POOLINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['activation'].message_type = _ACTIVATIONPARAMS +_NEURALNETWORKLAYER.fields_by_name['innerProduct'].message_type = _INNERPRODUCTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['embedding'].message_type = _EMBEDDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['batchnorm'].message_type = _BATCHNORMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['mvn'].message_type = _MEANVARIANCENORMALIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['l2normalize'].message_type = _L2NORMALIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['softmax'].message_type = _SOFTMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lrn'].message_type = _LRNLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['crop'].message_type = _CROPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['padding'].message_type = _PADDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['upsample'].message_type = _UPSAMPLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['resizeBilinear'].message_type = _RESIZEBILINEARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cropResize'].message_type = _CROPRESIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['unary'].message_type = _UNARYFUNCTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['add'].message_type = _ADDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['multiply'].message_type = _MULTIPLYLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['average'].message_type = _AVERAGELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scale'].message_type = _SCALELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['bias'].message_type = _BIASLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['max'].message_type = _MAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['min'].message_type = _MINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['dot'].message_type = _DOTPRODUCTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduce'].message_type = _REDUCELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loadConstant'].message_type = _LOADCONSTANTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshape'].message_type = _RESHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['flatten'].message_type = _FLATTENLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['permute'].message_type = 
_PERMUTELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['concat'].message_type = _CONCATLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['split'].message_type = _SPLITLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sequenceRepeat'].message_type = _SEQUENCEREPEATLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reorganizeData'].message_type = _REORGANIZEDATALAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['slice'].message_type = _SLICELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['simpleRecurrent'].message_type = _SIMPLERECURRENTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gru'].message_type = _GRULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM'].message_type = _UNIDIRECTIONALLSTMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM'].message_type = _BIDIRECTIONALLSTMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['custom'].message_type = _CUSTOMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['copy'].message_type = _COPYLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['branch'].message_type = _BRANCHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loop'].message_type = _LOOPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loopBreak'].message_type = _LOOPBREAKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loopContinue'].message_type = _LOOPCONTINUELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rangeStatic'].message_type = _RANGESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rangeDynamic'].message_type = _RANGEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['clip'].message_type = _CLIPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['ceil'].message_type = _CEILLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['floor'].message_type = _FLOORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sign'].message_type = _SIGNLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['round'].message_type = _ROUNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['exp2'].message_type = _EXP2LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sin'].message_type = _SINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cos'].message_type = _COSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tan'].message_type = _TANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['asin'].message_type = _ASINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['acos'].message_type = _ACOSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['atan'].message_type = _ATANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sinh'].message_type = _SINHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cosh'].message_type = _COSHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tanh'].message_type = _TANHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['asinh'].message_type = _ASINHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['acosh'].message_type = _ACOSHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['atanh'].message_type = _ATANHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['erf'].message_type = _ERFLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gelu'].message_type = _GELULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['equal'].message_type = _EQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['notEqual'].message_type = _NOTEQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lessThan'].message_type = _LESSTHANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lessEqual'].message_type = _LESSEQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['greaterThan'].message_type = _GREATERTHANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['greaterEqual'].message_type = _GREATEREQUALLAYERPARAMS 
+_NEURALNETWORKLAYER.fields_by_name['logicalOr'].message_type = _LOGICALORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalXor'].message_type = _LOGICALXORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalNot'].message_type = _LOGICALNOTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalAnd'].message_type = _LOGICALANDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['modBroadcastable'].message_type = _MODBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['minBroadcastable'].message_type = _MINBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['maxBroadcastable'].message_type = _MAXBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['addBroadcastable'].message_type = _ADDBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['powBroadcastable'].message_type = _POWBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['divideBroadcastable'].message_type = _DIVIDEBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable'].message_type = _FLOORDIVBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable'].message_type = _MULTIPLYBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable'].message_type = _SUBTRACTBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tile'].message_type = _TILELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['stack'].message_type = _STACKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gather'].message_type = _GATHERLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatter'].message_type = _SCATTERLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gatherND'].message_type = _GATHERNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatterND'].message_type = _SCATTERNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['softmaxND'].message_type = _SOFTMAXNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis'].message_type = _GATHERALONGAXISLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis'].message_type = _SCATTERALONGAXISLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reverse'].message_type = _REVERSELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reverseSeq'].message_type = _REVERSESEQLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['splitND'].message_type = _SPLITNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['concatND'].message_type = _CONCATNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['transpose'].message_type = _TRANSPOSELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceStatic'].message_type = _SLICESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceDynamic'].message_type = _SLICEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['slidingWindows'].message_type = _SLIDINGWINDOWSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['topK'].message_type = _TOPKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argMin'].message_type = _ARGMINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argMax'].message_type = _ARGMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['embeddingND'].message_type = _EMBEDDINGNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['batchedMatmul'].message_type = _BATCHEDMATMULLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['getShape'].message_type = _GETSHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loadConstantND'].message_type = _LOADCONSTANTNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['fillLike'].message_type = _FILLLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['fillStatic'].message_type = _FILLSTATICLAYERPARAMS 
+_NEURALNETWORKLAYER.fields_by_name['fillDynamic'].message_type = _FILLDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToLike'].message_type = _BROADCASTTOLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToStatic'].message_type = _BROADCASTTOSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic'].message_type = _BROADCASTTODYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['squeeze'].message_type = _SQUEEZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['expandDims'].message_type = _EXPANDDIMSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['flattenTo2D'].message_type = _FLATTENTO2DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeLike'].message_type = _RESHAPELIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeStatic'].message_type = _RESHAPESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeDynamic'].message_type = _RESHAPEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape'].message_type = _RANKPRESERVINGRESHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['constantPad'].message_type = _CONSTANTPADDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalLike'].message_type = _RANDOMNORMALLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalStatic'].message_type = _RANDOMNORMALSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic'].message_type = _RANDOMNORMALDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformLike'].message_type = _RANDOMUNIFORMLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformStatic'].message_type = _RANDOMUNIFORMSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic'].message_type = _RANDOMUNIFORMDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike'].message_type = _RANDOMBERNOULLILIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic'].message_type = _RANDOMBERNOULLISTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic'].message_type = _RANDOMBERNOULLIDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['categoricalDistribution'].message_type = _CATEGORICALDISTRIBUTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceL1'].message_type = _REDUCEL1LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceL2'].message_type = _REDUCEL2LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMax'].message_type = _REDUCEMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMin'].message_type = _REDUCEMINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceSum'].message_type = _REDUCESUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceProd'].message_type = _REDUCEPRODLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMean'].message_type = _REDUCEMEANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceLogSum'].message_type = _REDUCELOGSUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceSumSquare'].message_type = _REDUCESUMSQUARELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp'].message_type = _REDUCELOGSUMEXPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['whereNonZero'].message_type = _WHERENONZEROLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['matrixBandPart'].message_type = _MATRIXBANDPARTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lowerTriangular'].message_type = _LOWERTRIANGULARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['upperTriangular'].message_type = _UPPERTRIANGULARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['whereBroadcastable'].message_type = 
_WHEREBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['layerNormalization'].message_type = _LAYERNORMALIZATIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression'].message_type = _NONMAXIMUMSUPPRESSIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['oneHot'].message_type = _ONEHOTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cumSum'].message_type = _CUMSUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['clampedReLU'].message_type = _CLAMPEDRELULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argSort'].message_type = _ARGSORTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['pooling3d'].message_type = _POOLING3DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['globalPooling3d'].message_type = _GLOBALPOOLING3DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceBySize'].message_type = _SLICEBYSIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['convolution3d'].message_type = _CONVOLUTION3DLAYERPARAMS +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['convolution']) +_NEURALNETWORKLAYER.fields_by_name['convolution'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['pooling']) +_NEURALNETWORKLAYER.fields_by_name['pooling'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['activation']) +_NEURALNETWORKLAYER.fields_by_name['activation'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['innerProduct']) +_NEURALNETWORKLAYER.fields_by_name['innerProduct'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['embedding']) +_NEURALNETWORKLAYER.fields_by_name['embedding'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['batchnorm']) +_NEURALNETWORKLAYER.fields_by_name['batchnorm'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['mvn']) +_NEURALNETWORKLAYER.fields_by_name['mvn'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['l2normalize']) +_NEURALNETWORKLAYER.fields_by_name['l2normalize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['softmax']) +_NEURALNETWORKLAYER.fields_by_name['softmax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lrn']) +_NEURALNETWORKLAYER.fields_by_name['lrn'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['crop']) +_NEURALNETWORKLAYER.fields_by_name['crop'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['padding']) +_NEURALNETWORKLAYER.fields_by_name['padding'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['upsample']) +_NEURALNETWORKLAYER.fields_by_name['upsample'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['resizeBilinear']) +_NEURALNETWORKLAYER.fields_by_name['resizeBilinear'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cropResize']) +_NEURALNETWORKLAYER.fields_by_name['cropResize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['unary']) +_NEURALNETWORKLAYER.fields_by_name['unary'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['add']) +_NEURALNETWORKLAYER.fields_by_name['add'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['multiply']) +_NEURALNETWORKLAYER.fields_by_name['multiply'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['average']) +_NEURALNETWORKLAYER.fields_by_name['average'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scale']) +_NEURALNETWORKLAYER.fields_by_name['scale'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['bias']) +_NEURALNETWORKLAYER.fields_by_name['bias'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['max']) +_NEURALNETWORKLAYER.fields_by_name['max'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['min']) +_NEURALNETWORKLAYER.fields_by_name['min'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['dot']) +_NEURALNETWORKLAYER.fields_by_name['dot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduce']) +_NEURALNETWORKLAYER.fields_by_name['reduce'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loadConstant']) +_NEURALNETWORKLAYER.fields_by_name['loadConstant'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshape']) +_NEURALNETWORKLAYER.fields_by_name['reshape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['flatten']) +_NEURALNETWORKLAYER.fields_by_name['flatten'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] 
+_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['permute']) +_NEURALNETWORKLAYER.fields_by_name['permute'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['concat']) +_NEURALNETWORKLAYER.fields_by_name['concat'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['split']) +_NEURALNETWORKLAYER.fields_by_name['split'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sequenceRepeat']) +_NEURALNETWORKLAYER.fields_by_name['sequenceRepeat'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reorganizeData']) +_NEURALNETWORKLAYER.fields_by_name['reorganizeData'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['slice']) +_NEURALNETWORKLAYER.fields_by_name['slice'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['simpleRecurrent']) +_NEURALNETWORKLAYER.fields_by_name['simpleRecurrent'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gru']) +_NEURALNETWORKLAYER.fields_by_name['gru'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM']) +_NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM']) +_NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['custom']) +_NEURALNETWORKLAYER.fields_by_name['custom'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['copy']) +_NEURALNETWORKLAYER.fields_by_name['copy'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['branch']) +_NEURALNETWORKLAYER.fields_by_name['branch'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loop']) +_NEURALNETWORKLAYER.fields_by_name['loop'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loopBreak']) +_NEURALNETWORKLAYER.fields_by_name['loopBreak'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loopContinue']) +_NEURALNETWORKLAYER.fields_by_name['loopContinue'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rangeStatic']) +_NEURALNETWORKLAYER.fields_by_name['rangeStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rangeDynamic']) +_NEURALNETWORKLAYER.fields_by_name['rangeDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['clip']) +_NEURALNETWORKLAYER.fields_by_name['clip'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['ceil']) +_NEURALNETWORKLAYER.fields_by_name['ceil'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['floor']) +_NEURALNETWORKLAYER.fields_by_name['floor'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sign']) +_NEURALNETWORKLAYER.fields_by_name['sign'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['round']) +_NEURALNETWORKLAYER.fields_by_name['round'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['exp2']) +_NEURALNETWORKLAYER.fields_by_name['exp2'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sin']) +_NEURALNETWORKLAYER.fields_by_name['sin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cos']) +_NEURALNETWORKLAYER.fields_by_name['cos'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['tan']) +_NEURALNETWORKLAYER.fields_by_name['tan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['asin']) +_NEURALNETWORKLAYER.fields_by_name['asin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['acos']) +_NEURALNETWORKLAYER.fields_by_name['acos'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['atan']) +_NEURALNETWORKLAYER.fields_by_name['atan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sinh']) +_NEURALNETWORKLAYER.fields_by_name['sinh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cosh']) +_NEURALNETWORKLAYER.fields_by_name['cosh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['tanh']) +_NEURALNETWORKLAYER.fields_by_name['tanh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['asinh']) +_NEURALNETWORKLAYER.fields_by_name['asinh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['acosh']) +_NEURALNETWORKLAYER.fields_by_name['acosh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['atanh']) +_NEURALNETWORKLAYER.fields_by_name['atanh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['erf']) +_NEURALNETWORKLAYER.fields_by_name['erf'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gelu']) +_NEURALNETWORKLAYER.fields_by_name['gelu'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['equal']) +_NEURALNETWORKLAYER.fields_by_name['equal'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['notEqual']) +_NEURALNETWORKLAYER.fields_by_name['notEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lessThan']) +_NEURALNETWORKLAYER.fields_by_name['lessThan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lessEqual']) +_NEURALNETWORKLAYER.fields_by_name['lessEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['greaterThan']) +_NEURALNETWORKLAYER.fields_by_name['greaterThan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['greaterEqual']) +_NEURALNETWORKLAYER.fields_by_name['greaterEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalOr']) +_NEURALNETWORKLAYER.fields_by_name['logicalOr'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalXor']) +_NEURALNETWORKLAYER.fields_by_name['logicalXor'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalNot']) +_NEURALNETWORKLAYER.fields_by_name['logicalNot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalAnd']) +_NEURALNETWORKLAYER.fields_by_name['logicalAnd'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['modBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['modBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['minBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['minBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['maxBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['maxBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['addBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['addBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['powBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['powBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['divideBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['divideBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['tile']) +_NEURALNETWORKLAYER.fields_by_name['tile'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['stack']) +_NEURALNETWORKLAYER.fields_by_name['stack'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gather']) +_NEURALNETWORKLAYER.fields_by_name['gather'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatter']) +_NEURALNETWORKLAYER.fields_by_name['scatter'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gatherND']) +_NEURALNETWORKLAYER.fields_by_name['gatherND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatterND']) +_NEURALNETWORKLAYER.fields_by_name['scatterND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['softmaxND']) 
+_NEURALNETWORKLAYER.fields_by_name['softmaxND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis']) +_NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis']) +_NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reverse']) +_NEURALNETWORKLAYER.fields_by_name['reverse'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reverseSeq']) +_NEURALNETWORKLAYER.fields_by_name['reverseSeq'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['splitND']) +_NEURALNETWORKLAYER.fields_by_name['splitND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['concatND']) +_NEURALNETWORKLAYER.fields_by_name['concatND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['transpose']) +_NEURALNETWORKLAYER.fields_by_name['transpose'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceStatic']) +_NEURALNETWORKLAYER.fields_by_name['sliceStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceDynamic']) +_NEURALNETWORKLAYER.fields_by_name['sliceDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['slidingWindows']) +_NEURALNETWORKLAYER.fields_by_name['slidingWindows'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['topK']) +_NEURALNETWORKLAYER.fields_by_name['topK'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argMin']) +_NEURALNETWORKLAYER.fields_by_name['argMin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argMax']) +_NEURALNETWORKLAYER.fields_by_name['argMax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['embeddingND']) +_NEURALNETWORKLAYER.fields_by_name['embeddingND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['batchedMatmul']) +_NEURALNETWORKLAYER.fields_by_name['batchedMatmul'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['getShape']) +_NEURALNETWORKLAYER.fields_by_name['getShape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loadConstantND']) +_NEURALNETWORKLAYER.fields_by_name['loadConstantND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillLike']) +_NEURALNETWORKLAYER.fields_by_name['fillLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillStatic']) +_NEURALNETWORKLAYER.fields_by_name['fillStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillDynamic']) +_NEURALNETWORKLAYER.fields_by_name['fillDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToLike']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToStatic']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['squeeze']) +_NEURALNETWORKLAYER.fields_by_name['squeeze'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['expandDims']) +_NEURALNETWORKLAYER.fields_by_name['expandDims'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['flattenTo2D']) +_NEURALNETWORKLAYER.fields_by_name['flattenTo2D'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeLike']) +_NEURALNETWORKLAYER.fields_by_name['reshapeLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeStatic']) +_NEURALNETWORKLAYER.fields_by_name['reshapeStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeDynamic']) +_NEURALNETWORKLAYER.fields_by_name['reshapeDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape']) +_NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['constantPad']) 
+_NEURALNETWORKLAYER.fields_by_name['constantPad'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalLike']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformLike']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['categoricalDistribution']) +_NEURALNETWORKLAYER.fields_by_name['categoricalDistribution'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceL1']) +_NEURALNETWORKLAYER.fields_by_name['reduceL1'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceL2']) +_NEURALNETWORKLAYER.fields_by_name['reduceL2'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMax']) +_NEURALNETWORKLAYER.fields_by_name['reduceMax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMin']) +_NEURALNETWORKLAYER.fields_by_name['reduceMin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceSum']) 
+_NEURALNETWORKLAYER.fields_by_name['reduceSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceProd']) +_NEURALNETWORKLAYER.fields_by_name['reduceProd'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMean']) +_NEURALNETWORKLAYER.fields_by_name['reduceMean'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceLogSum']) +_NEURALNETWORKLAYER.fields_by_name['reduceLogSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceSumSquare']) +_NEURALNETWORKLAYER.fields_by_name['reduceSumSquare'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp']) +_NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['whereNonZero']) +_NEURALNETWORKLAYER.fields_by_name['whereNonZero'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['matrixBandPart']) +_NEURALNETWORKLAYER.fields_by_name['matrixBandPart'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lowerTriangular']) +_NEURALNETWORKLAYER.fields_by_name['lowerTriangular'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['upperTriangular']) +_NEURALNETWORKLAYER.fields_by_name['upperTriangular'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['whereBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['whereBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['layerNormalization']) +_NEURALNETWORKLAYER.fields_by_name['layerNormalization'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression']) +_NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['oneHot']) +_NEURALNETWORKLAYER.fields_by_name['oneHot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cumSum']) +_NEURALNETWORKLAYER.fields_by_name['cumSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['clampedReLU']) +_NEURALNETWORKLAYER.fields_by_name['clampedReLU'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argSort']) +_NEURALNETWORKLAYER.fields_by_name['argSort'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['pooling3d']) +_NEURALNETWORKLAYER.fields_by_name['pooling3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['globalPooling3d']) +_NEURALNETWORKLAYER.fields_by_name['globalPooling3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceBySize']) +_NEURALNETWORKLAYER.fields_by_name['sliceBySize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['convolution3d']) +_NEURALNETWORKLAYER.fields_by_name['convolution3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_BRANCHLAYERPARAMS.fields_by_name['ifBranch'].message_type = _NEURALNETWORK +_BRANCHLAYERPARAMS.fields_by_name['elseBranch'].message_type = _NEURALNETWORK +_LOOPLAYERPARAMS.fields_by_name['conditionNetwork'].message_type = _NEURALNETWORK +_LOOPLAYERPARAMS.fields_by_name['bodyNetwork'].message_type = _NEURALNETWORK +_BORDERAMOUNTS_EDGESIZES.containing_type = _BORDERAMOUNTS +_BORDERAMOUNTS.fields_by_name['borderAmounts'].message_type = _BORDERAMOUNTS_EDGESIZES +_VALIDPADDING.fields_by_name['paddingAmounts'].message_type = _BORDERAMOUNTS +_SAMEPADDING.fields_by_name['asymmetryMode'].enum_type = _SAMEPADDING_SAMEPADDINGMODE +_SAMEPADDING_SAMEPADDINGMODE.containing_type = _SAMEPADDING +_SAMPLINGMODE.fields_by_name['samplingMethod'].enum_type = _SAMPLINGMODE_METHOD +_SAMPLINGMODE_METHOD.containing_type = _SAMPLINGMODE +_BOXCOORDINATESMODE.fields_by_name['boxMode'].enum_type = _BOXCOORDINATESMODE_COORDINATES +_BOXCOORDINATESMODE_COORDINATES.containing_type = _BOXCOORDINATESMODE +_WEIGHTPARAMS.fields_by_name['quantization'].message_type = _QUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.fields_by_name['linearQuantization'].message_type = _LINEARQUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization'].message_type = _LOOKUPTABLEQUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'].fields.append( + _QUANTIZATIONPARAMS.fields_by_name['linearQuantization']) +_QUANTIZATIONPARAMS.fields_by_name['linearQuantization'].containing_oneof = _QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'] +_QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'].fields.append( + _QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization']) +_QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization'].containing_oneof = _QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'] +_CONVOLUTIONLAYERPARAMS.fields_by_name['valid'].message_type = _VALIDPADDING +_CONVOLUTIONLAYERPARAMS.fields_by_name['same'].message_type = _SAMEPADDING +_CONVOLUTIONLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CONVOLUTIONLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'].fields.append( + _CONVOLUTIONLAYERPARAMS.fields_by_name['valid']) +_CONVOLUTIONLAYERPARAMS.fields_by_name['valid'].containing_oneof = 
_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'] +_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'].fields.append( + _CONVOLUTIONLAYERPARAMS.fields_by_name['same']) +_CONVOLUTIONLAYERPARAMS.fields_by_name['same'].containing_oneof = _CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'] +_CONVOLUTION3DLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CONVOLUTION3DLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_CONVOLUTION3DLAYERPARAMS.fields_by_name['paddingType'].enum_type = _CONVOLUTION3DLAYERPARAMS_PADDINGTYPE +_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE.containing_type = _CONVOLUTION3DLAYERPARAMS +_INNERPRODUCTLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_INNERPRODUCTLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_EMBEDDINGLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_EMBEDDINGLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_EMBEDDINGNDLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_EMBEDDINGNDLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['gamma'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['mean'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['variance'].message_type = _WEIGHTPARAMS +_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING.containing_type = _POOLINGLAYERPARAMS +_POOLINGLAYERPARAMS.fields_by_name['type'].enum_type = _POOLINGLAYERPARAMS_POOLINGTYPE +_POOLINGLAYERPARAMS.fields_by_name['valid'].message_type = _VALIDPADDING +_POOLINGLAYERPARAMS.fields_by_name['same'].message_type = _SAMEPADDING +_POOLINGLAYERPARAMS.fields_by_name['includeLastPixel'].message_type = _POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING +_POOLINGLAYERPARAMS_POOLINGTYPE.containing_type = _POOLINGLAYERPARAMS +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['valid']) +_POOLINGLAYERPARAMS.fields_by_name['valid'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['same']) +_POOLINGLAYERPARAMS.fields_by_name['same'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['includeLastPixel']) +_POOLINGLAYERPARAMS.fields_by_name['includeLastPixel'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLING3DLAYERPARAMS.fields_by_name['type'].enum_type = _POOLING3DLAYERPARAMS_POOLINGTYPE3D +_POOLING3DLAYERPARAMS.fields_by_name['paddingType'].enum_type = _POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE +_POOLING3DLAYERPARAMS_POOLINGTYPE3D.containing_type = _POOLING3DLAYERPARAMS +_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE.containing_type = _POOLING3DLAYERPARAMS +_GLOBALPOOLING3DLAYERPARAMS.fields_by_name['type'].enum_type = _GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D +_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D.containing_type = _GLOBALPOOLING3DLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGCONSTANT.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGREFLECTION.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGREPLICATION.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS.fields_by_name['constant'].message_type 
= _PADDINGLAYERPARAMS_PADDINGCONSTANT +_PADDINGLAYERPARAMS.fields_by_name['reflection'].message_type = _PADDINGLAYERPARAMS_PADDINGREFLECTION +_PADDINGLAYERPARAMS.fields_by_name['replication'].message_type = _PADDINGLAYERPARAMS_PADDINGREPLICATION +_PADDINGLAYERPARAMS.fields_by_name['paddingAmounts'].message_type = _BORDERAMOUNTS +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['constant']) +_PADDINGLAYERPARAMS.fields_by_name['constant'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['reflection']) +_PADDINGLAYERPARAMS.fields_by_name['reflection'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['replication']) +_PADDINGLAYERPARAMS.fields_by_name['replication'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_UNARYFUNCTIONLAYERPARAMS.fields_by_name['type'].enum_type = _UNARYFUNCTIONLAYERPARAMS_OPERATION +_UNARYFUNCTIONLAYERPARAMS_OPERATION.containing_type = _UNARYFUNCTIONLAYERPARAMS +_UPSAMPLELAYERPARAMS.fields_by_name['mode'].enum_type = _UPSAMPLELAYERPARAMS_INTERPOLATIONMODE +_UPSAMPLELAYERPARAMS.fields_by_name['linearUpsampleMode'].enum_type = _UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE +_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE.containing_type = _UPSAMPLELAYERPARAMS +_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE.containing_type = _UPSAMPLELAYERPARAMS +_RESIZEBILINEARLAYERPARAMS.fields_by_name['mode'].message_type = _SAMPLINGMODE +_CROPRESIZELAYERPARAMS.fields_by_name['mode'].message_type = _SAMPLINGMODE +_CROPRESIZELAYERPARAMS.fields_by_name['boxIndicesMode'].message_type = _BOXCOORDINATESMODE +_BIASLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_SCALELAYERPARAMS.fields_by_name['scale'].message_type = _WEIGHTPARAMS +_SCALELAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_LOADCONSTANTLAYERPARAMS.fields_by_name['data'].message_type = _WEIGHTPARAMS +_FLATTENLAYERPARAMS.fields_by_name['mode'].enum_type = _FLATTENLAYERPARAMS_FLATTENORDER +_FLATTENLAYERPARAMS_FLATTENORDER.containing_type = _FLATTENLAYERPARAMS +_RESHAPELAYERPARAMS.fields_by_name['mode'].enum_type = _RESHAPELAYERPARAMS_RESHAPEORDER +_RESHAPELAYERPARAMS_RESHAPEORDER.containing_type = _RESHAPELAYERPARAMS +_REORGANIZEDATALAYERPARAMS.fields_by_name['mode'].enum_type = _REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE +_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE.containing_type = _REORGANIZEDATALAYERPARAMS +_SLICELAYERPARAMS.fields_by_name['axis'].enum_type = _SLICELAYERPARAMS_SLICEAXIS +_SLICELAYERPARAMS_SLICEAXIS.containing_type = _SLICELAYERPARAMS +_REDUCELAYERPARAMS.fields_by_name['mode'].enum_type = _REDUCELAYERPARAMS_REDUCEOPERATION +_REDUCELAYERPARAMS.fields_by_name['axis'].enum_type = _REDUCELAYERPARAMS_REDUCEAXIS +_REDUCELAYERPARAMS_REDUCEOPERATION.containing_type = _REDUCELAYERPARAMS +_REDUCELAYERPARAMS_REDUCEAXIS.containing_type = _REDUCELAYERPARAMS +_CROPLAYERPARAMS.fields_by_name['cropAmounts'].message_type = _BORDERAMOUNTS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['activation'].message_type = _ACTIVATIONPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['weightMatrix'].message_type = _WEIGHTPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['recursionMatrix'].message_type = _WEIGHTPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['biasVector'].message_type = _WEIGHTPARAMS 
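Each `.message_type = _WEIGHTPARAMS` assignment above links a composite field (weights, biases, recursion matrices) to the WeightParams message, so at runtime those fields materialize as nested WeightParams submessages. A short sketch under the same import assumption as above:

    from coremltools.proto import NeuralNetwork_pb2

    conv = NeuralNetwork_pb2.ConvolutionLayerParams()
    conv.outputChannels = 8
    conv.kernelChannels = 3

    # 'weights' resolves to a WeightParams submessage via the descriptor
    # linking above; floatValue is its repeated-float payload (the length
    # here is illustrative, not validated by protobuf itself).
    conv.weights.floatValue.extend([0.0] * (8 * 3 * 3 * 3))
    conv.hasBias = True
    conv.bias.floatValue.extend([0.0] * 8)
    assert type(conv.weights).__name__ == "WeightParams"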
+_GRULAYERPARAMS.fields_by_name['activations'].message_type = _ACTIVATIONPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateBiasVector'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateBiasVector'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGatePeepholeVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGatePeepholeVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGatePeepholeVector'].message_type = _WEIGHTPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activations'].message_type = _ACTIVATIONPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['params'].message_type = _LSTMPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['weightParams'].message_type = _LSTMWEIGHTPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activationsForwardLSTM'].message_type = _ACTIVATIONPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activationsBackwardLSTM'].message_type = _ACTIVATIONPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['params'].message_type = _LSTMPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['weightParams'].message_type = _LSTMWEIGHTPARAMS +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.containing_type = _CUSTOMLAYERPARAMS +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['doubleValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['doubleValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['stringValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['stringValue'].containing_oneof = 
_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['intValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['intValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['longValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['longValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['boolValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['boolValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.fields_by_name['value'].message_type = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.containing_type = _CUSTOMLAYERPARAMS +_CUSTOMLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CUSTOMLAYERPARAMS.fields_by_name['parameters'].message_type = _CUSTOMLAYERPARAMS_PARAMETERSENTRY +_BATCHEDMATMULLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_BATCHEDMATMULLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_LOADCONSTANTNDLAYERPARAMS.fields_by_name['data'].message_type = _WEIGHTPARAMS +_SCATTERLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_SCATTERNDLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_SCATTERALONGAXISLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_GELULAYERPARAMS.fields_by_name['mode'].enum_type = _GELULAYERPARAMS_GELUMODE +_GELULAYERPARAMS_GELUMODE.containing_type = _GELULAYERPARAMS +_LAYERNORMALIZATIONLAYERPARAMS.fields_by_name['gamma'].message_type = _WEIGHTPARAMS +_LAYERNORMALIZATIONLAYERPARAMS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_NEURALNETWORKCLASSIFIER.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORKCLASSIFIER.fields_by_name['preprocessing'].message_type = _NEURALNETWORKPREPROCESSING +_NEURALNETWORKCLASSIFIER.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORKCLASSIFIER.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORKCLASSIFIER.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels']) +_NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'] +_NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels']) +_NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'] +_NEURALNETWORKREGRESSOR.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORKREGRESSOR.fields_by_name['preprocessing'].message_type = 
_NEURALNETWORKPREPROCESSING +_NEURALNETWORKREGRESSOR.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORKREGRESSOR.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORKREGRESSOR.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NETWORKUPDATEPARAMETERS.fields_by_name['lossLayers'].message_type = _LOSSLAYER +_NETWORKUPDATEPARAMETERS.fields_by_name['optimizer'].message_type = _OPTIMIZER +_NETWORKUPDATEPARAMETERS.fields_by_name['epochs'].message_type = Parameters__pb2._INT64PARAMETER +_NETWORKUPDATEPARAMETERS.fields_by_name['shuffle'].message_type = Parameters__pb2._BOOLPARAMETER +_NETWORKUPDATEPARAMETERS.fields_by_name['seed'].message_type = Parameters__pb2._INT64PARAMETER +_LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer'].message_type = _CATEGORICALCROSSENTROPYLOSSLAYER +_LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer'].message_type = _MEANSQUAREDERRORLOSSLAYER +_LOSSLAYER.oneofs_by_name['LossLayerType'].fields.append( + _LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer']) +_LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer'].containing_oneof = _LOSSLAYER.oneofs_by_name['LossLayerType'] +_LOSSLAYER.oneofs_by_name['LossLayerType'].fields.append( + _LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer']) +_LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer'].containing_oneof = _LOSSLAYER.oneofs_by_name['LossLayerType'] +_OPTIMIZER.fields_by_name['sgdOptimizer'].message_type = _SGDOPTIMIZER +_OPTIMIZER.fields_by_name['adamOptimizer'].message_type = _ADAMOPTIMIZER +_OPTIMIZER.oneofs_by_name['OptimizerType'].fields.append( + _OPTIMIZER.fields_by_name['sgdOptimizer']) +_OPTIMIZER.fields_by_name['sgdOptimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['OptimizerType'] +_OPTIMIZER.oneofs_by_name['OptimizerType'].fields.append( + _OPTIMIZER.fields_by_name['adamOptimizer']) +_OPTIMIZER.fields_by_name['adamOptimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['OptimizerType'] +_SGDOPTIMIZER.fields_by_name['learningRate'].message_type = Parameters__pb2._DOUBLEPARAMETER +_SGDOPTIMIZER.fields_by_name['miniBatchSize'].message_type = Parameters__pb2._INT64PARAMETER +_SGDOPTIMIZER.fields_by_name['momentum'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['learningRate'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['miniBatchSize'].message_type = Parameters__pb2._INT64PARAMETER +_ADAMOPTIMIZER.fields_by_name['beta1'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['beta2'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['eps'].message_type = Parameters__pb2._DOUBLEPARAMETER +DESCRIPTOR.message_types_by_name['NeuralNetwork'] = _NEURALNETWORK +DESCRIPTOR.message_types_by_name['NeuralNetworkImageScaler'] = _NEURALNETWORKIMAGESCALER +DESCRIPTOR.message_types_by_name['NeuralNetworkMeanImage'] = _NEURALNETWORKMEANIMAGE +DESCRIPTOR.message_types_by_name['NeuralNetworkPreprocessing'] = _NEURALNETWORKPREPROCESSING +DESCRIPTOR.message_types_by_name['ActivationReLU'] = _ACTIVATIONRELU +DESCRIPTOR.message_types_by_name['ActivationLeakyReLU'] = _ACTIVATIONLEAKYRELU +DESCRIPTOR.message_types_by_name['ActivationTanh'] = _ACTIVATIONTANH +DESCRIPTOR.message_types_by_name['ActivationScaledTanh'] = _ACTIVATIONSCALEDTANH +DESCRIPTOR.message_types_by_name['ActivationSigmoid'] = _ACTIVATIONSIGMOID 
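The update-parameter wiring just above (lossLayers, the LossLayerType and OptimizerType oneofs, and the Parameters__pb2-typed hyperparameters) is what backs updatable models in the spec. A small sketch, again assuming the standard coremltools import path:

    from coremltools.proto import NeuralNetwork_pb2

    update = NeuralNetwork_pb2.NetworkUpdateParameters()
    # learningRate is a Parameters__pb2.DoubleParameter submessage,
    # epochs a Parameters__pb2.Int64Parameter, per the linking above.
    update.optimizer.sgdOptimizer.learningRate.defaultValue = 0.01
    update.epochs.defaultValue = 10
    assert update.optimizer.WhichOneof("OptimizerType") == "sgdOptimizer"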
+DESCRIPTOR.message_types_by_name['ActivationLinear'] = _ACTIVATIONLINEAR +DESCRIPTOR.message_types_by_name['ActivationSigmoidHard'] = _ACTIVATIONSIGMOIDHARD +DESCRIPTOR.message_types_by_name['ActivationPReLU'] = _ACTIVATIONPRELU +DESCRIPTOR.message_types_by_name['ActivationELU'] = _ACTIVATIONELU +DESCRIPTOR.message_types_by_name['ActivationThresholdedReLU'] = _ACTIVATIONTHRESHOLDEDRELU +DESCRIPTOR.message_types_by_name['ActivationSoftsign'] = _ACTIVATIONSOFTSIGN +DESCRIPTOR.message_types_by_name['ActivationSoftplus'] = _ACTIVATIONSOFTPLUS +DESCRIPTOR.message_types_by_name['ActivationParametricSoftplus'] = _ACTIVATIONPARAMETRICSOFTPLUS +DESCRIPTOR.message_types_by_name['ActivationParams'] = _ACTIVATIONPARAMS +DESCRIPTOR.message_types_by_name['Tensor'] = _TENSOR +DESCRIPTOR.message_types_by_name['NeuralNetworkLayer'] = _NEURALNETWORKLAYER +DESCRIPTOR.message_types_by_name['BranchLayerParams'] = _BRANCHLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopLayerParams'] = _LOOPLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopBreakLayerParams'] = _LOOPBREAKLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopContinueLayerParams'] = _LOOPCONTINUELAYERPARAMS +DESCRIPTOR.message_types_by_name['CopyLayerParams'] = _COPYLAYERPARAMS +DESCRIPTOR.message_types_by_name['GreaterThanLayerParams'] = _GREATERTHANLAYERPARAMS +DESCRIPTOR.message_types_by_name['GreaterEqualLayerParams'] = _GREATEREQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['LessThanLayerParams'] = _LESSTHANLAYERPARAMS +DESCRIPTOR.message_types_by_name['LessEqualLayerParams'] = _LESSEQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['EqualLayerParams'] = _EQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['NotEqualLayerParams'] = _NOTEQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalAndLayerParams'] = _LOGICALANDLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalOrLayerParams'] = _LOGICALORLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalXorLayerParams'] = _LOGICALXORLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalNotLayerParams'] = _LOGICALNOTLAYERPARAMS +DESCRIPTOR.message_types_by_name['BorderAmounts'] = _BORDERAMOUNTS +DESCRIPTOR.message_types_by_name['ValidPadding'] = _VALIDPADDING +DESCRIPTOR.message_types_by_name['SamePadding'] = _SAMEPADDING +DESCRIPTOR.message_types_by_name['SamplingMode'] = _SAMPLINGMODE +DESCRIPTOR.message_types_by_name['BoxCoordinatesMode'] = _BOXCOORDINATESMODE +DESCRIPTOR.message_types_by_name['WeightParams'] = _WEIGHTPARAMS +DESCRIPTOR.message_types_by_name['QuantizationParams'] = _QUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['LinearQuantizationParams'] = _LINEARQUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['LookUpTableQuantizationParams'] = _LOOKUPTABLEQUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['ConvolutionLayerParams'] = _CONVOLUTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['Convolution3DLayerParams'] = _CONVOLUTION3DLAYERPARAMS +DESCRIPTOR.message_types_by_name['InnerProductLayerParams'] = _INNERPRODUCTLAYERPARAMS +DESCRIPTOR.message_types_by_name['EmbeddingLayerParams'] = _EMBEDDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['EmbeddingNDLayerParams'] = _EMBEDDINGNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['BatchnormLayerParams'] = _BATCHNORMLAYERPARAMS +DESCRIPTOR.message_types_by_name['PoolingLayerParams'] = _POOLINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['Pooling3DLayerParams'] = _POOLING3DLAYERPARAMS +DESCRIPTOR.message_types_by_name['GlobalPooling3DLayerParams'] = _GLOBALPOOLING3DLAYERPARAMS 
+DESCRIPTOR.message_types_by_name['PaddingLayerParams'] = _PADDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['ConcatLayerParams'] = _CONCATLAYERPARAMS +DESCRIPTOR.message_types_by_name['LRNLayerParams'] = _LRNLAYERPARAMS +DESCRIPTOR.message_types_by_name['SoftmaxLayerParams'] = _SOFTMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['SplitLayerParams'] = _SPLITLAYERPARAMS +DESCRIPTOR.message_types_by_name['AddLayerParams'] = _ADDLAYERPARAMS +DESCRIPTOR.message_types_by_name['MultiplyLayerParams'] = _MULTIPLYLAYERPARAMS +DESCRIPTOR.message_types_by_name['UnaryFunctionLayerParams'] = _UNARYFUNCTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['UpsampleLayerParams'] = _UPSAMPLELAYERPARAMS +DESCRIPTOR.message_types_by_name['ResizeBilinearLayerParams'] = _RESIZEBILINEARLAYERPARAMS +DESCRIPTOR.message_types_by_name['CropResizeLayerParams'] = _CROPRESIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['BiasLayerParams'] = _BIASLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScaleLayerParams'] = _SCALELAYERPARAMS +DESCRIPTOR.message_types_by_name['LoadConstantLayerParams'] = _LOADCONSTANTLAYERPARAMS +DESCRIPTOR.message_types_by_name['L2NormalizeLayerParams'] = _L2NORMALIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['FlattenLayerParams'] = _FLATTENLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeLayerParams'] = _RESHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['PermuteLayerParams'] = _PERMUTELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReorganizeDataLayerParams'] = _REORGANIZEDATALAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceLayerParams'] = _SLICELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLayerParams'] = _REDUCELAYERPARAMS +DESCRIPTOR.message_types_by_name['CropLayerParams'] = _CROPLAYERPARAMS +DESCRIPTOR.message_types_by_name['AverageLayerParams'] = _AVERAGELAYERPARAMS +DESCRIPTOR.message_types_by_name['MaxLayerParams'] = _MAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['MinLayerParams'] = _MINLAYERPARAMS +DESCRIPTOR.message_types_by_name['DotProductLayerParams'] = _DOTPRODUCTLAYERPARAMS +DESCRIPTOR.message_types_by_name['MeanVarianceNormalizeLayerParams'] = _MEANVARIANCENORMALIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['SequenceRepeatLayerParams'] = _SEQUENCEREPEATLAYERPARAMS +DESCRIPTOR.message_types_by_name['SimpleRecurrentLayerParams'] = _SIMPLERECURRENTLAYERPARAMS +DESCRIPTOR.message_types_by_name['GRULayerParams'] = _GRULAYERPARAMS +DESCRIPTOR.message_types_by_name['LSTMParams'] = _LSTMPARAMS +DESCRIPTOR.message_types_by_name['LSTMWeightParams'] = _LSTMWEIGHTPARAMS +DESCRIPTOR.message_types_by_name['UniDirectionalLSTMLayerParams'] = _UNIDIRECTIONALLSTMLAYERPARAMS +DESCRIPTOR.message_types_by_name['BiDirectionalLSTMLayerParams'] = _BIDIRECTIONALLSTMLAYERPARAMS +DESCRIPTOR.message_types_by_name['CustomLayerParams'] = _CUSTOMLAYERPARAMS +DESCRIPTOR.message_types_by_name['TransposeLayerParams'] = _TRANSPOSELAYERPARAMS +DESCRIPTOR.message_types_by_name['BatchedMatMulLayerParams'] = _BATCHEDMATMULLAYERPARAMS +DESCRIPTOR.message_types_by_name['ConcatNDLayerParams'] = _CONCATNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['SoftmaxNDLayerParams'] = _SOFTMAXNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReverseLayerParams'] = _REVERSELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReverseSeqLayerParams'] = _REVERSESEQLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoadConstantNDLayerParams'] = _LOADCONSTANTNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['FillLikeLayerParams'] = _FILLLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['FillStaticLayerParams'] = 
_FILLSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['FillDynamicLayerParams'] = _FILLDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['WhereBroadcastableLayerParams'] = _WHEREBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['SinLayerParams'] = _SINLAYERPARAMS +DESCRIPTOR.message_types_by_name['CosLayerParams'] = _COSLAYERPARAMS +DESCRIPTOR.message_types_by_name['TanLayerParams'] = _TANLAYERPARAMS +DESCRIPTOR.message_types_by_name['AsinLayerParams'] = _ASINLAYERPARAMS +DESCRIPTOR.message_types_by_name['AcosLayerParams'] = _ACOSLAYERPARAMS +DESCRIPTOR.message_types_by_name['AtanLayerParams'] = _ATANLAYERPARAMS +DESCRIPTOR.message_types_by_name['SinhLayerParams'] = _SINHLAYERPARAMS +DESCRIPTOR.message_types_by_name['CoshLayerParams'] = _COSHLAYERPARAMS +DESCRIPTOR.message_types_by_name['TanhLayerParams'] = _TANHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AsinhLayerParams'] = _ASINHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AcoshLayerParams'] = _ACOSHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AtanhLayerParams'] = _ATANHLAYERPARAMS +DESCRIPTOR.message_types_by_name['PowBroadcastableLayerParams'] = _POWBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['Exp2LayerParams'] = _EXP2LAYERPARAMS +DESCRIPTOR.message_types_by_name['WhereNonZeroLayerParams'] = _WHERENONZEROLAYERPARAMS +DESCRIPTOR.message_types_by_name['MatrixBandPartLayerParams'] = _MATRIXBANDPARTLAYERPARAMS +DESCRIPTOR.message_types_by_name['UpperTriangularLayerParams'] = _UPPERTRIANGULARLAYERPARAMS +DESCRIPTOR.message_types_by_name['LowerTriangularLayerParams'] = _LOWERTRIANGULARLAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToLikeLayerParams'] = _BROADCASTTOLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToStaticLayerParams'] = _BROADCASTTOSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToDynamicLayerParams'] = _BROADCASTTODYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['AddBroadcastableLayerParams'] = _ADDBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MaxBroadcastableLayerParams'] = _MAXBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MinBroadcastableLayerParams'] = _MINBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['ModBroadcastableLayerParams'] = _MODBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['FloorDivBroadcastableLayerParams'] = _FLOORDIVBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['SubtractBroadcastableLayerParams'] = _SUBTRACTBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MultiplyBroadcastableLayerParams'] = _MULTIPLYBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['DivideBroadcastableLayerParams'] = _DIVIDEBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherLayerParams'] = _GATHERLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterLayerParams'] = _SCATTERLAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherNDLayerParams'] = _GATHERNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterNDLayerParams'] = _SCATTERNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherAlongAxisLayerParams'] = _GATHERALONGAXISLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterAlongAxisLayerParams'] = _SCATTERALONGAXISLAYERPARAMS +DESCRIPTOR.message_types_by_name['StackLayerParams'] = _STACKLAYERPARAMS +DESCRIPTOR.message_types_by_name['RankPreservingReshapeLayerParams'] = _RANKPRESERVINGRESHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['ConstantPaddingLayerParams'] = _CONSTANTPADDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalLikeLayerParams'] = 
_RANDOMNORMALLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalStaticLayerParams'] = _RANDOMNORMALSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalDynamicLayerParams'] = _RANDOMNORMALDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformLikeLayerParams'] = _RANDOMUNIFORMLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformStaticLayerParams'] = _RANDOMUNIFORMSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformDynamicLayerParams'] = _RANDOMUNIFORMDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliLikeLayerParams'] = _RANDOMBERNOULLILIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliStaticLayerParams'] = _RANDOMBERNOULLISTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliDynamicLayerParams'] = _RANDOMBERNOULLIDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['CategoricalDistributionLayerParams'] = _CATEGORICALDISTRIBUTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceL1LayerParams'] = _REDUCEL1LAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceL2LayerParams'] = _REDUCEL2LAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMaxLayerParams'] = _REDUCEMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMinLayerParams'] = _REDUCEMINLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceSumLayerParams'] = _REDUCESUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceProdLayerParams'] = _REDUCEPRODLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMeanLayerParams'] = _REDUCEMEANLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLogSumLayerParams'] = _REDUCELOGSUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceSumSquareLayerParams'] = _REDUCESUMSQUARELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLogSumExpLayerParams'] = _REDUCELOGSUMEXPLAYERPARAMS +DESCRIPTOR.message_types_by_name['ExpandDimsLayerParams'] = _EXPANDDIMSLAYERPARAMS +DESCRIPTOR.message_types_by_name['FlattenTo2DLayerParams'] = _FLATTENTO2DLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeStaticLayerParams'] = _RESHAPESTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeLikeLayerParams'] = _RESHAPELIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeDynamicLayerParams'] = _RESHAPEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SqueezeLayerParams'] = _SQUEEZELAYERPARAMS +DESCRIPTOR.message_types_by_name['TopKLayerParams'] = _TOPKLAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgMaxLayerParams'] = _ARGMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgMinLayerParams'] = _ARGMINLAYERPARAMS +DESCRIPTOR.message_types_by_name['SplitNDLayerParams'] = _SPLITNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['CeilLayerParams'] = _CEILLAYERPARAMS +DESCRIPTOR.message_types_by_name['RoundLayerParams'] = _ROUNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['FloorLayerParams'] = _FLOORLAYERPARAMS +DESCRIPTOR.message_types_by_name['SignLayerParams'] = _SIGNLAYERPARAMS +DESCRIPTOR.message_types_by_name['ClipLayerParams'] = _CLIPLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceStaticLayerParams'] = _SLICESTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceDynamicLayerParams'] = _SLICEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['TileLayerParams'] = _TILELAYERPARAMS +DESCRIPTOR.message_types_by_name['GetShapeLayerParams'] = _GETSHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['ErfLayerParams'] = _ERFLAYERPARAMS +DESCRIPTOR.message_types_by_name['GeluLayerParams'] = _GELULAYERPARAMS +DESCRIPTOR.message_types_by_name['RangeStaticLayerParams'] = _RANGESTATICLAYERPARAMS 
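Alongside the `DESCRIPTOR.message_types_by_name[...]` entries in this run, the `_sym_db` registrations that follow publish every generated type to protobuf's global symbol database, so a message class can be recovered from its fully qualified name. A minimal sketch (the `CoreML.Specification` package name comes from the insertion-point comments in this file):

    from google.protobuf import symbol_database
    from coremltools.proto import NeuralNetwork_pb2  # importing runs the registrations

    sym_db = symbol_database.Default()
    cls = sym_db.GetSymbol("CoreML.Specification.ReduceSumLayerParams")
    msg = cls()
    assert type(msg).__name__ == "ReduceSumLayerParams"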
+DESCRIPTOR.message_types_by_name['RangeDynamicLayerParams'] = _RANGEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SlidingWindowsLayerParams'] = _SLIDINGWINDOWSLAYERPARAMS +DESCRIPTOR.message_types_by_name['LayerNormalizationLayerParams'] = _LAYERNORMALIZATIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['NonMaximumSuppressionLayerParams'] = _NONMAXIMUMSUPPRESSIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['ClampedReLULayerParams'] = _CLAMPEDRELULAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgSortLayerParams'] = _ARGSORTLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceBySizeLayerParams'] = _SLICEBYSIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['NeuralNetworkClassifier'] = _NEURALNETWORKCLASSIFIER +DESCRIPTOR.message_types_by_name['OneHotLayerParams'] = _ONEHOTLAYERPARAMS +DESCRIPTOR.message_types_by_name['CumSumLayerParams'] = _CUMSUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['NeuralNetworkRegressor'] = _NEURALNETWORKREGRESSOR +DESCRIPTOR.message_types_by_name['NetworkUpdateParameters'] = _NETWORKUPDATEPARAMETERS +DESCRIPTOR.message_types_by_name['LossLayer'] = _LOSSLAYER +DESCRIPTOR.message_types_by_name['CategoricalCrossEntropyLossLayer'] = _CATEGORICALCROSSENTROPYLOSSLAYER +DESCRIPTOR.message_types_by_name['MeanSquaredErrorLossLayer'] = _MEANSQUAREDERRORLOSSLAYER +DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER +DESCRIPTOR.message_types_by_name['SGDOptimizer'] = _SGDOPTIMIZER +DESCRIPTOR.message_types_by_name['AdamOptimizer'] = _ADAMOPTIMIZER +DESCRIPTOR.enum_types_by_name['NeuralNetworkMultiArrayShapeMapping'] = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +DESCRIPTOR.enum_types_by_name['NeuralNetworkImageShapeMapping'] = _NEURALNETWORKIMAGESHAPEMAPPING +DESCRIPTOR.enum_types_by_name['ScatterMode'] = _SCATTERMODE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +NeuralNetwork = _reflection.GeneratedProtocolMessageType('NeuralNetwork', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORK, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetwork) + )) +_sym_db.RegisterMessage(NeuralNetwork) + +NeuralNetworkImageScaler = _reflection.GeneratedProtocolMessageType('NeuralNetworkImageScaler', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKIMAGESCALER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkImageScaler) + )) +_sym_db.RegisterMessage(NeuralNetworkImageScaler) + +NeuralNetworkMeanImage = _reflection.GeneratedProtocolMessageType('NeuralNetworkMeanImage', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKMEANIMAGE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkMeanImage) + )) +_sym_db.RegisterMessage(NeuralNetworkMeanImage) + +NeuralNetworkPreprocessing = _reflection.GeneratedProtocolMessageType('NeuralNetworkPreprocessing', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKPREPROCESSING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkPreprocessing) + )) +_sym_db.RegisterMessage(NeuralNetworkPreprocessing) + +ActivationReLU = _reflection.GeneratedProtocolMessageType('ActivationReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationReLU) + )) +_sym_db.RegisterMessage(ActivationReLU) + +ActivationLeakyReLU = _reflection.GeneratedProtocolMessageType('ActivationLeakyReLU', (_message.Message,), 
dict( + DESCRIPTOR = _ACTIVATIONLEAKYRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationLeakyReLU) + )) +_sym_db.RegisterMessage(ActivationLeakyReLU) + +ActivationTanh = _reflection.GeneratedProtocolMessageType('ActivationTanh', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONTANH, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationTanh) + )) +_sym_db.RegisterMessage(ActivationTanh) + +ActivationScaledTanh = _reflection.GeneratedProtocolMessageType('ActivationScaledTanh', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSCALEDTANH, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationScaledTanh) + )) +_sym_db.RegisterMessage(ActivationScaledTanh) + +ActivationSigmoid = _reflection.GeneratedProtocolMessageType('ActivationSigmoid', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSIGMOID, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSigmoid) + )) +_sym_db.RegisterMessage(ActivationSigmoid) + +ActivationLinear = _reflection.GeneratedProtocolMessageType('ActivationLinear', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONLINEAR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationLinear) + )) +_sym_db.RegisterMessage(ActivationLinear) + +ActivationSigmoidHard = _reflection.GeneratedProtocolMessageType('ActivationSigmoidHard', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSIGMOIDHARD, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSigmoidHard) + )) +_sym_db.RegisterMessage(ActivationSigmoidHard) + +ActivationPReLU = _reflection.GeneratedProtocolMessageType('ActivationPReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationPReLU) + )) +_sym_db.RegisterMessage(ActivationPReLU) + +ActivationELU = _reflection.GeneratedProtocolMessageType('ActivationELU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationELU) + )) +_sym_db.RegisterMessage(ActivationELU) + +ActivationThresholdedReLU = _reflection.GeneratedProtocolMessageType('ActivationThresholdedReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONTHRESHOLDEDRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationThresholdedReLU) + )) +_sym_db.RegisterMessage(ActivationThresholdedReLU) + +ActivationSoftsign = _reflection.GeneratedProtocolMessageType('ActivationSoftsign', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSOFTSIGN, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSoftsign) + )) +_sym_db.RegisterMessage(ActivationSoftsign) + +ActivationSoftplus = _reflection.GeneratedProtocolMessageType('ActivationSoftplus', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSOFTPLUS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSoftplus) + )) +_sym_db.RegisterMessage(ActivationSoftplus) + +ActivationParametricSoftplus = _reflection.GeneratedProtocolMessageType('ActivationParametricSoftplus', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPARAMETRICSOFTPLUS, + 
__module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationParametricSoftplus) + )) +_sym_db.RegisterMessage(ActivationParametricSoftplus) + +ActivationParams = _reflection.GeneratedProtocolMessageType('ActivationParams', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationParams) + )) +_sym_db.RegisterMessage(ActivationParams) + +Tensor = _reflection.GeneratedProtocolMessageType('Tensor', (_message.Message,), dict( + DESCRIPTOR = _TENSOR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Tensor) + )) +_sym_db.RegisterMessage(Tensor) + +NeuralNetworkLayer = _reflection.GeneratedProtocolMessageType('NeuralNetworkLayer', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkLayer) + )) +_sym_db.RegisterMessage(NeuralNetworkLayer) + +BranchLayerParams = _reflection.GeneratedProtocolMessageType('BranchLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BRANCHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BranchLayerParams) + )) +_sym_db.RegisterMessage(BranchLayerParams) + +LoopLayerParams = _reflection.GeneratedProtocolMessageType('LoopLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopLayerParams) + )) +_sym_db.RegisterMessage(LoopLayerParams) + +LoopBreakLayerParams = _reflection.GeneratedProtocolMessageType('LoopBreakLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPBREAKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopBreakLayerParams) + )) +_sym_db.RegisterMessage(LoopBreakLayerParams) + +LoopContinueLayerParams = _reflection.GeneratedProtocolMessageType('LoopContinueLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPCONTINUELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopContinueLayerParams) + )) +_sym_db.RegisterMessage(LoopContinueLayerParams) + +CopyLayerParams = _reflection.GeneratedProtocolMessageType('CopyLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COPYLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CopyLayerParams) + )) +_sym_db.RegisterMessage(CopyLayerParams) + +GreaterThanLayerParams = _reflection.GeneratedProtocolMessageType('GreaterThanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GREATERTHANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GreaterThanLayerParams) + )) +_sym_db.RegisterMessage(GreaterThanLayerParams) + +GreaterEqualLayerParams = _reflection.GeneratedProtocolMessageType('GreaterEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GREATEREQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GreaterEqualLayerParams) + )) +_sym_db.RegisterMessage(GreaterEqualLayerParams) + +LessThanLayerParams = _reflection.GeneratedProtocolMessageType('LessThanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LESSTHANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.LessThanLayerParams) + )) +_sym_db.RegisterMessage(LessThanLayerParams) + +LessEqualLayerParams = _reflection.GeneratedProtocolMessageType('LessEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LESSEQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LessEqualLayerParams) + )) +_sym_db.RegisterMessage(LessEqualLayerParams) + +EqualLayerParams = _reflection.GeneratedProtocolMessageType('EqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.EqualLayerParams) + )) +_sym_db.RegisterMessage(EqualLayerParams) + +NotEqualLayerParams = _reflection.GeneratedProtocolMessageType('NotEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _NOTEQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NotEqualLayerParams) + )) +_sym_db.RegisterMessage(NotEqualLayerParams) + +LogicalAndLayerParams = _reflection.GeneratedProtocolMessageType('LogicalAndLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALANDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalAndLayerParams) + )) +_sym_db.RegisterMessage(LogicalAndLayerParams) + +LogicalOrLayerParams = _reflection.GeneratedProtocolMessageType('LogicalOrLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalOrLayerParams) + )) +_sym_db.RegisterMessage(LogicalOrLayerParams) + +LogicalXorLayerParams = _reflection.GeneratedProtocolMessageType('LogicalXorLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALXORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalXorLayerParams) + )) +_sym_db.RegisterMessage(LogicalXorLayerParams) + +LogicalNotLayerParams = _reflection.GeneratedProtocolMessageType('LogicalNotLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALNOTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalNotLayerParams) + )) +_sym_db.RegisterMessage(LogicalNotLayerParams) + +BorderAmounts = _reflection.GeneratedProtocolMessageType('BorderAmounts', (_message.Message,), dict( + + EdgeSizes = _reflection.GeneratedProtocolMessageType('EdgeSizes', (_message.Message,), dict( + DESCRIPTOR = _BORDERAMOUNTS_EDGESIZES, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BorderAmounts.EdgeSizes) + )) + , + DESCRIPTOR = _BORDERAMOUNTS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BorderAmounts) + )) +_sym_db.RegisterMessage(BorderAmounts) +_sym_db.RegisterMessage(BorderAmounts.EdgeSizes) + +ValidPadding = _reflection.GeneratedProtocolMessageType('ValidPadding', (_message.Message,), dict( + DESCRIPTOR = _VALIDPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ValidPadding) + )) +_sym_db.RegisterMessage(ValidPadding) + +SamePadding = _reflection.GeneratedProtocolMessageType('SamePadding', (_message.Message,), dict( + DESCRIPTOR = _SAMEPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SamePadding) + )) 
+_sym_db.RegisterMessage(SamePadding) + +SamplingMode = _reflection.GeneratedProtocolMessageType('SamplingMode', (_message.Message,), dict( + DESCRIPTOR = _SAMPLINGMODE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SamplingMode) + )) +_sym_db.RegisterMessage(SamplingMode) + +BoxCoordinatesMode = _reflection.GeneratedProtocolMessageType('BoxCoordinatesMode', (_message.Message,), dict( + DESCRIPTOR = _BOXCOORDINATESMODE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BoxCoordinatesMode) + )) +_sym_db.RegisterMessage(BoxCoordinatesMode) + +WeightParams = _reflection.GeneratedProtocolMessageType('WeightParams', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WeightParams) + )) +_sym_db.RegisterMessage(WeightParams) + +QuantizationParams = _reflection.GeneratedProtocolMessageType('QuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _QUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.QuantizationParams) + )) +_sym_db.RegisterMessage(QuantizationParams) + +LinearQuantizationParams = _reflection.GeneratedProtocolMessageType('LinearQuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _LINEARQUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearQuantizationParams) + )) +_sym_db.RegisterMessage(LinearQuantizationParams) + +LookUpTableQuantizationParams = _reflection.GeneratedProtocolMessageType('LookUpTableQuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _LOOKUPTABLEQUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LookUpTableQuantizationParams) + )) +_sym_db.RegisterMessage(LookUpTableQuantizationParams) + +ConvolutionLayerParams = _reflection.GeneratedProtocolMessageType('ConvolutionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConvolutionLayerParams) + )) +_sym_db.RegisterMessage(ConvolutionLayerParams) + +Convolution3DLayerParams = _reflection.GeneratedProtocolMessageType('Convolution3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTION3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Convolution3DLayerParams) + )) +_sym_db.RegisterMessage(Convolution3DLayerParams) + +InnerProductLayerParams = _reflection.GeneratedProtocolMessageType('InnerProductLayerParams', (_message.Message,), dict( + DESCRIPTOR = _INNERPRODUCTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.InnerProductLayerParams) + )) +_sym_db.RegisterMessage(InnerProductLayerParams) + +EmbeddingLayerParams = _reflection.GeneratedProtocolMessageType('EmbeddingLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EMBEDDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.EmbeddingLayerParams) + )) +_sym_db.RegisterMessage(EmbeddingLayerParams) + +EmbeddingNDLayerParams = _reflection.GeneratedProtocolMessageType('EmbeddingNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EMBEDDINGNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.EmbeddingNDLayerParams) + )) +_sym_db.RegisterMessage(EmbeddingNDLayerParams) + +BatchnormLayerParams = _reflection.GeneratedProtocolMessageType('BatchnormLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BATCHNORMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BatchnormLayerParams) + )) +_sym_db.RegisterMessage(BatchnormLayerParams) + +PoolingLayerParams = _reflection.GeneratedProtocolMessageType('PoolingLayerParams', (_message.Message,), dict( + + ValidCompletePadding = _reflection.GeneratedProtocolMessageType('ValidCompletePadding', (_message.Message,), dict( + DESCRIPTOR = _POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PoolingLayerParams.ValidCompletePadding) + )) + , + DESCRIPTOR = _POOLINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PoolingLayerParams) + )) +_sym_db.RegisterMessage(PoolingLayerParams) +_sym_db.RegisterMessage(PoolingLayerParams.ValidCompletePadding) + +Pooling3DLayerParams = _reflection.GeneratedProtocolMessageType('Pooling3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _POOLING3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Pooling3DLayerParams) + )) +_sym_db.RegisterMessage(Pooling3DLayerParams) + +GlobalPooling3DLayerParams = _reflection.GeneratedProtocolMessageType('GlobalPooling3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GLOBALPOOLING3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GlobalPooling3DLayerParams) + )) +_sym_db.RegisterMessage(GlobalPooling3DLayerParams) + +PaddingLayerParams = _reflection.GeneratedProtocolMessageType('PaddingLayerParams', (_message.Message,), dict( + + PaddingConstant = _reflection.GeneratedProtocolMessageType('PaddingConstant', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGCONSTANT, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingConstant) + )) + , + + PaddingReflection = _reflection.GeneratedProtocolMessageType('PaddingReflection', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGREFLECTION, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingReflection) + )) + , + + PaddingReplication = _reflection.GeneratedProtocolMessageType('PaddingReplication', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGREPLICATION, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingReplication) + )) + , + DESCRIPTOR = _PADDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams) + )) +_sym_db.RegisterMessage(PaddingLayerParams) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingConstant) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingReflection) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingReplication) + +ConcatLayerParams = _reflection.GeneratedProtocolMessageType('ConcatLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONCATLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConcatLayerParams) + )) 
+_sym_db.RegisterMessage(ConcatLayerParams) + +LRNLayerParams = _reflection.GeneratedProtocolMessageType('LRNLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LRNLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LRNLayerParams) + )) +_sym_db.RegisterMessage(LRNLayerParams) + +SoftmaxLayerParams = _reflection.GeneratedProtocolMessageType('SoftmaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SOFTMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SoftmaxLayerParams) + )) +_sym_db.RegisterMessage(SoftmaxLayerParams) + +SplitLayerParams = _reflection.GeneratedProtocolMessageType('SplitLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SPLITLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SplitLayerParams) + )) +_sym_db.RegisterMessage(SplitLayerParams) + +AddLayerParams = _reflection.GeneratedProtocolMessageType('AddLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ADDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AddLayerParams) + )) +_sym_db.RegisterMessage(AddLayerParams) + +MultiplyLayerParams = _reflection.GeneratedProtocolMessageType('MultiplyLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MULTIPLYLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MultiplyLayerParams) + )) +_sym_db.RegisterMessage(MultiplyLayerParams) + +UnaryFunctionLayerParams = _reflection.GeneratedProtocolMessageType('UnaryFunctionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UNARYFUNCTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UnaryFunctionLayerParams) + )) +_sym_db.RegisterMessage(UnaryFunctionLayerParams) + +UpsampleLayerParams = _reflection.GeneratedProtocolMessageType('UpsampleLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UPSAMPLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UpsampleLayerParams) + )) +_sym_db.RegisterMessage(UpsampleLayerParams) + +ResizeBilinearLayerParams = _reflection.GeneratedProtocolMessageType('ResizeBilinearLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESIZEBILINEARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ResizeBilinearLayerParams) + )) +_sym_db.RegisterMessage(ResizeBilinearLayerParams) + +CropResizeLayerParams = _reflection.GeneratedProtocolMessageType('CropResizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CROPRESIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CropResizeLayerParams) + )) +_sym_db.RegisterMessage(CropResizeLayerParams) + +BiasLayerParams = _reflection.GeneratedProtocolMessageType('BiasLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BIASLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BiasLayerParams) + )) +_sym_db.RegisterMessage(BiasLayerParams) + +ScaleLayerParams = _reflection.GeneratedProtocolMessageType('ScaleLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCALELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScaleLayerParams) + )) +_sym_db.RegisterMessage(ScaleLayerParams) + 
+LoadConstantLayerParams = _reflection.GeneratedProtocolMessageType('LoadConstantLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOADCONSTANTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoadConstantLayerParams) + )) +_sym_db.RegisterMessage(LoadConstantLayerParams) + +L2NormalizeLayerParams = _reflection.GeneratedProtocolMessageType('L2NormalizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _L2NORMALIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.L2NormalizeLayerParams) + )) +_sym_db.RegisterMessage(L2NormalizeLayerParams) + +FlattenLayerParams = _reflection.GeneratedProtocolMessageType('FlattenLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLATTENLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FlattenLayerParams) + )) +_sym_db.RegisterMessage(FlattenLayerParams) + +ReshapeLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeLayerParams) + )) +_sym_db.RegisterMessage(ReshapeLayerParams) + +PermuteLayerParams = _reflection.GeneratedProtocolMessageType('PermuteLayerParams', (_message.Message,), dict( + DESCRIPTOR = _PERMUTELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PermuteLayerParams) + )) +_sym_db.RegisterMessage(PermuteLayerParams) + +ReorganizeDataLayerParams = _reflection.GeneratedProtocolMessageType('ReorganizeDataLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REORGANIZEDATALAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReorganizeDataLayerParams) + )) +_sym_db.RegisterMessage(ReorganizeDataLayerParams) + +SliceLayerParams = _reflection.GeneratedProtocolMessageType('SliceLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceLayerParams) + )) +_sym_db.RegisterMessage(SliceLayerParams) + +ReduceLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLayerParams) + )) +_sym_db.RegisterMessage(ReduceLayerParams) + +CropLayerParams = _reflection.GeneratedProtocolMessageType('CropLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CROPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CropLayerParams) + )) +_sym_db.RegisterMessage(CropLayerParams) + +AverageLayerParams = _reflection.GeneratedProtocolMessageType('AverageLayerParams', (_message.Message,), dict( + DESCRIPTOR = _AVERAGELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AverageLayerParams) + )) +_sym_db.RegisterMessage(AverageLayerParams) + +MaxLayerParams = _reflection.GeneratedProtocolMessageType('MaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MaxLayerParams) + )) +_sym_db.RegisterMessage(MaxLayerParams) + +MinLayerParams = 
_reflection.GeneratedProtocolMessageType('MinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MinLayerParams) + )) +_sym_db.RegisterMessage(MinLayerParams) + +DotProductLayerParams = _reflection.GeneratedProtocolMessageType('DotProductLayerParams', (_message.Message,), dict( + DESCRIPTOR = _DOTPRODUCTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DotProductLayerParams) + )) +_sym_db.RegisterMessage(DotProductLayerParams) + +MeanVarianceNormalizeLayerParams = _reflection.GeneratedProtocolMessageType('MeanVarianceNormalizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MEANVARIANCENORMALIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MeanVarianceNormalizeLayerParams) + )) +_sym_db.RegisterMessage(MeanVarianceNormalizeLayerParams) + +SequenceRepeatLayerParams = _reflection.GeneratedProtocolMessageType('SequenceRepeatLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SEQUENCEREPEATLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SequenceRepeatLayerParams) + )) +_sym_db.RegisterMessage(SequenceRepeatLayerParams) + +SimpleRecurrentLayerParams = _reflection.GeneratedProtocolMessageType('SimpleRecurrentLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SIMPLERECURRENTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SimpleRecurrentLayerParams) + )) +_sym_db.RegisterMessage(SimpleRecurrentLayerParams) + +GRULayerParams = _reflection.GeneratedProtocolMessageType('GRULayerParams', (_message.Message,), dict( + DESCRIPTOR = _GRULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GRULayerParams) + )) +_sym_db.RegisterMessage(GRULayerParams) + +LSTMParams = _reflection.GeneratedProtocolMessageType('LSTMParams', (_message.Message,), dict( + DESCRIPTOR = _LSTMPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LSTMParams) + )) +_sym_db.RegisterMessage(LSTMParams) + +LSTMWeightParams = _reflection.GeneratedProtocolMessageType('LSTMWeightParams', (_message.Message,), dict( + DESCRIPTOR = _LSTMWEIGHTPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LSTMWeightParams) + )) +_sym_db.RegisterMessage(LSTMWeightParams) + +UniDirectionalLSTMLayerParams = _reflection.GeneratedProtocolMessageType('UniDirectionalLSTMLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UNIDIRECTIONALLSTMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UniDirectionalLSTMLayerParams) + )) +_sym_db.RegisterMessage(UniDirectionalLSTMLayerParams) + +BiDirectionalLSTMLayerParams = _reflection.GeneratedProtocolMessageType('BiDirectionalLSTMLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BIDIRECTIONALLSTMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BiDirectionalLSTMLayerParams) + )) +_sym_db.RegisterMessage(BiDirectionalLSTMLayerParams) + +CustomLayerParams = _reflection.GeneratedProtocolMessageType('CustomLayerParams', (_message.Message,), dict( + + CustomLayerParamValue = _reflection.GeneratedProtocolMessageType('CustomLayerParamValue', (_message.Message,), 
dict( + DESCRIPTOR = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams.CustomLayerParamValue) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMLAYERPARAMS_PARAMETERSENTRY, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams.ParametersEntry) + )) + , + DESCRIPTOR = _CUSTOMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams) + )) +_sym_db.RegisterMessage(CustomLayerParams) +_sym_db.RegisterMessage(CustomLayerParams.CustomLayerParamValue) +_sym_db.RegisterMessage(CustomLayerParams.ParametersEntry) + +TransposeLayerParams = _reflection.GeneratedProtocolMessageType('TransposeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TRANSPOSELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TransposeLayerParams) + )) +_sym_db.RegisterMessage(TransposeLayerParams) + +BatchedMatMulLayerParams = _reflection.GeneratedProtocolMessageType('BatchedMatMulLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BATCHEDMATMULLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BatchedMatMulLayerParams) + )) +_sym_db.RegisterMessage(BatchedMatMulLayerParams) + +ConcatNDLayerParams = _reflection.GeneratedProtocolMessageType('ConcatNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONCATNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConcatNDLayerParams) + )) +_sym_db.RegisterMessage(ConcatNDLayerParams) + +SoftmaxNDLayerParams = _reflection.GeneratedProtocolMessageType('SoftmaxNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SOFTMAXNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SoftmaxNDLayerParams) + )) +_sym_db.RegisterMessage(SoftmaxNDLayerParams) + +ReverseLayerParams = _reflection.GeneratedProtocolMessageType('ReverseLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REVERSELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReverseLayerParams) + )) +_sym_db.RegisterMessage(ReverseLayerParams) + +ReverseSeqLayerParams = _reflection.GeneratedProtocolMessageType('ReverseSeqLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REVERSESEQLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReverseSeqLayerParams) + )) +_sym_db.RegisterMessage(ReverseSeqLayerParams) + +LoadConstantNDLayerParams = _reflection.GeneratedProtocolMessageType('LoadConstantNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOADCONSTANTNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoadConstantNDLayerParams) + )) +_sym_db.RegisterMessage(LoadConstantNDLayerParams) + +FillLikeLayerParams = _reflection.GeneratedProtocolMessageType('FillLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillLikeLayerParams) + )) +_sym_db.RegisterMessage(FillLikeLayerParams) + +FillStaticLayerParams = 
_reflection.GeneratedProtocolMessageType('FillStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillStaticLayerParams) + )) +_sym_db.RegisterMessage(FillStaticLayerParams) + +FillDynamicLayerParams = _reflection.GeneratedProtocolMessageType('FillDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillDynamicLayerParams) + )) +_sym_db.RegisterMessage(FillDynamicLayerParams) + +WhereBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('WhereBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _WHEREBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WhereBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(WhereBroadcastableLayerParams) + +SinLayerParams = _reflection.GeneratedProtocolMessageType('SinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SinLayerParams) + )) +_sym_db.RegisterMessage(SinLayerParams) + +CosLayerParams = _reflection.GeneratedProtocolMessageType('CosLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CosLayerParams) + )) +_sym_db.RegisterMessage(CosLayerParams) + +TanLayerParams = _reflection.GeneratedProtocolMessageType('TanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TanLayerParams) + )) +_sym_db.RegisterMessage(TanLayerParams) + +AsinLayerParams = _reflection.GeneratedProtocolMessageType('AsinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ASINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AsinLayerParams) + )) +_sym_db.RegisterMessage(AsinLayerParams) + +AcosLayerParams = _reflection.GeneratedProtocolMessageType('AcosLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ACOSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AcosLayerParams) + )) +_sym_db.RegisterMessage(AcosLayerParams) + +AtanLayerParams = _reflection.GeneratedProtocolMessageType('AtanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ATANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AtanLayerParams) + )) +_sym_db.RegisterMessage(AtanLayerParams) + +SinhLayerParams = _reflection.GeneratedProtocolMessageType('SinhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SINHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SinhLayerParams) + )) +_sym_db.RegisterMessage(SinhLayerParams) + +CoshLayerParams = _reflection.GeneratedProtocolMessageType('CoshLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COSHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoshLayerParams) + )) +_sym_db.RegisterMessage(CoshLayerParams) + +TanhLayerParams = _reflection.GeneratedProtocolMessageType('TanhLayerParams', (_message.Message,), dict( + DESCRIPTOR = 
_TANHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TanhLayerParams) + )) +_sym_db.RegisterMessage(TanhLayerParams) + +AsinhLayerParams = _reflection.GeneratedProtocolMessageType('AsinhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ASINHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AsinhLayerParams) + )) +_sym_db.RegisterMessage(AsinhLayerParams) + +AcoshLayerParams = _reflection.GeneratedProtocolMessageType('AcoshLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ACOSHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AcoshLayerParams) + )) +_sym_db.RegisterMessage(AcoshLayerParams) + +AtanhLayerParams = _reflection.GeneratedProtocolMessageType('AtanhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ATANHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AtanhLayerParams) + )) +_sym_db.RegisterMessage(AtanhLayerParams) + +PowBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('PowBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _POWBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PowBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(PowBroadcastableLayerParams) + +Exp2LayerParams = _reflection.GeneratedProtocolMessageType('Exp2LayerParams', (_message.Message,), dict( + DESCRIPTOR = _EXP2LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Exp2LayerParams) + )) +_sym_db.RegisterMessage(Exp2LayerParams) + +WhereNonZeroLayerParams = _reflection.GeneratedProtocolMessageType('WhereNonZeroLayerParams', (_message.Message,), dict( + DESCRIPTOR = _WHERENONZEROLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WhereNonZeroLayerParams) + )) +_sym_db.RegisterMessage(WhereNonZeroLayerParams) + +MatrixBandPartLayerParams = _reflection.GeneratedProtocolMessageType('MatrixBandPartLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MATRIXBANDPARTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MatrixBandPartLayerParams) + )) +_sym_db.RegisterMessage(MatrixBandPartLayerParams) + +UpperTriangularLayerParams = _reflection.GeneratedProtocolMessageType('UpperTriangularLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UPPERTRIANGULARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UpperTriangularLayerParams) + )) +_sym_db.RegisterMessage(UpperTriangularLayerParams) + +LowerTriangularLayerParams = _reflection.GeneratedProtocolMessageType('LowerTriangularLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOWERTRIANGULARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LowerTriangularLayerParams) + )) +_sym_db.RegisterMessage(LowerTriangularLayerParams) + +BroadcastToLikeLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTOLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToLikeLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToLikeLayerParams) + 
+BroadcastToStaticLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTOSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToStaticLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToStaticLayerParams) + +BroadcastToDynamicLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTODYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToDynamicLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToDynamicLayerParams) + +AddBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('AddBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ADDBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AddBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(AddBroadcastableLayerParams) + +MaxBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MaxBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MAXBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MaxBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MaxBroadcastableLayerParams) + +MinBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MinBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MINBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MinBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MinBroadcastableLayerParams) + +ModBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('ModBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MODBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ModBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(ModBroadcastableLayerParams) + +FloorDivBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('FloorDivBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLOORDIVBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloorDivBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(FloorDivBroadcastableLayerParams) + +SubtractBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('SubtractBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SUBTRACTBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SubtractBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(SubtractBroadcastableLayerParams) + +MultiplyBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MultiplyBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MULTIPLYBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MultiplyBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MultiplyBroadcastableLayerParams) + +DivideBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('DivideBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _DIVIDEBROADCASTABLELAYERPARAMS, + __module__ = 
'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DivideBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(DivideBroadcastableLayerParams) + +GatherLayerParams = _reflection.GeneratedProtocolMessageType('GatherLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherLayerParams) + )) +_sym_db.RegisterMessage(GatherLayerParams) + +ScatterLayerParams = _reflection.GeneratedProtocolMessageType('ScatterLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterLayerParams) + )) +_sym_db.RegisterMessage(ScatterLayerParams) + +GatherNDLayerParams = _reflection.GeneratedProtocolMessageType('GatherNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherNDLayerParams) + )) +_sym_db.RegisterMessage(GatherNDLayerParams) + +ScatterNDLayerParams = _reflection.GeneratedProtocolMessageType('ScatterNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterNDLayerParams) + )) +_sym_db.RegisterMessage(ScatterNDLayerParams) + +GatherAlongAxisLayerParams = _reflection.GeneratedProtocolMessageType('GatherAlongAxisLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERALONGAXISLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherAlongAxisLayerParams) + )) +_sym_db.RegisterMessage(GatherAlongAxisLayerParams) + +ScatterAlongAxisLayerParams = _reflection.GeneratedProtocolMessageType('ScatterAlongAxisLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERALONGAXISLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterAlongAxisLayerParams) + )) +_sym_db.RegisterMessage(ScatterAlongAxisLayerParams) + +StackLayerParams = _reflection.GeneratedProtocolMessageType('StackLayerParams', (_message.Message,), dict( + DESCRIPTOR = _STACKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StackLayerParams) + )) +_sym_db.RegisterMessage(StackLayerParams) + +RankPreservingReshapeLayerParams = _reflection.GeneratedProtocolMessageType('RankPreservingReshapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANKPRESERVINGRESHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RankPreservingReshapeLayerParams) + )) +_sym_db.RegisterMessage(RankPreservingReshapeLayerParams) + +ConstantPaddingLayerParams = _reflection.GeneratedProtocolMessageType('ConstantPaddingLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONSTANTPADDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConstantPaddingLayerParams) + )) +_sym_db.RegisterMessage(ConstantPaddingLayerParams) + +RandomNormalLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalLikeLayerParams) + )) 
+_sym_db.RegisterMessage(RandomNormalLikeLayerParams) + +RandomNormalStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomNormalStaticLayerParams) + +RandomNormalDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomNormalDynamicLayerParams) + +RandomUniformLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformLikeLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformLikeLayerParams) + +RandomUniformStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformStaticLayerParams) + +RandomUniformDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformDynamicLayerParams) + +RandomBernoulliLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLILIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliLikeLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliLikeLayerParams) + +RandomBernoulliStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLISTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliStaticLayerParams) + +RandomBernoulliDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLIDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliDynamicLayerParams) + +CategoricalDistributionLayerParams = _reflection.GeneratedProtocolMessageType('CategoricalDistributionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALDISTRIBUTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalDistributionLayerParams) + )) +_sym_db.RegisterMessage(CategoricalDistributionLayerParams) + +ReduceL1LayerParams = 
_reflection.GeneratedProtocolMessageType('ReduceL1LayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEL1LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceL1LayerParams) + )) +_sym_db.RegisterMessage(ReduceL1LayerParams) + +ReduceL2LayerParams = _reflection.GeneratedProtocolMessageType('ReduceL2LayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEL2LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceL2LayerParams) + )) +_sym_db.RegisterMessage(ReduceL2LayerParams) + +ReduceMaxLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMaxLayerParams) + )) +_sym_db.RegisterMessage(ReduceMaxLayerParams) + +ReduceMinLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMinLayerParams) + )) +_sym_db.RegisterMessage(ReduceMinLayerParams) + +ReduceSumLayerParams = _reflection.GeneratedProtocolMessageType('ReduceSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCESUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceSumLayerParams) + )) +_sym_db.RegisterMessage(ReduceSumLayerParams) + +ReduceProdLayerParams = _reflection.GeneratedProtocolMessageType('ReduceProdLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEPRODLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceProdLayerParams) + )) +_sym_db.RegisterMessage(ReduceProdLayerParams) + +ReduceMeanLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMeanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMEANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMeanLayerParams) + )) +_sym_db.RegisterMessage(ReduceMeanLayerParams) + +ReduceLogSumLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLogSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELOGSUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLogSumLayerParams) + )) +_sym_db.RegisterMessage(ReduceLogSumLayerParams) + +ReduceSumSquareLayerParams = _reflection.GeneratedProtocolMessageType('ReduceSumSquareLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCESUMSQUARELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceSumSquareLayerParams) + )) +_sym_db.RegisterMessage(ReduceSumSquareLayerParams) + +ReduceLogSumExpLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLogSumExpLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELOGSUMEXPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLogSumExpLayerParams) + )) +_sym_db.RegisterMessage(ReduceLogSumExpLayerParams) + +ExpandDimsLayerParams = _reflection.GeneratedProtocolMessageType('ExpandDimsLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EXPANDDIMSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.ExpandDimsLayerParams) + )) +_sym_db.RegisterMessage(ExpandDimsLayerParams) + +FlattenTo2DLayerParams = _reflection.GeneratedProtocolMessageType('FlattenTo2DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLATTENTO2DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FlattenTo2DLayerParams) + )) +_sym_db.RegisterMessage(FlattenTo2DLayerParams) + +ReshapeStaticLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeStaticLayerParams) + )) +_sym_db.RegisterMessage(ReshapeStaticLayerParams) + +ReshapeLikeLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPELIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeLikeLayerParams) + )) +_sym_db.RegisterMessage(ReshapeLikeLayerParams) + +ReshapeDynamicLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeDynamicLayerParams) + )) +_sym_db.RegisterMessage(ReshapeDynamicLayerParams) + +SqueezeLayerParams = _reflection.GeneratedProtocolMessageType('SqueezeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SQUEEZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SqueezeLayerParams) + )) +_sym_db.RegisterMessage(SqueezeLayerParams) + +TopKLayerParams = _reflection.GeneratedProtocolMessageType('TopKLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TOPKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TopKLayerParams) + )) +_sym_db.RegisterMessage(TopKLayerParams) + +ArgMaxLayerParams = _reflection.GeneratedProtocolMessageType('ArgMaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgMaxLayerParams) + )) +_sym_db.RegisterMessage(ArgMaxLayerParams) + +ArgMinLayerParams = _reflection.GeneratedProtocolMessageType('ArgMinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGMINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgMinLayerParams) + )) +_sym_db.RegisterMessage(ArgMinLayerParams) + +SplitNDLayerParams = _reflection.GeneratedProtocolMessageType('SplitNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SPLITNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SplitNDLayerParams) + )) +_sym_db.RegisterMessage(SplitNDLayerParams) + +CeilLayerParams = _reflection.GeneratedProtocolMessageType('CeilLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CEILLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CeilLayerParams) + )) +_sym_db.RegisterMessage(CeilLayerParams) + +RoundLayerParams = _reflection.GeneratedProtocolMessageType('RoundLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ROUNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RoundLayerParams) + )) +_sym_db.RegisterMessage(RoundLayerParams) + +FloorLayerParams = _reflection.GeneratedProtocolMessageType('FloorLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLOORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloorLayerParams) + )) +_sym_db.RegisterMessage(FloorLayerParams) + +SignLayerParams = _reflection.GeneratedProtocolMessageType('SignLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SIGNLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SignLayerParams) + )) +_sym_db.RegisterMessage(SignLayerParams) + +ClipLayerParams = _reflection.GeneratedProtocolMessageType('ClipLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CLIPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClipLayerParams) + )) +_sym_db.RegisterMessage(ClipLayerParams) + +SliceStaticLayerParams = _reflection.GeneratedProtocolMessageType('SliceStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceStaticLayerParams) + )) +_sym_db.RegisterMessage(SliceStaticLayerParams) + +SliceDynamicLayerParams = _reflection.GeneratedProtocolMessageType('SliceDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceDynamicLayerParams) + )) +_sym_db.RegisterMessage(SliceDynamicLayerParams) + +TileLayerParams = _reflection.GeneratedProtocolMessageType('TileLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TILELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TileLayerParams) + )) +_sym_db.RegisterMessage(TileLayerParams) + +GetShapeLayerParams = _reflection.GeneratedProtocolMessageType('GetShapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GETSHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GetShapeLayerParams) + )) +_sym_db.RegisterMessage(GetShapeLayerParams) + +ErfLayerParams = _reflection.GeneratedProtocolMessageType('ErfLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ERFLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ErfLayerParams) + )) +_sym_db.RegisterMessage(ErfLayerParams) + +GeluLayerParams = _reflection.GeneratedProtocolMessageType('GeluLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GELULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GeluLayerParams) + )) +_sym_db.RegisterMessage(GeluLayerParams) + +RangeStaticLayerParams = _reflection.GeneratedProtocolMessageType('RangeStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANGESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RangeStaticLayerParams) + )) +_sym_db.RegisterMessage(RangeStaticLayerParams) + +RangeDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RangeDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANGEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RangeDynamicLayerParams) + )) +_sym_db.RegisterMessage(RangeDynamicLayerParams) + +SlidingWindowsLayerParams = _reflection.GeneratedProtocolMessageType('SlidingWindowsLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLIDINGWINDOWSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SlidingWindowsLayerParams) + )) +_sym_db.RegisterMessage(SlidingWindowsLayerParams) + +LayerNormalizationLayerParams = _reflection.GeneratedProtocolMessageType('LayerNormalizationLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LAYERNORMALIZATIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LayerNormalizationLayerParams) + )) +_sym_db.RegisterMessage(LayerNormalizationLayerParams) + +NonMaximumSuppressionLayerParams = _reflection.GeneratedProtocolMessageType('NonMaximumSuppressionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _NONMAXIMUMSUPPRESSIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppressionLayerParams) + )) +_sym_db.RegisterMessage(NonMaximumSuppressionLayerParams) + +ClampedReLULayerParams = _reflection.GeneratedProtocolMessageType('ClampedReLULayerParams', (_message.Message,), dict( + DESCRIPTOR = _CLAMPEDRELULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClampedReLULayerParams) + )) +_sym_db.RegisterMessage(ClampedReLULayerParams) + +ArgSortLayerParams = _reflection.GeneratedProtocolMessageType('ArgSortLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGSORTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgSortLayerParams) + )) +_sym_db.RegisterMessage(ArgSortLayerParams) + +SliceBySizeLayerParams = _reflection.GeneratedProtocolMessageType('SliceBySizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICEBYSIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceBySizeLayerParams) + )) +_sym_db.RegisterMessage(SliceBySizeLayerParams) + +NeuralNetworkClassifier = _reflection.GeneratedProtocolMessageType('NeuralNetworkClassifier', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKCLASSIFIER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkClassifier) + )) +_sym_db.RegisterMessage(NeuralNetworkClassifier) + +OneHotLayerParams = _reflection.GeneratedProtocolMessageType('OneHotLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ONEHOTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.OneHotLayerParams) + )) +_sym_db.RegisterMessage(OneHotLayerParams) + +CumSumLayerParams = _reflection.GeneratedProtocolMessageType('CumSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CUMSUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CumSumLayerParams) + )) +_sym_db.RegisterMessage(CumSumLayerParams) + +NeuralNetworkRegressor = _reflection.GeneratedProtocolMessageType('NeuralNetworkRegressor', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKREGRESSOR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkRegressor) + )) +_sym_db.RegisterMessage(NeuralNetworkRegressor) + 
+NetworkUpdateParameters = _reflection.GeneratedProtocolMessageType('NetworkUpdateParameters', (_message.Message,), dict( + DESCRIPTOR = _NETWORKUPDATEPARAMETERS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NetworkUpdateParameters) + )) +_sym_db.RegisterMessage(NetworkUpdateParameters) + +LossLayer = _reflection.GeneratedProtocolMessageType('LossLayer', (_message.Message,), dict( + DESCRIPTOR = _LOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LossLayer) + )) +_sym_db.RegisterMessage(LossLayer) + +CategoricalCrossEntropyLossLayer = _reflection.GeneratedProtocolMessageType('CategoricalCrossEntropyLossLayer', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALCROSSENTROPYLOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalCrossEntropyLossLayer) + )) +_sym_db.RegisterMessage(CategoricalCrossEntropyLossLayer) + +MeanSquaredErrorLossLayer = _reflection.GeneratedProtocolMessageType('MeanSquaredErrorLossLayer', (_message.Message,), dict( + DESCRIPTOR = _MEANSQUAREDERRORLOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MeanSquaredErrorLossLayer) + )) +_sym_db.RegisterMessage(MeanSquaredErrorLossLayer) + +Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), dict( + DESCRIPTOR = _OPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Optimizer) + )) +_sym_db.RegisterMessage(Optimizer) + +SGDOptimizer = _reflection.GeneratedProtocolMessageType('SGDOptimizer', (_message.Message,), dict( + DESCRIPTOR = _SGDOPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SGDOptimizer) + )) +_sym_db.RegisterMessage(SGDOptimizer) + +AdamOptimizer = _reflection.GeneratedProtocolMessageType('AdamOptimizer', (_message.Message,), dict( + DESCRIPTOR = _ADAMOPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AdamOptimizer) + )) +_sym_db.RegisterMessage(AdamOptimizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.has_options = True +_CUSTOMLAYERPARAMS_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py new file mode 100644 index 00000000..285ac82d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py @@ -0,0 +1,206 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NonMaximumSuppression.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NonMaximumSuppression.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1bNonMaximumSuppression.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xc0\x04\n\x15NonMaximumSuppression\x12\x46\n\x07pickTop\x18\x01 \x01(\x0b\x32\x33.CoreML.Specification.NonMaximumSuppression.PickTopH\x00\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x01\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x01\x12\x14\n\x0ciouThreshold\x18n \x01(\x01\x12\x1b\n\x13\x63onfidenceThreshold\x18o \x01(\x01\x12#\n\x1a\x63onfidenceInputFeatureName\x18\xc8\x01 \x01(\t\x12$\n\x1b\x63oordinatesInputFeatureName\x18\xc9\x01 \x01(\t\x12%\n\x1ciouThresholdInputFeatureName\x18\xca\x01 \x01(\t\x12,\n#confidenceThresholdInputFeatureName\x18\xcb\x01 \x01(\t\x12$\n\x1b\x63onfidenceOutputFeatureName\x18\xd2\x01 \x01(\t\x12%\n\x1c\x63oordinatesOutputFeatureName\x18\xd3\x01 \x01(\t\x1a\x1b\n\x07PickTop\x12\x10\n\x08perClass\x18\x01 \x01(\x08\x42\x13\n\x11SuppressionMethodB\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_NONMAXIMUMSUPPRESSION_PICKTOP = _descriptor.Descriptor( + name='PickTop', + full_name='CoreML.Specification.NonMaximumSuppression.PickTop', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='perClass', full_name='CoreML.Specification.NonMaximumSuppression.PickTop.perClass', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=589, + serialized_end=616, +) + +_NONMAXIMUMSUPPRESSION = _descriptor.Descriptor( + name='NonMaximumSuppression', + full_name='CoreML.Specification.NonMaximumSuppression', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pickTop', full_name='CoreML.Specification.NonMaximumSuppression.pickTop', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.stringClassLabels', index=1, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.int64ClassLabels', index=2, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='iouThreshold', full_name='CoreML.Specification.NonMaximumSuppression.iouThreshold', index=3, + number=110, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceThreshold', full_name='CoreML.Specification.NonMaximumSuppression.confidenceThreshold', index=4, + number=111, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceInputFeatureName', index=5, + number=200, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coordinatesInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.coordinatesInputFeatureName', index=6, + number=201, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='iouThresholdInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.iouThresholdInputFeatureName', index=7, + number=202, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceThresholdInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceThresholdInputFeatureName', index=8, + number=203, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceOutputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceOutputFeatureName', index=9, + number=210, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coordinatesOutputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.coordinatesOutputFeatureName', index=10, + number=211, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_NONMAXIMUMSUPPRESSION_PICKTOP, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SuppressionMethod', full_name='CoreML.Specification.NonMaximumSuppression.SuppressionMethod', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.ClassLabels', + 
index=1, containing_type=None, fields=[]), + ], + serialized_start=76, + serialized_end=652, +) + +_NONMAXIMUMSUPPRESSION_PICKTOP.containing_type = _NONMAXIMUMSUPPRESSION +_NONMAXIMUMSUPPRESSION.fields_by_name['pickTop'].message_type = _NONMAXIMUMSUPPRESSION_PICKTOP +_NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_NONMAXIMUMSUPPRESSION.oneofs_by_name['SuppressionMethod'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['pickTop']) +_NONMAXIMUMSUPPRESSION.fields_by_name['pickTop'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['SuppressionMethod'] +_NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels']) +_NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'] +_NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels']) +_NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['NonMaximumSuppression'] = _NONMAXIMUMSUPPRESSION +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +NonMaximumSuppression = _reflection.GeneratedProtocolMessageType('NonMaximumSuppression', (_message.Message,), dict( + + PickTop = _reflection.GeneratedProtocolMessageType('PickTop', (_message.Message,), dict( + DESCRIPTOR = _NONMAXIMUMSUPPRESSION_PICKTOP, + __module__ = 'NonMaximumSuppression_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppression.PickTop) + )) + , + DESCRIPTOR = _NONMAXIMUMSUPPRESSION, + __module__ = 'NonMaximumSuppression_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppression) + )) +_sym_db.RegisterMessage(NonMaximumSuppression) +_sym_db.RegisterMessage(NonMaximumSuppression.PickTop) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py new file mode 100644 index 00000000..0cf2a4c5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py @@ -0,0 +1,100 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: Normalizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Normalizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x10Normalizer.proto\x12\x14\x43oreML.Specification\"o\n\nNormalizer\x12;\n\x08normType\x18\x01 \x01(\x0e\x32).CoreML.Specification.Normalizer.NormType\"$\n\x08NormType\x12\x08\n\x04LMax\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x42\x02H\x03\x62\x06proto3') +) + + + +_NORMALIZER_NORMTYPE = _descriptor.EnumDescriptor( + name='NormType', + full_name='CoreML.Specification.Normalizer.NormType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='LMax', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=117, + serialized_end=153, +) +_sym_db.RegisterEnumDescriptor(_NORMALIZER_NORMTYPE) + + +_NORMALIZER = _descriptor.Descriptor( + name='Normalizer', + full_name='CoreML.Specification.Normalizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='normType', full_name='CoreML.Specification.Normalizer.normType', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _NORMALIZER_NORMTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=42, + serialized_end=153, +) + +_NORMALIZER.fields_by_name['normType'].enum_type = _NORMALIZER_NORMTYPE +_NORMALIZER_NORMTYPE.containing_type = _NORMALIZER +DESCRIPTOR.message_types_by_name['Normalizer'] = _NORMALIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Normalizer = _reflection.GeneratedProtocolMessageType('Normalizer', (_message.Message,), dict( + DESCRIPTOR = _NORMALIZER, + __module__ = 'Normalizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Normalizer) + )) +_sym_db.RegisterMessage(Normalizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py new file mode 100644 index 00000000..6219f13b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py @@ -0,0 +1,136 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: OneHotEncoder.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='OneHotEncoder.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13OneHotEncoder.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xb5\x02\n\rOneHotEncoder\x12>\n\x10stringCategories\x18\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12<\n\x0fint64Categories\x18\x02 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\x14\n\x0coutputSparse\x18\n \x01(\x08\x12H\n\rhandleUnknown\x18\x0b \x01(\x0e\x32\x31.CoreML.Specification.OneHotEncoder.HandleUnknown\"6\n\rHandleUnknown\x12\x12\n\x0e\x45rrorOnUnknown\x10\x00\x12\x11\n\rIgnoreUnknown\x10\x01\x42\x0e\n\x0c\x43\x61tegoryTypeB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + +_ONEHOTENCODER_HANDLEUNKNOWN = _descriptor.EnumDescriptor( + name='HandleUnknown', + full_name='CoreML.Specification.OneHotEncoder.HandleUnknown', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='ErrorOnUnknown', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IgnoreUnknown', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=307, + serialized_end=361, +) +_sym_db.RegisterEnumDescriptor(_ONEHOTENCODER_HANDLEUNKNOWN) + + +_ONEHOTENCODER = _descriptor.Descriptor( + name='OneHotEncoder', + full_name='CoreML.Specification.OneHotEncoder', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringCategories', full_name='CoreML.Specification.OneHotEncoder.stringCategories', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64Categories', full_name='CoreML.Specification.OneHotEncoder.int64Categories', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputSparse', full_name='CoreML.Specification.OneHotEncoder.outputSparse', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='handleUnknown', full_name='CoreML.Specification.OneHotEncoder.handleUnknown', index=3, + number=11, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, 
+ message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _ONEHOTENCODER_HANDLEUNKNOWN, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='CategoryType', full_name='CoreML.Specification.OneHotEncoder.CategoryType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=68, + serialized_end=377, +) + +_ONEHOTENCODER.fields_by_name['stringCategories'].message_type = DataStructures__pb2._STRINGVECTOR +_ONEHOTENCODER.fields_by_name['int64Categories'].message_type = DataStructures__pb2._INT64VECTOR +_ONEHOTENCODER.fields_by_name['handleUnknown'].enum_type = _ONEHOTENCODER_HANDLEUNKNOWN +_ONEHOTENCODER_HANDLEUNKNOWN.containing_type = _ONEHOTENCODER +_ONEHOTENCODER.oneofs_by_name['CategoryType'].fields.append( + _ONEHOTENCODER.fields_by_name['stringCategories']) +_ONEHOTENCODER.fields_by_name['stringCategories'].containing_oneof = _ONEHOTENCODER.oneofs_by_name['CategoryType'] +_ONEHOTENCODER.oneofs_by_name['CategoryType'].fields.append( + _ONEHOTENCODER.fields_by_name['int64Categories']) +_ONEHOTENCODER.fields_by_name['int64Categories'].containing_oneof = _ONEHOTENCODER.oneofs_by_name['CategoryType'] +DESCRIPTOR.message_types_by_name['OneHotEncoder'] = _ONEHOTENCODER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +OneHotEncoder = _reflection.GeneratedProtocolMessageType('OneHotEncoder', (_message.Message,), dict( + DESCRIPTOR = _ONEHOTENCODER, + __module__ = 'OneHotEncoder_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.OneHotEncoder) + )) +_sym_db.RegisterMessage(OneHotEncoder) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py new file mode 100644 index 00000000..05273b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py @@ -0,0 +1,235 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Parameters.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Parameters.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x10Parameters.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x99\x01\n\x0eInt64Parameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x03\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.Int64RangeH\x00\x12-\n\x03set\x18\x0b \x01(\x0b\x32\x1e.CoreML.Specification.Int64SetH\x00\x42\x0f\n\rAllowedValues\"l\n\x0f\x44oubleParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x01\x12\x32\n\x05range\x18\n \x01(\x0b\x32!.CoreML.Specification.DoubleRangeH\x00\x42\x0f\n\rAllowedValues\"\'\n\x0fStringParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\t\"%\n\rBoolParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x08\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_INT64PARAMETER = _descriptor.Descriptor( + name='Int64Parameter', + full_name='CoreML.Specification.Int64Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.Int64Parameter.defaultValue', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.Int64Parameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set', full_name='CoreML.Specification.Int64Parameter.set', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.Int64Parameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=65, + serialized_end=218, +) + + +_DOUBLEPARAMETER = _descriptor.Descriptor( + name='DoubleParameter', + full_name='CoreML.Specification.DoubleParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.DoubleParameter.defaultValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.DoubleParameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.DoubleParameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=220, + serialized_end=328, +) + + +_STRINGPARAMETER = _descriptor.Descriptor( + name='StringParameter', + full_name='CoreML.Specification.StringParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.StringParameter.defaultValue', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=330, + serialized_end=369, +) + + +_BOOLPARAMETER = _descriptor.Descriptor( + name='BoolParameter', + full_name='CoreML.Specification.BoolParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.BoolParameter.defaultValue', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=371, + serialized_end=408, +) + +_INT64PARAMETER.fields_by_name['range'].message_type = DataStructures__pb2._INT64RANGE +_INT64PARAMETER.fields_by_name['set'].message_type = DataStructures__pb2._INT64SET +_INT64PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT64PARAMETER.fields_by_name['range']) +_INT64PARAMETER.fields_by_name['range'].containing_oneof = _INT64PARAMETER.oneofs_by_name['AllowedValues'] +_INT64PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT64PARAMETER.fields_by_name['set']) +_INT64PARAMETER.fields_by_name['set'].containing_oneof = _INT64PARAMETER.oneofs_by_name['AllowedValues'] +_DOUBLEPARAMETER.fields_by_name['range'].message_type = DataStructures__pb2._DOUBLERANGE +_DOUBLEPARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _DOUBLEPARAMETER.fields_by_name['range']) +_DOUBLEPARAMETER.fields_by_name['range'].containing_oneof = _DOUBLEPARAMETER.oneofs_by_name['AllowedValues'] +DESCRIPTOR.message_types_by_name['Int64Parameter'] = _INT64PARAMETER +DESCRIPTOR.message_types_by_name['DoubleParameter'] = _DOUBLEPARAMETER +DESCRIPTOR.message_types_by_name['StringParameter'] = _STRINGPARAMETER +DESCRIPTOR.message_types_by_name['BoolParameter'] = _BOOLPARAMETER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int64Parameter = _reflection.GeneratedProtocolMessageType('Int64Parameter', (_message.Message,), dict( + DESCRIPTOR = _INT64PARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Parameter) + )) +_sym_db.RegisterMessage(Int64Parameter) + +DoubleParameter = _reflection.GeneratedProtocolMessageType('DoubleParameter', 
(_message.Message,), dict( + DESCRIPTOR = _DOUBLEPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleParameter) + )) +_sym_db.RegisterMessage(DoubleParameter) + +StringParameter = _reflection.GeneratedProtocolMessageType('StringParameter', (_message.Message,), dict( + DESCRIPTOR = _STRINGPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringParameter) + )) +_sym_db.RegisterMessage(StringParameter) + +BoolParameter = _reflection.GeneratedProtocolMessageType('BoolParameter', (_message.Message,), dict( + DESCRIPTOR = _BOOLPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BoolParameter) + )) +_sym_db.RegisterMessage(BoolParameter) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py new file mode 100644 index 00000000..5ad1de30 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py @@ -0,0 +1,739 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: SVM.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='SVM.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\tSVM.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x0e\n\x0cLinearKernel\"\x1a\n\tRBFKernel\x12\r\n\x05gamma\x18\x01 \x01(\x01\"6\n\nPolyKernel\x12\x0e\n\x06\x64\x65gree\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x01\x12\r\n\x05gamma\x18\x03 \x01(\x01\")\n\rSigmoidKernel\x12\r\n\x05gamma\x18\x01 \x01(\x01\x12\t\n\x01\x63\x18\x02 \x01(\x01\"\xfa\x01\n\x06Kernel\x12:\n\x0clinearKernel\x18\x01 \x01(\x0b\x32\".CoreML.Specification.LinearKernelH\x00\x12\x34\n\trbfKernel\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.RBFKernelH\x00\x12\x36\n\npolyKernel\x18\x03 \x01(\x0b\x32 .CoreML.Specification.PolyKernelH\x00\x12<\n\rsigmoidKernel\x18\x04 \x01(\x0b\x32#.CoreML.Specification.SigmoidKernelH\x00\x42\x08\n\x06kernel\"*\n\nSparseNode\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x01\"?\n\x0cSparseVector\x12/\n\x05nodes\x18\x01 \x03(\x0b\x32 .CoreML.Specification.SparseNode\"K\n\x14SparseSupportVectors\x12\x33\n\x07vectors\x18\x01 \x03(\x0b\x32\".CoreML.Specification.SparseVector\"\x1d\n\x0b\x44\x65nseVector\x12\x0e\n\x06values\x18\x01 \x03(\x01\"I\n\x13\x44\x65nseSupportVectors\x12\x32\n\x07vectors\x18\x01 \x03(\x0b\x32!.CoreML.Specification.DenseVector\"\x1d\n\x0c\x43oefficients\x12\r\n\x05\x61lpha\x18\x01 \x03(\x01\"\xb5\x02\n\x16SupportVectorRegressor\x12,\n\x06kernel\x18\x01 \x01(\x0b\x32\x1c.CoreML.Specification.Kernel\x12J\n\x14sparseSupportVectors\x18\x02 \x01(\x0b\x32*.CoreML.Specification.SparseSupportVectorsH\x00\x12H\n\x13\x64\x65nseSupportVectors\x18\x03 \x01(\x0b\x32).CoreML.Specification.DenseSupportVectorsH\x00\x12\x38\n\x0c\x63oefficients\x18\x04 \x01(\x0b\x32\".CoreML.Specification.Coefficients\x12\x0b\n\x03rho\x18\x05 \x01(\x01\x42\x10\n\x0esupportVectors\"\x8b\x04\n\x17SupportVectorClassifier\x12,\n\x06kernel\x18\x01 \x01(\x0b\x32\x1c.CoreML.Specification.Kernel\x12&\n\x1enumberOfSupportVectorsPerClass\x18\x02 \x03(\x05\x12J\n\x14sparseSupportVectors\x18\x03 \x01(\x0b\x32*.CoreML.Specification.SparseSupportVectorsH\x00\x12H\n\x13\x64\x65nseSupportVectors\x18\x04 \x01(\x0b\x32).CoreML.Specification.DenseSupportVectorsH\x00\x12\x38\n\x0c\x63oefficients\x18\x05 \x03(\x0b\x32\".CoreML.Specification.Coefficients\x12\x0b\n\x03rho\x18\x06 \x03(\x01\x12\r\n\x05probA\x18\x07 \x03(\x01\x12\r\n\x05probB\x18\x08 \x03(\x01\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x01\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x01\x42\x10\n\x0esupportVectorsB\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_LINEARKERNEL = _descriptor.Descriptor( + name='LinearKernel', + full_name='CoreML.Specification.LinearKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=57, + serialized_end=71, +) + + +_RBFKERNEL = _descriptor.Descriptor( + name='RBFKernel', + 
full_name='CoreML.Specification.RBFKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.RBFKernel.gamma', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=73, + serialized_end=99, +) + + +_POLYKERNEL = _descriptor.Descriptor( + name='PolyKernel', + full_name='CoreML.Specification.PolyKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='degree', full_name='CoreML.Specification.PolyKernel.degree', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='c', full_name='CoreML.Specification.PolyKernel.c', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.PolyKernel.gamma', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=101, + serialized_end=155, +) + + +_SIGMOIDKERNEL = _descriptor.Descriptor( + name='SigmoidKernel', + full_name='CoreML.Specification.SigmoidKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.SigmoidKernel.gamma', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='c', full_name='CoreML.Specification.SigmoidKernel.c', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=157, + serialized_end=198, +) + + +_KERNEL = _descriptor.Descriptor( + name='Kernel', + full_name='CoreML.Specification.Kernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linearKernel', full_name='CoreML.Specification.Kernel.linearKernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='rbfKernel', full_name='CoreML.Specification.Kernel.rbfKernel', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='polyKernel', full_name='CoreML.Specification.Kernel.polyKernel', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoidKernel', full_name='CoreML.Specification.Kernel.sigmoidKernel', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='kernel', full_name='CoreML.Specification.Kernel.kernel', + index=0, containing_type=None, fields=[]), + ], + serialized_start=201, + serialized_end=451, +) + + +_SPARSENODE = _descriptor.Descriptor( + name='SparseNode', + full_name='CoreML.Specification.SparseNode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='CoreML.Specification.SparseNode.index', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.SparseNode.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=453, + serialized_end=495, +) + + +_SPARSEVECTOR = _descriptor.Descriptor( + name='SparseVector', + full_name='CoreML.Specification.SparseVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nodes', full_name='CoreML.Specification.SparseVector.nodes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=497, + serialized_end=560, +) + + +_SPARSESUPPORTVECTORS = _descriptor.Descriptor( + name='SparseSupportVectors', + full_name='CoreML.Specification.SparseSupportVectors', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vectors', full_name='CoreML.Specification.SparseSupportVectors.vectors', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=562, + serialized_end=637, +) + + +_DENSEVECTOR = _descriptor.Descriptor( + name='DenseVector', + full_name='CoreML.Specification.DenseVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.DenseVector.values', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=639, + serialized_end=668, +) + + +_DENSESUPPORTVECTORS = _descriptor.Descriptor( + name='DenseSupportVectors', + full_name='CoreML.Specification.DenseSupportVectors', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vectors', full_name='CoreML.Specification.DenseSupportVectors.vectors', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=670, + serialized_end=743, +) + + +_COEFFICIENTS = _descriptor.Descriptor( + name='Coefficients', + full_name='CoreML.Specification.Coefficients', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.Coefficients.alpha', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=745, + serialized_end=774, +) + + +_SUPPORTVECTORREGRESSOR = _descriptor.Descriptor( + name='SupportVectorRegressor', + full_name='CoreML.Specification.SupportVectorRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='kernel', full_name='CoreML.Specification.SupportVectorRegressor.kernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sparseSupportVectors', full_name='CoreML.Specification.SupportVectorRegressor.sparseSupportVectors', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='denseSupportVectors', full_name='CoreML.Specification.SupportVectorRegressor.denseSupportVectors', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coefficients', full_name='CoreML.Specification.SupportVectorRegressor.coefficients', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rho', full_name='CoreML.Specification.SupportVectorRegressor.rho', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='supportVectors', full_name='CoreML.Specification.SupportVectorRegressor.supportVectors', + index=0, containing_type=None, fields=[]), + ], + serialized_start=777, + serialized_end=1086, +) + + +_SUPPORTVECTORCLASSIFIER = _descriptor.Descriptor( + name='SupportVectorClassifier', + full_name='CoreML.Specification.SupportVectorClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='kernel', full_name='CoreML.Specification.SupportVectorClassifier.kernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numberOfSupportVectorsPerClass', full_name='CoreML.Specification.SupportVectorClassifier.numberOfSupportVectorsPerClass', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sparseSupportVectors', full_name='CoreML.Specification.SupportVectorClassifier.sparseSupportVectors', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='denseSupportVectors', full_name='CoreML.Specification.SupportVectorClassifier.denseSupportVectors', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coefficients', full_name='CoreML.Specification.SupportVectorClassifier.coefficients', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rho', full_name='CoreML.Specification.SupportVectorClassifier.rho', index=5, + number=6, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='probA', 
full_name='CoreML.Specification.SupportVectorClassifier.probA', index=6, + number=7, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='probB', full_name='CoreML.Specification.SupportVectorClassifier.probB', index=7, + number=8, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.stringClassLabels', index=8, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.int64ClassLabels', index=9, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='supportVectors', full_name='CoreML.Specification.SupportVectorClassifier.supportVectors', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.ClassLabels', + index=1, containing_type=None, fields=[]), + ], + serialized_start=1089, + serialized_end=1612, +) + +_KERNEL.fields_by_name['linearKernel'].message_type = _LINEARKERNEL +_KERNEL.fields_by_name['rbfKernel'].message_type = _RBFKERNEL +_KERNEL.fields_by_name['polyKernel'].message_type = _POLYKERNEL +_KERNEL.fields_by_name['sigmoidKernel'].message_type = _SIGMOIDKERNEL +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['linearKernel']) +_KERNEL.fields_by_name['linearKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['rbfKernel']) +_KERNEL.fields_by_name['rbfKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['polyKernel']) +_KERNEL.fields_by_name['polyKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['sigmoidKernel']) +_KERNEL.fields_by_name['sigmoidKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_SPARSEVECTOR.fields_by_name['nodes'].message_type = _SPARSENODE +_SPARSESUPPORTVECTORS.fields_by_name['vectors'].message_type = _SPARSEVECTOR +_DENSESUPPORTVECTORS.fields_by_name['vectors'].message_type = _DENSEVECTOR +_SUPPORTVECTORREGRESSOR.fields_by_name['kernel'].message_type = _KERNEL +_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors'].message_type = _SPARSESUPPORTVECTORS +_SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors'].message_type = _DENSESUPPORTVECTORS +_SUPPORTVECTORREGRESSOR.fields_by_name['coefficients'].message_type = _COEFFICIENTS +_SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'].fields.append( + 
_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors']) +_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors'].containing_oneof = _SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'] +_SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors']) +_SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors'].containing_oneof = _SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.fields_by_name['kernel'].message_type = _KERNEL +_SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors'].message_type = _SPARSESUPPORTVECTORS +_SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors'].message_type = _DENSESUPPORTVECTORS +_SUPPORTVECTORCLASSIFIER.fields_by_name['coefficients'].message_type = _COEFFICIENTS +_SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['LinearKernel'] = _LINEARKERNEL +DESCRIPTOR.message_types_by_name['RBFKernel'] = _RBFKERNEL +DESCRIPTOR.message_types_by_name['PolyKernel'] = _POLYKERNEL +DESCRIPTOR.message_types_by_name['SigmoidKernel'] = _SIGMOIDKERNEL +DESCRIPTOR.message_types_by_name['Kernel'] = _KERNEL +DESCRIPTOR.message_types_by_name['SparseNode'] = _SPARSENODE +DESCRIPTOR.message_types_by_name['SparseVector'] = _SPARSEVECTOR +DESCRIPTOR.message_types_by_name['SparseSupportVectors'] = _SPARSESUPPORTVECTORS +DESCRIPTOR.message_types_by_name['DenseVector'] = _DENSEVECTOR +DESCRIPTOR.message_types_by_name['DenseSupportVectors'] = _DENSESUPPORTVECTORS +DESCRIPTOR.message_types_by_name['Coefficients'] = _COEFFICIENTS +DESCRIPTOR.message_types_by_name['SupportVectorRegressor'] = _SUPPORTVECTORREGRESSOR +DESCRIPTOR.message_types_by_name['SupportVectorClassifier'] = _SUPPORTVECTORCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +LinearKernel = _reflection.GeneratedProtocolMessageType('LinearKernel', (_message.Message,), dict( + DESCRIPTOR = _LINEARKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearKernel) + )) +_sym_db.RegisterMessage(LinearKernel) + +RBFKernel = _reflection.GeneratedProtocolMessageType('RBFKernel', (_message.Message,), dict( + DESCRIPTOR = _RBFKERNEL, + __module__ = 'SVM_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RBFKernel) + )) +_sym_db.RegisterMessage(RBFKernel) + +PolyKernel = _reflection.GeneratedProtocolMessageType('PolyKernel', (_message.Message,), dict( + DESCRIPTOR = _POLYKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PolyKernel) + )) +_sym_db.RegisterMessage(PolyKernel) + +SigmoidKernel = _reflection.GeneratedProtocolMessageType('SigmoidKernel', (_message.Message,), dict( + DESCRIPTOR = _SIGMOIDKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SigmoidKernel) + )) +_sym_db.RegisterMessage(SigmoidKernel) + +Kernel = _reflection.GeneratedProtocolMessageType('Kernel', (_message.Message,), dict( + DESCRIPTOR = _KERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Kernel) + )) +_sym_db.RegisterMessage(Kernel) + +SparseNode = _reflection.GeneratedProtocolMessageType('SparseNode', (_message.Message,), dict( + DESCRIPTOR = _SPARSENODE, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseNode) + )) +_sym_db.RegisterMessage(SparseNode) + +SparseVector = _reflection.GeneratedProtocolMessageType('SparseVector', (_message.Message,), dict( + DESCRIPTOR = _SPARSEVECTOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseVector) + )) +_sym_db.RegisterMessage(SparseVector) + +SparseSupportVectors = _reflection.GeneratedProtocolMessageType('SparseSupportVectors', (_message.Message,), dict( + DESCRIPTOR = _SPARSESUPPORTVECTORS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseSupportVectors) + )) +_sym_db.RegisterMessage(SparseSupportVectors) + +DenseVector = _reflection.GeneratedProtocolMessageType('DenseVector', (_message.Message,), dict( + DESCRIPTOR = _DENSEVECTOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DenseVector) + )) +_sym_db.RegisterMessage(DenseVector) + +DenseSupportVectors = _reflection.GeneratedProtocolMessageType('DenseSupportVectors', (_message.Message,), dict( + DESCRIPTOR = _DENSESUPPORTVECTORS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DenseSupportVectors) + )) +_sym_db.RegisterMessage(DenseSupportVectors) + +Coefficients = _reflection.GeneratedProtocolMessageType('Coefficients', (_message.Message,), dict( + DESCRIPTOR = _COEFFICIENTS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Coefficients) + )) +_sym_db.RegisterMessage(Coefficients) + +SupportVectorRegressor = _reflection.GeneratedProtocolMessageType('SupportVectorRegressor', (_message.Message,), dict( + DESCRIPTOR = _SUPPORTVECTORREGRESSOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SupportVectorRegressor) + )) +_sym_db.RegisterMessage(SupportVectorRegressor) + +SupportVectorClassifier = _reflection.GeneratedProtocolMessageType('SupportVectorClassifier', (_message.Message,), dict( + DESCRIPTOR = _SUPPORTVECTORCLASSIFIER, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SupportVectorClassifier) + )) +_sym_db.RegisterMessage(SupportVectorClassifier) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py 
b/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py new file mode 100644 index 00000000..c5f3d323 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py @@ -0,0 +1,78 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Scaler.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Scaler.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0cScaler.proto\x12\x14\x43oreML.Specification\"0\n\x06Scaler\x12\x12\n\nshiftValue\x18\x01 \x03(\x01\x12\x12\n\nscaleValue\x18\x02 \x03(\x01\x42\x02H\x03\x62\x06proto3') +) + + + + +_SCALER = _descriptor.Descriptor( + name='Scaler', + full_name='CoreML.Specification.Scaler', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shiftValue', full_name='CoreML.Specification.Scaler.shiftValue', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaleValue', full_name='CoreML.Specification.Scaler.scaleValue', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=38, + serialized_end=86, +) + +DESCRIPTOR.message_types_by_name['Scaler'] = _SCALER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Scaler = _reflection.GeneratedProtocolMessageType('Scaler', (_message.Message,), dict( + DESCRIPTOR = _SCALER, + __module__ = 'Scaler_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Scaler) + )) +_sym_db.RegisterMessage(Scaler) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py new file mode 100644 index 00000000..271a0a62 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py @@ -0,0 +1,110 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: SoundAnalysisPreprocessing.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='SoundAnalysisPreprocessing.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n SoundAnalysisPreprocessing.proto\x12!CoreML.Specification.CoreMLModels\"\xa0\x01\n\x1aSoundAnalysisPreprocessing\x12V\n\x06vggish\x18\x14 \x01(\x0b\x32\x44.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.VggishH\x00\x1a\x08\n\x06VggishB \n\x1eSoundAnalysisPreprocessingTypeB\x02H\x03\x62\x06proto3') +) + + + + +_SOUNDANALYSISPREPROCESSING_VGGISH = _descriptor.Descriptor( + name='Vggish', + full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=190, + serialized_end=198, +) + +_SOUNDANALYSISPREPROCESSING = _descriptor.Descriptor( + name='SoundAnalysisPreprocessing', + full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vggish', full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.vggish', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_SOUNDANALYSISPREPROCESSING_VGGISH, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SoundAnalysisPreprocessingType', full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.SoundAnalysisPreprocessingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=72, + serialized_end=232, +) + +_SOUNDANALYSISPREPROCESSING_VGGISH.containing_type = _SOUNDANALYSISPREPROCESSING +_SOUNDANALYSISPREPROCESSING.fields_by_name['vggish'].message_type = _SOUNDANALYSISPREPROCESSING_VGGISH +_SOUNDANALYSISPREPROCESSING.oneofs_by_name['SoundAnalysisPreprocessingType'].fields.append( + _SOUNDANALYSISPREPROCESSING.fields_by_name['vggish']) +_SOUNDANALYSISPREPROCESSING.fields_by_name['vggish'].containing_oneof = _SOUNDANALYSISPREPROCESSING.oneofs_by_name['SoundAnalysisPreprocessingType'] +DESCRIPTOR.message_types_by_name['SoundAnalysisPreprocessing'] = _SOUNDANALYSISPREPROCESSING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +SoundAnalysisPreprocessing = _reflection.GeneratedProtocolMessageType('SoundAnalysisPreprocessing', (_message.Message,), dict( + + Vggish = _reflection.GeneratedProtocolMessageType('Vggish', (_message.Message,), dict( + DESCRIPTOR = _SOUNDANALYSISPREPROCESSING_VGGISH, + __module__ = 'SoundAnalysisPreprocessing_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish) + )) + , + 
DESCRIPTOR = _SOUNDANALYSISPREPROCESSING, + __module__ = 'SoundAnalysisPreprocessing_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing) + )) +_sym_db.RegisterMessage(SoundAnalysisPreprocessing) +_sym_db.RegisterMessage(SoundAnalysisPreprocessing.Vggish) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py new file mode 100644 index 00000000..9edaafcf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py @@ -0,0 +1,107 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: TextClassifier.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='TextClassifier.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x14TextClassifier.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\xa1\x01\n\x0eTextClassifier\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_TEXTCLASSIFIER = _descriptor.Descriptor( + name='TextClassifier', + full_name='CoreML.Specification.CoreMLModels.TextClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.TextClassifier.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.TextClassifier.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.TextClassifier.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.TextClassifier.stringClassLabels', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.TextClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=82, + serialized_end=243, +) + +_TEXTCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_TEXTCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TEXTCLASSIFIER.fields_by_name['stringClassLabels']) +_TEXTCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _TEXTCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['TextClassifier'] = _TEXTCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TextClassifier = _reflection.GeneratedProtocolMessageType('TextClassifier', (_message.Message,), dict( + DESCRIPTOR = _TEXTCLASSIFIER, + __module__ = 'TextClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.TextClassifier) + )) +_sym_db.RegisterMessage(TextClassifier) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py new file mode 100644 index 00000000..b2a96adf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py @@ -0,0 +1,446 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: TreeEnsemble.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='TreeEnsemble.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12TreeEnsemble.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xc4\x06\n\x16TreeEnsembleParameters\x12\x44\n\x05nodes\x18\x01 \x03(\x0b\x32\x35.CoreML.Specification.TreeEnsembleParameters.TreeNode\x12\x1f\n\x17numPredictionDimensions\x18\x02 \x01(\x04\x12\x1b\n\x13\x62\x61sePredictionValue\x18\x03 \x03(\x01\x1a\xa5\x05\n\x08TreeNode\x12\x0e\n\x06treeId\x18\x01 \x01(\x04\x12\x0e\n\x06nodeId\x18\x02 \x01(\x04\x12\\\n\x0cnodeBehavior\x18\x03 \x01(\x0e\x32\x46.CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior\x12\x1a\n\x12\x62ranchFeatureIndex\x18\n \x01(\x04\x12\x1a\n\x12\x62ranchFeatureValue\x18\x0b \x01(\x01\x12\x17\n\x0ftrueChildNodeId\x18\x0c \x01(\x04\x12\x18\n\x10\x66\x61lseChildNodeId\x18\r \x01(\x04\x12#\n\x1bmissingValueTracksTrueChild\x18\x0e \x01(\x08\x12\\\n\x0e\x65valuationInfo\x18\x14 \x03(\x0b\x32\x44.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo\x12\x17\n\x0frelativeHitRate\x18\x1e \x01(\x01\x1a\x42\n\x0e\x45valuationInfo\x12\x17\n\x0f\x65valuationIndex\x18\x01 \x01(\x04\x12\x17\n\x0f\x65valuationValue\x18\x02 \x01(\x01\"\xcf\x01\n\x10TreeNodeBehavior\x12\x1e\n\x1a\x42ranchOnValueLessThanEqual\x10\x00\x12\x19\n\x15\x42ranchOnValueLessThan\x10\x01\x12!\n\x1d\x42ranchOnValueGreaterThanEqual\x10\x02\x12\x1c\n\x18\x42ranchOnValueGreaterThan\x10\x03\x12\x16\n\x12\x42ranchOnValueEqual\x10\x04\x12\x19\n\x15\x42ranchOnValueNotEqual\x10\x05\x12\x0c\n\x08LeafNode\x10\x06\"\xc7\x02\n\x16TreeEnsembleClassifier\x12\x42\n\x0ctreeEnsemble\x18\x01 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleParameters\x12Z\n\x17postEvaluationTransform\x18\x02 \x01(\x0e\x32\x39.CoreML.Specification.TreeEnsemblePostEvaluationTransform\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x42\r\n\x0b\x43lassLabels\"\xb7\x01\n\x15TreeEnsembleRegressor\x12\x42\n\x0ctreeEnsemble\x18\x01 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleParameters\x12Z\n\x17postEvaluationTransform\x18\x02 \x01(\x0e\x32\x39.CoreML.Specification.TreeEnsemblePostEvaluationTransform*\x9d\x01\n#TreeEnsemblePostEvaluationTransform\x12\x0f\n\x0bNoTransform\x10\x00\x12\x1a\n\x16\x43lassification_SoftMax\x10\x01\x12\x17\n\x13Regression_Logistic\x10\x02\x12\x30\n,Classification_SoftMaxWithZeroClassReference\x10\x03\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + +_TREEENSEMBLEPOSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='TreeEnsemblePostEvaluationTransform', + full_name='CoreML.Specification.TreeEnsemblePostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NoTransform', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Classification_SoftMax', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Regression_Logistic', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='Classification_SoftMaxWithZeroClassReference', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1422, + serialized_end=1579, +) +_sym_db.RegisterEnumDescriptor(_TREEENSEMBLEPOSTEVALUATIONTRANSFORM) + +TreeEnsemblePostEvaluationTransform = enum_type_wrapper.EnumTypeWrapper(_TREEENSEMBLEPOSTEVALUATIONTRANSFORM) +NoTransform = 0 +Classification_SoftMax = 1 +Regression_Logistic = 2 +Classification_SoftMaxWithZeroClassReference = 3 + + +_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR = _descriptor.EnumDescriptor( + name='TreeNodeBehavior', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='BranchOnValueLessThanEqual', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueLessThan', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueGreaterThanEqual', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueGreaterThan', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueEqual', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueNotEqual', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LeafNode', index=6, number=6, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=696, + serialized_end=903, +) +_sym_db.RegisterEnumDescriptor(_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR) + + +_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO = _descriptor.Descriptor( + name='EvaluationInfo', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='evaluationIndex', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.evaluationIndex', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='evaluationValue', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.evaluationValue', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=627, + serialized_end=693, +) + +_TREEENSEMBLEPARAMETERS_TREENODE = _descriptor.Descriptor( + name='TreeNode', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.treeId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nodeId', 
full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.nodeId', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nodeBehavior', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.nodeBehavior', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branchFeatureIndex', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.branchFeatureIndex', index=3, + number=10, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branchFeatureValue', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.branchFeatureValue', index=4, + number=11, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='trueChildNodeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.trueChildNodeId', index=5, + number=12, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='falseChildNodeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.falseChildNodeId', index=6, + number=13, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='missingValueTracksTrueChild', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.missingValueTracksTrueChild', index=7, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='evaluationInfo', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.evaluationInfo', index=8, + number=20, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='relativeHitRate', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.relativeHitRate', index=9, + number=30, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO, ], + enum_types=[ + _TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=226, + serialized_end=903, +) + +_TREEENSEMBLEPARAMETERS = _descriptor.Descriptor( + 
name='TreeEnsembleParameters', + full_name='CoreML.Specification.TreeEnsembleParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nodes', full_name='CoreML.Specification.TreeEnsembleParameters.nodes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numPredictionDimensions', full_name='CoreML.Specification.TreeEnsembleParameters.numPredictionDimensions', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basePredictionValue', full_name='CoreML.Specification.TreeEnsembleParameters.basePredictionValue', index=2, + number=3, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TREEENSEMBLEPARAMETERS_TREENODE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=67, + serialized_end=903, +) + + +_TREEENSEMBLECLASSIFIER = _descriptor.Descriptor( + name='TreeEnsembleClassifier', + full_name='CoreML.Specification.TreeEnsembleClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeEnsemble', full_name='CoreML.Specification.TreeEnsembleClassifier.treeEnsemble', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.TreeEnsembleClassifier.postEvaluationTransform', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.stringClassLabels', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.int64ClassLabels', index=3, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=906, + serialized_end=1233, +) + + +_TREEENSEMBLEREGRESSOR = _descriptor.Descriptor( + name='TreeEnsembleRegressor', + 
full_name='CoreML.Specification.TreeEnsembleRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeEnsemble', full_name='CoreML.Specification.TreeEnsembleRegressor.treeEnsemble', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.TreeEnsembleRegressor.postEvaluationTransform', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1236, + serialized_end=1419, +) + +_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO.containing_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLEPARAMETERS_TREENODE.fields_by_name['nodeBehavior'].enum_type = _TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR +_TREEENSEMBLEPARAMETERS_TREENODE.fields_by_name['evaluationInfo'].message_type = _TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO +_TREEENSEMBLEPARAMETERS_TREENODE.containing_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR.containing_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLEPARAMETERS.fields_by_name['nodes'].message_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLECLASSIFIER.fields_by_name['treeEnsemble'].message_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLECLASSIFIER.fields_by_name['postEvaluationTransform'].enum_type = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +_TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels']) +_TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'] +_TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels']) +_TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'] +_TREEENSEMBLEREGRESSOR.fields_by_name['treeEnsemble'].message_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLEREGRESSOR.fields_by_name['postEvaluationTransform'].enum_type = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +DESCRIPTOR.message_types_by_name['TreeEnsembleParameters'] = _TREEENSEMBLEPARAMETERS +DESCRIPTOR.message_types_by_name['TreeEnsembleClassifier'] = _TREEENSEMBLECLASSIFIER +DESCRIPTOR.message_types_by_name['TreeEnsembleRegressor'] = _TREEENSEMBLEREGRESSOR +DESCRIPTOR.enum_types_by_name['TreeEnsemblePostEvaluationTransform'] = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TreeEnsembleParameters = _reflection.GeneratedProtocolMessageType('TreeEnsembleParameters', (_message.Message,), dict( + + TreeNode = _reflection.GeneratedProtocolMessageType('TreeNode', (_message.Message,), dict( + + EvaluationInfo = _reflection.GeneratedProtocolMessageType('EvaluationInfo', 
(_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo) + )) + , + DESCRIPTOR = _TREEENSEMBLEPARAMETERS_TREENODE, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters.TreeNode) + )) + , + DESCRIPTOR = _TREEENSEMBLEPARAMETERS, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters) + )) +_sym_db.RegisterMessage(TreeEnsembleParameters) +_sym_db.RegisterMessage(TreeEnsembleParameters.TreeNode) +_sym_db.RegisterMessage(TreeEnsembleParameters.TreeNode.EvaluationInfo) + +TreeEnsembleClassifier = _reflection.GeneratedProtocolMessageType('TreeEnsembleClassifier', (_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLECLASSIFIER, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleClassifier) + )) +_sym_db.RegisterMessage(TreeEnsembleClassifier) + +TreeEnsembleRegressor = _reflection.GeneratedProtocolMessageType('TreeEnsembleRegressor', (_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLEREGRESSOR, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleRegressor) + )) +_sym_db.RegisterMessage(TreeEnsembleRegressor) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py new file mode 100644 index 00000000..face7914 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py @@ -0,0 +1,232 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: VisionFeaturePrint.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='VisionFeaturePrint.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x18VisionFeaturePrint.proto\x12!CoreML.Specification.CoreMLModels\"\xe0\x04\n\x12VisionFeaturePrint\x12L\n\x05scene\x18\x14 \x01(\x0b\x32;.CoreML.Specification.CoreMLModels.VisionFeaturePrint.SceneH\x00\x12P\n\x07objects\x18\x15 \x01(\x0b\x32=.CoreML.Specification.CoreMLModels.VisionFeaturePrint.ObjectsH\x00\x1a\xb7\x01\n\x05Scene\x12Y\n\x07version\x18\x01 \x01(\x0e\x32H.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion\"S\n\x0cSceneVersion\x12\x19\n\x15SCENE_VERSION_INVALID\x10\x00\x12\x13\n\x0fSCENE_VERSION_1\x10\x01\x12\x13\n\x0fSCENE_VERSION_2\x10\x02\x1a\xd5\x01\n\x07Objects\x12]\n\x07version\x18\x01 \x01(\x0e\x32L.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion\x12\x0e\n\x06output\x18\x64 \x03(\t\"[\n\x0eObjectsVersion\x12\x1b\n\x17OBJECTS_VERSION_INVALID\x10\x00\x12\x15\n\x11OBJECTS_VERSION_1\x10\x01\x12\x15\n\x11OBJECTS_VERSION_2\x10\x02\x42\x18\n\x16VisionFeaturePrintTypeB\x02H\x03\x62\x06proto3') +) + + + +_VISIONFEATUREPRINT_SCENE_SCENEVERSION = _descriptor.EnumDescriptor( + name='SceneVersion', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=347, + serialized_end=430, +) +_sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_SCENE_SCENEVERSION) + +_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION = _descriptor.EnumDescriptor( + name='ObjectsVersion', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=555, + serialized_end=646, +) +_sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION) + + +_VISIONFEATUREPRINT_SCENE = _descriptor.Descriptor( + name='Scene', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VISIONFEATUREPRINT_SCENE_SCENEVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=247, + serialized_end=430, +) + +_VISIONFEATUREPRINT_OBJECTS = _descriptor.Descriptor( + name='Objects', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.output', index=1, + number=100, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=433, + serialized_end=646, +) + +_VISIONFEATUREPRINT = _descriptor.Descriptor( + name='VisionFeaturePrint', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scene', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.scene', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='objects', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.objects', index=1, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_VISIONFEATUREPRINT_SCENE, _VISIONFEATUREPRINT_OBJECTS, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='VisionFeaturePrintType', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=64, + serialized_end=672, +) + +_VISIONFEATUREPRINT_SCENE.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_SCENE_SCENEVERSION +_VISIONFEATUREPRINT_SCENE.containing_type = _VISIONFEATUREPRINT +_VISIONFEATUREPRINT_SCENE_SCENEVERSION.containing_type = _VISIONFEATUREPRINT_SCENE +_VISIONFEATUREPRINT_OBJECTS.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION +_VISIONFEATUREPRINT_OBJECTS.containing_type = _VISIONFEATUREPRINT +_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION.containing_type = _VISIONFEATUREPRINT_OBJECTS +_VISIONFEATUREPRINT.fields_by_name['scene'].message_type = _VISIONFEATUREPRINT_SCENE 
+_VISIONFEATUREPRINT.fields_by_name['objects'].message_type = _VISIONFEATUREPRINT_OBJECTS +_VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( + _VISIONFEATUREPRINT.fields_by_name['scene']) +_VISIONFEATUREPRINT.fields_by_name['scene'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] +_VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( + _VISIONFEATUREPRINT.fields_by_name['objects']) +_VISIONFEATUREPRINT.fields_by_name['objects'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] +DESCRIPTOR.message_types_by_name['VisionFeaturePrint'] = _VISIONFEATUREPRINT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +VisionFeaturePrint = _reflection.GeneratedProtocolMessageType('VisionFeaturePrint', (_message.Message,), dict( + + Scene = _reflection.GeneratedProtocolMessageType('Scene', (_message.Message,), dict( + DESCRIPTOR = _VISIONFEATUREPRINT_SCENE, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene) + )) + , + + Objects = _reflection.GeneratedProtocolMessageType('Objects', (_message.Message,), dict( + DESCRIPTOR = _VISIONFEATUREPRINT_OBJECTS, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects) + )) + , + DESCRIPTOR = _VISIONFEATUREPRINT, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint) + )) +_sym_db.RegisterMessage(VisionFeaturePrint) +_sym_db.RegisterMessage(VisionFeaturePrint.Scene) +_sym_db.RegisterMessage(VisionFeaturePrint.Objects) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py new file mode 100644 index 00000000..a10ac8c4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py @@ -0,0 +1,93 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: WordEmbedding.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='WordEmbedding.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x13WordEmbedding.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"O\n\rWordEmbedding\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_WORDEMBEDDING = _descriptor.Descriptor( + name='WordEmbedding', + full_name='CoreML.Specification.CoreMLModels.WordEmbedding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=80, + serialized_end=159, +) + +DESCRIPTOR.message_types_by_name['WordEmbedding'] = _WORDEMBEDDING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +WordEmbedding = _reflection.GeneratedProtocolMessageType('WordEmbedding', (_message.Message,), dict( + DESCRIPTOR = _WORDEMBEDDING, + __module__ = 'WordEmbedding_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.WordEmbedding) + )) +_sym_db.RegisterMessage(WordEmbedding) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py new file mode 100644 index 00000000..a8333094 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py @@ -0,0 +1,135 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: WordTagger.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='WordTagger.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x10WordTagger.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\xa4\x02\n\nWordTagger\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1f\n\x17tokensOutputFeatureName\x18\x14 \x01(\t\x12\"\n\x1atokenTagsOutputFeatureName\x18\x15 \x01(\t\x12\'\n\x1ftokenLocationsOutputFeatureName\x18\x16 \x01(\t\x12%\n\x1dtokenLengthsOutputFeatureName\x18\x17 \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12\x39\n\nstringTags\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\x06\n\x04TagsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_WORDTAGGER = _descriptor.Descriptor( + name='WordTagger', + full_name='CoreML.Specification.CoreMLModels.WordTagger', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.WordTagger.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.WordTagger.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokensOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokensOutputFeatureName', index=2, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenTagsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenTagsOutputFeatureName', index=3, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenLocationsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenLocationsOutputFeatureName', index=4, + number=22, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenLengthsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenLengthsOutputFeatureName', index=5, + number=23, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.WordTagger.modelParameterData', index=6, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringTags', full_name='CoreML.Specification.CoreMLModels.WordTagger.stringTags', index=7, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Tags', full_name='CoreML.Specification.CoreMLModels.WordTagger.Tags', + index=0, containing_type=None, fields=[]), + ], + serialized_start=78, + serialized_end=370, +) + +_WORDTAGGER.fields_by_name['stringTags'].message_type = DataStructures__pb2._STRINGVECTOR +_WORDTAGGER.oneofs_by_name['Tags'].fields.append( + _WORDTAGGER.fields_by_name['stringTags']) +_WORDTAGGER.fields_by_name['stringTags'].containing_oneof = _WORDTAGGER.oneofs_by_name['Tags'] +DESCRIPTOR.message_types_by_name['WordTagger'] = _WORDTAGGER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +WordTagger = _reflection.GeneratedProtocolMessageType('WordTagger', (_message.Message,), dict( + DESCRIPTOR = _WORDTAGGER, + __module__ = 'WordTagger_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.WordTagger) + )) +_sym_db.RegisterMessage(WordTagger) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py new file mode 100644 index 00000000..013a7fd1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py @@ -0,0 +1 @@ +### Module for proto generated Python code. diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py
new file mode 100644
index 00000000..1665bc37
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2017 - 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py
new file mode 100644
index 00000000..f13e3742
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py
@@ -0,0 +1,519 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+import os
+import tempfile
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+from coremltools._deps import _HAS_TORCH
+from coremltools.converters.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, Program, get_new_symbol
+from coremltools.converters.mil.testing_utils import get_op_types_in_program
+
+if _HAS_TORCH:
+    import torch
+
+
+class TestMILExamples:
+    @staticmethod
+    def test_tutorial():
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3))]
+        )
+        def prog(x):
+            x = mb.relu(x=x, name="relu")
+            x = mb.transpose(x=x, perm=[0, 3, 1, 2], name="transpose")
+            x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name="reduce")
+            x = mb.log(x=x, name="log")
+            y = mb.add(x=1, y=2)
+            return x
+
+        # Convert and verify
+        mlmodel = ct.convert(prog)
+
+        # running predict() is only supported on macOS
+        if ct.utils._is_macos():
+            prediction = mlmodel.predict(
+                {"x": np.random.rand(1, 100, 100, 3).astype(np.float32)}
+            )
+            assert len(prediction) == 1
+
+
+@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.')
+class TestInputs:
+    @staticmethod
+    @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS")
+    def test_unsanitized_input_name_during_prediction():
+        '''
+        The input name "x/0" becomes "x_0" due to the name sanitization applied during conversion.
+        '''
+        prog = Program()
+        func_inputs = {"x/0": mb.placeholder(shape=[2, 3]),
+                       "y": mb.placeholder(shape=[2, 3])}
+        with Function(func_inputs) as ssa_fun:
+            x, y = ssa_fun.inputs["x/0"], ssa_fun.inputs["y"]
+            x = mb.relu(x=x, name="relu")
+            z = mb.add(x=x, y=y, name="out")
+            ssa_fun.set_outputs([z])
+        prog.add_function("main", ssa_fun)
+
+        mlmodel = ct.convert(prog)
+
+        with pytest.raises(KeyError) as error_info:
+            mlmodel.predict(
+                {"x/0": np.random.rand(2, 3).astype(np.float32),
+                 "y": np.random.rand(2, 3).astype(np.float32)}
+            )
+        error_str = str(error_info.value)
+        assert "does not match any of the model input" in error_str
+
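+    # The helper below runs the same prediction with plain numpy inputs and again
+    # with inputs converted by `to_tensor`, then checks that both input variants
+    # yield matching outputs.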
+    @staticmethod
+    def _test_variant_input_type_prediction(to_tensor):
+        prog = Program()
+        func_inputs = {"x": mb.placeholder(shape=[2, 3]),
+                       "y": mb.placeholder(shape=[2, 3])}
+        with Function(func_inputs) as ssa_fun:
+            x, y = ssa_fun.inputs["x"], ssa_fun.inputs["y"]
+            x = mb.relu(x=x, name="relu")
+            z = mb.add(x=x, y=y, name="out")
+            ssa_fun.set_outputs([z])
+        prog.add_function("main", ssa_fun)
+
+        mlmodel = ct.convert(prog)
+        x_numpy = np.random.rand(2, 3)
+        y_numpy = np.random.rand(2, 3)
+        out_by_numpy = mlmodel.predict(
+            {"x": x_numpy,
+             "y": y_numpy}
+        )
+        out_by_tensor = mlmodel.predict(
+            {"x": to_tensor(x_numpy),
+             "y": to_tensor(y_numpy)}
+        )
+        assert np.allclose(out_by_numpy["out"], out_by_tensor["out"])
+
+    @staticmethod
+    @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions")
+    def test_list_predict_input():
+        TestInputs._test_variant_input_type_prediction(lambda x: x.tolist())
+
+    @staticmethod
+    def test_rank0_inputs_mil():
+        with pytest.raises(ValueError, match=r"Rank-0"):
+            @mb.program(
+                input_specs=[
+                    mb.TensorSpec(shape=()),
+                ]
+            )
+            def prog(x):
+                return x
+
+
+###############################################################################
+# Note: all tests are examples of conversion to the Core ML format
+# Each test case is expected to be runnable and self-contained.
+###############################################################################
+
+class TestMLProgramConverterExamples:
+
+    @staticmethod
+    def test_model_save(tmpdir):
+        save_path_dir = str(tmpdir)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        # save neuralnetwork model without extension and check that it is saved with
+        # mlmodel extension
+        mlmodel = ct.convert(prog)
+        mlmodel_path = os.path.join(save_path_dir, "model_nn")
+        mlmodel.save(mlmodel_path)
+        assert os.path.exists(mlmodel_path + ".mlmodel")
+
+        # save neuralnetwork model with mlpackage extension
+        mlmodel_path = os.path.join(save_path_dir, "model_nn2.mlpackage")
+        mlmodel.save(mlmodel_path)
+        assert os.path.exists(mlmodel_path)
+
+        # save mlprogram model without extension and check that it is saved with
+        # mlpackage extension
+        mlmodel = ct.convert(prog, convert_to="mlprogram")
+        mlmodel_path = os.path.join(save_path_dir, "model_mlprogram")
+        mlmodel.save(mlmodel_path)
+        assert os.path.exists(mlmodel_path + ".mlpackage")
+
+        # check error if mlprogram is saved with mlmodel extension
+        mlmodel_path = os.path.join(save_path_dir, "model_mlprogram.mlmodel")
+        with pytest.raises(Exception) as e:
+            mlmodel.save(mlmodel_path)
+        expected_error = "For an ML Program, extension must be .mlpackage (not .mlmodel)"
+        assert expected_error == str(e.value)
+
+    @staticmethod
+    @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS")
+    def test_deepcopy_error_with_symbols_in_prog():
+        prog = Program()
+        func_inputs = {"x": mb.placeholder(shape=[get_new_symbol(), 3]),
+                       "y": mb.placeholder(shape=[2, 3])}
+        with Function(func_inputs) as ssa_fun:
+            x, y = ssa_fun.inputs["x"], ssa_fun.inputs["y"]
+            x = mb.relu(x=x)
+            z = mb.add(x=x, y=y)
+            ssa_fun.set_outputs([z])
+        prog.add_function("main", ssa_fun)
+        mlmodel = ct.convert(prog, convert_to="mlprogram", compute_precision=ct.precision.FLOAT32)
+        prog2 = mlmodel._get_mil_internal()  # this will invoke a deepcopy on the prog
+
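+    # Note: with skip_model_load=True the model is converted but not compiled or
+    # loaded, which is why the backing proxy object is expected to be None below.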
+    @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS")
+    @pytest.mark.parametrize("skip_model_load", [True, False])
+    def test_model_load_skip_flag(self, skip_model_load):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(3,)), ])
+        def prog(x):
+            return mb.relu(x=x, name='relu')
+
+        if ct.utils._macos_version() < (12, 0) and not skip_model_load:
+            # converting to mlprogram, on macOS < 12
+            # should emit a RuntimeWarning when skip_model_load is False
+            with pytest.warns(RuntimeWarning):
+                model = ct.convert(prog, convert_to='mlprogram',
+                                   skip_model_load=skip_model_load)
+        else:
+            model = ct.convert(prog, convert_to="mlprogram", skip_model_load=skip_model_load)
+
+        assert model is not None
+        if skip_model_load:
+            assert model.__proxy__ is None
+        model_dir = tempfile.TemporaryDirectory()
+        filename = os.path.join(model_dir.name, "test.mlpackage")
+        model.save(filename)
+        assert os.path.exists(filename)
+
+
+@pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason='Model produces specification 6.')
+class TestMLProgramFP16Transform:
+    @staticmethod
+    def test_compute_precision_api():
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        mlmodel = ct.convert(copy.deepcopy(prog),
+                             compute_precision=ct.precision.FLOAT16,
+                             convert_to='mlprogram')
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["cast", "square", "cast"], get_op_types_in_program(mil_prog))
+
+        mlmodel = ct.convert(copy.deepcopy(prog),
+                             compute_precision=ct.precision.FLOAT32,
+                             convert_to='mlprogram')
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["square"], get_op_types_in_program(mil_prog))
+
+        mlmodel = ct.convert(
+            copy.deepcopy(prog),
+            compute_precision=ct.transform.FP16ComputePrecision(
+                op_selector=lambda op: op.op_type != "square"
+            ),
+            convert_to="mlprogram",
+        )
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["square"], get_op_types_in_program(mil_prog))
+
+        with pytest.raises(ValueError) as e:
+            mlmodel = ct.convert(copy.deepcopy(prog),
+                                 compute_precision='fp64',
+                                 convert_to='mlprogram')
+        expected_error = "'compute_precision' must be either coremltools.precision.FLOAT32 or " \
+                         "coremltools.precision.FLOAT16 or of type coremltools.transform.FP16ComputePrecision()"
+        assert expected_error == str(e.value)
+
+        expected_pattern = "compute_precision .* supported .* mlprogram .* None .* target=='neuralnetwork'.*minimum_deployment_target.*"
+        with pytest.raises(ValueError, match=expected_pattern) as e:
+            mlmodel = ct.convert(copy.deepcopy(prog), compute_precision='fp16')
+
+    @staticmethod
+    def test_invalid_argument_nn_backend():
+        '''
+        Since the compute_precision argument is only applicable when converting to "mlprogram",
+        check that an error is correctly raised when conversion targets the neuralnetwork backend.
+        '''
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        expected_err_str = "compute_precision is only supported for mlprogram target and must be None if target.*"
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, compute_precision=ct.precision.FLOAT16)
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, compute_precision=ct.precision.FLOAT32)
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason="PyTorch not found")
+class TestGraphPassManagement:
+    @staticmethod
+    def _get_test_model():
+        class TestModel(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.conv1 = torch.nn.Conv2d(1, 8, 5, padding="same")
+                self.bn1 = torch.nn.BatchNorm2d(8)
+                self.linear1 = torch.nn.Linear(28 * 28 * 8, 5)
+                self.alpha = 0.7
+
+            def forward(self, x):
+                x = self.conv1(x)
+                x = self.bn1(x)
+                x = self.linear1(torch.flatten(x))
+                x = torch.maximum(self.alpha * x, x)
+                return x
+
+        return TestModel().eval()
+
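+    # With the default pipeline, conv + batch_norm are fused into a single conv
+    # and the mul/maximum pair is fused into leaky_relu; the skipped-pass tests
+    # below show the unfused ops reappearing.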
+    def test_default_pipeline(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=ct.PassPipeline(),
+        )
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+    def test_skip_pass(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        model_converted = ct.convert(
+            traced_model, inputs=[ct.TensorType(shape=example_input.shape)], convert_to="mlprogram"
+        )
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+        pipeline = ct.PassPipeline()
+        pipeline.remove_passes(passes_names=["common::fuse_conv_batchnorm"])
+        model_converted_with_skipped_passes = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert get_op_types_in_program(model_converted_with_skipped_passes._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "batch_norm",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+    def test_skip_two_passes(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.remove_passes(
+            passes_names=["common::fuse_conv_batchnorm", "common::fuse_leaky_relu"]
+        )
+        model_converted_with_skipped_passes = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert get_op_types_in_program(model_converted_with_skipped_passes._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "batch_norm",
+            "reshape",
+            "linear",
+            "mul",
+            "maximum",
+            "cast",
+        ]
+
+    def test_skip_passes_in_different_pipelines(self):
+        """
+        Some passes exist in different pipelines. For example, const_elimination is in both the main
+        and backend pipelines. If the user wants to skip the const_elimination pass, we want to make
+        sure both pipelines skip that pass.
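+        The test below verifies this by counting the const ops that remain in the
+        converted program once common::const_elimination is removed.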
+        """
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.remove_passes(passes_names=["common::const_elimination"])
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert (
+            get_op_types_in_program(
+                model_converted._get_mil_internal(), skip_const_ops=False
+            ).count("const")
+            == 24
+        )
+
+    def test_empty_pipeline(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline.get_empty_pipeline()
+
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "conv",
+            "batch_norm",
+            "shape",
+            "slice_by_index",
+            "slice_by_index",
+            "concat",
+            "cast",
+            "reshape",
+            "linear",
+            "mul",
+            "maximum",
+        ]
+
+    def test_pass_option_skip_ops_by_type(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "conv,linear"})
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        # The fp16 cast is skipped for conv and linear as we specified them in the pass options.
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "conv",
+            "cast",
+            "reshape",
+            "cast",
+            "linear",
+            "cast",
+            "leaky_relu",
+            "cast",
+        ]
+
+    def test_pass_option_skip_const_by_size(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        model_converted_without_pipeline = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+        )
+
+        pipeline = ct.PassPipeline()
+        pipeline.set_options("common::const_elimination", {"skip_const_by_size": "1e8"})
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        # When the threshold is set to 1e8, no var is skipped in const elimination.
+        assert get_op_types_in_program(
+            model_converted._get_mil_internal(), skip_const_ops=False
+        ).count("const") == get_op_types_in_program(
+            model_converted_without_pipeline._get_mil_internal(), skip_const_ops=False
+        ).count(
+            "const"
+        )
+
+        pipeline.set_options(
+            "common::const_elimination", {"skip_const_by_size": "-1"}, override=True
+        )
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        # When the threshold is -1, almost all vars (except scalars) are skipped in const elimination.
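+        # Only scalar vars still go through const elimination here, hence the
+        # exact const-op count asserted below.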
+ assert ( + get_op_types_in_program( + model_converted._get_mil_internal(), skip_const_ops=False + ).count("const") + == 23 + ) + + def test_pass_unsupported_option(self): + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + pipeline = ct.PassPipeline() + pipeline.set_options("common::fuse_conv_batchnorm", {"skip_ops_by_type": "conv,linear"}) + with pytest.raises( + NotImplementedError, + match="The graph pass `fuse_conv_batchnorm` doesn't support option `skip_ops_by_type`.", + ): + ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + + def test_pass_option_invalid_val(self): + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + pipeline = ct.PassPipeline() + pipeline.set_options("common::const_elimination", {"skip_const_by_size": "dummy"}) + with pytest.raises( + ValueError, + match="Expected to get float threshold, but got `dummy` which cannot be converted to float", + ): + ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py new file mode 100644 index 00000000..c66e0c6d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py @@ -0,0 +1,230 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +import coremltools as ct + + +def _get_visible_items(d): + return [x for x in dir(d) if not x.startswith("_")] + + +def _check_visible_modules(actual, expected): + assert set(actual) == set(expected), "API mis-matched. 
Got %s, expected %s" % ( + actual, + expected, + ) + + +EXPECTED_MODULES = [ + "ClassifierConfig", + "ComputeUnit", + "EnumeratedShapes", + "ImageType", + "RangeDim", + "SPECIFICATION_VERSION", + "Shape", + "TensorType", + "colorlayout", + "compression_utils", + "convert", + "converters", + "libcoremlpython", + "models", + "PassPipeline", + "proto", + "precision", + "target", + "utils", + "version", + "test", + "transform", + "libmodelpackage", + "libmilstoragepython", +] + + +class TestApiVisibilities: + """Test public coremltools API visibilities.""" + + def test_top_level(self): + if not ct.utils._is_macos(): + EXPECTED_MODULES.remove("libcoremlpython") + _check_visible_modules(_get_visible_items(ct), EXPECTED_MODULES) + + def test_utils(self): + expected = [ + "convert_double_to_float_multiarray_type", + "evaluate_classifier", + "evaluate_classifier_with_probabilities", + "evaluate_regressor", + "evaluate_transformer", + "make_pipeline", + "load_spec", + "rename_feature", + "save_spec", + ] + _check_visible_modules(_get_visible_items(ct.utils), expected) + + def test_models(self): + expected = [ + "MLModel", + "datatypes", + "feature_vectorizer", + "ml_program", + "model", + "nearest_neighbors", + "neural_network", + "pipeline", + "tree_ensemble", + "utils", + ] + _check_visible_modules(_get_visible_items(ct.models), expected) + + def test_models_mlmodel(self): + expected = [ + "author", + "get_spec", + "input_description", + "license", + "output_description", + "predict", + "save", + "short_description", + "user_defined_metadata", + "version", + "weights_dir", + ] + _check_visible_modules(_get_visible_items(ct.models.MLModel), expected) + + def test_models_neural_network(self): + expected = [ + "AdamParams", + "NeuralNetworkBuilder", + "SgdParams", + "builder", + "flexible_shape_utils", + "optimization_utils", + "printer", + "quantization_utils", + "spec_inspection_utils", + "update_optimizer_utils", + "utils", + ] + _check_visible_modules(_get_visible_items(ct.models.neural_network), expected) + + def test_models_neural_network_utils(self): + expected = ["NeuralNetworkBuilder", "make_image_input", "make_nn_classifier"] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.utils), expected + ) + + def test_models_tree_ensemble(self): + expected = [ + "TreeEnsembleBase", + "TreeEnsembleClassifier", + "TreeEnsembleRegressor", + "set_classifier_interface_params", + "set_regressor_interface_params", + ] + _check_visible_modules(_get_visible_items(ct.models.tree_ensemble), expected) + + def test_models_pipeline(self): + expected = [ + "Pipeline", + "PipelineClassifier", + "PipelineRegressor", + "set_classifier_interface_params", + "set_regressor_interface_params", + "set_training_features", + "set_transform_interface_params", + ] + _check_visible_modules(_get_visible_items(ct.models.pipeline), expected) + + def test_converters(self): + expected = [ + "ClassifierConfig", + "ColorLayout", + "EnumeratedShapes", + "ImageType", + "RangeDim", + "Shape", + "TensorType", + "convert", + "libsvm", + "mil", + "sklearn", + "xgboost", + ] + _check_visible_modules(_get_visible_items(ct.converters), expected) + + def test_converters_libsvm(self): + _check_visible_modules(_get_visible_items(ct.converters.libsvm), ["convert"]) + + def test_converters_sklearn(self): + _check_visible_modules(_get_visible_items(ct.converters.sklearn), ["convert"]) + + def test_converters_xgboost(self): + _check_visible_modules(_get_visible_items(ct.converters.xgboost), ["convert"]) + + def 
test_models_neural_network_quantization_utils(self): + expected = [ + "AdvancedQuantizedLayerSelector", + "MatrixMultiplyLayerSelector", + "ModelMetrics", + "NoiseMetrics", + "OutputMetric", + "QuantizedLayerSelector", + "TopKMetrics", + "activate_int8_int8_matrix_multiplications", + "compare_models", + "quantize_weights", + ] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.quantization_utils), expected + ) + + def test_compression_utils(self): + expected = [ + "affine_quantize_weights", + "palettize_weights", + "sparsify_weights", + "decompress_weights", + ] + _check_visible_modules( + _get_visible_items(ct.compression_utils), expected + ) + + def test_models_neural_network_flexible_shape_utils(self): + expected = [ + "NeuralNetworkImageSize", + "NeuralNetworkImageSizeRange", + "NeuralNetworkMultiArrayShape", + "NeuralNetworkMultiArrayShapeRange", + "Shape", + "ShapeRange", + "Size", + "add_enumerated_image_sizes", + "add_enumerated_multiarray_shapes", + "add_multiarray_ndshape_enumeration", + "set_multiarray_ndshape_range", + "update_image_size_range", + "update_multiarray_shape_range", + ] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.flexible_shape_utils), expected + ) + + def test_models_neural_network_update_optimizer_utils(self): + expected = ["AdamParams", "Batch", "RangeParam", "SgdParams"] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.update_optimizer_utils), + expected, + ) + + def test_models_neural_network_optimization_utils(self): + _check_visible_modules( + _get_visible_items(ct.models.neural_network.optimization_utils), [], + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py new file mode 100644 index 00000000..9293abe9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017 - 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py new file mode 100644 index 00000000..c0818346 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py @@ -0,0 +1,68 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile +import unittest + +import numpy as np + +from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader +from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter + + +class WeightTest(unittest.TestCase): + def setUp(self): + self.working_dir = tempfile.mkdtemp() + + def tearDown(self): + if os.path.exists(self.working_dir): + shutil.rmtree(self.working_dir) + + def test_weight_blob_int8(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([-5, -2, 0, 2, 5], dtype=np.int8) + offset = writer.write_int8_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_int8_data(offset), np.int8) + np.testing.assert_equal(input_arr, output_arr) + + def test_weight_blob_uint8(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([1, 2, 3, 4, 5], dtype=np.uint8) + offset = writer.write_uint8_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_uint8_data(offset), np.uint8) + np.testing.assert_almost_equal(input_arr, output_arr) + + def test_weight_blob_fp16(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([2.3, 4.6, 7.9], dtype=np.float16) + input_arr_to_bytes_uint16 = np.frombuffer(input_arr.tobytes(), np.uint16) + offset = writer.write_fp16_data(input_arr_to_bytes_uint16) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr_uint16 = np.array(reader.read_fp16_data(offset), np.uint16) + output_arr = np.frombuffer(output_arr_uint16.tobytes(), np.float16) + np.testing.assert_almost_equal(input_arr, output_arr) + + def test_weight_blob_fp32(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([1.0, 2.4, 3.9, -4.8, 5.2], dtype=np.float32) + offset = writer.write_float_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_float_data(offset), np.float32) + np.testing.assert_almost_equal(input_arr, output_arr) + +if __name__ == "__main__": + unittest.main() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py new file mode 100644 index 00000000..9fcc9060 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py new file mode 100644 index 00000000..283efbfb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py @@ -0,0 +1,432 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+import torch
+
+import coremltools as ct
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.testing_utils import get_op_types_in_program
+
+
+def create_unique_weight(weight, nbits):
+    shape = weight.detach().numpy().shape
+    size = weight.detach().numpy().size
+
+    # number of palette entries representable in `nbits` bits
+    unique_number = 1 << nbits
+    weight = []
+    partition_len = size // unique_number + 1
+    for i in range(unique_number):
+        weight += [i] * partition_len
+    weight = np.reshape(np.array(weight[:size]).astype(np.float32), shape)
+    return weight
+
+def get_test_model_and_data(multi_layer=False):
+    inputs = [ct.TensorType(name="data", shape=(1, 64, 10, 10))]
+    torch_input_values = [torch.rand(*i.shape.to_list()) for i in inputs]
+    coreml_input_values = {
+        i.name: val.detach().numpy() for i, val in zip(inputs, torch_input_values)
+    }
+    if multi_layer:
+        class Model(torch.nn.Module):
+            def __init__(self):
+                super(Model, self).__init__()
+                self.conv_1 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=2)
+                self.conv_2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=2)
+
+            def forward(self, x):
+                conv_1 = self.conv_1(x)
+                conv_2 = self.conv_2(conv_1)
+                return conv_2
+
+        model = Model().eval()
+    else:
+        model = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=2)
+
+    return model, inputs, torch_input_values, coreml_input_values
+
+
+class TestCompressionUtils:
+
+    affine_quantize_weights = ct.compression_utils.affine_quantize_weights
+    palettize_weights = ct.compression_utils.palettize_weights
+    sparsify_weights = ct.compression_utils.sparsify_weights
+    decompress_weights = ct.compression_utils.decompress_weights
+
+    @staticmethod
+    def verify_model_outputs(model, compressed_model, input_values):
+        """
+        This utility function does the following checks:
+
+        (1) Verify the output of the compressed model has the same shape / type as the original model
+        (2) The decompressed and compressed model have the same numerical outputs
+        """
+
+        # Make sure the model can be decompressed
+        decompressed_model = TestCompressionUtils.decompress_weights(compressed_model)
+
+        # Validate the output shape / type
+        ref_outputs = model._mil_program.functions["main"].outputs
+        outputs = compressed_model._mil_program.functions["main"].outputs
+
+        assert len(ref_outputs) == len(outputs)
+
+        for a, b in zip(ref_outputs, outputs):
+            assert a.name == b.name
+            assert a.shape == b.shape
+            assert a.dtype == b.dtype
+
+        if ct.utils._macos_version() < (13, 0):
+            return
+
+        # Validate that the compressed model could be decompressed, and produces correct outputs
+        output_dict = compressed_model.predict(input_values)
+        de_output_dict = decompressed_model.predict(input_values)
+        for k, v in de_output_dict.items():
+            assert k in output_dict
+            np.testing.assert_allclose(v, output_dict[k])
+
+    @staticmethod
+    def test_op_selector():
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+        mlmodel_no_quantized = TestCompressionUtils.affine_quantize_weights(mlmodel, mode="linear", op_selector=lambda const_op: const_op.val.val.size > 1e7)
+        expected_ops = ['cast', 'conv', 'cast']
+        assert 
get_op_types_in_program(mlmodel_no_quantized._mil_program) == expected_ops + + @staticmethod + @pytest.mark.skipif(not _HAS_SKLEARN, reason="Missing scikit-learn. Skipping tests.") + def test_weight_decompression(): + """ + This test is doing the following steps + + (1) compress a model with two conv layers into a compressed model with two different constexpr ops + + [Original model]: + + weight_1 weight_2 + | | + v v + input -> conv_1 -----> conv_2 ---> output + + + [Compressed model]: + + weight_1_lut weight_2_affine + | | + v v + input -> conv_1 ------> conv_2 ---> output + + , where weight_1_lut is a constexpr_lut_to_dense op and weight_2_affine is a constexpr_affine_dequantize op + + (2) decompress the compressed model + + [Decompressed model]: + + weight_1_new weight_2_new + | | + v v + input -> conv_1 ------> conv_2 ---> output + + , note that, weight_1_new is equivalent to weight_1_lut, and weight_2_new is equivalent to weight_2_affine + """ + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data(multi_layer=True) + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + + # we first compress the model + mlmodel = TestCompressionUtils.palettize_weights(mlmodel, mode="kmeans", nbits=4, op_selector=lambda const_op: const_op.name == "conv_1_weight_to_fp16") + mlmodel = TestCompressionUtils.affine_quantize_weights(mlmodel, mode="linear", op_selector=lambda const_op: const_op.name == "conv_2_weight_to_fp16") + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'constexpr_affine_dequantize', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel._mil_program) == expected_ops + + # decompress the model + decompressed_model = TestCompressionUtils.decompress_weights(mlmodel) + assert get_op_types_in_program(decompressed_model._mil_program) == ['cast', 'conv', 'conv', 'cast'] + + if ct.utils._macos_version() < (13, 0): + return + + # compared the numerical outputs + output_dict = mlmodel.predict(coreml_input_values) + de_output_dict = decompressed_model.predict(coreml_input_values) + + for k, v in output_dict.items(): + assert k in de_output_dict + np.testing.assert_allclose(v, de_output_dict[k]) + + @staticmethod + def test_compression_utils_error_handling(): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + + # Test invalid mode for affine quantization + expected_err_str = "supported for weight affine quantization. Got mode" + with pytest.raises(ValueError, match=expected_err_str): + TestCompressionUtils.affine_quantize_weights(mlmodel, mode="invalid_mode") + + # Test invalid dtype for affine quantization + expected_err_str = "is unsupported for affine_quantize_weight" + with pytest.raises(ValueError, match=expected_err_str): + TestCompressionUtils.affine_quantize_weights(mlmodel, dtype=np.int32) + + with pytest.raises(ValueError, match=expected_err_str): + TestCompressionUtils.affine_quantize_weights(mlmodel, dtype="int32") + + # Test invalid mode for weight sparsification + expected_err_str = "supported for weight sparsification. Got mode" + with pytest.raises(ValueError, match=expected_err_str): + TestCompressionUtils.sparsify_weights(mlmodel, mode="invalid_mode") + + # Test invalid threshold for weight sparsification + expected_err_str = "Invalid value of threshold: \-1. 
Needs to be in \[0, inf\)"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.sparsify_weights(mlmodel, mode="threshold_based", threshold=-1)
+
+        # Test invalid percentile for weight sparsification
+        expected_err_str = "Invalid value of target_percentile: 1.2. Needs to be in \[0, 1\]"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.sparsify_weights(mlmodel, mode="percentile_based", target_percentile=1.2)
+
+        # Test invalid mode for weight palettization
+        expected_err_str = "supported for weight palettization. Got mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="invalid_mode")
+
+        # Test nbits must be provided for kmeans, uniform mode for weight palettization
+        expected_err_str = "nbits must be provided for mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="kmeans")
+
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="uniform")
+
+        # Test nbits must not be provided for unique, custom mode for weight palettization
+        expected_err_str = "nbits must NOT be provided for mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="unique", nbits=2)
+
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom", nbits=2)
+
+        # Test lut_function must be provided for custom mode, and must not be provided otherwise
+        expected_err_str = "lut_function must be None if mode is not custom, and that it cannot be None when the mode is custom."
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom")
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="unique", lut_function=lambda op: True)
+
+        # Test lut_function must be a function object
+        expected_err_str = "A function object must be provided as lut_function"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom", lut_function=1)
+
+
+    @staticmethod
+    @pytest.mark.parametrize(
+        "mode, dtype",
+        itertools.product(
+            ("linear", "linear_symmetric"),
+            (np.int8, np.uint8, types.int8, types.uint8),
+        ),
+    )
+    def test_linear_quantization(mode, dtype):
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        mlmodel_quantized = TestCompressionUtils.affine_quantize_weights(mlmodel, mode=mode, dtype=dtype)
+
+        # validate parameters
+        expected_ops = ['constexpr_affine_dequantize', 'cast', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel_quantized._mil_program) == expected_ops
+
+        quantize_op = mlmodel_quantized._mil_program.functions["main"].find_ops(op_type="constexpr_affine_dequantize")[0]
+        assert model.weight.detach().numpy().shape == quantize_op.quantized_data.shape
+
+        TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_quantized, coreml_input_values)
+
+    @staticmethod
+    @pytest.mark.parametrize(
+        "threshold",
+        (0.0, 0.001, 1e2),
+    )
+    def test_weight_sparsify_threshold_based(threshold):
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        with torch.no_grad():
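+            # Pin one weight above the largest tested threshold (1e2) so the
+            # threshold=1e2 case below keeps exactly one non-sparse element.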
model.weight[0][0][0][0] = 101 + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_sparsified = TestCompressionUtils.sparsify_weights(mlmodel, mode="threshold_based", threshold=threshold) + + # validate parameters + expected_ops = ['constexpr_sparse_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_sparsified._mil_program) == expected_ops + + main_func = mlmodel_sparsified._mil_program.functions["main"] + sparse_to_dense_op = main_func.find_ops(op_type="constexpr_sparse_to_dense")[0] + non_sparse_data = sparse_to_dense_op.nonzero_data + + if threshold != 1e2: + assert np.min(np.absolute(non_sparse_data.val)) >= threshold + else: + assert non_sparse_data.val.size == 1 + + assert sparse_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_sparsified, coreml_input_values) + + @staticmethod + @pytest.mark.parametrize( + "percentile", + (0., 0.5, 1.0), + ) + def test_weight_sparsify_percentile_based(percentile): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_sparsified = TestCompressionUtils.sparsify_weights(mlmodel, mode="percentile_based", target_percentile=percentile) + + # validate parameters + expected_ops = ['constexpr_sparse_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_sparsified._mil_program) == expected_ops + + main_func = mlmodel_sparsified._mil_program.functions["main"] + sparse_to_dense_op = main_func.find_ops(op_type="constexpr_sparse_to_dense")[0] + non_sparse_data = sparse_to_dense_op.nonzero_data + weight = model.weight.detach().numpy() + + if percentile == 0.: + assert non_sparse_data.val.size == weight.size - 1 + elif percentile == 0.5: + assert non_sparse_data.val.size <= 0.51 * (weight.size) and non_sparse_data.val.size >= 0.49 * (weight.size) + else: + assert non_sparse_data.val.size == 0 + + assert sparse_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_sparsified, coreml_input_values) + + @staticmethod + @pytest.mark.parametrize( + "mode", + ("uniform", "kmeans") if _HAS_SKLEARN else ("uniform",) + ) + def test_weight_palettization(mode): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, nbits=4, mode=mode) + + # validate parameters + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + + assert lut_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) + + @staticmethod + def test_weight_palettization_unique_case_1(): + # In this model, both conv weights can be palettized + model, inputs, torch_input_values, coreml_input_values 
= get_test_model_and_data(multi_layer=True) + + weight_1_unique = create_unique_weight(model.conv_1.weight, nbits=2) + weight_2_unique = create_unique_weight(model.conv_2.weight, nbits=6) + + with torch.no_grad(): + model.conv_1.weight = torch.nn.Parameter(torch.Tensor(weight_1_unique)) + model.conv_2.weight = torch.nn.Parameter(torch.Tensor(weight_2_unique)) + + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + + # validate parameters + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="unique") + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'constexpr_lut_to_dense', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op_1 = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + lut_to_dense_op_2 = main_func.find_ops(op_type="constexpr_lut_to_dense")[1] + + assert lut_to_dense_op_1.shape.val.tolist() == list(model.conv_1.weight.detach().numpy().shape) + assert lut_to_dense_op_2.shape.val.tolist() == list(model.conv_2.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) + + @staticmethod + def test_weight_palettization_unique_case_2(caplog): + # In this model, only one conv weights can be palettized, the converter should warn the users that one weight is skipped + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data(multi_layer=True) + + weight_1_unique = create_unique_weight(model.conv_1.weight, nbits=2) + + with torch.no_grad(): + model.conv_1.weight = torch.nn.Parameter(torch.Tensor(weight_1_unique)) + + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + + # validate parameters + # converter should warn the user that one weight is not compressed + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="unique") + warning_msg = "weight value cannot be represented in an 8 bits palettization. Skipped." + assert any([warning_msg in rec.message for rec in caplog.records]) + + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op_1 = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + assert lut_to_dense_op_1.shape.val.tolist() == list(model.conv_1.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) + + @staticmethod + def test_weight_palettization_custom(): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + + def lut_function(weight): + nbits = 4 + weight = weight.flatten() + unique_elements = np.unique(weight) + k = (1 << nbits) - 1 + top_k = np.partition(weight, -k)[-k:] + np.sort(top_k) + lut = np.array([0.] 
+ top_k.tolist()).astype(weight.dtype) + mapping = {v: idx for idx, v in enumerate(lut)} + indices = np.array([mapping[v] if v in mapping else 0 for v in weight]).astype(np.uint8) + return lut, indices + + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="custom", lut_function=lut_function) + + # validate parameters + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + + assert lut_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py new file mode 100644 index 00000000..9293abe9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017 - 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py new file mode 100644 index 00000000..8d2f97e1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil + +import numpy as np +import torch + +import coremltools as ct +from coremltools._deps import _IS_MACOS +from coremltools.models.model import MLModel +from coremltools.models.utils import _macos_version + + +def test_mlmodel_demo(tmpdir): + NUM_TOKENS = 3 + EMBEDDING_SIZE = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(NUM_TOKENS, EMBEDDING_SIZE) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=NUM_TOKENS, size=(2,), + dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + mlmodel = ct.convert( + traced_model, + source='pytorch', + convert_to='mlprogram', + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + compute_precision=ct.precision.FLOAT32, + compute_units=ct.ComputeUnit.CPU_ONLY + ) + assert isinstance(mlmodel, MLModel) + + # mlpackage_path is a model package + mlpackage_path = os.path.join(str(tmpdir), 'mymodel.mlpackage') + mlmodel.save(mlpackage_path) + + # Read back the saved bundle and compile + mlmodel2 = MLModel(mlpackage_path) + + if not _IS_MACOS or _macos_version() < (12, 0): + # Can not get predictions unless on macOS 12 or higher. 
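+        # Still clean up the temporary .mlpackage before returning early.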
+ shutil.rmtree(mlpackage_path) + return + + result = mlmodel2.predict( + {"input": example_input.cpu().detach().numpy().astype(np.float32)}, + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.cpu().detach().numpy()) + + # Cleanup package + shutil.rmtree(mlpackage_path) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py new file mode 100644 index 00000000..887f7788 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py @@ -0,0 +1,519 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile + +import numpy as np +import pytest + +import coremltools +from coremltools import ComputeUnit, utils +from coremltools.converters.mil import Builder as mb +from coremltools.libmodelpackage import ModelPackage +from coremltools.models import MLModel +from coremltools.models.utils import (_MLPACKAGE_AUTHOR_NAME, + _WEIGHTS_DIR_NAME) +from coremltools.proto import Model_pb2 + + +def _remove_path(path): + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + +class TestMLModel: + + def setup_class(self): + + spec = Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + features = ["feature_1", "feature_2"] + output = "output" + for f in features: + input_ = spec.description.input.add() + input_.name = f + input_.type.doubleType.MergeFromString(b"") + + output_ = spec.description.output.add() + output_.name = output + output_.type.doubleType.MergeFromString(b"") + + lr = spec.glmRegressor + lr.offset.append(0.1) + weights = lr.weights.add() + coefs = [1.0, 2.0] + for i in coefs: + weights.value.append(i) + + spec.description.predictedFeatureName = "output" + self.spec = spec + + def test_model_creation(self): + model = MLModel(self.spec) + assert model is not None + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + utils.save_spec(self.spec, package.name) + model = MLModel(package.name) + assert model is not None + + # cleanup + _remove_path(package.name) + + def test_model_api(self): + model = MLModel(self.spec) + assert model is not None + + model.author = "Test author" + assert model.author == "Test author" + assert model.get_spec().description.metadata.author == "Test author" + + model.license = "Test license" + assert model.license == "Test license" + assert model.get_spec().description.metadata.license == "Test license" + + model.short_description = "Test model" + assert model.short_description == "Test model" + assert model.get_spec().description.metadata.shortDescription == "Test model" + + model.version = "1.3" + assert model.version == "1.3" + assert model.get_spec().description.metadata.versionString == "1.3" + + model.input_description["feature_1"] = "This is feature 1" + assert model.input_description["feature_1"] == "This is feature 1" + + model.output_description["output"] = "This is output" + assert model.output_description["output"] == "This is output" + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + 
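+        # NOTE: the checks below run against the in-memory `model`; a follow-up
+        # not in the original test would be to assert the same fields on
+        # `loaded_model`, e.g. `assert loaded_model.author == "Test author"`,
+        # to confirm the metadata survives the save/load round trip.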
assert model.author == "Test author" + assert model.license == "Test license" + assert model.short_description == "Test model" + assert model.input_description["feature_1"] == "This is feature 1" + assert model.output_description["output"] == "This is output" + + # cleanup + _remove_path(package.name) + + def test_predict_api(self): + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + + if utils._macos_version() >= (12, 0): + for compute_units in coremltools.ComputeUnit: + if (compute_units == coremltools.ComputeUnit.CPU_AND_NE + and utils._macos_version() < (13, 0)): + continue + + loaded_model = MLModel(package.name, compute_units=compute_units) + + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + assert loaded_model.compute_unit == compute_units + else: + # just check if we can load it + loaded_model = MLModel(package.name) + + # cleanup + _remove_path(package.name) + + def test_rename_input(self): + utils.rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True) + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"renamed_feature": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + # reset the spec for next run + utils.rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True) + + # cleanup + _remove_path(package.name) + + def test_rename_input_bad(self): + utils.rename_feature(self.spec, "blah", "bad_name", rename_inputs=True) + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + # cleanup + _remove_path(package.name) + + def test_save(self): + model = MLModel(self.spec) + + # Verify "save" can be called twice and the saved + # model can be loaded successfully each time + for _ in range(0, 2): + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + _remove_path(package.name) + + def test_save_in_place(self): + model = MLModel(self.spec) + + # Verify "save" can be called twice and the saved + # model can be loaded successfully each time + # the mlpackage remains in place after the first save + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + for _ in range(2): + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + _remove_path(package.name) + + def test_mil_as_package(self): + import torch + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, 
embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + temp_package_dir = tempfile.TemporaryDirectory(suffix=".mlpackage") + for converted_package_path in [None, temp_package_dir.name]: + mlmodel = coremltools.convert( + traced_model, + package_dir=converted_package_path, + source='pytorch', + convert_to='mlprogram', + compute_precision=coremltools.precision.FLOAT32, + inputs=[ + coremltools.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + assert isinstance(mlmodel, MLModel) + + package_path = tempfile.mkdtemp(suffix=".mlpackage") + mlmodel.save(package_path) + + assert ModelPackage.isValid(package_path) + assert os.path.exists(ModelPackage(package_path).getRootModel().path()) + + # Read back the saved bundle and compile + mlmodel2 = MLModel(package_path, compute_units=ComputeUnit.CPU_ONLY) + + if utils._macos_version() >= (12, 0): + result = mlmodel2.predict( + {"input": example_input.cpu().detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.cpu().detach().numpy()) + + # Cleanup package + shutil.rmtree(package_path) + + tmp_package_path = mlmodel.package_path + assert os.path.exists(tmp_package_path) + del mlmodel + if converted_package_path is not None: + # Verify we leave the provided package dir alone + assert os.path.exists(tmp_package_path) + + temp_package_dir.cleanup() + + def test_model_save_no_extension(self): + import torch + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + mlmodel = coremltools.convert( + traced_model, + package_dir=None, + source='pytorch', + convert_to='mlprogram', + inputs=[ + coremltools.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + assert isinstance(mlmodel, MLModel) + + package = tempfile.TemporaryDirectory() + package.cleanup() + package_path = package.name + + mlmodel.save(package_path) + assert not os.path.exists(package_path) + + package_path = package_path + ".mlpackage" + assert os.path.exists(package_path) + + shutil.rmtree(package_path) + +class TestSpecAndMLModelAPIs: + + def setup_class(self): + # define an mlprogram, which has weights + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5000))]) + def linear_prog(input): + W = mb.const(val=np.random.rand(100, 5000), name="const_W") + out = mb.linear(x=input, weight=W, name="output") + return out + + # define another mlprogram, which does not have weights + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5, 2))]) + def relu_prog(input): + out = mb.relu(x=input, name="output") + return out + + # convert and save model on disk + self.mlmodel = coremltools.convert(linear_prog, convert_to="mlprogram") + self.mlpackage_path = tempfile.mkdtemp(suffix=utils._MLPACKAGE_EXTENSION) + self.mlmodel.save(self.mlpackage_path) + self.mlmodel_no_weights = coremltools.convert(relu_prog, 
convert_to="mlprogram") + + def teardown_class(self): + _remove_path(self.mlpackage_path) + self.mlmodel = None + self.mlmodel_no_weights = None + + def _test_mlmodel_correctness(self, mlmodel): + """ + :param mlmodel: coremltools.models.MLModel + Test the following: + - calling .predict on mlmodel works correctly + - calling .save on mlmodel works correctly + """ + # construct input dictionary + spec = mlmodel.get_spec() + inputs = spec.description.input + input_dict = {} + for input in inputs: + input_dict[input.name] = np.random.rand(*tuple(input.type.multiArrayType.shape)) + # check prediction + preds = mlmodel.predict(input_dict) + assert preds is not None + # save, load and predict again to check that the saving and loading worked correctly + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as temp_path: + mlmodel.save(temp_path) + mlmodel_reloaded = MLModel(temp_path) + preds = mlmodel_reloaded.predict(input_dict) + assert preds is not None + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_mlmodel_to_spec_to_mlmodel(self): + """ + convert mlmodel to spec, and then back to mlmodel and verify that it works + """ + spec = self.mlmodel.get_spec() + # reload the model from the spec and verify it + weights_dir = self.mlmodel.weights_dir + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + # check that the original model still works + self._test_mlmodel_correctness(self.mlmodel) + # check that an error is raised when MLModel is initialized without the weights + with pytest.raises(Exception, match="MLModel of type mlProgram cannot be loaded just from the model " + "spec object. It also needs the path to the weights file. " + "Please provide that as well, using the 'weights_dir' argument."): + MLModel(spec) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_path_to_mlmodel_to_spec_to_mlmodel(self): + """ + load an mlmodel from disk, convert it to spec, and then convert the spec back to mlmodel + """ + mlmodel_from_disk = MLModel(self.mlpackage_path) + spec = mlmodel_from_disk.get_spec() + mlmodel_from_spec = MLModel(spec, weights_dir=mlmodel_from_disk.weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_path_to_spec_to_mlmodel(self): + """ + load a spec from disk, then convert it to mlmodel, and check that it works + """ + spec = utils.load_spec(self.mlpackage_path) + weights_dir = self.mlpackage_path + "/Data/" + _MLPACKAGE_AUTHOR_NAME + "/weights" + mlmodel = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_save_spec_api_mlprogram_without_weights_dir(self): + """ + save an mlpackage using the save_spec API. It should error out because no weights dir. + """ + spec = self.mlmodel.get_spec() + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as model_path: + # this should raise error: + with pytest.raises(Exception, match="spec of type mlProgram cannot be saved without" + " the weights file. 
Please provide the path to " + "the weights file as well, using the 'weights_dir' argument."): + utils.save_spec(spec, model_path) + + @pytest.mark.skipif( + utils._macos_version() < (12, 0), + reason="prediction on mlprogram model " "available only on macOS12+", + ) + def test_save_spec_api(self): + """ + save an mlpackage using the save_spec API. Reload the model from disk and verify it works + """ + spec = self.mlmodel.get_spec() + with tempfile.TemporaryDirectory( + suffix=utils._MLPACKAGE_EXTENSION + ) as model_path: + utils.save_spec(spec, model_path, weights_dir=self.mlmodel.weights_dir) + model = MLModel(model_path) + self._test_mlmodel_correctness(model) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_save_spec_api_model_with_no_weights(self): + """ + save an mlprogram model with no weights, using the save SPI and an empty weights directory. + Reload the model from disk and verify it works + """ + spec = self.mlmodel_no_weights.get_spec() + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as model_path: + with tempfile.TemporaryDirectory() as empty_weight_dir: + utils.save_spec(spec, model_path, weights_dir=empty_weight_dir) + model = MLModel(model_path) + self._test_mlmodel_correctness(model) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_mlmodel_to_spec_to_mlmodel_with_no_weights_model(self): + """ + convert mlmodel to spec, and then back to mlmodel and verify that it works + """ + spec = self.mlmodel_no_weights.get_spec() + # if no weights_dir is passed, error will be raised + with pytest.raises(Exception, match="MLModel of type mlProgram cannot be loaded just from the model " + "spec object. It also needs the path to the weights file. 
" + "Please provide that as well, using the 'weights_dir' argument."): + MLModel(spec) + + # weights_dir will still exist, even though the model has no weights, + # with a weights file that only has header and no data + weights_dir = self.mlmodel_no_weights.weights_dir + assert weights_dir is not None + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + # load mlmodel from spec using an empty weights_dir + with tempfile.TemporaryDirectory() as empty_weight_dir: + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + def test_weights_path_correctness(self): + """ + test that after reloading an mlmodel from the spec, the weights path is updated + """ + spec = self.mlmodel.get_spec() + original_weight_dir_path = self.mlmodel.weights_dir + assert os.path.exists(original_weight_dir_path) + # load mlmodel from spec: this will create a new mlpackage in a temp location + # and copy over the weights + mlmodel_reloaded = MLModel(spec, weights_dir=original_weight_dir_path) + assert os.path.exists(mlmodel_reloaded.weights_dir) + assert mlmodel_reloaded.weights_dir != original_weight_dir_path + assert mlmodel_reloaded.weights_dir == mlmodel_reloaded.package_path + "/Data/" \ + + _MLPACKAGE_AUTHOR_NAME + "/weights" + + def test_weights_dir_discovery_method(self): + """ + Test "coremltools.libmodelpackage.ModelPackage.findItemByNameAuthor" function + """ + mlpackage = ModelPackage(self.mlpackage_path) + model_package_item_info = mlpackage.findItemByNameAuthor(_WEIGHTS_DIR_NAME, _MLPACKAGE_AUTHOR_NAME) + weights_dir_path = model_package_item_info.path() + assert weights_dir_path == self.mlpackage_path + "/Data/" + _MLPACKAGE_AUTHOR_NAME + "/weights" + # verify that findItemByNameAuthor returns None, when item not found + model_package_item_info = mlpackage.findItemByNameAuthor(_WEIGHTS_DIR_NAME, "inexistent_author_name") + assert model_package_item_info is None diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py new file mode 100644 index 00000000..5c5d1f88 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py @@ -0,0 +1,89 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import tempfile +import unittest + +import numpy as np + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools.models import neural_network as neural_network +from coremltools.models.utils import _is_macos, _macos_version + + +class SimpleTest(unittest.TestCase): + def test_fixed_seq_len(self): + """ + Input has a fixed sequence length. + (this happens when model is trained using padded sequences, inspiration: https://forums.developer.apple.com/thread/80407) + + (Seq,Batch,C,H,W) + embedding: input shape (15,1,1,1,1) --> output shape (15,1,32,1,1) + permute : input shape (15,1,32,1,1) --> output shape (1,1,32,1,15) + flatten : input shape (1,1,32,1,15) --> output shape (1,1,32 * 15,1,1) + dense : input shape (1,1,480,1,1) --> output shape (1,1,2,1,1) + """ + + coreml_preds = [] + input_dim = (1, 1, 1) + output_dim = ( + 1, + 1, + 1, + ) # some random dimensions here: we are going to remove this information later + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*output_dim))] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + # ADD Layers + builder.add_embedding( + "embed", + W=np.random.rand(10, 32), + b=None, + input_dim=10, + output_channels=32, + has_bias=0, + input_name="data", + output_name="embed", + ) + builder.add_permute( + "permute", dim=[3, 1, 2, 0], input_name="embed", output_name="permute" + ) + builder.add_flatten( + "flatten", mode=0, input_name="permute", output_name="flatten" + ) + builder.add_inner_product( + "dense", + W=np.random.rand(480, 2), + b=None, + input_channels=480, + output_channels=2, + has_bias=0, + input_name="flatten", + output_name="output", + ) + + # Remove output shape by deleting and adding an output + del builder.spec.description.output[-1] + output = builder.spec.description.output.add() + output.name = "output" + output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value( + "DOUBLE" + ) + + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + # preprare input and get predictions + coreml_model = coremltools.models.MLModel(model_path) + X = np.random.randint(low=0, high=10, size=15) + X = np.reshape(X, (15, 1, 1, 1, 1)).astype(np.float32) + coreml_input = {"data": X} + if _is_macos() and _macos_version() >= (10, 13): + coreml_preds = coreml_model.predict(coreml_input)["output"] + self.assertEqual(len(coreml_preds.flatten()), 2) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py new file mode 100644 index 00000000..1b08d187 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py @@ -0,0 +1,569 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import tempfile +import unittest + +import numpy as np +import PIL.Image + +import coremltools +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TORCH +from coremltools.converters.mil import Builder as mb +from coremltools.models import MLModel, datatypes +from coremltools.models.neural_network import NeuralNetworkBuilder +from coremltools.models.neural_network.utils import (make_image_input, + make_nn_classifier) +from coremltools.models.utils import ( + _convert_neural_network_spec_weights_to_fp16, _is_macos, _macos_version, + convert_double_to_float_multiarray_type, rename_feature, save_spec) +from coremltools.proto import Model_pb2 + +if _HAS_TORCH: + import torch as _torch + + +class MLModelTest(unittest.TestCase): + @classmethod + def setUpClass(self): + + spec = Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + features = ["feature_1", "feature_2"] + output = "output" + for f in features: + input_ = spec.description.input.add() + input_.name = f + input_.type.doubleType.MergeFromString(b"") + + output_ = spec.description.output.add() + output_.name = output + output_.type.doubleType.MergeFromString(b"") + + lr = spec.glmRegressor + lr.offset.append(0.1) + weights = lr.weights.add() + coefs = [1.0, 2.0] + for i in coefs: + weights.value.append(i) + + spec.description.predictedFeatureName = "output" + self.spec = spec + + def test_model_creation(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename) + model = MLModel(filename) + self.assertIsNotNone(model) + + def test_model_save_no_extension(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + filename = tempfile.mktemp(suffix="") + save_spec(self.spec, filename) # appends .mlmodel extension when it is not provided + self.assertFalse(os.path.exists(filename)) + + filename = filename + ".mlmodel" + self.assertTrue(os.path.exists(filename)) + + model = MLModel(filename) + self.assertIsNotNone(model) + os.remove(filename) + + def test_model_api(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + model.author = "Test author" + self.assertEqual(model.author, "Test author") + self.assertEqual(model.get_spec().description.metadata.author, "Test author") + + model.license = "Test license" + self.assertEqual(model.license, "Test license") + self.assertEqual(model.get_spec().description.metadata.license, "Test license") + + model.short_description = "Test model" + self.assertEqual(model.short_description, "Test model") + self.assertEqual( + model.get_spec().description.metadata.shortDescription, "Test model" + ) + + model.version = "1.3" + self.assertEqual(model.version, "1.3") + self.assertEqual(model.get_spec().description.metadata.versionString, "1.3") + + model.input_description["feature_1"] = "This is feature 1" + self.assertEqual(model.input_description["feature_1"], "This is feature 1") + + model.output_description["output"] = "This is output" + self.assertEqual(model.output_description["output"], "This is output") + + filename = tempfile.mktemp(suffix=".mlmodel") + model.save(filename) + loaded_model = MLModel(filename) + + self.assertEqual(model.author, "Test author") + self.assertEqual(model.license, "Test license") + # self.assertEqual(model.short_description, 'Test model') 
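+        # (The remaining checks run against the in-memory `model`; `loaded_model`
+        # above is exercised only by loading successfully.)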
+ self.assertEqual(model.input_description["feature_1"], "This is feature 1") + self.assertEqual(model.output_description["output"], "This is output") + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_predict_api(self): + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_input(self): + rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True) + model = MLModel(self.spec) + preds = model.predict({"renamed_feature": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + # reset the spec for next run + rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_input_bad(self): + rename_feature(self.spec, "blah", "bad_name", rename_inputs=True) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_output(self): + rename_feature( + self.spec, + "output", + "renamed_output", + rename_inputs=False, + rename_outputs=True, + ) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["renamed_output"], 3.1) + rename_feature( + self.spec, + "renamed_output", + "output", + rename_inputs=False, + rename_outputs=True, + ) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_output_bad(self): + rename_feature( + self.spec, "blah", "bad_name", rename_inputs=False, rename_outputs=True + ) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_future_version(self): + self.spec.specificationVersion = 10000 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=False) + model = MLModel(filename) + # this model should exist, but throw an exception when we try to use + # predict because the engine doesn't support this model version + self.assertIsNotNone(model) + with self.assertRaises(Exception): + try: + model.predict({}) + except Exception as e: + assert "Core ML model specification version" in str(e) + raise + self.spec.specificationVersion = 1 + + @unittest.skipUnless( + _is_macos() and _macos_version() < (10, 13), "Only supported on macOS 10.13-" + ) + def test_MLModel_warning(self): + self.spec.specificationVersion = 3 + import warnings + + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. 
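+            # ("always" disables Python's default warning de-duplication, so the
+            # RuntimeWarning is recorded even if it was already raised earlier.)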
+ warnings.simplefilter("always") + model = MLModel(self.spec) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert "not able to run predict()" in str(w[-1].message) + self.spec.specificationVersion = 1 + model = MLModel(self.spec) + + def test_convert_nn_spec_to_half_precision(self): + # simple network with quantization layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + weights = np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="inner_product", + W=weights, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="data", + output_name="out", + ) + model = MLModel(builder.spec) + spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec()) + self.assertIsNotNone(spec) + + # simple network without quantization layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="out", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + model = MLModel(builder.spec) + spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec()) + self.assertIsNotNone(spec) + + @unittest.skip + def test_downgrade_specification_version(self): + # manually set a invalid specification version + self.spec.specificationVersion = -1 + model = MLModel(self.spec) + assert model.get_spec().specificationVersion == 1 + + # manually set a high specification version + self.spec.specificationVersion = 4 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=True) + model = MLModel(filename) + assert model.get_spec().specificationVersion == 1 + + # simple neural network with only spec 1 layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_activation("relu", "RELU", "data", "out") + # set a high specification version + builder.spec.specificationVersion = 3 + model = MLModel(builder.spec) + filename = tempfile.mktemp(suffix=".mlmodel") + model.save(filename) + # load the model back + model = MLModel(filename) + assert model.get_spec().specificationVersion == 1 + + # test save without automatic set specification version + self.spec.specificationVersion = 3 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=False) + model = MLModel(filename) + # the specification version should be original + assert model.get_spec().specificationVersion == 3 + + def test_multiarray_type_convert_to_float(self): + input_features = [("data", datatypes.Array(2))] + output_features = [("out", datatypes.Array(2))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_ceil("ceil", "data", "out") + spec = builder.spec + self.assertEqual( + spec.description.input[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.DOUBLE, + ) + self.assertEqual( + spec.description.output[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.DOUBLE, + ) + convert_double_to_float_multiarray_type(spec) + self.assertEqual( + spec.description.input[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.FLOAT32, + ) + self.assertEqual( + spec.description.output[0].type.multiArrayType.dataType, + 
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_multiarray_to_image_input_util(self):
+        H, W, C = 1, 1, 3
+        input_features = [("data", datatypes.Array(C, H, W))]
+        output_features = [("out", datatypes.Array(C, H, W))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+        mlmodel = make_image_input(
+            mlmodel,
+            "data",
+            red_bias=-5,
+            green_bias=-6,
+            blue_bias=-2.5,
+            scale=10.0,
+            image_format="NCHW",
+        )
+        x = np.array([4, 2, 5], dtype=np.uint8)
+        x = np.reshape(x, (H, W, C))
+        pil_img = PIL.Image.fromarray(x)
+        y = mlmodel.predict({"data": pil_img})["out"]
+        self.assertEqual(y.shape, (C, H, W))
+        # expected: scale * pixel + bias = 10 * [4, 2, 5] + [-5, -6, -2.5]
+        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_multiarray_to_image_input_util_transpose_elimination(self):
+        H, W, C = 1, 1, 3
+        input_features = [("data", datatypes.Array(H, W, C))]
+        output_features = [("out", datatypes.Array(H, W, C))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_transpose("transpose", [2, 0, 1], "data", "transpose")
+        builder.add_activation("linear", "LINEAR", "transpose", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+        mlmodel = make_image_input(
+            mlmodel,
+            "data",
+            red_bias=-5,
+            green_bias=-6,
+            blue_bias=-2.5,
+            scale=10.0,
+            image_format="NHWC",
+        )
+        x = np.array([4, 2, 5], dtype=np.uint8)
+        x = np.reshape(x, (H, W, C))
+        pil_img = PIL.Image.fromarray(x)
+        y = mlmodel.predict({"data": pil_img})["out"]
+        self.assertEqual(y.shape, (H, W, C))
+        # same preprocessing math as the previous test
+        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_multiarray_to_image_input_util_HWC_format(self):
+        H, W, C = 1, 1, 3
+        input_features = [("data", datatypes.Array(H, W, C))]
+        output_features = [("out", datatypes.Array(H, W, C))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+        mlmodel = make_image_input(
+            mlmodel,
+            "data",
+            red_bias=-5,
+            green_bias=-6,
+            blue_bias=-2.5,
+            scale=10.0,
+            image_format="NHWC",
+        )
+        x = np.array([4, 2, 5], dtype=np.uint8)
+        x = np.reshape(x, (H, W, C))
+        pil_img = PIL.Image.fromarray(x)
+        y = mlmodel.predict({"data": pil_img})["out"]
+        self.assertEqual(y.shape, (H, W, C))
+        np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5])
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_nn_classifier_util(self):
+        input_features = [("data", datatypes.Array(3))]
+        output_features = [("out", datatypes.Array(3))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+        mlmodel = make_nn_classifier(
+            mlmodel,
+            class_labels=["a", "b", "c"],
+            predicted_feature_name="out_confidence",
+            predicted_probabilities_output="out",
+        )
+        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])})
+        self.assertEqual(out_dict["out_confidence"], "c")
+        self.assertEqual(
+            mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
+        )
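+
+    # make_nn_classifier rewrites the spec's model type to
+    # "neuralNetworkClassifier" (checked above); class_labels may also be given
+    # as a path to a newline-separated text file, as the next test shows.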
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_nn_classifier_util_file(self):
+        input_features = [("data", datatypes.Array(3))]
+        output_features = [("out", datatypes.Array(3))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+
+        class_labels = ["a", "b", "c"]
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f:
+            f.write("\n".join(class_labels))
+            f.flush()
+            mlmodel = make_nn_classifier(
+                mlmodel,
+                class_labels=f.name,
+                predicted_feature_name="out_confidence",
+                predicted_probabilities_output="out",
+            )
+        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])})
+        self.assertEqual(out_dict["out_confidence"], "c")
+        self.assertEqual(
+            mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
+        )
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_rename_output_nn_classifier(self):
+        input_features = [("data", datatypes.Array(3))]
+        output_features = [("out", datatypes.Array(3))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        mlmodel = MLModel(spec)
+
+        class_labels = ["a", "b", "c"]
+        mlmodel = make_nn_classifier(mlmodel, class_labels=class_labels)
+
+        # rename output
+        spec = mlmodel.get_spec()
+        rename_feature(spec, "out", "new_out_name")
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+
+        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])})
+        self.assertEqual(out_dict["classLabel"], "c")
+        self.assertTrue("new_out_name" in out_dict)
+        self.assertTrue(isinstance(out_dict["new_out_name"], dict))
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_rename_image_input(self):
+        input_features = [("data", datatypes.Array(3, 1, 1))]
+        output_features = [("out", datatypes.Array(3, 1, 1))]
+        builder = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("linear", "LINEAR", "data", "out")
+        spec = builder.spec
+        # make an image input
+        mlmodel = make_image_input(MLModel(spec), "data", image_format="NCHW", scale=2.0)
+        # rename the input
+        spec = mlmodel.get_spec()
+        rename_feature(spec, "data", "new_input_name")
+        mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
+
+        # test: with scale=2.0 the output should be 2 * [4, 5, 6]
+        x = np.array([4, 5, 6], dtype=np.uint8).reshape(1, 1, 3)
+        pil_img = PIL.Image.fromarray(x)
+        out = mlmodel.predict({"new_input_name": pil_img})['out']
+        np.testing.assert_equal(out, np.array([8.0, 10.0, 12.0]).reshape(3, 1, 1))
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (12, 0), "Only supported on macOS 12+"
+    )
+    def test_rename_feature_mlprogram(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(3,))])
+        def linear_prog(input):
+            W = np.ones((10, 3), dtype=np.float32)
+            out = mb.linear(x=input, weight=W, name="output")
+            return out
+
+        model = coremltools.convert(
+            linear_prog,
+            convert_to='mlprogram'
+        )
+
+        spec = model.get_spec()
+        input_name = spec.description.input[0].name
+        output_name = spec.description.output[0].name
+
+        # rename input
+        rename_feature(spec, input_name, "new_input_name")
+        self.assertEqual(spec.description.input[0].name, "new_input_name")
+        model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir)
+        out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})[output_name]
+        self.assertEqual(out.shape, (10,))
+        self.assertEqual(out[0], 6.0)
+
+        # rename output
+        rename_feature(spec, output_name, "new_output_name")
+        self.assertEqual(spec.description.output[0].name, "new_output_name")
+        model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir)
+        out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})["new_output_name"]
+        self.assertEqual(out.shape, (10,))
+        self.assertEqual(out[1], 6.0)
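+
+    # Note: an ML program keeps its weights outside the spec proto, so when an
+    # MLModel is rebuilt from a modified spec the original weights directory
+    # must be carried over (hence weights_dir=model.weights_dir above).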
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (12, 0) and _HAS_TORCH,
+        "Only supported on macOS 12+ (requires PyTorch)"
+    )
+    def test_rename_feature_classifier_mlprogram(self):
+        torch_model = _torch.nn.ReLU().eval()
+        model = coremltools.convert(
+            _torch.jit.trace(torch_model, _torch.rand(3, )),
+            inputs=[coremltools.TensorType(shape=(3,))],
+            classifier_config=coremltools.ClassifierConfig(['a', 'b', 'c']),
+            convert_to='mlprogram'
+        )
+        spec = model.get_spec()
+        input_name = spec.description.input[0].name
+
+        rename_feature(spec, 'classLabel', 'highestProbClass')
+        model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir)
+        output_class = model.predict({input_name: np.array([1.0, 2.0, 3.0])})['highestProbClass']
+        self.assertEqual(output_class, 'c')
+
+
+if __name__ == "__main__":
+    unittest.main()
+    # suite = unittest.TestSuite()
+    # suite.addTest(MLModelTest('test_multiarray_type_convert_to_float'))
+    # unittest.TextTestRunner().run(suite)
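+
+# For reference, the rename workflow these tests exercise, with hypothetical
+# feature names (for ML program models the weights directory must be carried
+# over when rebuilding the MLModel):
+#
+#     spec = model.get_spec()
+#     rename_feature(spec, "oldName", "newName")
+#     model = MLModel(spec, weights_dir=model.weights_dir)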
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py
new file mode 100644
index 00000000..2d0360b5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import coremltools
+from coremltools.models.utils import (_get_custom_layer_names,
+                                      _replace_custom_layer_name)
+from coremltools.proto import Model_pb2
+
+
+class CustomLayerUtilsTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        spec = Model_pb2.Model()
+        spec.specificationVersion = coremltools.SPECIFICATION_VERSION
+
+        features = ["feature_1", "feature_2"]
+        output = "output"
+        for f in features:
+            input_ = spec.description.input.add()
+            input_.name = f
+            input_.type.doubleType.MergeFromString(b"")
+
+        output_ = spec.description.output.add()
+        output_.name = output
+        output_.type.doubleType.MergeFromString(b"")
+
+        layer = spec.neuralNetwork.layers.add()
+        layer.name = "custom1"
+        layer.input.append("input")
+        layer.output.append("temp1")
+        layer.custom.className = "name1"
+
+        layer2 = spec.neuralNetwork.layers.add()
+        layer2.name = "custom2"
+        layer2.input.append("temp1")
+        layer2.output.append("temp2")
+        layer2.custom.className = "name2"
+
+        layer3 = spec.neuralNetwork.layers.add()
+        layer3.name = "custom3"
+        layer3.input.append("temp2")
+        layer3.output.append("output")
+        layer3.custom.className = "name1"
+
+        cls.spec = spec
+
+    def test_get_custom_names(self):
+        names = _get_custom_layer_names(self.spec)
+        self.assertEqual(names, {"name1", "name2"})
+
+    def test_change_custom_name(self):
+        _replace_custom_layer_name(self.spec, "name1", "notname1")
+        names = _get_custom_layer_names(self.spec)
+        self.assertEqual(names, {"notname1", "name2"})
+        # set it back for future tests
+        _replace_custom_layer_name(self.spec, "notname1", "name1")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py
new file mode 100644
index 00000000..3303e440
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py
@@ -0,0 +1,627 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+import pytest
+import unittest
+
+import coremltools
+from coremltools import ComputeUnit
+from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
+from coremltools.models import MLModel, datatypes
+from coremltools.models.neural_network import NeuralNetworkBuilder
+from coremltools.models.neural_network.quantization_utils import (
+    _convert_array_to_nbit_quantized_bytes, quantize_weights)
+from coremltools.models.utils import _is_macos, _macos_version
+
+MIN_MACOS_VERSION_REQUIRED = (10, 13)
+LAYERS_10_14_MACOS_VERSION = (10, 14)
+LAYERS_10_15_MACOS_VERSION = (10, 15)
+
+
+@unittest.skipIf(
+    not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION,
+    "Only supported on macOS 10.15+",
+)
+class ControlFlowCorrectnessTest(unittest.TestCase):
+    @classmethod
+    def setup_class(cls):
+        pass
+
+    def runTest(self):
+        pass
+
+    def _test_model(self, model, input_dict, output_ref, delta=1e-2):
+        preds = model.predict(input_dict)
+        for name in output_ref:
+            ref_val = output_ref[name]
+            val = preds[name]
+            self.assertTrue(np.allclose(val, ref_val, rtol=delta))
+
+    def test_simple_branch(self):
+        """Test a simple if-else branch network."""
+        input_features = [("data", datatypes.Array(3)), ("cond", datatypes.Array(1))]
+        output_features = [("output", None)]
+
+        builder_top = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        layer = builder_top.add_branch("branch_layer", "cond")
+
+        builder_ifbranch = NeuralNetworkBuilder(
+            input_features=None,
+            output_features=None,
+            spec=None,
+            nn_spec=layer.branch.ifBranch,
+        )
+        builder_ifbranch.add_elementwise(
+            "mult_layer",
+            input_names=["data"],
+            output_name="output",
+            mode="MULTIPLY",
+            alpha=10,
+        )
+        builder_elsebranch = NeuralNetworkBuilder(
+            input_features=None,
+            output_features=None,
+            spec=None,
+            nn_spec=layer.branch.elseBranch,
+        )
+        builder_elsebranch.add_elementwise(
+            "add_layer",
+            input_names=["data"],
+            output_name="output",
+            mode="ADD",
+            alpha=10,
+        )
+        coremltools.models.utils.save_spec(
+            builder_top.spec, "/tmp/simple_branch.mlmodel"
+        )
+        mlmodel = MLModel(builder_top.spec)
+
+        # True branch case
+        input_dict = {
+            "data": np.array(range(1, 4), dtype="float"),
+            "cond": np.array([1], dtype="float"),
+        }
+        output_ref = {"output": input_dict["data"] * 10}
+        self._test_model(mlmodel, input_dict, output_ref)
+
+        # False branch case
+        input_dict["cond"] = np.array([0], dtype="float")
+        output_ref["output"] = input_dict["data"] + 10
+        self._test_model(mlmodel, input_dict, output_ref)
+
+    def test_simple_loop_fixed_iterations(self):
+        input_features = [("data", datatypes.Array(1))]
+        output_features = [("output", None)]
+
+        builder_top = NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder_top.add_copy("copy_1", input_name="data", output_name="output")
+
+        loop_layer = builder_top.add_loop("loop_layer")
+        loop_layer.loop.maxLoopIterations = 5
+        builder_body = NeuralNetworkBuilder(
+            input_features=None,
+            output_features=None,
+            spec=None,
+            nn_spec=loop_layer.loop.bodyNetwork,
+        )
+        builder_body.add_elementwise(
+            "add", input_names=["output"], output_name="x", mode="ADD", alpha=2
+        )
+
+        builder_body.add_copy("copy_2", input_name="x", output_name="output")
+        coremltools.models.utils.save_spec(
+            builder_top.spec, "/tmp/simple_loop_fixed_iterations.mlmodel"
+        )
+        mlmodel = MLModel(builder_top.spec)
+
+        # five fixed iterations, each adding 2: an input of 0 yields 10
+        input_dict = {"data": np.array([0], dtype="float")}
+        output_ref = {"output": np.array([10], dtype="float")}
+        self._test_model(mlmodel, input_dict, output_ref)
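+
+
+# The linear-quantization tests below all rely on the same per-output-channel
+# dequantization rule, W_fp = quant_scale * W_q + quant_bias, while the LUT
+# variants instead index quant_lut with the stored code. A worked example with
+# hypothetical values:
+#
+#     import numpy as np
+#     w_q = np.array([1, 2], dtype=np.uint8)  # quantized weights
+#     4.0 * w_q + (-2.0)                      # dequantized -> [2.0, 6.0]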
"/tmp/simple_loop_fixed_iterations.mlmodel" + ) + mlmodel = MLModel(builder_top.spec) + + # True branch case + input_dict = {"data": np.array([0], dtype="float")} + output_ref = {"output": np.array([10], dtype="float")} + self._test_model(mlmodel, input_dict, output_ref) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_10_14_MACOS_VERSION, + "Only supported on macOS 10.14+", +) +class BasicNumericCorrectnessTest_1014NewLayers(unittest.TestCase): + def build_quant_conv_layer( + self, + W=None, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + output_channels=2, + ): + input_features = [("data", datatypes.Array(1, 2, 2))] + output_features = [("out", datatypes.Array(2, 1, 1))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_convolution( + name="conv", + kernel_channels=1, + output_channels=output_channels, + height=2, + width=2, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="out", + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + return MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + + def test_linear_quant_convolution_8bit(self): + W = np.ones((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 1] = 2 + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[4.0], + quant_bias=[-2.0], + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([8, 24]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_convolution_8bit_vector_scalebias(self): + W = np.ones((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 1] = 2 + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[4.0, 5.0], + quant_bias=[-2.0, 1.0], + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([8, 44]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_convolution_8bit_float_scale_and_bias(self): + W = np.array(([[[[1, 248], [248, 248]]]]), dtype=np.uint8) + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[15], + quant_bias=[-3913], + output_channels=1, + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + # Output should be equal to: (scale*(1+248+248+248)+(4*bias)) + expected_out = np.reshape(np.array([-4477]), (1, 1, 1, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_lut_quant_convolution_2bit(self): + W = np.zeros((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 0] = 0 + W[:, :, :, 1] = 2 + W = _convert_array_to_nbit_quantized_bytes(W.flatten(), 2).tobytes() + mlmodel = self.build_quant_conv_layer( + W=W, quantization_type="lut", nbits=2, quant_lut=[10.0, 11.0, -3.0, -1.0] + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([40, -12]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_inner_product_3bit(self): + pytest.xfail("rdar://101370330 ([CI] nnv1 model compression tests are failing after roots is updated)") + W = 
np.reshape(np.arange(6), (2, 3)).astype(np.uint8) + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 3).tobytes(), + b=None, + input_channels=3, + output_channels=2, + has_bias=False, + input_name="data", + output_name="probs", + quantization_type="linear", + nbits=3, + quant_scale=[11.0, 2.0], + quant_bias=[-2.0, 10.0], + ) + mlmodel = MLModel(builder.spec) + data = np.array([1.0, 3.0, 5.0]) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + expected_out = np.array([125, 170]) + self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten())) + + def test_lut_quant_inner_product_1bit(self): + pytest.xfail("rdar://101370330 ([CI] nnv1 model compression tests are failing after roots is updated)") + W = np.zeros((2, 3), dtype=np.uint8) + W[0, :] = [0, 1, 1] + W[1, :] = [1, 0, 0] + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 1).tobytes(), + b=None, + input_channels=3, + output_channels=2, + has_bias=False, + input_name="data", + output_name="probs", + quantization_type="lut", + nbits=1, + quant_lut=[5.0, -3.0], + ) + mlmodel = MLModel(builder.spec) + data = np.array([1.0, 3.0, 5.0]) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + expected_out = np.array([-19, 37]) + self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten())) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_10_15_MACOS_VERSION, + "Only supported on macOS 10.15+", +) +class BasicNumericCorrectnessTest_1015NewLayers(unittest.TestCase): + def test_linear_quant_batchedmatmul_5bit(self): + W = np.zeros((2, 3), dtype=np.uint8) + W[0, :] = [31, 20, 11] + W[1, :] = [1, 0, 8] + quant_scale = np.reshape(np.array([10.0, 2.0, 3.0]), (1, 3)) + quant_bias = np.reshape(np.array([-2.0, -10.0, 6.0]), (1, 3)) + W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to( + quant_bias, (2, 3) + ) + bias = np.array([1.0, 2.0, 3.0]) + + input_features = [("data", datatypes.Array(2, 2))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="out", + weight_matrix_rows=2, + weight_matrix_columns=3, + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 5).tobytes(), + bias=bias, + is_quantized_weight=True, + quantization_type="linear", + nbits=5, + quant_scale=quant_scale.flatten(), + quant_bias=quant_bias.flatten(), + ) + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.zeros((2, 2), dtype=np.float32) + data[0, :] = [5, 6] + data[1, :] = [10, 12] + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.matmul(data, W_unquantized) + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + def test_linear_quant_batchedmatmul_8bit(self): + np.random.seed(1988) + W = np.random.rand(32, 32) * 2.0 - 1 + bias = np.random.rand(32) + + input_features = [("data", datatypes.Array(2, 32))] + output_features = [("out", None)] + builder = 
NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="out", + weight_matrix_rows=32, + weight_matrix_columns=32, + W=W, + bias=bias, + ) + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + q_mlmodel = quantize_weights(mlmodel, 8) + q_spec = q_mlmodel.get_spec() + q_layer = q_spec.neuralNetwork.layers[0].batchedMatmul + + self.assertTrue(len(q_layer.weights.floatValue) == 0) + self.assertTrue(len(q_layer.weights.rawValue) > 0) + + data = np.random.rand(2, 32) + data_dict = {"data": data} + out = q_mlmodel.predict(data_dict)["out"] + expected_out = np.matmul(data, W) + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten(), atol=0.1)) + + def test_lut_quant_embedding_nd_2bit(self): + embed_size = 2 + vocab_size = 3 + W = np.zeros((embed_size, vocab_size), dtype=np.uint8) + W[:, 0] = [1, 0] + W[:, 1] = [0, 1] + W[:, 2] = [3, 2] + bias = np.array([1.0, 2.0]) + quant_lut = np.array([34.0, 12.0, -6.0, 6.0]) + + input_features = [("data", datatypes.Array(4, 1))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="out", + vocab_size=vocab_size, + embedding_size=embed_size, + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 2).tobytes(), + b=bias, + is_quantized_weight=True, + quantization_type="lut", + nbits=2, + quant_lut=quant_lut, + ) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.zeros((4, embed_size), dtype=np.float32) + expected_out[0, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias + expected_out[1, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias + expected_out[2, :] = [quant_lut[W[0, 1]], quant_lut[W[1, 1]]] + bias + expected_out[3, :] = [quant_lut[W[0, 0]], quant_lut[W[1, 0]]] + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + + def test_linear_quant_embedding_7bit(self): + embed_size = 2 + vocab_size = 3 + W = np.zeros((embed_size, vocab_size), dtype=np.uint8) + W[:, 0] = [100, 127] + W[:, 1] = [20, 40] + W[:, 2] = [90, 1] + quant_scale = np.reshape(np.array([10.0, 2.0]), (2, 1)) + quant_bias = np.reshape(np.array([-2.0, -10.0]), (2, 1)) + W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to( + quant_bias, (2, 3) + ) + bias = np.reshape(np.array([1.0, 2.0]), (2, 1)) + W_unquantized = W_unquantized + np.broadcast_to(bias, (2, 3)) + + input_features = [("data", datatypes.Array(4, 1, 1, 1))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_embedding( + name="embed", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 7).tobytes(), + b=bias, + input_dim=vocab_size, + output_channels=embed_size, + has_bias=True, + input_name="data", + output_name="out", + is_quantized_weight=True, + quantization_type="linear", + nbits=7, + quant_scale=np_val_to_py_type(quant_scale), + quant_bias=np_val_to_py_type(quant_bias), + ) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = 
np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1, 1, 1)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + self.assertTrue(out.shape == (4, embed_size, 1, 1)) + expected_out = np.zeros((4, embed_size), dtype=np.float32) + expected_out[0, :] = W_unquantized[:, 2].flatten() + expected_out[1, :] = W_unquantized[:, 2].flatten() + expected_out[2, :] = W_unquantized[:, 1].flatten() + expected_out[3, :] = W_unquantized[:, 0].flatten() + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < (10, 13), "Only supported on macOS 10.13+" +) +class BasicNumericCorrectnessTest(unittest.TestCase): + def _build_nn_with_one_ip_layer(self): + input_features = [("data", datatypes.Array(3))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + w = np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=w, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + return builder + + def test_undefined_shape_single_output(self): + W = np.ones((3, 3)) + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=W, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="data", + output_name="probs", + ) + mlmodel = MLModel(builder.spec) + data = np.ones((3,)) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + self.assertTrue(np.allclose(probs, np.ones(3) * 3)) + + def test_set_input(self): + builder = self._build_nn_with_one_ip_layer() + builder.set_input(input_names=["data_renamed"], input_dims=[(2,)]) + + self.assertEqual( + builder.spec.description.input[0].type.multiArrayType.shape[0], 2 + ) + self.assertEqual(builder.spec.description.input[0].name, "data_renamed") + + def test_set_input_fail(self): + builder = self._build_nn_with_one_ip_layer() + + # fails since input_names and input_dims do not have same size + with self.assertRaises(ValueError): + builder.set_input(input_names=["data_1", "data_2"], input_dims=[(3,)]) + + def test_set_output(self): + builder = self._build_nn_with_one_ip_layer() + builder.set_output(output_names=["out_renamed"], output_dims=[(2,)]) + + self.assertEqual( + builder.spec.description.output[0].type.multiArrayType.shape[0], 2 + ) + self.assertEqual(builder.spec.description.output[0].name, "out_renamed") + + def test_set_output_fail(self): + builder = self._build_nn_with_one_ip_layer() + + # fails since output_names and output_dims do not have same size + with self.assertRaises(ValueError): + builder.set_output(output_names=["out_1", "out_2"], output_dims=[(3,)]) + + def test_invalid_image_preprocessing_params(self): + builder = self._build_nn_with_one_ip_layer() + image_input_names = ["input1", "input2"] + with self.assertRaises(ValueError): + image_scale = {"invalid": 1.0 / 255.0} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, image_scale=image_scale + ) + with self.assertRaises(ValueError): + red_bias = {"invalid": -1} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, red_bias=red_bias + ) + with self.assertRaises(ValueError): + blue_bias = {"invalid": -1} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, 
blue_bias=blue_bias
+            )
+        with self.assertRaises(ValueError):
+            green_bias = {"invalid": -1}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, green_bias=green_bias
+            )
+        with self.assertRaises(ValueError):
+            gray_bias = {"invalid": -1}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, gray_bias=gray_bias
+            )
+        with self.assertRaises(ValueError):
+            is_bgr = {"invalid": False}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, is_bgr=is_bgr
+            )
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+class UseFloatArraytypeTest(unittest.TestCase):
+    """Test that the boolean flag `use_float_arraytype` correctly changes the
+    datatype of the network's inputs and outputs and produces a spec that the
+    `MLModel` class can call `predict` with.
+    """
+
+    def _test_use_float_array_helper(self, use_float_arraytype):
+        input_features = [("data", datatypes.Array(3))]
+        output_features = [("probs", None)]
+        builder = NeuralNetworkBuilder(
+            input_features=input_features,
+            output_features=output_features,
+            use_float_arraytype=use_float_arraytype,
+        )
+        weights = np.ones((3, 3))
+        builder.add_inner_product(
+            name="ip1",
+            W=weights,
+            b=None,
+            input_channels=3,
+            output_channels=3,
+            has_bias=False,
+            input_name="data",
+            output_name="probs",
+        )
+        spec = builder.spec
+        array_feature_type = (
+            coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.FLOAT32
+            if use_float_arraytype
+            else coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.DOUBLE
+        )
+        for input in spec.description.input:
+            self.assertEqual(input.type.multiArrayType.dataType, array_feature_type)
+        for output in spec.description.output:
+            self.assertEqual(output.type.multiArrayType.dataType, array_feature_type)
+
+        # Assert that the generated spec is functional
+        mlmodel = MLModel(spec)
+        data = np.ones((3,))
+        data_dict = {"data": data}
+        try:
+            predictions = mlmodel.predict(data_dict)
+        except Exception as e:
+            self.fail(e)
+        self.assertTrue(np.allclose(predictions["probs"], np.ones(3) * 3))
+
+    def test_true_use_float_array(self):
+        # Instruct the builder to use the Float32 datatype for inputs and outputs
+        self._test_use_float_array_helper(True)
+
+    def test_false_use_float_array(self):
+        # Instruct the builder to use its default Double datatype for inputs and outputs
+        self._test_use_float_array_helper(False)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py
new file mode 100644
index 00000000..bca34e14
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py
@@ -0,0 +1,7086 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import math +import os +import platform +import random +import tempfile +import unittest + +import numpy as np +import pytest + +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND + +if _HAS_TF_2: + import tensorflow as tf + +import torch + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools.converters.mil.mil.ops.defs._utils import aggregated_pad +from coremltools.models import (_MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, neural_network) +from coremltools.models.neural_network import flexible_shape_utils +from coremltools.models.utils import _MODEL_FILE_NAME, _is_macos, _macos_version + +np.random.seed(10) + +MIN_MACOS_VERSION_REQUIRED = (10, 13) +LAYERS_10_15_MACOS_VERSION = (10, 15) +LAYERS_11_0_MACOS_VERSION = (11, 0) + + +def _get_unary_model_spec(x, mode, alpha=1.0): + input_dim = x.shape + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + builder.add_unary( + name="unary", input_name="data", output_name="output", mode=mode, alpha=alpha + ) + return builder.spec + + +class CorrectnessTest(unittest.TestCase): + def runTest(self): + pass + + def _compare_shapes(self, np_preds, coreml_preds): + return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape + + def _test_shape_equality(self, np_preds, coreml_preds): + np.testing.assert_array_equal( + np.squeeze(coreml_preds).shape, np.squeeze(np_preds).shape + ) + + def _test_nd_shape_equality(self, np_preds, coreml_preds, shape=()): + if shape: + np.testing.assert_array_equal(coreml_preds.shape, shape) + else: + # check if shape has 0 valued dimension + if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0: + return + np.testing.assert_array_equal(coreml_preds.shape, np_preds.shape) + + def _compare_predictions(self, np_preds, coreml_preds, delta=0.01): + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + max_arr = np.maximum(np.maximum(np_preds, coreml_preds), 1.0) + all_deltas = np.abs(np_preds / max_arr - coreml_preds / max_arr) + max_delta = np.amax(all_deltas) + if max_delta > delta: + return False + return True + + def _test_predictions( + self, + np_preds, + coreml_preds, + delta=0.01, + test_metric="rel_error", + SNR=30, + PSNR=40, + ): + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + if test_metric == "rel_error": + max_arr = np.maximum(np.abs(np_preds), 1.0) + all_deltas = np.abs(np_preds / max_arr - coreml_preds / max_arr) + max_delta = np.amax(all_deltas, initial=0) + self.assertLessEqual( + max_delta, + delta, + "Expected %s to be within %s of %s" % (coreml_preds, delta, np_preds), + ) + elif test_metric == "SNR": + noise = np_preds - coreml_preds + noise_var = np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = np.sum(np_preds ** 2) / len(np_preds) + max_signal_energy = np.amax(np_preds ** 2) + snr = 10 * np.log10(signal_energy / noise_var) + psnr = 10 * np.log10(max_signal_energy / noise_var) + self.assertGreaterEqual(snr, SNR) + self.assertGreaterEqual(psnr, PSNR) + else: + raise ValueError("Test metric not supported") + + @staticmethod + def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10): + """ + 
This utility function is used for validate random distributions layers. + It validates the first 10 moments of prediction and expected values. + """ + + def get_moment(data, k): + return np.mean(np.power(data - np.mean(data), k)) + + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + if use_cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + + model = coremltools.models.MLModel(model, compute_units=compute_unit) + prediction = model.predict(inputs) + + for output_name in expected: + np_preds = expected[output_name] + coreml_preds = prediction[output_name] + + np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)] + coreml_moments = [ + get_moment(coreml_preds.flatten(), k) for k in range(num_moments) + ] + + np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2) + + # override expected values to allow element-wise compares + for output_name in expected: + expected[output_name] = prediction[output_name] + + def _test_model( + self, + model, + input, + expected, + model_precision=_MLMODEL_FULL_PRECISION, + useCPUOnly=False, + output_name_shape_dict={}, + validate_shapes_only=False, + test_metric="rel_error", + delta=0.01, + SNR=30, + ): + + if useCPUOnly: + compute_unit = ComputeUnit.CPU_ONLY + else: + compute_unit = ComputeUnit.ALL + + # if we're given a path to a model + if isinstance(model, str): + model = coremltools.models.MLModel(model, compute_units=compute_unit) + # If we're passed in a specification, save out the model and then load it back. + elif isinstance(model, coremltools.proto.Model_pb2.Model): + tmp_model_file = tempfile.NamedTemporaryFile(suffix=_MODEL_FILE_NAME) + coremltools.utils.save_spec(model, tmp_model_file.name) + model = coremltools.models.MLModel( + tmp_model_file.name, compute_units=compute_unit + ) + + # If we want to test the half precision case + if model_precision == _MLMODEL_HALF_PRECISION: + model = coremltools.utils._convert_neural_network_weights_to_fp16(model) + + prediction = model.predict(input) + for output_name in expected: + if self.__class__.__name__ == "SimpleTest": + self._test_shape_equality( + expected[output_name], prediction[output_name] + ) + else: + if output_name in output_name_shape_dict: + output_shape = output_name_shape_dict[output_name] + else: + output_shape = [] + + if len(output_shape) == 0 and len(expected[output_name].shape) == 0: + output_shape = (1,) + + self._test_nd_shape_equality( + expected[output_name], prediction[output_name], output_shape + ) + + if not validate_shapes_only: + self._test_predictions( + expected[output_name], + prediction[output_name], + delta=delta, + test_metric=test_metric, + SNR=SNR, + ) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < MIN_MACOS_VERSION_REQUIRED, + "macOS 10.13+ is required. 
Skipping tests.", +) +class SimpleTest(CorrectnessTest): + def test_tiny_upsample_linear_mode(self): + input_dim = (1, 1, 3) # (C,H,W) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_upsample( + name="upsample", + scaling_factor_h=2, + scaling_factor_w=3, + input_name="data", + output_name="output", + mode="BILINEAR", + ) + + input = {"data": np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))} + expected = { + "output": np.array( + [ + [1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3], + [1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3], + ] + ) + } + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_LRN(self): + input_dim = (1, 3, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="output", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + + input = {"data": np.ones((1, 3, 3))} + expected = {"output": 1e-3 * np.ones((1, 3, 3))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_MVN(self): + input_dim = (2, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_mvn( + name="mvn", + input_name="data", + output_name="output", + across_channels=False, + normalize_variance=False, + ) + + input = {"data": np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))} + expected = { + "output": np.reshape( + np.arange(8) - np.array([1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), + (2, 2, 2), + ) + } + + self._test_model(builder.spec, input, expected) + + def test_L2_normalize(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_l2_normalize(name="mvn", input_name="data", output_name="output") + + input = {"data": np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))} + expected = { + "output": np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + / np.sqrt(14) + } + + self._test_model(builder.spec, input, expected) + + def test_unary_sqrt(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.sqrt(x)} + spec = _get_unary_model_spec(x, "sqrt") + self._test_model(spec, input, expected) + + def test_unary_rsqrt(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 1 / np.sqrt(x)} + spec = _get_unary_model_spec(x, "rsqrt") + self._test_model(spec, input, expected) + + def test_unary_inverse(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 1 / x} + spec = _get_unary_model_spec(x, "inverse") + self._test_model(spec, input, expected) + + def test_unary_power(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x ** 3} + spec = _get_unary_model_spec(x, "power", 3) + self._test_model(spec, input, 
expected) + + def test_unary_exp(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.exp(x)} + spec = _get_unary_model_spec(x, "exp") + self._test_model(spec, input, expected) + + def test_unary_log(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.log(x)} + spec = _get_unary_model_spec(x, "log") + self._test_model(spec, input, expected) + + def test_unary_abs(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.abs(x)} + spec = _get_unary_model_spec(x, "abs") + self._test_model(spec, input, expected) + + def test_unary_threshold(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.maximum(x, 2)} + spec = _get_unary_model_spec(x, "threshold", 2) + self._test_model(spec, input, expected) + + def test_split(self): + input_dim = (9, 2, 2) + x = np.random.rand(*input_dim) + + input_features = [("data", datatypes.Array(*input_dim))] + output_names = [] + output_features = [] + for i in range(3): + out = "out_" + str(i) + output_names.append(out) + output_features.append((out, None)) + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_split(name="split", input_name="data", output_names=output_names) + + input = {"data": x} + expected = {"out_0": x[0:3, :, :], "out_1": x[3:6, :, :], "out_2": x[6:9, :, :]} + + self._test_model(builder.spec, input, expected) + for output_ in output_names: + self.assertEqual(len(input_dim), builder._get_rank(output_)) + + def test_scale_constant(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_scale( + name="scale", + W=5, + b=45, + has_bias=True, + input_name="data", + output_name="output", + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 5 * x + 45} + + self._test_model(builder.spec, input, expected) + + def test_scale_matrix(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_scale( + name="scale", + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="output", + shape_scale=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": W * x} + + self._test_model(builder.spec, input, expected) + + def test_bias_constant(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_bias(name="bias", b=45, input_name="data", output_name="output") + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + 45} + + self._test_model(builder.spec, input, expected) + + def test_bias_matrix(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = 
np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_bias( + name="bias", + b=b, + input_name="data", + output_name="output", + shape_bias=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected) + + def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_load_constant( + name="load_constant", output_name="bias", constant_value=b, shape=[1, 2, 2] + ) + builder.add_elementwise( + name="add", input_names=["data", "bias"], output_name="output", mode="ADD" + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected, model_precision) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_load_constant_half_precision(self): + self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION) + + def test_min(self): + input_dim = (1, 2, 2) + input_features = [ + ("data_0", datatypes.Array(*input_dim)), + ("data_1", datatypes.Array(*input_dim)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + builder.add_elementwise( + name="min", + input_names=["data_0", "data_1"], + output_name="output", + mode="MIN", + ) + x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2)) + + input = {"data_0": x1, "data_1": x2} + expected = {"output": np.minimum(x1, x2)} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_conv_same_padding(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.random.rand(3, 3, 10, 20) + + builder.add_convolution( + name="conv", + kernel_channels=10, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="same", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="output", + same_padding_asymmetry_mode="TOP_LEFT_HEAVY", + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 8, 8)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_deconv_valid_padding(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.random.rand(3, 3, 10, 20) + + builder.add_convolution( + name="deconv", + kernel_channels=10, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + is_deconv=True, + input_name="data", + output_name="output", + padding_top=2, + padding_bottom=3, + padding_left=2, + padding_right=3, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 26, 
26)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + + def test_deconv_non_unit_groups(self): + input_dim = (16, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + W = np.random.rand(3, 3, 16, 5) + builder.add_convolution( + name="deconv", + kernel_channels=16, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="valid", + groups=4, + W=W, + b=None, + has_bias=False, + is_deconv=True, + input_name="data", + output_name="output", + padding_top=2, + padding_bottom=3, + padding_left=2, + padding_right=3, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 26, 26)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + + def test_linear_activation(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected) + + def test_padding_constant(self): + input_dim = (1, 2, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_padding( + name="pad", + left=1, + right=0, + top=2, + bottom=0, + value=-1, + input_name="data", + output_name="output", + ) + + x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(np.float32) + input = {"data": x} + y = np.reshape( + np.array( + [[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3], [-1, 4, 5, 6]] + ), + (1, 4, 4), + ).astype(np.float32) + expected = {"output": y} + + self._test_model(builder.spec, input, expected) + + def test_padding_replication(self): + input_dim = (1, 2, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_padding( + name="pad", + left=1, + top=2, + input_name="data", + output_name="output", + padding_type="replication", + ) + + x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(np.float32) + input = {"data": x} + y = np.reshape( + np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 5, 6]]), + (1, 4, 4), + ).astype(np.float32) + expected = {"output": y} + + self._test_model(builder.spec, input, expected) + + def test_reshape_target_shape_3(self): + input_dim = (1, 2, 5) # (C,H,W) + target_dim = (10, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reshape( + name="reshape", + input_name="data", + output_name="output", + target_shape=target_dim, + mode=0, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.reshape(x, (10, 1, 1))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(target_dim), builder._get_rank("output")) + + def test_reshape_target_shape_4(self): + input_dim = (1, 2, 
5) # (C,H,W) + target_dim = (1, 10, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reshape( + name="reshape", + input_name="data", + output_name="output", + target_shape=target_dim, + mode=0, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.reshape(x, (1, 10, 1, 1))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(target_dim), builder._get_rank("output")) + + def test_bias_matrix_cpu(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_bias( + name="bias", + b=b, + input_name="data", + output_name="output", + shape_bias=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_linear_activation_cpu(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION, + "macOS 10.15+ required. 
Skipping tests.", +) +class NewLayersSimpleTest(CorrectnessTest): + def test_shape_flexibility_range(self): + + input_features = [("data", datatypes.Array(*(3, 4)))] + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_sin(name="sin", input_name="data", output_name="output") + spec = builder.spec + + flexible_shape_utils.set_multiarray_ndshape_range( + spec, feature_name="data", lower_bounds=[1, 1], upper_bounds=[-1, 5] + ) + + shapes = [(3, 4), (1, 5), (60, 5), (22, 4), (5, 3)] + for s in shapes: + x = np.random.rand(*s) + expected = {"output": np.sin(x)} + self._test_model(spec, {"data": x}, expected, useCPUOnly=True) + + def test_shape_flexibility_enumeration(self, rank=4): + default_shape = tuple(np.random.randint(1, 15, size=rank)) + input_features = [("data", datatypes.Array(*default_shape))] + builder = neural_network.NeuralNetworkBuilder( + input_features=input_features, + output_features=[("output", None)], + disable_rank5_shape_mapping=True, + ) + builder.add_sin(name="sin", input_name="data", output_name="output") + spec = builder.spec + + shapes = [ + tuple(np.random.randint(1, 15, size=rank)), + tuple(np.random.randint(1, 15, size=rank)), + ] + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, feature_name="data", enumerated_shapes=shapes + ) + + shapes.append(default_shape) + for s in shapes: + x = np.random.rand(*s) + expected = {"output": np.sin(x)} + self._test_model(spec, {"data": x}, expected, useCPUOnly=True) + + def test_shape_flexibility_enumeration_rank3(self): + self.test_shape_flexibility_enumeration(rank=3) + + def test_shape_flexibility_enumeration_rank2(self): + self.test_shape_flexibility_enumeration(rank=2) + + def test_transpose_cpu(self): + for rank in range(1, 6): + axes = np.random.permutation(rank) + axes = [ + axis - rank if np.random.choice([True, False]) else axis + for axis in axes + ] + input_shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_transpose( + name="TransposeND", axes=axes, input_name="data", output_name="output" + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.transpose(x, axes)} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_dynamic_weight_conv(self): + + input_dim = (1, 3, 16, 16) + # weight layout: (output_channels, kernel_channels, height, width) + weight_dim = (4, 3, 3, 3) + output_dim = (1, 4, 14, 14) + + kernel_channels = input_dim[0] + output_channels, kernel_channels, height, width = weight_dim + + input_features = [ + ("input", datatypes.Array(*input_dim)), + ("weight", datatypes.Array(*weight_dim)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_convolution( + name="two_input_conv_layer", + kernel_channels=kernel_channels, + output_channels=output_channels, + height=height, + width=width, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=None, + b=None, + has_bias=False, + input_name=["input", "weight"], + output_name="output", + ) + + # Assigning everything to ones should cover the execution path + # and engine failures, but is not a complete check on 
numerics. + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + expected = np.ones(output_dim) * 27 + + feed_dict = {"input": input_val, "weight": weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True) + self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False) + + def test_batched_mat_mul_cpu(self, cpu_only=True): + a_shapes = [ + (10,), + (4, 10), + (10,), + (10,), + (2, 3), + (1, 3, 4), + (1, 3, 1, 2, 3), + (2, 3, 1, 3, 4), + ] + b_shapes = [ + (10,), + (10,), + (10, 3), + (2, 10, 3), + (3, 4), + (3, 2, 4, 5), + (1, 4, 3, 2), + (2, 1, 2, 4, 5), + ] + out_shapes = [ + (1, 1), + (4, 1), + (1, 3), + (2, 1, 3), + (2, 4), + (3, 2, 3, 5), + (1, 3, 4, 2, 2), + (2, 3, 2, 3, 5), + ] + + for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes): + input_shapes = [a_shape, b_shape] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_batched_mat_mul( + name="batched_mat_mul", + input_names=["A", "B"], + output_name="output", + transpose_a=False, + transpose_b=False, + ) + + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + input_ = {"A": a, "B": b} + expected = {"output": np.array(np.matmul(a, b))} + shape_dict = {"output": outShape} + self._test_model( + builder.spec, + input_, + expected, + useCPUOnly=cpu_only, + output_name_shape_dict=shape_dict, + ) + self.assertEqual(len(outShape), builder._get_rank("output")) + + def test_batched_mat_mul_gpu(self): + self.test_batched_mat_mul_cpu(cpu_only=False) + + def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True): + for transpose_a, transpose_b in itertools.product([True, False], [True, False]): + a_shape = (3, 4) + b_shape = (4, 5) + a_shape = a_shape[::-1] if transpose_a else a_shape + b_shape = b_shape[::-1] if transpose_b else b_shape + input_shapes = [a_shape, b_shape] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="BatchedMatMul", + input_names=["A", "B"], + output_name="output", + transpose_a=transpose_a, + transpose_b=transpose_b, + ) + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + inputs = {"A": a, "B": b} + a = a.T if transpose_a else a + b = b.T if transpose_b else b + expected = {"output": np.matmul(a, b)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_batched_mat_mul_with_transposes_gpu(self): + self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False) + + def test_batched_mat_mul_single_input_cpu( + self, model_precision=_MLMODEL_FULL_PRECISION, cpu_only=True + ): + X1 = 11 + X2 = 23 + W = np.random.rand(X1, X2) + bias = np.random.rand(X2) + input_shapes = [ + (X1,), + (5, X1), + (2, 3, X1), + (4, 1, X1), + (12, 5, 8, X1), + (2, 3, 1, 5, X1), + ] + for input_shape in input_shapes: + x = np.random.rand(*input_shape) + np_out = np.matmul(x, W) + bias + expected = {"output": np_out} + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = 
neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_batched_mat_mul( + name="batched_mat_mul", + input_names=["data"], + output_name="output", + weight_matrix_rows=X1, + weight_matrix_columns=X2, + W=W, + bias=bias, + ) + inputs = {"data": x} + + self._test_model( + builder.spec, + inputs, + expected, + model_precision=model_precision, + useCPUOnly=cpu_only, + ) + + def test_batched_mat_mul_single_input_half_precision_cpu(self): + self.test_batched_mat_mul_single_input_cpu( + model_precision=_MLMODEL_HALF_PRECISION, cpu_only=True + ) + + def test_batched_mat_mul_single_input_gpu(self): + self.test_batched_mat_mul_single_input_cpu( + model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False + ) + + def test_embedding_nd_cpu( + self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True + ): + vocab_size = 10 + embedding_size = 19 + W = np.random.rand(embedding_size, vocab_size) + input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1), (2, 3, 1, 5, 1)] + for input_shape in input_shapes: + x = np.random.randint(vocab_size, size=input_shape) + + np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0) + expected = {"output": np_out} + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="output", + vocab_size=vocab_size, + embedding_size=embedding_size, + W=W, + ) + + input = {"data": x.astype(np.float32)} + + self._test_model( + builder.spec, + input, + expected, + model_precision=model_precision, + useCPUOnly=use_cpu_only, + ) + + def test_embedding_nd_half_precision_cpu(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True + ) + + def test_embedding_nd_GPU(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False + ) + + def test_embedding_nd_half_precision_GPU(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False + ) + + def test_softmax_nan_bug_cpu(self): + input_shape = [2, 2] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + for axis in [0, 1]: + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_softmax_nd( + name="softmax_nd", input_name="data", output_name="output", axis=axis + ) + + x = np.array([[0.5, 0.5], [1e8, 1e8]]) + input = {"data": x} + y = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = y / np.sum(y, axis=axis, keepdims=True) + expected = {"output": y} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_softmax_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + input_shape = np.random.randint(low=2, high=5, size=rank) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_softmax_nd( + name="softmax_nd", + input_name="data", + output_name="output", + axis=axis, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + y = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = y / np.sum(y, axis=axis, 
keepdims=True) + expected = {"output": y} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_softmax_nd_gpu(self): + self.test_softmax_nd_cpu(cpu_only=False) + + def test_concat_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_inputs = np.random.choice(range(2, 5)) + output_shape = np.random.randint(low=2, high=5, size=rank) + output_shape[axis] = 0 + input_shapes = [] + input_features = [] + input_names = [] + for _ in range(n_inputs): + input_shapes.append(np.copy(output_shape)) + input_shapes[-1][axis] = np.random.choice(range(2, 8)) + output_shape[axis] += input_shapes[-1][axis] + for i, input_dim in enumerate(input_shapes): + input_name = "input_%s" % str(i) + input_names.append(input_name) + input_features.append((input_name, datatypes.Array(*input_dim))) + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_concat_nd( + name="concat_nd", + input_names=input_names, + output_name="output", + axis=axis, + ) + + input_tensors = [] + for input_dim in input_shapes: + input_tensors.append(np.random.rand(*input_dim)) + input = dict(zip(input_names, input_tensors)) + expected = {"output": np.concatenate(input_tensors, axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_concat_nd_gpu(self): + self.test_concat_nd_cpu(cpu_only=False) + + def test_fill_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + target_shape = np.random.randint(low=2, high=6, size=rank) + value = float(np.random.rand()) + + input_features = [("tensor", datatypes.Array(*target_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_fill_like( + name="fill_like", input_name="tensor", output_name="output", value=value + ) + + tensor = np.random.rand(*target_shape) + input = {"tensor": tensor} + expected = {"output": np.zeros(target_shape) + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_fill_like_gpu(self): + self.test_fill_like_cpu(cpu_only=False) + + def test_fill_static_cpu(self, cpu_only=True): + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + value = float(np.random.rand()) + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_fill_static( + name="fill_static", + output_name="tmp", + output_shape=list(shape), + value=value, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.random.rand(*shape) + input = {"data": data} + expected = {"output": data + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(shape), builder._get_rank("output")) + + def test_fill_static_gpu(self): + self.test_fill_static_cpu(cpu_only=False) + + def test_fill_dynamic_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + value = float(np.random.rand()) + + input_features = [("shape", datatypes.Array(len(input_shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_fill_dynamic( + name="fill_dynamic", + input_name="shape", + 
output_name="output", + value=value, + ) + + input = {"shape": np.array(input_shape, dtype="float")} + expected = {"output": np.zeros(input_shape) + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(builder._get_rank("output"), -1) + + def test_fill_dynamic_gpu(self): + self.test_fill_dynamic_cpu(cpu_only=False) + + def test_broadcast_to_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("tensor", datatypes.Array(*target_shape)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_like( + name="broadcast_to_like", + input_names=["data", "tensor"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + tensor = np.random.rand(*target_shape) + inputs = {"data": data, "tensor": tensor} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_broadcast_to_like_gpu(self): + self.test_broadcast_to_like_cpu(cpu_only=False) + + def test_broadcast_to_static_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_static( + name="broadcast_to_static", + input_name="data", + output_name="output", + output_shape=list(target_shape), + ) + + data = np.random.rand(*input_shape) + input = {"data": data} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(target_rank, builder._get_rank("output")) + + def test_broadcast_to_static_gpu(self): + self.test_broadcast_to_static_cpu(cpu_only=False) + + def test_broadcast_to_dynamic_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_dynamic( + 
name="broadcast_to_dynamic", + input_names=["data", "shape"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data, "shape": np.array(target_shape, dtype="float")} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(builder._get_rank("output"), -1) + + def test_broadcast_to_dynamic_gpu(self): + self.test_broadcast_to_dynamic_cpu(cpu_only=False) + + # Test Rank being set to unknown when one of the input rank is unknown + # For max rank case + def test_unknown_rank(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("x", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_dynamic( + name="broadcast_to_dynamic", input_names=["x", "shape"], output_name="y" + ) + + condition = np.random.randint(0, 2, input_shape).astype(np.float32) + builder.add_load_constant_nd( + name="load_constant_condition", + output_name="condition", + constant_value=condition, + shape=input_shape, + ) + + builder.add_where_broadcastable( + name="where", input_names=["condition", "x", "y"], output_name="output" + ) + + self.assertEqual(builder._get_rank("output"), -1) + + def test_trigonometry_cpu(self, cpu_only=True): + + ops = [ + "sin", + "cos", + "tan", + "asin", + "acos", + "atan", + "sinh", + "cosh", + "tanh", + "asinh", + "acosh", + "atanh", + ] + + for op in ops: + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + x = np.random.rand(*shape) + + if op == "sin": + builder.add_sin(name=op, input_name="data", output_name="output") + expected = {"output": np.sin(x)} + elif op == "cos": + builder.add_cos(name=op, input_name="data", output_name="output") + expected = {"output": np.cos(x)} + elif op == "tan": + builder.add_tan(name=op, input_name="data", output_name="output") + expected = {"output": np.tan(x)} + elif op == "asin": + builder.add_asin(name=op, input_name="data", output_name="output") + expected = {"output": np.arcsin(x)} + elif op == "acos": + builder.add_acos(name=op, input_name="data", output_name="output") + expected = {"output": np.arccos(x)} + elif op == "atan": + builder.add_atan(name=op, input_name="data", output_name="output") + expected = {"output": np.arctan(x)} + elif op == "sinh": + builder.add_sinh(name=op, input_name="data", output_name="output") + expected = {"output": np.sinh(x)} + elif op == "cosh": + builder.add_cosh(name=op, input_name="data", output_name="output") + expected = {"output": np.cosh(x)} + elif op == "tanh": + builder.add_tanh(name=op, input_name="data", output_name="output") + expected = {"output": np.tanh(x)} + elif op == "asinh": + builder.add_asinh(name=op, input_name="data", output_name="output") + expected = {"output": 
np.arcsinh(x)} + elif op == "acosh": + x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32) + builder.add_acosh(name=op, input_name="data", output_name="output") + expected = {"output": np.arccosh(x)} + elif op == "atanh": + builder.add_atanh(name=op, input_name="data", output_name="output") + expected = {"output": np.arctanh(x)} + + self._test_model( + builder.spec, {"data": x}, expected, useCPUOnly=cpu_only + ) + + def test_trigonometry_gpu(self): + self.test_trigonometry_cpu(cpu_only=False) + + def test_exp2_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_exp2(name="exp2", input_name="data", output_name="output") + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.exp2(x)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_exp2_gpu(self): + self.test_exp2_cpu(cpu_only=False) + + def test_elementwise_binary_cpu(self, cpu_only=True): + input_names = ["A", "B"] + test_cases = [ + "greater", + "less", + "equal", + "not_equal", + "greater_equal", + "less_equal", + "logical_and", + "logical_or", + "logical_xor", + "add", + "subtract", + "multiply", + "divide", + "power", + "maximum", + "minimum", + "floor_divide", + "mod", + ] + for test_case in test_cases: + for _ in range(10): + rank_a = np.random.randint(low=1, high=6) + rank_b = np.random.randint(low=1, high=6) + + rank_out = max(rank_a, rank_b) + + shape_a = np.random.randint(low=2, high=8, size=rank_a) + shape_b = np.random.randint(low=2, high=8, size=rank_b) + + for i in range(-1, -rank_out - 1, -1): + dims = [] + if -i <= rank_a: + dims.append(shape_a[i]) + if -i <= rank_b: + dims.append(shape_b[i]) + + dim = np.random.choice(dims) + if -i <= rank_a: + shape_a[i] = np.random.choice([1, dim]) + if -i <= rank_b: + shape_b[i] = np.random.choice([1, dim]) + + input_shapes = [shape_a, shape_b] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + func = getattr(np, test_case) + if test_case == "greater": + builder.add_greater_than( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "less": + builder.add_less_than( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "equal": + builder.add_equal( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "not_equal": + builder.add_not_equal( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "greater_equal": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + use_greater_than_equal=True, + ) + elif test_case == "less_equal": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + use_less_than_equal=True, + ) + elif test_case == "logical_and": + builder.add_logical( + test_case, + input_names=input_names, + output_name="output", + mode="AND", + ) + elif test_case == "logical_or": + builder.add_logical( + test_case, + input_names=input_names, + output_name="output", + mode="OR", + ) + elif test_case == "logical_xor": + builder.add_logical( + test_case, + input_names=input_names, + 
output_name="output", + mode="XOR", + ) + elif test_case == "add": + builder.add_add_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "subtract": + builder.add_subtract_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "multiply": + builder.add_multiply_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "divide": + builder.add_divide_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "power": + builder.add_pow_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "maximum": + builder.add_max_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "minimum": + builder.add_min_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "floor_divide": + builder.add_floor_div_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "mod": + builder.add_mod_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + input = {"A": a, "B": b} + expected = {"output": func(a, b)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_elementwise_binary_gpu(self): + self.test_elementwise_binary_cpu(cpu_only=False) + + def test_elementwise_boolean_unary_cpu(self, cpu_only=True): + input_names = ["input"] + shapes = [ + (1, 2, 3, 1), + (3, 1, 2, 1, 2), + (1, 2, 1, 3), + (2, 3), + (2, 1, 1), + (2, 3, 4), + (2, 4), + (1,), + (1,), + ] + test_cases = [ + "greater", + "less", + "equal", + "not_equal", + "greater_equal", + "less_equal", + ] + for test_case in test_cases: + for shape in shapes: + input_features = [("input", datatypes.Array(*shape))] + b = np.random.rand() + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + func = getattr(np, test_case) + if test_case == "greater": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "less": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "equal": + builder.add_equal( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "not_equal": + builder.add_not_equal( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "greater_equal": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + use_greater_than_equal=True, + alpha=b, + ) + elif test_case == "less_equal": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + use_less_than_equal=True, + alpha=b, + ) + + a = np.random.rand(*shape) + input = {"input": a} + expected = {"output": func(a, b)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_elementwise_boolean_unary_gpu(self): + self.test_elementwise_boolean_unary_cpu(cpu_only=False) + + def test_logical_not_cpu(self, cpu_only=True): + input_names = ["input"] + shapes = [ + (1, 2, 3, 1), + (3, 1, 2, 1, 2), + (1, 2, 1, 3), + (2, 3), + (2, 1, 1), + (2, 3, 4), + (2, 4), + (1,), + (1,), + ] + for shape in shapes: + input_features = [("input", datatypes.Array(*shape))] + 
builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_logical( + "logical_not", input_names=input_names, output_name="output", mode="NOT" + ) + + a = np.random.rand(*shape) + input = {"input": a} + expected = {"output": np.logical_not(a)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_logical_not_gpu(self): + self.test_logical_not_cpu(cpu_only=False) + + def test_stack_cpu(self, cpu_only=True): + for input_rank in range(1, 5): + for axis in range(-input_rank - 1, input_rank + 1): + n_inputs = np.random.choice(range(2, 5)) + input_shape = np.random.randint(low=2, high=5, size=input_rank) + input_features = [] + input_names = [] + for i in range(n_inputs): + input_name = "input_%s" % str(i) + input_names.append(input_name) + input_features.append((input_name, datatypes.Array(*input_shape))) + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_stack( + name="stack", + input_names=input_names, + output_name="output", + axis=axis, + ) + + input_tensors = [] + for _ in range(n_inputs): + input_tensors.append(np.random.rand(*input_shape)) + input = dict(zip(input_names, input_tensors)) + expected = {"output": np.stack(input_tensors, axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(input_rank + 1, builder._get_rank("output")) + + def test_stack_gpu(self): + self.test_stack_cpu(cpu_only=False) + + def test_ceil_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_ceil(name="ceil", input_name="data", output_name="output") + + x = np.random.rand(*shape) + inputs = {"data": x} + expected = {"output": np.ceil(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_ceil_gpu(self): + self.test_ceil_cpu(cpu_only=False) + + def test_floor_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_floor(name="floor", input_name="data", output_name="output") + + x = np.random.rand(*shape) + inputs = {"data": x} + expected = {"output": np.floor(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_round_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_round(name="round", input_name="data", output_name="output") + + x = np.float32( + np.random.rand(*shape) * np.random.randint(low=-100, high=101) + ) + inputs = {"data": x} + expected = {"output": np.around(x)} + + self._test_model(builder.spec, 
inputs, expected, useCPUOnly=cpu_only) + + def test_round_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_round_cpu(cpu_only=False) + + def test_sign_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_sign(name="sign", input_name="data", output_name="output") + + x = np.random.choice( + [-np.random.rand(1)[0], 0.0, np.random.rand(1)[0]], tuple(shape) + ).astype(np.float32) + inputs = {"data": x} + expected = {"output": np.sign(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_sign_gpu(self): + self.test_sign_cpu(cpu_only=False) + + def test_clip_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + x = np.random.rand(*shape) + min_value = np.percentile(x, 25) + max_value = np.percentile(x, 75) + input = {"data": x} + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_clip( + name="clip", + input_name="data", + output_name="output", + min_value=min_value, + max_value=max_value, + ) + + expected = {"output": np.clip(x, min_value, max_value)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_clip_gpu(self): + self.test_clip_cpu(cpu_only=False) + + def test_split_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_outputs = np.random.choice(range(2, 4)) + input_shape = np.random.randint(low=2, high=5, size=rank) + input_shape[axis] = 0 + output_shapes = [] + output_features = [] + output_names = [] + almost_equal = random.choice([True, False]) + remainder = np.random.choice(range(1, n_outputs)) if almost_equal else 0 + value = np.random.choice(range(2, 5)) + for k in range(n_outputs): + output_shapes.append(np.copy(input_shape)) + output_shapes[-1][axis] = value + 1 if k < remainder else value + input_shape[axis] += output_shapes[-1][axis] + + for i in range(n_outputs): + output_name = "output_%s" % str(i) + output_names.append(output_name) + output_features.append((output_name, None)) + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_split_nd( + name="split_nd", + input_name="data", + output_names=output_names, + axis=axis, + num_splits=n_outputs, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = dict( + zip( + output_names, + np.array_split(x, n_outputs, axis=axis) + if almost_equal + else np.split(x, n_outputs, axis=axis), + ) + ) # Explicitly trying to compare against both versions of numpy split + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + for output_ in output_names: + self.assertEqual(rank, builder._get_rank(output_)) + + def test_split_nd_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + 
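+        # Re-run the identical split cases with cpu_only=False to exercise the
+        # non-CPU compute path; the xfail above tracks a known arm64 failure.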
self.test_split_nd_cpu(cpu_only=False) + + def test_split_nd_with_split_sizes_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_outputs = np.random.choice(range(2, 4)) + input_shape = np.random.randint(low=2, high=5, size=rank) + input_shape[axis] = 0 + output_shapes, output_features, output_names = [], [], [] + sections, split_sizes = [], [] + for _ in range(n_outputs): + output_shapes.append(np.copy(input_shape)) + output_shapes[-1][axis] = np.random.choice(range(2, 5)) + input_shape[axis] += output_shapes[-1][axis] + sections.append(input_shape[axis]) + split_sizes.append(output_shapes[-1][axis]) + + sections.pop() + for i in range(n_outputs): + output_name = "output_%s" % str(i) + output_names.append(output_name) + output_features.append((output_name, None)) + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_split_nd( + name="split_nd", + input_name="data", + output_names=output_names, + axis=axis, + split_sizes=split_sizes, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = dict(zip(output_names, np.split(x, sections, axis=axis))) + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + for output_ in output_names: + self.assertEqual(rank, builder._get_rank(output_)) + + def test_split_nd_with_split_sizes_gpu(self): + self.test_split_nd_with_split_sizes_cpu(cpu_only=False) + + def test_slice_static_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(200): + input_shape = np.array([5 for _ in range(rank)]) + objs, strides, begin_masks, end_ids, end_masks, begin_ids = ( + [], + [], + [], + [], + [], + [], + ) + for dim in range(rank): + stride = random.choice([-3, -1, 1, 2]) + begin_mask = random.choice([True, False]) + end_mask = random.choice([True, False]) + length = 0 + while length <= 0: + begin_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + end_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + obj = slice( + None if begin_mask else begin_id, + None if end_mask else end_id, + stride, + ) + length = np.arange(input_shape[dim])[(obj,)].shape[0] + + objs.append(obj), strides.append(stride), begin_masks.append( + begin_mask + ) + end_masks.append(end_mask), begin_ids.append( + begin_id + ), end_ids.append(end_id) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_slice_static( + "slice_static", + "data", + "output", + begin_ids=begin_ids, + end_ids=end_ids, + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + ) + + x = np.random.rand(*input_shape) + inputs = {"data": x} + expected = {"output": x[tuple(objs)]} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_slice_static_gpu(self): + self.test_slice_static_cpu(cpu_only=False) + + def test_slice_dynamic_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.array([5 for _ in range(rank)]) + objs, strides, begin_masks, end_ids, end_masks, begin_ids = ( + [], + [], + [], + [], + [], + [], + ) + squeeze_masks = [] + squeeze_axes = [] + for dim in range(rank): + stride = random.choice([-3, -1, 1, 2]) + begin_mask = 
random.choice([True, False])
+                end_mask = random.choice([True, False])
+                if len(squeeze_axes) + 1 < rank:
+                    squeeze_mask = random.choice([True, False])
+                else:
+                    squeeze_mask = False
+                if squeeze_mask:
+                    squeeze_axes.append(dim)
+                length = 0
+                while length <= 0:
+                    begin_id = np.random.randint(
+                        low=-input_shape[dim], high=input_shape[dim]
+                    )
+                    end_id = np.random.randint(
+                        low=-input_shape[dim], high=input_shape[dim]
+                    )
+                    obj = slice(
+                        None if begin_mask else begin_id,
+                        None if end_mask else end_id,
+                        stride,
+                    )
+                    length = np.arange(input_shape[dim])[(obj,)].shape[0]
+
+                objs.append(obj)
+                strides.append(stride)
+                begin_masks.append(begin_mask)
+                end_masks.append(end_mask)
+                begin_ids.append(begin_id)
+                end_ids.append(end_id)
+                squeeze_masks.append(squeeze_mask)
+
+            # Test different numbers of inputs, from 2 up to 6: when
+            # num_inputs == 2, begin_ids is an input and the rest are read from
+            # parameters; each extra input moves one more field from the
+            # parameters to the inputs. The num_inputs == 7 branches below
+            # (everything, including squeeze_masks, read from inputs) are kept
+            # for completeness but are not exercised by this loop.
+            for num_inputs in [2, 3, 4, 5, 6]:
+                x = np.random.rand(*input_shape)
+
+                input_features = [("data", datatypes.Array(*input_shape))]
+                input_names = ["data"]
+                inputs = dict()
+                inputs["data"] = x
+
+                if num_inputs == 2:
+                    input_features = [
+                        ("data", datatypes.Array(*input_shape)),
+                        ("begin_ids", datatypes.Array(len(begin_ids))),
+                    ]
+                    input_names = ["data", "begin_ids"]
+                    inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32)
+                elif num_inputs == 3:
+                    input_features = [
+                        ("data", datatypes.Array(*input_shape)),
+                        ("begin_ids", datatypes.Array(len(begin_ids))),
+                        ("end_ids", datatypes.Array(len(end_ids))),
+                    ]
+                    input_names = ["data", "begin_ids", "end_ids"]
+                    inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32)
+                    inputs["end_ids"] = np.array(end_ids, dtype=np.int32)
+                elif num_inputs == 4:
+                    input_features = [
+                        ("data", datatypes.Array(*input_shape)),
+                        ("begin_ids", datatypes.Array(len(begin_ids))),
+                        ("end_ids", datatypes.Array(len(end_ids))),
+                        ("strides", datatypes.Array(len(strides))),
+                    ]
+                    input_names = ["data", "begin_ids", "end_ids", "strides"]
+                    inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32)
+                    inputs["end_ids"] = np.array(end_ids, dtype=np.int32)
+                    inputs["strides"] = np.array(strides, dtype=np.int32)
+                elif num_inputs == 5:
+                    input_features = [
+                        ("data", datatypes.Array(*input_shape)),
+                        ("begin_ids", datatypes.Array(len(begin_ids))),
+                        ("end_ids", datatypes.Array(len(end_ids))),
+                        ("strides", datatypes.Array(len(strides))),
+                        ("begin_masks", datatypes.Array(len(begin_masks))),
+                    ]
+                    input_names = [
+                        "data",
+                        "begin_ids",
+                        "end_ids",
+                        "strides",
+                        "begin_masks",
+                    ]
+                    inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32)
+                    inputs["end_ids"] = np.array(end_ids, dtype=np.int32)
+                    inputs["strides"] = np.array(strides, dtype=np.int32)
+                    inputs["begin_masks"] = np.array(begin_masks, dtype=np.int32)
+                elif num_inputs == 6:
+                    input_features = [
+                        ("data", datatypes.Array(*input_shape)),
+                        ("begin_ids", datatypes.Array(len(begin_ids))),
+                        ("end_ids", datatypes.Array(len(end_ids))),
+                        ("strides", datatypes.Array(len(strides))),
+                        ("begin_masks", datatypes.Array(len(begin_masks))),
+                        ("end_masks", datatypes.Array(len(end_masks))),
+                    ]
+                    input_names = [
+                        "data",
+                        "begin_ids",
+                        "end_ids",
+                        "strides",
+                        "begin_masks",
+                        "end_masks",
+                    ]
+                    inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32)
+                    inputs["end_ids"] = np.array(end_ids, dtype=np.int32)
+                    inputs["strides"] = np.array(strides, dtype=np.int32)
+                    inputs["begin_masks"] = np.array(begin_masks,
dtype=np.int32) + inputs["end_masks"] = np.array(end_masks, dtype=np.int32) + elif num_inputs == 7: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ("strides", datatypes.Array(len(strides))), + ("begin_masks", datatypes.Array(len(begin_masks))), + ("end_masks", datatypes.Array(len(end_masks))), + ("squeeze_masks", datatypes.Array(len(squeeze_masks))), + ] + input_names = [ + "data", + "begin_ids", + "end_ids", + "strides", + "begin_masks", + "end_masks", + "squeeze_masks", + ] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + inputs["strides"] = np.array(strides, dtype=np.int32) + inputs["begin_masks"] = np.array(begin_masks, dtype=np.int32) + inputs["end_masks"] = np.array(end_masks, dtype=np.int32) + inputs["squeeze_masks"] = np.array(squeeze_masks, dtype=np.int32) + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + if num_inputs == 2: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + end_ids=end_ids, + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 3: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 4: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 5: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 6: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 7: + builder.add_slice_dynamic("slice_dynamic", input_names, "output") + + expected_x = x[tuple(objs)] + squeeze_slices = [] + for squeeze in squeeze_masks: + if squeeze: + squeeze_slices.append(slice(None, 1, None)) + else: + squeeze_slices.append(slice(None, None, None)) + expected_x = np.squeeze( + expected_x[tuple(squeeze_slices)], axis=tuple(squeeze_axes) + ) + expected = {"output": expected_x} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_slice_dynamic_gpu(self): + self.test_slice_dynamic_cpu(cpu_only=False) + + def test_tile_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=5, size=rank) + for rep_rank in range(1, rank + 1): + reps = list(np.random.randint(low=1, high=9, size=rep_rank)) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_tile("Tile", "data", "output", reps) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.tile(x, reps)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_tile_gpu(self): + self.test_tile_cpu(cpu_only=False) + + def test_dynamic_tile_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=5, size=rank) + for rep_rank in range(1, rank + 1): + reps 
= np.random.randint(low=1, high=9, size=rep_rank) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("reps", datatypes.Array(*reps.shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_tile("Tile", ["data", "reps"], "output") + + x = np.random.rand(*input_shape) + input = {"data": x, "reps": reps.astype(np.float32)} + expected = {"output": np.tile(x, list(reps))} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_sliding_windows_cpu(self, cpu_only=True): + def numpy_sliding_windows(a, np_axis, np_size, np_step): + n = (a.shape[np_axis] - np_size) // np_step + 1 + shape = list(a.shape) + shape[np_axis] = n + if np_axis < 0: + np_axis += len(shape) + shape.insert(np_axis + 1, np_size) + strides = list(a.strides) + effstride = strides[np_axis] * np_step + strides.insert(np_axis, effstride) + return np.lib.stride_tricks.as_strided(a, shape, strides) + + for rank in range(1, 5): + for axis in range(-rank, rank): + input_shape = np.random.randint(low=2, high=5, size=rank) + output_shape = list(input_shape) + window_size = np.random.randint(low=1, high=input_shape[axis]) + + length = 0 + while length <= 0: + step = np.random.randint(low=1, high=input_shape[axis]) + length = (input_shape[axis] - window_size) // step + 1 + + output_shape[axis] = length + + pos_axis = axis if axis >= 0 else axis + rank + output_shape.insert(pos_axis + 1, window_size) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_sliding_windows( + "sliding_windows", + input_name="data", + output_name="output", + axis=axis, + window_size=window_size, + step=step, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": numpy_sliding_windows(x, axis, window_size, step)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(rank + 1, builder._get_rank("output")) + + def test_sliding_windows_gpu(self): + self.test_sliding_windows_cpu(cpu_only=False) + + def test_range_static_cpu(self, cpu_only=True): + + params = [ + (-10.4, 23, 12.2), + (0, 1000, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ] + + for param in params: + start, end, step = param + input_features = [("multiplicative_input", datatypes.Array(1))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_range_static( + "range_static", "output_range", end=end, start=start, step=step + ) + builder.add_multiply_broadcastable( + name="multiply_broadcastable", + input_names=["multiplicative_input", "output_range"], + output_name="output", + ) + + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + + inputs = dict() + inputs["multiplicative_input"] = np.ones((1,), dtype=np.float64) + expected = {"output": np.arange(start, end, step)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(1, builder._get_rank("output")) + + def test_range_static_gpu(self): + self.test_range_static_cpu(cpu_only=False) + + def 
test_range_dynamic_cpu(self, cpu_only=True): + params = [ + (-10.4, 23, 12.2), + (0, 1000, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ] + + # input size == 1: end is input, start and step are read from parameters + # input size == 2: end, start are inputs, step is read from parameters + # input size == 3: start, end, step are all inputs, none of the parameters are used. + for num_inputs in [1, 2, 3]: + for param in params: + inputs = dict() + start, end, step = param + + if num_inputs == 1: + input_features = [("end", datatypes.Array(1))] + elif num_inputs == 2: + input_features = [ + ("end", datatypes.Array(1)), + ("start", datatypes.Array(1)), + ] + elif num_inputs == 3: + input_features = [ + ("end", datatypes.Array(1)), + ("start", datatypes.Array(1)), + ("step", datatypes.Array(1)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + if num_inputs == 1: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end"], + start=start, + step=step, + ) + elif num_inputs == 2: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + inputs["start"] = start * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end", "start"], + step=step, + ) + elif num_inputs == 3: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + inputs["start"] = start * np.ones((1,), dtype=np.float64) + inputs["step"] = step * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end", "start", "step"], + ) + + expected = {"output": np.arange(start, end, step)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(1, builder._get_rank("output")) + + def test_range_dynamic_gpu(self): + self.test_range_dynamic_cpu(cpu_only=False) + + def test_linear_activation_different_ranks_cpu(self, cpu_only=True): + for input_dim in [(10, 15), (10, 15, 2, 3), (10, 2, 4, 15, 1), (6,)]: + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_linear_activation_different_ranks_gpu(self): + self.test_linear_activation_different_ranks_cpu(cpu_only=False) + + def test_topk_cpu(self, cpu_only=True): + test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)] + K = [3, 5] + axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]] + + for ii, input_shape in enumerate(test_input_shapes): + for k in K: + for n_inputs in [1, 2]: + for bottom_k_flag in [False, True]: + for axis in axes[ii]: + for negative_axis in [False, True]: + + if negative_axis: + axis = axis - len(input_shape) + + input_features = [ + ("data", datatypes.Array(*input_shape)) + ] + output_features = [("values", None), ("indices", None)] + + input_names = ["data"] + output_names = ["values", "indices"] + + if n_inputs == 2: + input_names.append("k_in") + 
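+                                    # With the second "k_in" input, k is fed at
+                                    # run time as a rank-1 tensor rather than
+                                    # stored as a layer parameter (see the
+                                    # add_topk branches below).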
input_features.append(("k_in", datatypes.Array(1))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + if n_inputs == 2: + builder.add_topk( + "topk", + input_names, + output_names, + axis=axis, + use_bottom_k=bottom_k_flag, + ) + else: + builder.add_topk( + "topk", + input_names, + output_names, + k=k, + axis=axis, + use_bottom_k=bottom_k_flag, + ) + + data = np.random.randint( + low=0, + high=int(np.prod(input_shape)), + size=input_shape, + ) + data = data.astype(np.float32) + + input = {"data": data} + if n_inputs == 2: + input["k_in"] = k * np.ones([1], dtype=np.float32) + + # numpy reference values + if bottom_k_flag: + ref_indices = np.argsort(data, axis=axis) + else: + ref_indices = np.argsort(-data, axis=axis) + + slc = [slice(None)] * len(input_shape) + slc[axis] = slice(0, k) + ref_indices = ref_indices[tuple(slc)] + ref_values = np.take_along_axis( + data, ref_indices, axis=axis + ) + expected = { + "values": ref_values, + "indices": ref_indices, + } + + self._test_model( + builder.spec, input, expected, useCPUOnly=cpu_only + ) + + def test_topk_gpu(self): + self.test_topk_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_const_pad_cpu(self, cpu_only=True): + def get_reference(data, pads, value): + res = tf.pad(data, pads, mode='CONSTANT', constant_values=value) + return res.numpy() + + value = 34.0 + shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)] + + ctr = 0 + for shape in shapes: + rank = len(shape) + for force_zeros_in_end in [0, 2, 6]: + for max_pad_value in range(1, 6): + for n_inputs in [1, 2]: + pads = np.random.randint( + low=0, high=max_pad_value, size=(rank, 2) + ) + + if force_zeros_in_end > 2 * rank: + continue + + # pads = np.reshape(np.array([1,1,1,0,0,1]), (rank, 2)) + if force_zeros_in_end != 0: + pads[-force_zeros_in_end:] = 0 + + data = np.random.rand(*shape) + reference = get_reference(data, pads, value) + + ctr += 1 + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + input_names = ["data"] + if n_inputs == 2: + input_names.append("pads") + input_features.append(("pads", datatypes.Array(2 * rank,))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + if n_inputs == 2: + builder.add_constant_pad( + "pad", input_names, "output", value=value + ) + else: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_amounts=pads.flatten(), + ) + + input = {"data": data} + if n_inputs == 2: + input["pads"] = pads.flatten().astype(np.float32) + + expected = {"output": reference} + self._test_model( + builder.spec, input, expected, useCPUOnly=cpu_only + ) + + def test_const_pad_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_const_pad_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_const_pad_mode2_cpu(self, cpu_only=True): + def get_reference(data, output_shape, value, left_pad=False): + pads = np.zeros((len(output_shape), 2)) + if left_pad: + pads[:, 0] = np.array(output_shape) - np.array(data.shape) + else: + pads[:, 1] = np.array(output_shape) - np.array(data.shape) + res = tf.pad(data, pads, mode="CONSTANT", constant_values=value) + return res.numpy() + + + value = 34.0 + shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 
2, 4, 8)] + out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)] + + ctr = 0 + for ii, shape in enumerate(shapes): + rank = len(shape) + for left_pad in [True, False]: + for n_inputs in [1, 2]: + + data = np.random.rand(*shape) + reference = get_reference(data, out_shapes[ii], value, left_pad) + + pads = np.zeros((rank, 2)) + tmp = np.zeros((rank)) + + for i in range(rank): + if out_shapes[ii][i] == shape[i]: + tmp[i] = 0 + else: + tmp[i] = out_shapes[ii][i] + + if left_pad: + pads[:, 0] = tmp + else: + pads[:, 1] = tmp + + ctr += 1 + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + input_names = ["data"] + if n_inputs == 2: + input_names.append("pads") + input_features.append(("pads", datatypes.Array(2 * rank,))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + if n_inputs == 2: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_to_given_output_size_mode=True, + ) + else: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_amounts=pads.flatten(), + pad_to_given_output_size_mode=True, + ) + + input = {"data": data} + if n_inputs == 2: + input["pads"] = pads.flatten().astype(np.float32) + + expected = {"output": reference} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_const_pad_mode2_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_const_pad_mode2_cpu(cpu_only=False) + + def test_nms_cpu(self, cpu_only=True): + def _compute_iou_matrix(boxes): + # input is (N,4), in order [center_w, center_h, width, height] + self.assertEqual(len(boxes.shape), 2) + self.assertEqual(boxes.shape[1], 4) + boxes = boxes.astype(np.float32) + center_w, center_h, width, height = np.split( + boxes, 4, axis=1 + ) # outs are all (N,1) + top = center_h + 0.5 * height + bottom = center_h - 0.5 * height + left = center_w - 0.5 * width + right = center_w + 0.5 * width + area = width * height + + hB = np.minimum(top, np.transpose(top)) + wB = np.minimum(right, np.transpose(right)) + hA = np.maximum(bottom, np.transpose(bottom)) + wA = np.maximum(left, np.transpose(left)) + + intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA) + union_area = area + np.transpose(area) - intersection_area + iou = intersection_area / union_area + return iou + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def _nms_TF( + boxes, scores, iou_threshold, score_threshold, per_class_suppression, M + ): + # boxes is (B,N,4), in order [center_w, center_h, width, height] + # scores is (B,N,C) + # output shapes: (B,M,4), (B,M,C), (B,M), (B,) + """ + this is implementation of CoreML's NMS layer + """ + B, N, C = scores.shape + + iou_threshold = iou_threshold.astype(np.float32) + score_threshold = score_threshold.astype(np.float32) + + # convert box ids to TF style + center_w, center_h, width, height = np.split( + boxes, 4, axis=-1 + ) # outs are all (B,N,1) + y1 = center_h - 0.5 * height + y2 = center_h + 0.5 * height + x1 = center_w - 0.5 * width + x2 = center_w + 0.5 * width + boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4) + + out1 = np.zeros((B, M, 4)) + out2 = np.zeros((B, M, C)) + out3 = -1 * np.ones((B, M)) + out4 = np.zeros((B,)) + + for b in range(B): + box_coord_matrix = boxes_tf[b, :, :] # (N,4) + score_vector = np.max(scores[b, :, :], 
axis=-1) # (N,) + if not per_class_suppression: + # this is the simple case as TF directly supports it + ids_g = tf.image.non_max_suppression( + box_coord_matrix, + score_vector, + max_output_size=M, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + ids = ids_g.numpy() + else: + # this is slightly complicated as TF does not directly support it + class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,) + sorted_score_ids = np.argsort(-score_vector) + box_coord_matrix2 = np.take( + box_coord_matrix, sorted_score_ids, axis=0 + ) + score_vector2 = np.take(score_vector, sorted_score_ids) + class_ids = np.take(class_ids, sorted_score_ids) + classes_seen = dict() + ids_intermediate = np.array([], dtype=np.int32) + for n in range(N): + if class_ids[n] in classes_seen: + continue + c = class_ids[n] + classes_seen[c] = True + current_class_ids = np.where(class_ids == c)[0] + if len(current_class_ids) > 0: + feed_in1 = np.take( + box_coord_matrix2, current_class_ids, axis=0 + ) + feed_in2 = np.take(score_vector2, current_class_ids) + cur_ids_g = tf.image.non_max_suppression( + feed_in1, + feed_in2, + max_output_size=M, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + cur_ids = cur_ids_g.numpy() + + from_sort_ids = np.take(current_class_ids, cur_ids) + ids_intermediate = np.append( + ids_intermediate, from_sort_ids + ) + ids_intermediate.sort() + ids = np.take(sorted_score_ids, ids_intermediate) + + xx = len(ids) + if xx == 0: + ids = np.array([np.argmax(score_vector)]) + xx = 1 + if xx > M: + ids = ids[:M] + xx = len(ids) + out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0) + out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0) + out3[b, :xx] = ids + out4[b] = xx + + return out1, out2, out3, out4 + + iou_threshold_percentile = [0, 30, 80, 100] + score_threshold_percentile_arr = [0, 40, 100] + N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out + + number_of_test = 0 + for N_M in N_M_pairs_to_test: + for B in [1]: # [1, 5] TODO Re-enable when rdar://60280745 is fixed + for C in [1, 7]: + N, M = N_M + + boxes = np.random.rand(B, N, 4) + scores = np.random.rand(B, N, C) + + iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N) + iou_matrix = iou_matrix[ + ~np.eye(iou_matrix.shape[0], dtype=bool) + ].reshape(iou_matrix.shape[0], -1) + + for per_class_suppression in [False, True]: + for iou_thresh in iou_threshold_percentile: + for score_thresh in score_threshold_percentile_arr: + for is_dynamic in [False, True]: + + if score_thresh == 0: + score_threshold = np.min(scores) - 1 + elif score_thresh == 100: + score_threshold = np.max(scores) + 1 + else: + score_threshold = ( + np.percentile(scores, score_thresh) + 0.01 + ) + + if iou_thresh == 0: + iou_threshold = np.maximum( + np.min(iou_matrix) - 0.01, 0.0 + ) + else: + iou_threshold = ( + np.percentile(iou_matrix, iou_thresh) + 0.01 + ) + iou_threshold = np.maximum(iou_threshold, 1e-8) + + number_of_test += 1 + + tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF( + boxes, + scores, + iou_threshold, + score_threshold, + per_class_suppression, + M, + ) + expected = dict() + expected["selected_boxes"] = tf_boxes + expected["selected_scores"] = tf_scores + expected["selected_box_ids"] = tf_ids + expected["number_of_boxes"] = tf_num_boxes + + # define CoreML model + + input_features = [ + ("boxes", datatypes.Array(B, N, 4)), + ("scores", datatypes.Array(B, N, C)), + ] + output_features = [ + ("selected_boxes", None), + ("selected_scores", None), + ("selected_box_ids", None), + 
("number_of_boxes", None), + ] + + input_names = ["boxes", "scores"] + if is_dynamic: + input_names.extend( + [ + "iou_threshold", + "score_threshold", + "max_boxes", + ] + ) + input_features.append( + ("iou_threshold", datatypes.Array(1,)) + ) + input_features.append( + ("score_threshold", datatypes.Array(1,)) + ) + input_features.append( + ("max_boxes", datatypes.Array(1,)) + ) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + input_dict = dict() + input_dict["boxes"] = boxes + input_dict["scores"] = scores + + if is_dynamic: + builder.add_nms( + "nms", + input_names, + [ + "selected_boxes", + "selected_scores", + "selected_box_ids", + "number_of_boxes", + ], + per_class_suppression=per_class_suppression, + ) + + input_dict[ + "iou_threshold" + ] = iou_threshold * np.ones([1], dtype=np.float32) + input_dict["score_threshold"] = ( + score_threshold + * np.ones([1], dtype=np.float32) + ) + input_dict["max_boxes"] = M * np.ones( + [1], dtype=np.float32 + ) + else: + builder.add_nms( + "nms", + input_names, + [ + "selected_boxes", + "selected_scores", + "selected_box_ids", + "number_of_boxes", + ], + iou_threshold=iou_threshold, + score_threshold=score_threshold, + max_boxes=M, + per_class_suppression=per_class_suppression, + ) + + self._test_model( + builder.spec, + input_dict, + expected, + useCPUOnly=cpu_only, + ) + + def test_nms_gpu(self): + self.test_nms_cpu(cpu_only=False) + + def test_rank_preserving_reshape(self): + input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)] + target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)] + output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_rank_preserving_reshape( + name="rank_preserving_reshape", + input_name="data", + output_name="output", + output_shape=target_shapes[i], + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_expand_dims(self): + input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)] + axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)] + output_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (1, 1, 1, 10), + ] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_expand_dims( + name="expand_dims", + input_name="data", + output_name="output", + axes=axes[i], + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_squeeze(self): + input_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (10, 5, 1, 1), + (1,), + (10, 5, 1, 1), + (3, 1, 7), + ] + axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)] + output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 
5), (3, 7)] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_squeeze( + name="squeeze_layer", + input_name="data", + output_name="output", + axes=list(axes[i]), + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_squeeze_all(self): + input_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (10, 5, 1, 1), + (1,), + (10, 5, 1, 1), + (3, 1, 7), + (3,), + (5, 6), + ] + for input_shape in input_shapes: + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_squeeze( + name="squeeze_layer", + input_name="data", + output_name="output", + squeeze_all=True, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + reference = np.squeeze(x) + if not reference.shape: + reference = np.reshape(reference, (1,)) + expected = {"output": reference} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(-1, builder._get_rank("output")) + + def test_argmax_argmin(self): + test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)] + + # (1+2+3+4+5) * 2^3 = 120 test cases + for input_shape in test_input_shapes: + for negative_axis in [False, True]: + for mode in ["argmax", "argmin"]: + for keep_dims in [True, False]: + for axis in np.arange(len(input_shape)): + + if negative_axis: + axis_val = axis - len(input_shape) + else: + axis_val = axis + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + x = np.random.rand(*input_shape) + + if mode == "argmax": + builder.add_argmax( + "argmax", + "data", + "output", + axis=axis_val, + keepdims=keep_dims, + ) + np_out = np.argmax(x, axis=axis_val) + else: + builder.add_argmin( + "argmin", + "data", + "output", + axis=axis_val, + keepdims=keep_dims, + ) + np_out = np.argmin(x, axis=axis_val) + + if keep_dims: + np_out = np.expand_dims(np_out, axis=axis_val) + elif len(input_shape) == 1: + np_out = np.expand_dims(np_out, axis=axis_val) + + input = {"data": x} + expected = {"output": np_out} + + test_case = "test_argmax_argmin_input_shape_{}_axis_{}_keep_dims_{}_numpy_out_shape_{}".format( + x.shape, axis_val, keep_dims, np_out.shape + ) + + self._test_model( + builder.spec, input, expected, useCPUOnly=True + ) + if len(np_out.shape) != 0: + self.assertEqual( + len(np_out.shape), builder._get_rank("output") + ) + + def test_get_shape(self): + dims = [1, 2, 3, 4, 5] + for rank in range(1, len(dims) + 1): + input_shape = dims[:rank] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_get_shape( + name="get_shape_layer", input_name="data", output_name="output" + ) + + feed = {"data": np.random.rand(*input_shape)} + expected 
= {"output": np.array(input_shape)} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + self.assertEqual(1, builder._get_rank("output")) + + def test_load_constant_nd(self): + dims = [2, 3, 4, 5, 6] + for rank in range(1, len(dims) + 1): + input_shape = dims[:rank] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_load_constant_nd( + "load_const_nd_layer", + "tmp", + constant_value=np.ones(input_shape), + shape=input_shape, + ) + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + feed = {"data": np.random.rand(*input_shape)} + expected = {"output": feed["data"] + 1} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + self.assertEqual(rank, builder._get_rank("output")) + + def test_simple_array_alloc_scatter(self): + alloc_shape = [2, 3, 4] + value_shape = [1, 3, 4] + input_features = [ + ("alloc_shape", datatypes.Array(len(alloc_shape))), + ("value", datatypes.Array(*value_shape)), + ("index", datatypes.Array(1)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_fill_dynamic( + name="fill_dynamic_layer", + input_name="alloc_shape", + output_name="array", + value=np.float32(0.0), + ) + # CoreML input order: container (array), indices, slices (value) + builder.add_scatter( + name="scatter_layer", + input_names=["array", "index", "value"], + output_name="output", + ) + + value = np.random.rand(*value_shape).astype("float") + feed = { + "alloc_shape": np.array(alloc_shape, dtype="float"), + "value": value, + "index": np.array([1], dtype="float"), + } + + ref = np.zeros(alloc_shape) + ref[1, :, :] = value + expected = {"output": ref} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + + def test_erf_activation_cpu(self, cpu_only=True): + input_features = [("data", datatypes.Array(10, 45))] + output_features = [("output", datatypes.Array(10, 45))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_erf(name="erf", input_name="data", output_name="output") + x = np.random.rand(10, 45) + input = {"data": x} + expected = { + "output": np.asarray([math.erf(i) for i in x.flatten().tolist()]).reshape( + 10, 45 + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_erf_activation_gpu(self): + self.test_erf_activation_cpu(cpu_only=False) + + def test_gelu_activation(self): + + for mode in ["EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"]: + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_gelu( + name="gelu", input_name="data", output_name="output", mode=mode + ) + + x = np.random.rand(*shape) + input = {"data": x} + exact = np.asarray( + [ + 0.5 * i * (1.0 + math.erf(i / math.sqrt(2))) + for i in x.flatten().tolist() + ] + ).reshape(*shape) + + expected = {"output": exact} + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_lower_triangular_cpu(self, cpu_only=True): + for rank in range(2, 6): + for k 
in range(-3, 4): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_lower_triangular("tril", "data", "output", k=k) + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.tril(x, k=k)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_lower_triangular_gpu(self): + self.test_lower_triangular_cpu(cpu_only=False) + + def test_upper_triangular_cpu(self, cpu_only=True): + for rank in range(2, 6): + for k in range(-3, 4): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_upper_triangular("triu", "data", "output", k=k) + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.triu(x, k=k)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_upper_triangular_gpu(self): + self.test_upper_triangular_cpu(cpu_only=False) + + def test_where_broadcastable_cpu(self, cpu_only=True): + for _ in range(150): + rank_cond = np.random.randint(low=1, high=6) + rank_true = np.random.randint(low=1, high=6) + rank_false = np.random.randint(low=1, high=6) + + rank_out = max(rank_cond, rank_true, rank_false) + + shape_cond = np.random.randint(low=2, high=8, size=rank_cond) + shape_true = np.random.randint(low=2, high=8, size=rank_true) + shape_false = np.random.randint(low=2, high=8, size=rank_false) + + for i in range(-1, -rank_out - 1, -1): + dims = [] + if -i <= rank_cond: + dims.append(shape_cond[i]) + if -i <= rank_true: + dims.append(shape_true[i]) + if -i <= rank_false: + dims.append(shape_false[i]) + + dim = np.random.choice(dims) + if -i <= rank_cond: + shape_cond[i] = np.random.choice([1, dim]) + if -i <= rank_true: + shape_true[i] = np.random.choice([1, dim]) + if -i <= rank_false: + shape_false[i] = np.random.choice([1, dim]) + + input_features = [ + ("cond", datatypes.Array(*shape_cond)), + ("true", datatypes.Array(*shape_true)), + ("false", datatypes.Array(*shape_false)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_where_broadcastable( + "if_broadcastable", + input_names=["cond", "true", "false"], + output_name="output", + ) + + cond = np.random.choice([1.0, 0.0], size=shape_cond) + true = np.random.rand(*shape_true) + false = np.random.rand(*shape_false) + + input = {"cond": cond, "true": true, "false": false} + expected = {"output": np.where(cond, true, false)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(expected["output"].shape), builder._get_rank("output")) + + def test_where_broadcastable_gpu(self): + self.test_where_broadcastable_cpu(cpu_only=False) + + @pytest.mark.slow + def test_random_normal_like_cpu(self, cpu_only=True): + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(5, -1, -1): + if rank > 0: + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + shape = np.random.randint(low=low, 
high=high, size=rank) + else: # one extra test to test more moments + shape = np.array([10, 10, 10, 10, 10000]) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_like( + name="random_normal_like", + input_name="tensor", + output_name="output", + mean=mean, + stddev=stddev, + seed=seed, + ) + + inputs = {"tensor": np.random.rand(*shape)} + expected = {"output": np.random.normal(mean, stddev, shape)} + + if rank > 0: + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + else: # one extra test to test more moments + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=6 + ) + + @pytest.mark.slow + def test_random_normal_like_gpu(self): + self.test_random_normal_like_cpu(cpu_only=False) + + def test_random_normal_static_cpu(self, cpu_only=True): + + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_static( + name="random_normal_static", + output_name="tmp", + output_shape=list(shape), + mean=mean, + stddev=stddev, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.normal(mean, stddev, shape)} + + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_normal_static_gpu(self): + self.test_random_normal_static_cpu(cpu_only=False) + + def test_random_normal_dynamic_cpu(self, cpu_only=True): + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_dynamic( + name="random_normal_dynamic", + input_names=["shape"], + output_name="output", + mean=mean, + stddev=stddev, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.normal(mean, stddev, shape)} + + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_random_normal_dynamic_gpu(self): + self.test_random_normal_dynamic_cpu(cpu_only=False) + + def test_random_uniform_like_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = 
np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_like( + name="random_uniform_like", + input_name="tensor", + output_name="output", + minval=minval, + maxval=maxval, + seed=seed, + ) + + tensor = np.random.rand(*shape) + inputs = {"tensor": tensor} + expected = {"output": np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_uniform_like_gpu(self): + self.test_random_uniform_like_cpu(cpu_only=False) + + def test_random_uniform_static_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_static( + name="random_uniform_static", + output_name="tmp", + output_shape=list(shape), + minval=minval, + maxval=maxval, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_uniform_static_gpu(self): + self.test_random_uniform_static_cpu(cpu_only=False) + + def test_random_uniform_dynamic_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_dynamic( + name="random_uniform_dynamic", + input_names=["shape"], + output_name="output", + minval=minval, + maxval=maxval, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_random_uniform_dynamic_gpu(self): + self.test_random_uniform_dynamic_cpu(cpu_only=False) + + def test_random_bernoulli_like_cpu(self, cpu_only=True): + + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = 
np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_like( + name="random_bernoulli_like", + input_name="tensor", + output_name="output", + prob=prob, + seed=seed, + ) + + tensor = np.random.rand(*shape) + inputs = {"tensor": tensor} + expected = {"output": np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_like_gpu(self): + self.test_random_bernoulli_like_cpu(cpu_only=False) + + def test_random_bernoulli_static_cpu(self, cpu_only=True): + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_static( + name="random_bernoulli_static", + output_name="tmp", + output_shape=list(shape), + prob=prob, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_static_gpu(self): + self.test_random_bernoulli_static_cpu(cpu_only=False) + + def test_random_bernoulli_dynamic_cpu(self, cpu_only=True): + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_dynamic( + name="random_bernoulli_dynamic", + input_names=["shape"], + output_name="output", + prob=prob, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_dynamic_gpu(self): + self.test_random_bernoulli_dynamic_cpu(cpu_only=False) + + def test_categorical_distribution_cpu_shapes(self): + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + num_samples = np.random.randint(low=10, high=1000) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + 
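# Note: the categorical_distribution layer replaces the last input axis with num_samples random draws (e.g. an input of shape (2, 3, num_class) yields samples of shape (2, 3, num_samples)), so only output shapes are compared below via validate_shapes_only=True; the sampled values themselves are nondeterministic. + 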
builder.add_categorical_distribution( + name="categorical_distribution", + input_name="data", + output_name="output", + num_samples=num_samples, + ) + + x = np.random.randint(low=0, high=20, size=shape).astype(np.float32) + inputs = {"data": x} + shape[-1] = num_samples + expected = {"output": np.random.rand(*shape)} + + self._test_model( + builder.spec, + inputs, + expected, + useCPUOnly=True, + validate_shapes_only=True, + ) + + @pytest.mark.xfail( + reason="rdar://64153463 ([GitLab CI] test_categorical_distribution_cpu_probs failing)" + ) + def test_categorical_distribution_cpu_logits(self): + def softmax(data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + num_samples, num_class = 50000, 10 + input_name, output_name = "data", "output" + + shapes = [ + (2, num_class), + (2, 1, num_class), + (1, 2, num_class), + (2, 1, 1, num_class), + (1, 2, 1, num_class), + (1, 1, 2, num_class), + (2, 1, 1, 1, num_class), + (1, 2, 1, 1, num_class), + (1, 1, 2, 1, num_class), + (1, 1, 1, 2, num_class), + ] + + for shape in shapes: + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_categorical_distribution( + name="categorical_distribution", + input_name=input_name, + output_name=output_name, + num_samples=num_samples, + is_logits=True, + seed=42, + ) + + x = np.random.rand(*shape) + inputs = {input_name: x} + + model = builder.spec + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + model = coremltools.models.MLModel(model) + prediction = model.predict(inputs) + + # validate each distribution separately + logits = x.reshape(2, num_class) + probs = [softmax(logits[0]), softmax(logits[1])] + + ref0 = np.random.multinomial(num_samples, probs[0]) + ref1 = np.random.multinomial(num_samples, probs[1]) + + pre0 = prediction[output_name].reshape(2, num_samples)[0] + pre1 = prediction[output_name].reshape(2, num_samples)[1] + + expected = {output_name: np.stack((pre0, pre1))} + + # convert to bincount and validate probabilities + pre0 = np.bincount(np.array(pre0).astype(np.int32), minlength=num_class) + pre1 = np.bincount(np.array(pre1).astype(np.int32), minlength=num_class) + + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), probs[0], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), + np.true_divide(ref0, num_samples), + atol=1e-2, + ) + + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), probs[1], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), + np.true_divide(ref1, num_samples), + atol=1e-2, + ) + + self._test_model( + model, + inputs, + expected, + useCPUOnly=True, + output_name_shape_dict={"output": prediction["output"].shape}, + ) + + @pytest.mark.xfail( + reason="rdar://64153463 ([GitLab CI] test_categorical_distribution_cpu_probs failing)" + ) + def test_categorical_distribution_cpu_probs(self): + def softmax(data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + num_samples, num_class = 50000, 10 + input_name, output_name = "data", "output" + + shapes = [ + (2, num_class), + (2, 1, num_class), + (1, 2, num_class), + (2, 1, 1, num_class), + (1, 2, 1, num_class), + (1, 1, 2, num_class), + (2, 1, 1, 1, num_class), + (1, 2, 1, 1, num_class), + (1, 1, 2, 1, num_class), + (1, 1, 1, 2, num_class), + ] + + for shape in shapes: + input_features = [("data", datatypes.Array(*shape))] + + builder = 
neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_categorical_distribution( + name="categorical_distribution", + input_name=input_name, + output_name=output_name, + num_samples=num_samples, + is_logits=False, + seed=42, + ) + + x = np.random.rand(*shape) + probs = x.reshape(2, num_class) + probs[0], probs[1] = softmax(probs[0]), softmax(probs[1]) + inputs = {input_name: np.reshape(probs, shape)} + + model = builder.spec + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + model = coremltools.models.MLModel(model, useCPUOnly=True) + prediction = model.predict(inputs, useCPUOnly=True) + + # validate each distribution separately + probs = probs.reshape(2, num_class) + + ref0 = np.random.multinomial(num_samples, probs[0]) + ref1 = np.random.multinomial(num_samples, probs[1]) + + pre0 = prediction[output_name].reshape(2, num_samples)[0] + pre1 = prediction[output_name].reshape(2, num_samples)[1] + + expected = {output_name: np.stack((pre0, pre1))} + + # convert to bincount and validate probabilities + pre0 = np.bincount(np.array(pre0).astype(np.int32), minlength=num_class) + pre1 = np.bincount(np.array(pre1).astype(np.int32), minlength=num_class) + + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), probs[0], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), + np.true_divide(ref0, num_samples), + atol=1e-2, + ) + + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), probs[1], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), + np.true_divide(ref1, num_samples), + atol=1e-2, + ) + + self._test_model( + model, + inputs, + expected, + useCPUOnly=True, + output_name_shape_dict={"output": prediction["output"].shape}, + ) + + def test_reverse_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + reverse_dim = [np.random.choice([True, False]) for _ in range(rank)] + axes = [i for i in range(rank) if reverse_dim[i] == True] + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_reverse("reverse", "data", "output", reverse_dim) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.flip(x, axis=axes)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reverse_gpu(self): + self.test_reverse_cpu(cpu_only=False) + + def test_matrix_band_part_cpu(self, cpu_only=True): + + for rank in range(2, 6): + for _ in range(20): + num_lower = np.random.randint(low=-7, high=8) + num_upper = np.random.randint(low=-7, high=8) + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_matrix_band_part( + "matrix_band_part", + "data", + "output", + num_lower=num_lower, + num_upper=num_upper, + ) + + x = np.random.rand(*shape) + input = {"data": x} + + rows, cols = shape[-2:] + band = np.ones((rows, cols)) + for m in range(rows): + for n in range(cols): + band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and ( + num_upper < 0 or (n - m) <= num_upper + ) + + expected = {"output": 
np.multiply(band, x)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_matrix_band_part_gpu(self): + self.test_matrix_band_part_cpu(cpu_only=False) + + def test_flatten_to_2d_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for axis in range(-rank, rank + 1): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_flatten_to_2d("flatten_to_2d", "data", "output", axis=axis) + + x = np.random.rand(*shape) + np_axis = axis + rank if axis < 0 else axis + pl, pr = 1, 1 + for i in range(0, np_axis): + pl *= shape[i] + for i in range(np_axis, len(shape)): + pr *= shape[i] + + new_shape = [pl, pr] + ref = x.reshape(new_shape) + + input = {"data": x} + expected = {"output": ref} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(2, builder._get_rank("output")) + + def test_flatten_to_2d_gpu(self): + self.test_flatten_to_2d_cpu(cpu_only=False) + + def test_reshape_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + target_shape[0] = n // np.prod(target_shape) + + np.random.shuffle(target_shape) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("tensor", datatypes.Array(*target_shape)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_like( + name="reshape_like", + input_names=["data", "tensor"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + tensor = np.random.rand(*target_shape) + inputs = {"data": data, "tensor": tensor} + expected = {"output": np.reshape(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(target_rank, builder._get_rank("output")) + + def test_reshape_like_gpu(self): + self.test_reshape_like_cpu(cpu_only=False) + + def test_reshape_static_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + + target_shape[0] = -1 + + np.random.shuffle(target_shape) + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_static( + name="reshape_static", + input_name="data", + output_name="output", + output_shape=target_shape, + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data} + expected = {"output": np.reshape(data, target_shape)} + + 
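# Note: target_shape carries exactly one -1 entry, which reshape_static infers from the product of the remaining dimensions, mirroring numpy semantics (e.g. np.reshape(np.zeros((4, 6)), (3, -1)).shape == (3, 8)). + 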
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(len(target_shape), builder._get_rank("output")) + + def test_reshape_static_gpu(self): + self.test_reshape_static_cpu(cpu_only=False) + + def test_reshape_dynamic_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + + target_shape[0] = -1 + + np.random.shuffle(target_shape) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_dynamic( + name="reshape_dynamic", + input_names=["data", "shape"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data, "shape": np.array(target_shape, dtype="float")} + expected = {"output": np.reshape(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_reshape_dynamic_gpu(self): + self.test_reshape_dynamic_cpu(cpu_only=False) + + def test_reduce_sum_cpu(self, cpu_only=True): + + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_sum( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.add.reduce(x, axes, keepdims=keep_dims)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + expected_rank = len(expected["output"].shape) + if expected_rank == 0: + expected_rank = 1 + self.assertEqual(expected_rank, builder._get_rank("output")) + + def test_reduce_sum_gpu(self): + self.test_reduce_sum_cpu(cpu_only=False) + + def test_reduce_prod_cpu(self, cpu_only=True): + + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + 
disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_prod( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.multiply.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + expected_rank = len(expected["output"].shape) + if expected_rank == 0: + expected_rank = 1 + self.assertEqual(expected_rank, builder._get_rank("output")) + + def test_reduce_prod_gpu(self): + self.test_reduce_prod_cpu(cpu_only=False) + + def test_reduce_mean_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_mean( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.mean(x, axes, keepdims=keep_dims)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_mean_gpu(self): + self.test_reduce_mean_cpu(cpu_only=False) + + def test_reduce_max_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_max( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.maximum.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_max_gpu(self): + self.test_reduce_max_cpu(cpu_only=False) + + def test_reduce_min_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = 
neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_min( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.minimum.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_min_gpu(self): + self.test_reduce_min_cpu(cpu_only=False) + + def test_reduce_l2_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_l2( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sqrt( + np.sum(np.square(x), axis=axes, keepdims=keep_dims) + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_l2_gpu(self): + self.test_reduce_l2_cpu(cpu_only=False) + + def test_reduce_l1_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_l1( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sum(np.abs(x), axis=axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_l1_gpu(self): + self.test_reduce_l1_cpu(cpu_only=False) + + def test_reduce_sumsquare_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + 
disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_sumsquare( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sum(np.square(x), axis=axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_sumsquare_gpu(self): + self.test_reduce_sumsquare_cpu(cpu_only=False) + + def test_reduce_logsum_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_logsum( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.log(np.sum(x, axis=axes, keepdims=keep_dims)) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_logsum_gpu(self): + self.test_reduce_logsum_cpu(cpu_only=False) + + def test_reduce_logsumexp_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_logsumexp( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.log( + np.sum(np.exp(x), axis=axes, keepdims=keep_dims) + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_logsumexp_gpu(self): + self.test_reduce_logsumexp_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_reverse_sequence_cpu(self, cpu_only=True): + for rank in range(2, 6): + for i in range(20): + input_shape = np.random.randint(low=2, high=6, size=rank) + + seq_axis = np.random.randint(low=-rank, high=rank) + batch_axis = np.random.randint(low=-rank, high=rank) + pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis + pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis + while pos_batch_axis >= pos_seq_axis: + seq_axis = np.random.randint(low=-rank, high=rank) + batch_axis = np.random.randint(low=-rank, high=rank) + pos_batch_axis = ( + batch_axis if batch_axis >= 0 else rank + batch_axis + ) + pos_seq_axis = seq_axis if seq_axis 
>= 0 else rank + seq_axis + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("lengths", datatypes.Array(input_shape[batch_axis])), + ] + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_reverse_sequence( + "reverse_sequence", + ["data", "lengths"], + "output", + batch_axis=batch_axis, + seq_axis=seq_axis, + ) + + data = np.random.rand(*input_shape) + lengths = np.random.randint( + low=0, high=input_shape[seq_axis], size=input_shape[batch_axis] + ) + + input = {"data": data, "lengths": lengths.astype(np.float32)} + + tf_op = tf.reverse_sequence( + input=data, + seq_lengths=lengths, + seq_axis=pos_seq_axis, + batch_axis=pos_batch_axis, + ) + expected = {"output": tf_op.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reverse_sequence_gpu(self): + self.test_reverse_sequence_cpu(cpu_only=False) + + def test_where_nonzero_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for i in range(10): + shape = np.random.randint(low=2, high=8, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_where_nonzero("multi_indices", "data", "output") + + x = np.random.randint(low=0, high=3, size=shape) + + input = {"data": x.astype(np.float32)} + expected = {"output": np.transpose(np.nonzero(x)).astype(np.float32)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_where_nonzero_gpu(self): + self.test_where_nonzero_cpu(cpu_only=False) + + def test_gather_cpu(self, cpu_only=True): + for rankParams, rankIndices in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for axis in range(-rankParams, rankParams): + shapeParams = np.random.randint(low=2, high=5, size=rankParams) + shapeIndices = np.random.randint(low=2, high=5, size=rankIndices) + input_shapes = [shapeParams, shapeIndices] + posAxis = axis if axis >= 0 else axis + rankParams + output_shape = ( + list(shapeParams[:posAxis]) + + list(shapeIndices) + + list(shapeParams[posAxis + 1 :]) + ) + + if len(output_shape) > 5: + continue + + input_names = ["params", "indices"] + input_features = [ + ("params", datatypes.Array(*input_shapes[0])), + ("indices", datatypes.Array(*input_shapes[1])), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_gather( + name="gather", + input_names=input_names, + output_name="output", + axis=axis, + ) + + a = np.random.rand(*input_shapes[0]) + b = np.random.randint( + -shapeParams[axis], shapeParams[axis], size=shapeIndices + ) + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": np.take(a, b, axis=axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual( + len(expected["output"].shape), builder._get_rank("output") + ) + + def test_gather_gpu(self): + self.test_gather_cpu(cpu_only=False) + + def test_gather_along_axis_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + for _ in range(5): + params_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(params_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + + input_features = [ + 
("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + builder.add_gather_along_axis( + "gather_along_axis", ["params", "indices"], "output", axis=axis + ) + + a = np.random.rand(*params_shape) + b = np.random.randint( + -params_shape[axis], params_shape[axis], size=indices_shape + ) + + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": np.take_along_axis(a, b, axis=axis)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual( + len(expected["output"].shape), builder._get_rank("output") + ) + + def test_gather_along_axis_gpu(self): + self.test_gather_along_axis_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_gather_nd_cpu(self, cpu_only=True): + for params_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + params_shape = np.random.randint(low=2, high=8, size=params_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=params_rank + 1) + + for _ in range(5): + input_features = [ + ("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + output_shape = list(indices_shape[:-1]) + list( + params_shape[indices_shape[-1] :] + ) + if len(output_shape) > 5: + continue + + builder.add_gather_nd("gather_nd", ["params", "indices"], "output") + + a = np.random.rand(*params_shape) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, params_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1) + input = {"params": a, "indices": indices.astype(np.float32)} + + tf_op = tf.gather_nd(a, indices) + expected = {"output": tf_op.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_gather_nd_gpu(self): + self.test_gather_nd_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_scatter_cpu(self, cpu_only=True): + for ref_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for accumulate_mode in ["UPDATE", "ADD", "SUB", "MUL", "DIV", "MAX", "MIN"]: + for _ in range(5): + ref_shape = np.random.randint(low=2, high=8, size=ref_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + updates_shape = list(indices_shape) + list(ref_shape[1:]) + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + if len(updates_shape) > 5: + continue + + builder.add_scatter( + "scatter", + ["ref", "indices", "updates"], + "output", + axis=0, + mode=accumulate_mode, + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + if accumulate_mode == "DIV": + updates += 10.0 + indices = np.random.randint(0, ref_shape[0], size=indices_shape) + input = { + "ref": ref, + "indices": 
indices.astype(np.float32), + "updates": updates, + } + + tf_output = tf.Variable(ref) + if accumulate_mode == "UPDATE": + tf.compat.v1.scatter_update(tf_output, indices, updates) + if accumulate_mode == "ADD": + tf.compat.v1.scatter_add(tf_output, indices, updates) + if accumulate_mode == "SUB": + tf.compat.v1.scatter_sub(tf_output, indices, updates) + if accumulate_mode == "MUL": + tf.compat.v1.scatter_mul(tf_output, indices, updates) + if accumulate_mode == "DIV": + tf.compat.v1.scatter_div(tf_output, indices, updates) + if accumulate_mode == "MAX": + tf.compat.v1.scatter_max(tf_output, indices, updates) + if accumulate_mode == "MIN": + tf.compat.v1.scatter_min(tf_output, indices, updates) + expected = {"output": tf_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_gpu(self): + self.test_scatter_cpu(cpu_only=False) + + def test_gather_scatter_multiple_axis_cpu(self, cpu_only=True): + + for params_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for axis in range(-params_rank, params_rank): + for _ in range(5): + params_shape = np.random.randint(low=2, high=8, size=params_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + + pos_axis = axis if axis >= 0 else axis + params_rank + output_shape = ( + list(params_shape[:pos_axis]) + + list(indices_shape) + + list(params_shape[pos_axis + 1 :]) + ) + + if len(output_shape) > 5: + continue + + input_features = [ + ("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_gather( + "gather", ["params", "indices"], "updates", axis=axis + ) + builder.add_scatter( + "scatter", + ["params", "indices", "updates"], + "output", + axis=axis, + mode="UPDATE", + ) + + a = np.random.rand(*params_shape) + b = np.random.randint( + -params_shape[axis], params_shape[axis], size=indices_shape + ) + + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": a} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_gather_scatter_multiple_axis_gpu(self): + self.test_gather_scatter_multiple_axis_cpu(cpu_only=False) + + def test_scatter_along_axis_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + for id in range(5): + ref_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(ref_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + updates_shape = indices_shape + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_scatter_along_axis( + "scatter_along_axis", + ["ref", "indices", "updates"], + "output", + axis=axis, + mode="UPDATE", + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + indices = np.random.randint( + -ref_shape[axis], ref_shape[axis], size=indices_shape + ) + input = { + "ref": ref, + "indices": indices.astype(np.float32), + "updates": updates, + } + + np_output = np.copy(ref) + np.put_along_axis(np_output, indices, updates, axis=axis) + expected = {"output": np_output} + 
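# Note: np.put_along_axis is used as the reference for the layer's UPDATE mode; indices are drawn from [-dim, dim), relying on negative indices wrapping around as in standard numpy indexing.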
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_along_axis_gpu(self): + self.test_scatter_along_axis_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_scatter_nd_cpu(self, cpu_only=True): + for ref_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(2, 6) + ]: + ref_shape = np.random.randint(low=2, high=8, size=ref_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=ref_rank + 1) + for accumulate_mode in ["UPDATE", "ADD", "SUB"]: + for id in range(20): + updates_shape = list(indices_shape[:-1]) + list( + ref_shape[indices_shape[-1] :] + ) + if len(updates_shape) > 5: + continue + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_scatter_nd( + "scatter_nd", + ["ref", "indices", "updates"], + "output", + mode=accumulate_mode, + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, ref_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1) + + input = { + "ref": ref, + "indices": indices.astype(np.float32), + "updates": updates, + } + + tf_output = tf.Variable(ref) + if accumulate_mode == "UPDATE": + tf.compat.v1.scatter_nd_update(tf_output, indices, updates) + if accumulate_mode == "ADD": + tf.compat.v1.scatter_nd_add(tf_output, indices, updates) + if accumulate_mode == "SUB": + tf.compat.v1.scatter_nd_sub(tf_output, indices, updates) + expected = {"output": tf_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_nd_gpu(self): + self.test_scatter_nd_cpu(cpu_only=False) + + def test_layer_normalization_cpu(self, cpu_only=True): + def layer_norm_numpy(x, shapes, gamma_, beta_, eps=1e-5): + axes = [-i - 1 for i, _ in enumerate(shapes)] + num = x - np.mean(x, axis=tuple(axes), keepdims=True) + dem = np.sqrt( + np.sum(np.square(num), axis=tuple(axes), keepdims=True) + / np.prod(shapes) + + eps + ) + return num / dem * gamma_ + beta_ + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=6, size=rank) + for axis in range(1, len(input_shape) + 1): + norm_shapes = input_shape[-axis:] + + data = np.random.rand(*input_shape) + + gamma = np.random.rand(*norm_shapes) + beta = np.random.rand(*norm_shapes) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_layer_normalization( + name="layer_normalization", + input_name="data", + output_name="output", + normalized_shape=norm_shapes, + gamma=gamma, + beta=beta, + ) + + inputs = {"data": data} + ref = layer_norm_numpy(data, norm_shapes, gamma, beta) + expected = {"output": ref} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_layer_normalization_gpu(self): + self.test_layer_normalization_cpu(cpu_only=False) + + +def get_size_after_stride(X, params): + start = params["start"] + end = params["end"] + stride = params["stride"] + if params["axis"] 
== "width": + axis = 2 + if params["axis"] == "height": + axis = 1 + if params["axis"] == "channel": + axis = 0 + N = X.shape[axis] + if end < 0: + end = end + N + end = min(end, N) + if start > N - 1: + L = 0 + else: + L = np.floor((end - 1 - start) / stride) + 1 + if L < 0: + L = 0 + return L + + +def get_numpy_predictions_slice(X, params): + start = params["start"] + end = params["end"] + stride = params["stride"] + if params["axis"] == "width": + return X[:, :, start:end:stride] + if params["axis"] == "height": + return X[:, start:end:stride, :] + if params["axis"] == "channel": + return X[start:end:stride, :, :] + + +def get_coreml_predictions_slice(X, params): + coreml_preds = [] + eval = True + try: + input_dim = X.shape + output_dim = ( + 1, + 1, + 1, + ) # some random dimensions here: we are going to remove this information later + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*output_dim))] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_slice( + "slice", + "data", + "output", + start_index=params["start"], + end_index=params["end"], + stride=params["stride"], + axis=params["axis"], + ) + # Remove output shape by deleting and adding an output + del builder.spec.description.output[-1] + output = builder.spec.description.output.add() + output.name = "output" + output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value( + "DOUBLE" + ) + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + # prepare input and get predictions + coreml_model = coremltools.models.MLModel(model_path) + coreml_input = {"data": X} + if _is_macos() and _macos_version() >= (10, 13): + coreml_preds = coreml_model.predict(coreml_input)["output"] + else: + coreml_preds = None + except RuntimeError as e: + print(e) + eval = False + + return coreml_preds, eval + + +def get_numpy_predictions_reduce(X, params): + if params["axis"] == "CHW": + axis = (0, 1, 2) + if params["axis"] == "HW": + axis = (1, 2) + if params["axis"] == "C": + axis = 0 + if params["axis"] == "H": + axis = 1 + if params["axis"] == "W": + axis = 2 + + if params["mode"] == "sum": + return np.sum(X, axis) + if params["mode"] == "avg": + return np.mean(X, axis) + if params["mode"] == "prod": + return np.prod(X, axis) + if params["mode"] == "logsum": + return np.sum(np.log(X + 1e-6), axis) + if params["mode"] == "sumsquare": + return np.sum(X ** 2, axis) + if params["mode"] == "L2": + return np.sqrt(np.sum(X ** 2, axis)) + if params["mode"] == "L1": + return np.sum(np.abs(X), axis) + if params["mode"] == "max": + return np.amax(X, axis) + if params["mode"] == "min": + return np.amin(X, axis) + if params["mode"] == "argmax": + return np.argmax(X, axis) + + +def get_coreml_predictions_reduce(X, params): + coreml_preds = [] + eval = True + try: + input_dim = X.shape + # some random dimensions here: we are going to remove this information later + output_dim = (1, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*output_dim))] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reduce( + "reduce", "data", "output", axis=params["axis"], mode=params["mode"] + ) + # Remove output shape by deleting and adding an output + del builder.spec.description.output[-1] + output 
= builder.spec.description.output.add() + output.name = "output" + output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value( + "DOUBLE" + ) + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + # prepare input and get predictions + coreml_model = coremltools.models.MLModel(model_path) + coreml_input = {"data": X} + if _is_macos() and _macos_version() >= (10, 13): + coreml_preds = coreml_model.predict(coreml_input)["output"] + else: + coreml_preds = None + except RuntimeError as e: + print(e) + eval = False + + return coreml_preds, eval + + +@pytest.mark.slow +class StressTest(CorrectnessTest): + def test_slice_layer(self): + params_dict = dict( + input_shape=[[30, 100, 8], [80, 50, 5], [4, 12, 5], [56, 8, 14]], + axis=["channel", "height", "width"], + start=[0, 1, 2, 5], + end=[5, 100, 56, -1, -2, -4], + stride=[1, 2, 3], + ) + params = list(itertools.product(*params_dict.values())) + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in all_candidates: + X = np.random.rand(*pr["input_shape"]) + if get_size_after_stride(X, pr): + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + + failed_tests_compile = [] + failed_tests_shape = [] + failed_tests_numerical = [] + for i in range(len(valid_params)): + params = valid_params[i] + X = np.random.rand(*params["input_shape"]) + np_preds = get_numpy_predictions_slice(X, params) + coreml_preds, eval = get_coreml_predictions_slice(X, params) + if eval is False: + failed_tests_compile.append(params) + elif coreml_preds is not None: + if not self._compare_shapes(np_preds, coreml_preds): + failed_tests_shape.append(params) + elif not self._compare_predictions(np_preds, coreml_preds): + failed_tests_numerical.append(params) + + self.assertEqual(failed_tests_compile, []) + self.assertEqual(failed_tests_shape, []) + self.assertEqual(failed_tests_numerical, []) + + def test_reduce_layer(self): + params_dict = dict( + input_shape=[[3, 10, 8], [8, 5, 5], [4, 12, 10], [7, 1, 14]], + mode=[ + "sum", + "avg", + "prod", + "sumsquare", + "L1", + "L2", + "max", + "min", + "argmax", + ], + axis=["CHW", "HW", "C", "H", "W"], + ) + params = list(itertools.product(*params_dict.values())) + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in all_candidates: + if pr["mode"] == "argmax": + if pr["axis"] == "CHW" or pr["axis"] == "HW": + continue + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + + failed_tests_compile = [] + failed_tests_shape = [] + failed_tests_numerical = [] + for i in range(len(valid_params)): + params = valid_params[i] + X = np.random.rand(*params["input_shape"]) + np_preds = get_numpy_predictions_reduce(X, params) + coreml_preds, eval = get_coreml_predictions_reduce(X, params) + if eval is False: + failed_tests_compile.append(params) + elif coreml_preds is not None: + if not self._compare_shapes(np_preds, coreml_preds): + failed_tests_shape.append(params) + elif not self._compare_predictions(np_preds, coreml_preds): + failed_tests_numerical.append(params) + + self.assertEqual(failed_tests_compile, []) + self.assertEqual(failed_tests_shape, []) + self.assertEqual(failed_tests_numerical, []) + + 
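+
+# A minimal illustrative sketch (not part of the original test suite): the rule
+# implemented by get_size_after_stride above is the usual positive-stride slice
+# length, L = floor((end - 1 - start) / stride) + 1, after wrapping a negative
+# end index and clamping end to the axis length. The helper below is
+# hypothetical and exists only to make that formula concrete.
+def _slice_length_sketch(n, start, end, stride):
+    # Wrap a negative end index and clamp to the axis length, mirroring the
+    # numpy-style slicing used by get_numpy_predictions_slice.
+    if end < 0:
+        end += n
+    end = min(end, n)
+    if start > n - 1 or end <= start:
+        return 0
+    return (end - 1 - start) // stride + 1
+
+# Example: slicing a length-100 axis as [1:100:3] keeps indices 1, 4, ..., 97.
+assert _slice_length_sketch(100, 1, 100, 3) == len(range(1, 100, 3)) == 33
+
+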
+@pytest.mark.slow +@unittest.skipIf( + not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION, + "macOS 10.15+ required. Skipping tests.", +) +class CoreML3NetworkStressTest(CorrectnessTest): + def test_dyn_weight_conv2d_stress(self): + options = dict( + padding=["valid"], + filters=[1, 2, 4], + kernel_size=[1, 3, 5], # square kernels + strides=[1, 2], + dilation_rate=[1], + batch_size=[1, 64, 512], + ) + + input_size = 64 + input_channels = 64 + input_dim = [1, input_channels, input_size, input_size] + + def conv_spatial_size(image_size, kernel_size, stride, dilation, padding): + if padding == "valid": + kernel_size_dilated = (kernel_size - 1) * dilation + 1 + return (image_size - kernel_size_dilated) // stride + 1 + elif padding == "same": + return int(math.ceil(image_size * 1.0 / stride)) + else: + return 0 + + for x in itertools.product(*options.values()): + kwargs = dict(zip(options.keys(), x)) + if kwargs["strides"] > 1 and kwargs["dilation_rate"] > 1: + continue + # weight layout: (output_channels, kernel_channels, height, width) + weight_dim = ( + kwargs["filters"], + input_channels, + kwargs["kernel_size"], + kwargs["kernel_size"], + ) + + input_dim[0] = kwargs["batch_size"] + input_features = [ + ("input", datatypes.Array(*input_dim)), + ("weight", datatypes.Array(*weight_dim)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_convolution( + name="two_input_conv_layer", + kernel_channels=input_channels, + output_channels=kwargs["filters"], + height=kwargs["kernel_size"], + width=kwargs["kernel_size"], + stride_height=kwargs["strides"], + stride_width=kwargs["strides"], + border_mode=kwargs["padding"], + groups=1, + W=None, + b=None, + has_bias=False, + dilation_rate=kwargs["dilation_rate"], + input_name=["input", "weight"], + output_name="output", + ) + + # Assigning everything to ones should cover the execution path + # and engine failures, but is not a complete check on numerics. 
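+            # Worked instance of the check below (illustrative note): with valid
+            # padding, no bias, and all-ones input and weights, every output
+            # element is a sum of kernel_size * kernel_size * input_channels
+            # ones, e.g. a 3x3 kernel over the 64 input channels yields
+            # 3 * 3 * 64 = 576 at every spatial position.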
+ out_spatial_size = conv_spatial_size( + input_size, + kwargs["kernel_size"], + kwargs["strides"], + kwargs["dilation_rate"], + kwargs["padding"], + ) + + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + output_dim = ( + kwargs["batch_size"], + kwargs["filters"], + out_spatial_size, + out_spatial_size, + ) + expected = np.ones(output_dim) * ( + kwargs["kernel_size"] * kwargs["kernel_size"] * input_channels + ) + + feed_dict = {"input": input_val, "weight": weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected) + + def test_static_weight_conv2d_stress(self): + options = dict( + padding=["valid"], + filters=[1, 2, 5], + kernel_size=[1, 3, 4], # square kernels + strides=[1, 2], + dilation_rate=[1, 2], + batch_size=[1, 32, 512], + ) + + input_size = 64 + input_channels = 64 + input_dim = [1, input_channels, input_size, input_size] + + def conv_spatial_size(image_size, kernel_size, stride, dilation, padding): + if padding == "valid": + kernel_size_dilated = (kernel_size - 1) * dilation + 1 + return (image_size - kernel_size_dilated) // stride + 1 + elif padding == "same": + return int(math.ceil(image_size * 1.0 / stride)) + else: + return 0 + + for x in itertools.product(*options.values()): + kwargs = dict(zip(options.keys(), x)) + if kwargs["strides"] > 1 and kwargs["dilation_rate"] > 1: + continue + # weight layout: (output_channels, kernel_channels, height, width) + weight_dim = ( + kwargs["filters"], + input_channels, + kwargs["kernel_size"], + kwargs["kernel_size"], + ) + + input_dim[0] = kwargs["batch_size"] + input_features = [("input", datatypes.Array(*input_dim))] + # ('weight', datatypes.Array(*weight_dim))] + output_features = [("output", None)] + + input_weight = np.ones(weight_dim) + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_convolution( + name="two_input_conv_layer", + kernel_channels=input_channels, + output_channels=kwargs["filters"], + height=kwargs["kernel_size"], + width=kwargs["kernel_size"], + stride_height=kwargs["strides"], + stride_width=kwargs["strides"], + border_mode=kwargs["padding"], + groups=1, + W=input_weight, + b=None, + has_bias=False, + dilation_factors=[kwargs["dilation_rate"]] * 2, + input_name=["input"], + output_name="output", + ) + + # Assigning everything to ones should cover the execution path + # and engine failures, but is not a complete check on numerics. 
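+            # Worked size check for conv_spatial_size above (illustrative note):
+            # with valid padding the dilated kernel spans (k - 1) * d + 1 pixels,
+            # so k=3 with d=2 on this 64-pixel input at stride 2 gives
+            # (64 - 5) // 2 + 1 = 30 output positions per spatial axis.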
+ out_spatial_size = conv_spatial_size( + input_size, + kwargs["kernel_size"], + kwargs["strides"], + kwargs["dilation_rate"], + kwargs["padding"], + ) + + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + output_dim = ( + kwargs["batch_size"], + kwargs["filters"], + out_spatial_size, + out_spatial_size, + ) + expected = np.ones(output_dim) * ( + kwargs["kernel_size"] * kwargs["kernel_size"] * input_channels + ) + + feed_dict = {"input": input_val} # , 'weight': weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected) + + def test_power_iteration_cpu(self): + + convergence_tolerance = 1e-8 + number_of_iterations = 200 + + input_features = [ + ("matrix", datatypes.Array(*(2, 2))), + ("starting_vector", datatypes.Array(*(2,))), + ] + + output_features = [ + ("maximum_eigen_value", datatypes.Array(*(1, 1))), + ("eigen_vector", None), + ("iteration_count", datatypes.Array(*(1,))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_expand_dims("expand_dims", "starting_vector", "x", axes=[-1]) + builder.add_load_constant_nd( + "iteration_count", + "iteration_count", + constant_value=np.zeros((1,)), + shape=(1,), + ) + + loop_layer = builder.add_loop("loop", max_iterations=number_of_iterations) + loop_body_builder = neural_network.NeuralNetworkBuilder( + nn_spec=loop_layer.loop.bodyNetwork + ) + # output shape: (n,1) + loop_body_builder.add_batched_mat_mul( + "bmm.1", input_names=["matrix", "x"], output_name="y" + ) + loop_body_builder.add_reduce_l2( + "reduce", input_name="y", output_name="norm", axes=[0] + ) + loop_body_builder.add_divide_broadcastable( + "divide", ["y", "norm"], "y_normalized" + ) + # find diff: 1- abs(cosine) + loop_body_builder.add_batched_mat_mul( + "cosine", ["y_normalized", "x"], "cosine_diff", transpose_a=True + ) + loop_body_builder.add_squeeze( + "squeeze_all", "cosine_diff", "cosine_diff_squeeze", squeeze_all=True + ) + loop_body_builder.add_unary( + "abs_cosine", "cosine_diff_squeeze", "abs_cosine_diff", mode="abs" + ) + loop_body_builder.add_activation( + "diff", + non_linearity="LINEAR", + input_name="abs_cosine_diff", + output_name="diff", + params=[-1, 1], + ) + + # update iteration count + loop_body_builder.add_activation( + "iteration_count_add", + non_linearity="LINEAR", + input_name="iteration_count", + output_name="iteration_count_plus_1", + params=[1, 1], + ) + loop_body_builder.add_copy( + "iteration_count_copy", "iteration_count_plus_1", "iteration_count" + ) + + # update 'x' + loop_body_builder.add_copy("update_x", "y_normalized", "x") + + # add condition to break from the loop, if convergence criterion is met + loop_body_builder.add_less_than( + "cond", ["diff"], "cond", alpha=convergence_tolerance + ) + branch_layer = loop_body_builder.add_branch("branch_layer", "cond") + builder_ifbranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.ifBranch + ) + builder_ifbranch.add_loop_break("break") + + # now we are out of the loop, compute the eigen value + builder.add_batched_mat_mul( + "bmm.2", input_names=["matrix", "x"], output_name="x_right" + ) + builder.add_batched_mat_mul( + "bmm.3", + input_names=["x", "x_right"], + output_name="maximum_eigen_value", + transpose_a=True, + ) + builder.add_squeeze("squeeze", "x", "eigen_vector", squeeze_all=True) + + # make input sizes flexible + spec = builder.spec + + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, 
feature_name="matrix", enumerated_shapes=[(3, 3), (4, 4)] + ) + + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, feature_name="starting_vector", enumerated_shapes=[(3,), (4,)] + ) + + from numpy import linalg as LA + + # try on 3x3 matrix + A = np.array([[2, -6, 8], [-6, 4, 5], [8, 5, 3]], dtype=np.float32) + starting_vector = np.random.rand(3) + starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2)) + + e, v = LA.eig(A) + idx = np.argmax(abs(e)) + input = {"starting_vector": starting_vector, "matrix": A.astype(np.float32)} + expected = {"maximum_eigen_value": np.array([[e[idx]]])} + self._test_model(spec, input, expected, useCPUOnly=True) + + # try on 2x2 matrix + A = np.array([[4, -5], [-5, 3]], dtype=np.float32) + starting_vector = np.random.rand(2) + starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2)) + + e, v = LA.eig(A) + idx = np.argmax(abs(e)) + + input = {"starting_vector": starting_vector, "matrix": A.astype(np.float32)} + expected = {"maximum_eigen_value": np.array([[e[idx]]])} + self._test_model(spec, input, expected, useCPUOnly=True) + + +@unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 11.0+ required. Skipping tests.", +) +class IOS14SingleLayerTests(CorrectnessTest): + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_onehot_layer_cpu(self, cpu_only=True): + ctr = 0 + params_dict = dict( + input_rank=[1, 2, 3, 4], + negative_axis=[True, False], + depth=[30], + on_value=[30.0], + off_value=[-4.0], + ) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + input_rank = param["input_rank"] + vectorSize = param["depth"] + on_value = param["on_value"] + off_value = param["off_value"] + + for axis in range(input_rank + 1): + ctr += 1 + if param["negative_axis"]: + axis_param = axis - (input_rank + 1) + else: + axis_param = axis + + input_shape = np.random.randint(1, 10, size=(input_rank,)) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_one_hot( + "one_hot", + ["data"], + "output", + one_hot_vector_size=vectorSize, + axis=axis_param, + on_value=on_value, + off_value=off_value, + ) + + x = np.random.randint(0, vectorSize, size=input_shape) + # x[::4] -= vectorSize # [To do] Need to Handle this case. 
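+                # Context for the disabled line above (illustrative note):
+                # tf.one_hot documents that an index outside [0, depth) produces a
+                # vector of all off_value, so enabling it would also require
+                # settling how the Core ML one_hot layer treats negative indices.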
+ + # TF seems to have a bug with axis < -1 + if axis_param < -1: + axis_param += input_rank + 1 + tf_op = tf.one_hot( + x, + axis=axis_param, + depth=vectorSize, + on_value=on_value, + off_value=off_value, + ) + expected = {"output": tf_op.numpy()} + + input = {"data": x.astype(np.float32)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_batched_mat_mul_dynamic_quantization_cpu(self, cpu_only=True): + X1 = 11 + X2 = 23 + W = np.random.rand(X1, X2) * 20 - 10 # uniform between [-10, 10] + b = np.random.rand(X2) * 20 - 10 + input_shapes = [ + (X1,), + (5, X1), + (2, 3, X1), + (4, 1, X1), + ] # , (12, 5, 8, X1), (2, 3, 1, 5, X1)] + + W_max = max(np.abs(np.min(W)), np.abs(np.max(W))) + W_normalized = W / W_max # [-1,1] + W_quantized_int8 = 127.0 * W_normalized # [-127, 127] + W_quantized_int8 = W_quantized_int8.astype(np.int8) + quant_scale = W_max / 127.0 + + for input_shape in input_shapes: + x = np.random.rand(*input_shape) * 10 + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + for has_bias in [True, False]: + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_batched_mat_mul( + name="batched_mat_mul", + input_names=["data"], + output_name="output", + weight_matrix_rows=X1, + weight_matrix_columns=X2, + int_8_dynamic_quantize=True, + is_quantized_weight=True, + quantization_type="linear", + nbits=8, + W=W_quantized_int8.tobytes(), + bias=b if has_bias else None, + quant_scale=np.array([quant_scale]), + ) + inputs = {"data": x} + expected = { + "output": np.matmul( + x, W_quantized_int8.astype(np.float32) * quant_scale + ) + + (b if has_bias else np.zeros(X2)) + } + self._test_model( + builder.spec, + inputs, + expected, + useCPUOnly=cpu_only, + test_metric="SNR", + SNR=40, + ) + + def test_batched_mat_mul_dynamic_quantization_gpu(self): + self.test_batched_mat_mul_dynamic_quantization_cpu(cpu_only=False) + + def test_inner_product_dynamic_quantization_cpu(self, cpu_only=True): + Xin = 24 + Xout = 23 + W = np.random.rand(Xout, Xin) + b = np.random.rand(Xout) + # For rank 4 and 5, the product of the last 3 dimensions must equal Xin + input_shapes = [ + (Xin,), + (5, Xin), + (2, 3, Xin), + (4, 1, Xin), + (5, 2, 3, 4), + (5, 6, 2, 3, 4), + ] + + W_max = max(np.abs(np.min(W)), np.abs(np.max(W))) + W_normalized = W / W_max # [-1,1] + W_quantized_int8 = 127.0 * W_normalized # [-127, 127] + W_quantized_int8 = W_quantized_int8.astype(np.int8) + quant_scale = W_max / 127.0 + + for input_shape in input_shapes: + rank = len(input_shape) + x = np.random.rand(*input_shape) * 5 + + W_for_numpy = W_quantized_int8.astype(np.float32) * quant_scale + for has_bias in [True, False]: + bias = b if has_bias else np.zeros(Xout) + if rank == 1 or rank == 2 or rank == 3: + np_out = np.matmul(x, np.transpose(W_for_numpy)) + bias + expected = {"output": np_out} + elif rank == 4: + x_shaped = np.reshape(x, (x.shape[0], np.prod(x.shape[1:]))) + np_out = np.matmul(x_shaped, np.transpose(W_for_numpy)) + bias + expected = {"output": np.reshape(np_out, np_out.shape + (1, 1))} + elif rank == 5: + x_shaped = np.reshape(x, x.shape[0:2] + (np.prod(x.shape[2:]),)) + np_out = np.matmul(x_shaped, np.transpose(W_for_numpy)) + bias + expected = { + "output": np.reshape( + np_out, x.shape[0:2] + (np_out.shape[-1],) + (1, 1) + ) + } + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = 
neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_inner_product( + name="ip", + W=W_quantized_int8.tobytes(), + b=b if has_bias else None, + input_channels=Xin, + output_channels=Xout, + has_bias=has_bias, + input_name="data", + output_name="output", + int_8_dynamic_quantize=True, + is_quantized_weight=True, + quantization_type="linear", + nbits=8, + quant_scale=np.array([quant_scale]), + ) + inputs = {"data": x} + self._test_model( + builder.spec, + inputs, + expected, + useCPUOnly=cpu_only, + test_metric="SNR", + SNR=40, + ) + + def test_inner_product_dynamic_quantization_gpu(self): + self.test_inner_product_dynamic_quantization_cpu(cpu_only=False) + + def test_onehot_layer_gpu(self): + self.test_onehot_layer_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_cumsum_layer_cpu(self, cpu_only=True): + ctr = 0 + params_dict = dict( + rank=[1, 2, 3, 4, 5], + exclusive=[False, True], + reverse=[False, True], + n_inputs=[1, 2], + ) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + rank = param["rank"] + exclusive = param["exclusive"] + reverse = param["reverse"] + n_inputs = param["n_inputs"] + + for axis in range(rank): + ctr += 1 + if np.random.rand(1) > 0.5: + axis_param = axis + else: + axis_param = axis - rank + + input_shape = np.random.randint(1, 10, size=(rank,)) + + input_features = [("data", datatypes.Array(*input_shape))] + if n_inputs == 2: + input_features.append(("axis", datatypes.Array(1,))) + + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + if n_inputs == 1: + builder.add_cumsum( + "cumsum", + ["data"], + "output", + axis=axis_param, + reverse=reverse, + exclusive=exclusive, + ) + else: + builder.add_cumsum( + "cumsum", + ["data", "axis"], + "output", + reverse=reverse, + exclusive=exclusive, + ) + + x = np.random.rand(*input_shape) + + tf_op = tf.cumsum( + x, axis=axis_param, exclusive=exclusive, reverse=reverse + ) + expected = {"output": tf_op.numpy()} + + input = {"data": x} + if n_inputs == 2: + input["axis"] = axis_param * np.ones((1,), dtype=np.float32) + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_cumsum_layer_gpu(self): + self.test_cumsum_layer_cpu(cpu_only=False) + + def test_clamped_relu_cpu(self, cpu_only=True): + + params_dict = dict(alpha=[0.0, 2.0, -3.0], beta=[7.0, -8.0]) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + alpha = param["alpha"] + beta = param["beta"] + input_shape = [40] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_clamped_relu( + "clamped_relu", "data", "output", alpha=alpha, beta=beta + ) + + x = np.arange(-20, 20, dtype=np.float32) + input = {"data": x} + expected = {"output": np.minimum(beta, np.where(x >= 0, x, x * alpha))} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_clamped_relu_gpu(self): + self.test_clamped_relu_cpu(cpu_only=False) + + def _test_pool3d(self, cpu_only): + pool_types = ("MAX", "AVERAGE") + # Defining shapes as (batch, channel, depth, height, width) + shapes = ((1, 1, 1, 2, 2), (1, 
1, 3, 3, 3), (3, 4, 10, 17, 90)) + # Defining kernels and strides as (depth, height, width) + kernels = ((2, 2, 2), (1, 3, 4), (2, 3, 4), (5, 1, 6), (8, 9, 1), (7, 11, 13)) + strides = ((1, 1, 1), (1, 2, 3), (2, 3, 2), (4, 1, 2), (3, 4, 1), (7, 11, 13)) + # Defining paddings as (left, right, top, bottom, front, back) + # This is backwards from how we define shapes, kernels, and strides, + # but it better matches pytorch, making the creation of pytorch layers + # much easier. + paddings = ( + ("CUSTOM", (0, 0, 0, 0, 0, 0)), + ("CUSTOM", (2, 2, 2, 2, 2, 2)), + ("CUSTOM", (5, 6, 3, 4, 2, 2)), + # VALID and SAME padding must have custom paddings unset or set to zero. + ("VALID", (0, 0, 0, 0, 0, 0)), + ("SAME", (0, 0, 0, 0, 0, 0)), + ) + + # Structure to collect failures so + # we can run all tests, even if one fails. + # This should be able to go away when we can parameterize + # our tests: Enable parameterized tests in test_numpy_nn_layers.py + failures = [] + num_successes = 0 + num_skipped = 0 + + for pool_type in pool_types: + for shape in shapes: + for kernel in kernels: + for stride in strides: + for padding in paddings: + for average_pooling_count_excludes_padding in (False, True): + result = self._test_pool3d_single_case( + cpu_only, + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ) + if type(result) is str: + failures.append(result) + elif result: + num_successes += 1 + else: + num_skipped += 1 + self.assertEqual( + len(failures), + 0, + "Got %s successes, %s skipped, %s failures: %s" + % (num_successes, num_skipped, len(failures), failures), + ) + + def _test_pool3d_single_case( + self, + cpu_only, + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ): + """ + + Args: + cpu_only: + pool_type: + shape: + kernel: + stride: + padding: + average_pooling_count_excludes_padding: + + Returns: True if success, False if skipped, Str if error + + """ + test_case = ( + "Test case:: pool_type: %s, shape: %s, kernel: %s, stride: %s, padding: %s, average_pooling_count_excludes_padding: %s" + % ( + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ) + ) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + padding_mode = padding[0] + padding_values = padding[1] + builder.add_pooling3d( + name="pooling3d", + input_name="data", + output_name="output", + pooling_type=pool_type, + kernel_depth=kernel[0], + kernel_height=kernel[1], + kernel_width=kernel[2], + stride_depth=stride[0], + stride_height=stride[1], + stride_width=stride[2], + padding_mode=padding_mode, + custom_padding_front=padding_values[4], + custom_padding_back=padding_values[5], + custom_padding_top=padding_values[2], + custom_padding_bottom=padding_values[3], + custom_padding_left=padding_values[0], + custom_padding_right=padding_values[1], + average_pooling_count_excludes_padding=average_pooling_count_excludes_padding, + ) + + # Expected output + input = np.random.rand(*shape) + torch_input = torch.from_numpy(np.reshape(input, shape)) + + # Padding + if padding_mode == "CUSTOM": + torch_padding = torch.nn.ConstantPad3d(padding_values, 0) + elif padding_mode == "VALID": + torch_padding = torch.nn.ConstantPad3d(0, 0) + elif padding_mode == "SAME": + padding_list = [] + # torch.nn.ConstantPad3d wants (left, right, top, bottom, front, back) + 
# but our shape, kernel, and stride are (depth, height, width). + total_paddings = aggregated_pad( + pad_type=padding_mode.lower(), + kernel_shape=kernel, + input_shape=shape[2:], + strides=stride, + ) + total_paddings.reverse() + for p in total_paddings: + before = int(math.floor(float(p) / 2.0)) + after = int(math.ceil(float(p) / 2.0)) + padding_list.append(before) + padding_list.append(after) + + torch_padding = torch.nn.ConstantPad3d(tuple(padding_list), 0) + padding_values = padding_list[:] + else: + assert False + + # Validate output shape + for i in range(3): + try: + IOS14SingleLayerTests._validate_pooling_dimension( + shape[i + 2], + kernel[i], + stride[i], + padding_values[6 - i - 2], + padding_values[6 - i - 1], + ) + except ValueError: + return False + + # Pooling type + # Average pooling + if pool_type == "AVERAGE": + # torch.nn.AvgPool3d only accepts a single integer for padding, so we normally + # create a pooling layer first which allows us to fully specify the + # before and after padding in all three dimensions. + # + # However, when we use a padding layer, torch.nn.AvgPool3d doesn't + # know what is padding and what isn't, which means that its + # `count_include_pad` parameter has no effect. + # + # Therefore, we can only test average_pooling_count_excludes_padding=True + # when padding is homogeneous. + is_padding_homogeneous = all(p == padding_values[0] for p in padding_values) + if average_pooling_count_excludes_padding: + if not is_padding_homogeneous: + return False + else: + # padding is homogeneous + torch_model = torch.nn.AvgPool3d( + kernel, + stride=stride, + padding=padding_values[0], + count_include_pad=not average_pooling_count_excludes_padding, + ) + else: + # average_pooling_count_excludes_padding == False + torch_pool = torch.nn.AvgPool3d( + kernel, + stride=stride, + count_include_pad=not average_pooling_count_excludes_padding, + ) + torch_model = torch.nn.Sequential(torch_padding, torch_pool) + # Max pooling + else: + torch_pool = torch.nn.MaxPool3d(kernel, stride=stride) + torch_model = torch.nn.Sequential(torch_padding, torch_pool) + + try: + expected = torch_model(torch_input).numpy() + self._test_model( + builder.spec, {"data": input}, {"output": expected}, useCPUOnly=cpu_only + ) + return True + except AssertionError as e: + print(e) + return "test_case: %s, error: %s" % (test_case, e) + + @staticmethod + def _validate_pooling_dimension( + input_size, kernel_size, stride, start_padding, end_padding + ): + # https://adeshpande3.github.io/A-Beginner%27s-Guide-To-Understanding-Convolutional-Neural-Networks-Part-2/ + output_size = ( + input_size + start_padding + end_padding - kernel_size + ) / stride + 1 + if output_size < 1: + raise ValueError( + "Dimension with input_size: %s, kernel_size: %s, stride: %s, start_padding: %s, end_padding: %s " + "has output size of %s, but must be >= 1" + % ( + input_size, + kernel_size, + stride, + start_padding, + end_padding, + output_size, + ) + ) + if input_size < kernel_size: + raise ValueError( + "Dimension has input_size (%s) less than kernel_size (%s)" + % (input_size, kernel_size) + ) + if (start_padding + end_padding) / 2 >= kernel_size / 2: + raise ValueError( + "The average of the start (%s) and end (%s) padding must be less than half the kernel size (%s / 2 = %s)" + % (start_padding, end_padding, kernel_size, kernel_size / 2) + ) + + def test_pool3d_cpu(self): + self._test_pool3d(cpu_only=True) + + def test_pool3d_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old 
nnv1 test are failing on M1 machine when running on ANE)") + self._test_pool3d(cpu_only=False) + + def _test_global_pool3d(self, cpu_only): + shapes = ((1, 1, 1, 2, 2), (1, 1, 3, 3, 3), (3, 4, 10, 17, 90)) + pool_types = ("MAX", "AVERAGE") + + for shape in shapes: + for pool_type in pool_types: + test_case = "test_case:: shape: %s, pool_type: %s" % (shape, pool_type) + print(test_case) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_global_pooling3d( + name="pooling3d", + input_name="data", + output_name="output", + pooling_type=pool_type, + ) + input = np.random.rand(*shape) + + # Expected output from Torch + torch_input = torch.from_numpy(np.reshape(input, shape)) + if pool_type == "AVERAGE": + torch_pool = torch.nn.AvgPool3d(shape[-3:]) + else: + torch_pool = torch.nn.MaxPool3d(shape[-3:]) + expected = torch_pool(torch_input).numpy() + + self._test_model( + builder.spec, + {"data": input}, + {"output": expected}, + useCPUOnly=cpu_only, + ) + + def test_global_pool3d_cpu(self): + self._test_global_pool3d(cpu_only=True) + + def test_global_pool3d_gpu(self): + self._test_global_pool3d(cpu_only=False) + + def test_argsort_cpu(self, cpu_only=True): + + shapes = [(4,), (3, 4), (2, 5, 6), (3, 5, 2, 4), (4, 5, 3, 6, 7)] + + for shape in shapes: + for descending in [False, True]: + for axis in range(len(shape)): + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + builder.add_argsort( + "argsort", "data", "output", axis=axis, descending=descending + ) + + x = np.random.rand(*shape) + if descending: + expected = {"output": np.argsort(-x, axis)} + else: + expected = {"output": np.argsort(x, axis)} + + input = {"data": x} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_argsort_gpu(self): + self.test_argsort_cpu(cpu_only=False) + + def test_upsample_pytorch_cpu(self): + self.upsample_pytorch_test_iter(np.arange(1, 4), True) + self.upsample_pytorch_test_iter(np.arange(1.0, 3.0, 0.66), True) + + def test_upsample_pytorch_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.upsample_pytorch_test_iter(np.arange(1, 4), False) + self.upsample_pytorch_test_iter(np.arange(1.0, 3.0, 0.66), False) + + def upsample_pytorch_test_iter(self, scale_range, cpu_only): + for align_corners in [False, True]: + for scale_h in scale_range: + for scale_w in scale_range: + for input_h in range(2, 6): + for input_w in range(2, 6): + self.upsample_pytorch_test( + input_h, + input_w, + scale_h, + scale_w, + align_corners, + cpu_only, + ) + + def upsample_pytorch_test(self, h, w, scale_h, scale_w, align_corners, cpu_only): + input_dim = (1, 1, h, w) + if align_corners: + linear_upsample_mode = "ALIGN_CORNERS_TRUE" + else: + linear_upsample_mode = "ALIGN_CORNERS_FALSE" + + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_upsample( + name="upsample", + scaling_factor_h=scale_h, + scaling_factor_w=scale_w, + linear_upsample_mode=linear_upsample_mode, + 
input_name="data", + output_name="output", + mode="BILINEAR", + ) + + input_tensor = np.reshape(np.arange(1.0, 1.0 + (h * w), 1.0), input_dim) + input = {"data": input_tensor} + + # Get result from PyTorch + x = torch.from_numpy(np.reshape(input_tensor, (1, 1, h, w))) + pytorch_output = torch.nn.functional.interpolate( + x, + scale_factor=(scale_h, scale_w), + mode="bilinear", + align_corners=align_corners, + recompute_scale_factor=True, + ) + + # Expect PyTorch output matches CoreML output + expected = {"output": pytorch_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_slice_by_size_cpu(self, cpu_only=True): + + shapes = [(4,), (3, 4), (2, 5, 6), (3, 5, 2, 4), (4, 5, 3, 6, 7)] + + for shape in shapes: + for axis in range(len(shape)): + begin = np.random.randint(shape[axis]) + begin_input = np.array([begin]).astype(np.float32) + size = np.random.randint(shape[axis] - begin) + 1 + + x = np.random.rand(*shape) + slices = [] + for i in range(len(shape)): + if i != axis: + slices.append(slice(None, None, None)) + else: + slices.append(slice(begin, begin + size, 1)) + slices = tuple(slices) + expected = {"output": x[slices]} + + input_features = [ + ("data", datatypes.Array(*shape)), + ("begin", datatypes.Array(1)), + ] + output_features = [("output", datatypes.Array(*x[slices].shape))] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_slice_by_size( + "slice_by_size", ["data", "begin"], "output", axis=axis, size=size + ) + + input = {"data": x, "begin": begin_input} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def _test_conv3d(self, cpu_only, full_test): + # Input shape defined by us and PyTorch as [batch, channels, depth, height, width] + input_shapes = [ + [1, 3, 3, 8, 8], + [1, 1, 3, 8, 8], + [1, 7, 8, 15, 63], + [4, 32, 8, 16, 16], + ] + # Large enough kernels and/or input causes int overflow and seg fault: see rdar://60309763 + kernels = [[3, 3, 3], [2, 2, 2]] + strides = [[1, 1, 1], [2, 2, 2]] + dilations = [[1, 1, 1], [2, 2, 2]] + has_biases = [True, False] + # Note: PyTorch's `torch.nn.Conv3d` doesn't support these padding modes, just a single + # padding value (for all dimensions) or 3 values (for each dimension) + padding_modes = ["custom", "valid", "same"] + # Padding shape is front, back, top, bottom, left, right + paddings = [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]] + + # Add some additional test cases if `full_test` is True + if full_test: + input_shapes.extend([[1, 4, 3, 128, 128]]) + kernels.extend([[1, 2, 3], [5, 5, 5]]) + strides.extend([[1, 2, 3]]) + dilations.extend([[1, 2, 3]]) + paddings.extend([[2, 0, 2, 0, 2, 0], [0, 1, 2, 3, 4, 5]]) + + test_case_format_str = ( + "Conv3d test case | Input shape: {}, Output channels: {}, Groups: {}, Kernel shape: {}," + " Stride: {}, Padding: {}, Padding mode: {}, Dilation: {}, Has bias: {}" + ) + + for in_shape in input_shapes: + # Test "normal" and depthwise convolution with corresponding groups and output channels + groups_outchannels = [(1, 2), (in_shape[1], 2 * in_shape[1])] + for kernel in kernels: + for has_bias in has_biases: + for stride in strides: + for dilation in dilations: + for padding_mode in padding_modes: + # For all modes besides 'custom', the padding values are ignored + if padding_mode == "custom": + loop_paddings = paddings + else: + loop_paddings = [[0, 0, 0, 0, 0, 0]] + for padding in 
loop_paddings: + for groups, output_channels in groups_outchannels: + # Dilated kernel shape = (K - 1) * D + 1 + dilated_kernel = list( + map( + lambda k, d: (k - 1) * d + 1, + kernel, + dilation, + ) + ) + + # Use paddings if padding_mode is "custom", else compute + # them according to + # https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-convolutional-neural-networks#filter + if padding_mode == "same": + pad_d = max( + 0, + ( + stride[0] + * math.ceil( + in_shape[2] / float(stride[0]) + ) + - in_shape[2] + + dilated_kernel[0] + - stride[0] + ) + / 2.0, + ) + pad_h = max( + 0, + ( + stride[1] + * math.ceil( + in_shape[3] / float(stride[1]) + ) + - in_shape[3] + + dilated_kernel[1] + - stride[1] + ) + / 2.0, + ) + pad_w = max( + 0, + ( + stride[2] + * math.ceil( + in_shape[4] / float(stride[2]) + ) + - in_shape[4] + + dilated_kernel[2] + - stride[2] + ) + / 2.0, + ) + + # Depth + padding[0] = int(math.floor(pad_d)) + padding[1] = int(math.ceil(pad_d)) + # Height + padding[2] = int(math.floor(pad_h)) + padding[3] = int(math.ceil(pad_h)) + # Width + padding[4] = int(math.floor(pad_w)) + padding[5] = int(math.ceil(pad_w)) + elif padding_mode == "valid": + # Set to zero for PyTorch padding + padding = [0] * 6 + elif padding_mode == "custom": + # No-op: valid ignores padding and custom uses the + # specified padding + pass + + input_features = [ + ("data", datatypes.Array(*in_shape)) + ] + output_features = [("output", None)] + input_channels = in_shape[1] + # [output_channels, kernel_channels, depth, height, width] + weights_shape = [ + output_channels, + int(input_channels / groups), + kernel[0], + kernel[1], + kernel[2], + ] + + # Init random input + input_tensor = np.random.normal(size=in_shape) + input_torch = torch.tensor(input_tensor) + # Init random weights + weights_tensor = np.random.normal( + size=weights_shape + ) + weights_torch = torch.DoubleTensor( + weights_tensor + ) + # Init random bias if applicable + if has_bias: + bias_tensor = np.random.normal( + size=output_channels + ) + bias_torch = torch.DoubleTensor(bias_tensor) + else: + bias_tensor = None + bias_torch = None + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + builder.add_convolution3d( + name="conv3d", + input_channels=input_channels, + output_channels=output_channels, + depth=kernel[0], + height=kernel[1], + width=kernel[2], + W=weights_tensor, + b=bias_tensor, + has_bias=has_bias, + groups=groups, + stride_depth=stride[0], + stride_height=stride[1], + stride_width=stride[2], + dilation_depth=dilation[0], + dilation_height=dilation[1], + dilation_width=dilation[2], + padding_mode=padding_mode, + padding_front=padding[0], + padding_back=padding[1], + padding_top=padding[2], + padding_bottom=padding[3], + padding_left=padding[4], + padding_right=padding[5], + input_name="data", + output_name="output", + ) + + # Get PyTorch output to compare ours to + # First pad, since PyTorch Conv3d only supports custom and + # same symmetric padding. Padding shape is + # (left, right, top, bottom, front, back) + padded_input = input_torch + if any(p > 0 for p in padding): + torch_padding = ( + padding[4], + padding[5], + padding[2], + padding[3], + padding[0], + padding[1], + ) + pad_layer = torch.nn.ConstantPad3d( + torch_padding, 0 + ) + padded_input = pad_layer(input_torch) + # Check if dilated kernel size exceeds padded input size in + # any dimension. 
If it does, it's not a valid convolution + if ( + dilated_kernel[0] > padded_input.shape[2] + or dilated_kernel[1] > padded_input.shape[3] + or dilated_kernel[2] > padded_input.shape[4] + ): + print( + "SKIPPING: Dilated kernel exceeds padded input." + ) + continue + # Using Sequential with a padding layer first produces + # incorrect convolution output + model = torch.nn.Sequential( + torch.nn.Conv3d( + input_channels, + output_channels, + kernel, + stride=stride, + padding=0, + dilation=dilation, + groups=groups, + bias=False, + ) + ) + with torch.no_grad(): + model[0].weight = torch.nn.Parameter( + weights_torch + ) + if has_bias: + model[0].bias = torch.nn.Parameter( + bias_torch + ) + torch_expected = model(padded_input) + + test_case = test_case_format_str.format( + in_shape, + output_channels, + groups, + weights_shape, + stride, + padding, + padding_mode, + dilation, + has_bias, + ) + try: + self._test_model( + builder.spec, + {"data": input_tensor}, + { + "output": torch_expected.detach().numpy() + }, + useCPUOnly=cpu_only, + test_metric="SNR", + SNR=40, + validate_shapes_only=False, + ) + except AssertionError as e: + print(test_case) + raise + + def test_conv3d_cpu_basic(self): + self._test_conv3d(cpu_only=True, full_test=False) + + @pytest.mark.slow + def test_conv3d_cpu_slow(self): + self._test_conv3d(cpu_only=True, full_test=True) + + def test_conv3d_gpu_basic(self): + self._test_conv3d(cpu_only=False, full_test=False) + + @pytest.mark.slow + def test_conv3d_gpu_slow(self): + self._test_conv3d(cpu_only=False, full_test=True) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_11_0_MACOS_VERSION, + "Only supported on macOS 10.16+", +) +class TestReorganizeDataTests(CorrectnessTest): + def _to_rank_4(self, x): + from_rank = len(x.shape) + if from_rank == 3: + return np.reshape(x, [1] + list(x.shape)) + elif from_rank == 4: + return x + elif from_rank == 5: + return np.squeeze(x, axis=0) + + def _from_rank_4(self, x, to_rank): + if to_rank == 3: + return np.squeeze(x, axis=0) + elif to_rank == 4: + return x + elif to_rank == 5: + return np.reshape(x, [1] + list(x.shape)) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_depth_to_space_cpu(self, cpu_only=True): + + params_dict = { + "block_size": [2, 3, 4], + "channels_div_bsq": [1, 2, 3, 7], + "spatial": [[2, 3], [4, 4], [1, 1]], + "batch_size": [None, 1, 2], + "seq_length": [None, 1], + } + params_product = list(itertools.product(*params_dict.values())) + for param in params_product: + param = dict(zip(params_dict.keys(), param)) + # Create input based on params + block_size = param["block_size"] + bsq = block_size * block_size + input_shape = [bsq * param["channels_div_bsq"]] + param["spatial"] + if param["batch_size"] is not None: + input_shape = [param["batch_size"]] + input_shape + if param["seq_length"] is not None: + input_shape = [param["seq_length"]] + input_shape + rank = len(input_shape) + x = np.random.random(input_shape) + input = {"data": x} + + # Set up network + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_reorganize_data( + "reorganize_data", + "data", + "output", + mode="DEPTH_TO_SPACE", + block_size=block_size, + ) + + # Run tensorflow to calculate expected values + # TensorFlow requires rank 4, NHWC order on CPU + x_tf = self._to_rank_4(x).transpose(0, 2, 3, 1) + out_tf = 
tf.nn.depth_to_space(x_tf, block_size, data_format="NHWC").numpy() + out = self._from_rank_4(out_tf.transpose(0, 3, 1, 2), to_rank=rank) + expected = {"output": out} + + # Run model to calculate CoreML values and compare with expected + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_depth_to_space_gpu(self): + self.test_depth_to_space_cpu(cpu_only=False) + + @unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 11.0+ required. Skipping tests.", + ) + def test_pixel_shuffle_cpu(self, cpu_only=True): + + params_dict = { + "block_size": [2, 3, 4], + "channels_div_bsq": [1, 2, 3, 7], + "spatial": [[2, 3], [4, 4], [1, 1]], + "batch_size": [None, 1, 2], + "seq_length": [None, 1], + } + params_product = list(itertools.product(*params_dict.values())) + for param in params_product: + param = dict(zip(params_dict.keys(), param)) + # Create input based on params + block_size = param["block_size"] + bsq = block_size * block_size + input_shape = [bsq * param["channels_div_bsq"]] + param["spatial"] + if param["batch_size"] is not None: + input_shape = [param["batch_size"]] + input_shape + if param["seq_length"] is not None: + input_shape = [param["seq_length"]] + input_shape + rank = len(input_shape) + x = np.random.random(input_shape) + input = {"data": x} + + # Set up network + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_reorganize_data( + "reorganize_data", + "data", + "output", + mode="PIXEL_SHUFFLE", + block_size=block_size, + ) + + # Run pytorch to calculate expected values + x_torch = torch.from_numpy(self._to_rank_4(x)) + out_torch = torch.pixel_shuffle(x_torch, upscale_factor=block_size) + out = self._from_rank_4(out_torch.numpy(), to_rank=rank) + expected = {"output": out} + + # Run model to calculate CoreML values and compare with expected + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + @unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 10.16+ required. Skipping tests.", + ) + def test_pixel_shuffle_gpu(self): + self.test_pixel_shuffle_cpu(cpu_only=False) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py new file mode 100644 index 00000000..ff589914 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py @@ -0,0 +1,562 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Module containing unit tests for verifying various quantizations. 
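+Prediction agreement before and after quantization is checked as a
+signal-to-noise ratio in decibels, snr = 10 * log10(signal_energy / noise_var),
+as computed in DynamicQuantizedInt8Int8MatMul._test_predictions.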
+""" + +import unittest + +import numpy as np +import pytest + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools.models import (_QUANTIZATION_MODE_LINEAR_QUANTIZATION, + neural_network) +from coremltools.models.neural_network import quantization_utils +from coremltools.models.neural_network.quantization_utils import ( + MatrixMultiplyLayerSelector, _quantize_spec_weights, + activate_int8_int8_matrix_multiplications) + + +@unittest.skipIf( + not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 16), + "Missing macOS 10.16+. Skipping tests.", +) +class DynamicQuantizedInt8Int8MatMul(unittest.TestCase): + """ + Quantization tests for dynamic Int8 - Int8 matrix multiplications + """ + + def initialize(self): + np.random.seed(1988) + self.Cout, self.Cin = 16, 32 + self.W = np.random.rand(self.Cout, self.Cin) * 20.0 - 10.0 + self.b = np.random.rand(self.Cout) * 20.0 - 10.0 + self.input_shape = (5, self.Cin) + input_features = [("data", datatypes.Array(*self.input_shape))] + output_features = [("output", None)] + self.builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + self.selector = MatrixMultiplyLayerSelector() + + def _test_predictions( + self, np_preds, coreml_preds, SNR=30, PSNR=40, + ): + + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + + noise = np_preds - coreml_preds + noise_var = np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = np.sum(np_preds ** 2) / len(np_preds) + max_signal_energy = np.amax(np_preds ** 2) + snr = 10 * np.log10(signal_energy / noise_var) + psnr = 10 * np.log10(max_signal_energy / noise_var) + self.assertGreaterEqual(snr, SNR) + self.assertGreaterEqual(psnr, PSNR) + + def compare(self, specification_modified=True): + x = np.random.rand(*self.input_shape) + + def _get_preds(spec): + mlmodel = coremltools.models.MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + return mlmodel.predict({"data": x})["output"] + + preds = _get_preds(self.builder.spec) + self.assertEqual(self.builder.spec.specificationVersion, 4) + + quantized_spec = activate_int8_int8_matrix_multiplications( + self.builder.spec, self.selector + ) + + layer = self.builder.spec.neuralNetwork.layers[0] + layer_type = layer.WhichOneof("layer") + if layer_type == "innerProduct": + matmul_layer = layer.innerProduct + + elif layer_type == "batchedMatmul": + matmul_layer = layer.batchedMatmul + wp = matmul_layer.weights + + if specification_modified: + self.assertEqual(self.builder.spec.specificationVersion, 5) + quant_preds = _get_preds(quantized_spec) + self._test_predictions(preds, quant_preds, SNR=40) + self.assertEqual(len(wp.floatValue), 0) + else: + self.assertEqual(self.builder.spec.specificationVersion, 4) + quant_preds = _get_preds(quantized_spec) + np.testing.assert_array_almost_equal(preds, quant_preds) + self.assertGreater(len(wp.floatValue), 0) + + def test_single_batched_matmul_no_bias(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.compare() + + def test_single_batched_matmul_with_bias(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + bias=self.b, + ) + 
self.compare() + + def test_single_inner_product_no_bias(self): + + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=None, + has_bias=False, + ) + self.compare() + + def test_single_inner_product_with_bias(self): + + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.compare() + + def test_inner_product_min_input_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_input_channels = 31 + self.compare() + + def test_batched_matmul_min_input_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_input_channels = 32 + self.compare() + + def test_inner_product_min_input_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_input_channels = 33 + self.compare(specification_modified=False) + + def test_batched_matmul_min_input_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_input_channels = 33 + self.compare(specification_modified=False) + + def test_batched_matmul_max_input_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_input_channels = 32 + self.compare() + + def test_inner_product_max_input_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_input_channels = 33 + self.compare() + + def test_batched_matmul_max_input_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_input_channels = 31 + self.compare(specification_modified=False) + + def test_inner_product_max_input_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_input_channels = 30 + self.compare(specification_modified=False) + + def test_inner_product_min_output_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + 
input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_output_channels = 16 + self.compare() + + def test_batched_matmul_min_output_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_output_channels = 16 + self.compare() + + def test_inner_product_min_output_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_output_channels = 17 + self.compare(specification_modified=False) + + def test_batched_matmul_min_output_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_output_channels = 17 + self.compare(specification_modified=False) + + def test_batched_matmul_max_output_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_output_channels = 17 + self.compare() + + def test_inner_product_max_output_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_output_channels = 16 + self.compare() + + def test_batched_matmul_max_output_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_output_channels = 14 + self.compare(specification_modified=False) + + def test_inner_product_max_output_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_output_channels = 15 + self.compare(specification_modified=False) + + def test_inner_product_min_weight_count_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_weight_count = 512 + self.compare() + + def test_batched_matmul_min_weight_count_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_weight_count = 513 + self.compare(specification_modified=False) + + def test_inner_product_layer_names_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, 
+ b=self.b, + has_bias=True, + ) + self.selector.include_layers_with_names = ["ip1", "ip2"] + self.compare(specification_modified=False) + + def test_batched_matmul_layer_names_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.include_layers_with_names = ["bm1", "batched_matmul"] + self.compare() + + def test_batched_matmul_8bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 8, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_4bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 4, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_2bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 2, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_1bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 1, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + +class TestQuantizeWeightsAPI: + @staticmethod + @pytest.mark.parametrize( + "compute_units", [ComputeUnit.ALL, ComputeUnit.CPU_AND_GPU, ComputeUnit.CPU_ONLY] + ) + def test_embeddingND_quantize(compute_units): + input_features = [("data", datatypes.Array(10, 1))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="output", + vocab_size=300, + embedding_size=20, + W=np.random.rand(20, 300), + ) + + spec = builder.spec + model_fp32 = coremltools.models.MLModel(spec, compute_units=compute_units) + assert len(spec.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 6000 + + # quantize to FP16 + model_fp16 = quantization_utils.quantize_weights(model_fp32, nbits=16) + assert model_fp16.compute_unit == compute_units + spec_fp16 = model_fp16.get_spec() + assert len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 2 * 6000 + + # quantize to uint8 + model_uint8 = quantization_utils.quantize_weights(model_fp32, nbits=8) + assert model_uint8.compute_unit == compute_units + spec_uint8 = model_uint8.get_spec() + assert len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 0 + assert 
len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.rawValue) == 6000 + + # quantize to uint5 + model_uint5 = quantization_utils.quantize_weights(model_fp32, nbits=5) + assert model_uint5.compute_unit == compute_units + spec_uint5 = model_uint5.get_spec() + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 0 + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.rawValue) == 3750 # 3750 = 5*6000/8 + + @unittest.skipIf(coremltools.utils._macos_version() < (13, 0), + 'ComputeUnit.CPU_AND_NE is only available on macOS >= 13.0' + ) + def test_embeddingND_quantize_CPU_and_NE(self): + self.test_embeddingND_quantize(ComputeUnit.CPU_AND_NE) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py new file mode 100644 index 00000000..49663d48 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py @@ -0,0 +1,53 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit, utils +from coremltools.models import neural_network as neural_network + + +class TestNeuralNetworkPrediction: + + @staticmethod + def test_lrn_model(tmpdir): + + input_dim = (1, 3, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="output", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + + input = {"data": np.ones((1, 3, 3))} + expected = 1e-3 * np.ones((1, 3, 3)) + model_path = os.path.join(str(tmpdir), "lrn_model.mlmodel") + coremltools.models.utils.save_spec(builder.spec, model_path) + + try: + model = coremltools.models.MLModel(model_path, compute_units=ComputeUnit.CPU_ONLY) + if utils._macos_version() >= (10, 13): + out = model.predict(input) + except RuntimeError as e: + print(e) + assert str(e) == "Error compiling model: \"The file couldn’t be saved.\"." + else: + if utils._macos_version() >= (10, 13): + assert out['output'].shape == (1, 3, 3) + np.testing.assert_allclose(expected, out['output']) + print("Core ML output", out) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py new file mode 100644 index 00000000..8c0ebbfa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py @@ -0,0 +1,508 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import numpy as np + +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.models import MLModel, neural_network +from coremltools.models.utils import _is_macos, _macos_version + +if _HAS_TF_2: + import tensorflow as tf + + +np.random.seed(10) +np.set_printoptions(precision=4, suppress=True) + + +@unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) +class CorrectnessTest(unittest.TestCase): + def _compare_shapes(self, ref_preds, coreml_preds): + if np.squeeze(ref_preds).shape != np.squeeze(coreml_preds).shape: + return False + else: + return True + + def _compare_predictions_numerical( + self, ref_preds, coreml_preds, snr_thresh=15, psnr_thresh=30 + ): + ref_preds = ref_preds.flatten() + coreml_preds = coreml_preds.flatten() + noise = coreml_preds - ref_preds + noise_var = np.mean(noise ** 2) + signal_energy = np.mean(ref_preds ** 2) + max_signal_energy = np.amax(ref_preds ** 2) + + if noise_var > 1e-6 and signal_energy > 1e-6: + SNR = 10 * np.log10(signal_energy / noise_var) + PSNR = 10 * np.log10(max_signal_energy / noise_var) + + print("SNR: {}, PSNR: {}".format(SNR, PSNR)) + print("noise var: ", np.mean(noise ** 2)) + print("max signal energy: ", np.amax(ref_preds ** 2)) + print("signal energy: ", np.mean(ref_preds ** 2)) + + self.assertGreaterEqual(PSNR, psnr_thresh) + self.assertGreaterEqual(SNR, snr_thresh) + + def _test_model( + self, + input_dict, + ref_output_dict, + coreml_model, + snr_thresh=15, + psnr_thresh=30, + ): + coreml_out_dict = coreml_model.predict(input_dict) + for out_ in list(ref_output_dict.keys()): + ref_out = ref_output_dict[out_].flatten() + coreml_out = coreml_out_dict[out_].flatten() + self.assertEqual(len(coreml_out), len(ref_out)) + self._compare_predictions_numerical( + ref_out, coreml_out, snr_thresh=snr_thresh, psnr_thresh=psnr_thresh + ) + + +@unittest.skipUnless(_is_macos(), "Only supported for MacOS platform.") +class StressTest(CorrectnessTest): + def test_data_reorganize(self, cpu_only=False): + def get_coreml_model_reorganize(X, params): + eval = True + mlmodel = None + try: + input_dim = X.shape[2:] + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_reorganize_data( + "reorg", + "data", + "output", + mode=params["mode"], + block_size=params["block_size"], + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_reorganize(X, params): + if params["mode"] == "SPACE_TO_DEPTH": + y = tf.nn.space_to_depth(X, params["block_size"]) + else: + y = tf.nn.depth_to_space(X, params["block_size"]) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + C=[1, 2, 8, 16, 15, 27], + H=[2, 4, 6, 8, 10, 15, 21, 16], + W=[2, 4, 6, 8, 10, 15, 21, 16], + block_size=[2, 3, 4, 5], + mode=["SPACE_TO_DEPTH", "DEPTH_TO_SPACE"], + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in 
all_candidates: + if pr["mode"] == "SPACE_TO_DEPTH": + if pr["H"] % pr["block_size"] == 0 and pr["W"] % pr["block_size"] == 0: + valid_params.append(pr) + else: + if pr["C"] % (pr["block_size"] ** 2) == 0: + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + """ + Test + """ + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + # print("=========: ", params) + # if i % 10 == 0: print("======== Testing {}/{}".format(str(i), str(len(valid_params)))) + X = np.random.rand(1, params["C"], params["H"], params["W"]) + tf_preds = get_tf_predictions_reorganize( + np.transpose(X, [0, 2, 3, 1]), params + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_reorganize( + np.expand_dims(X, axis=0), params + ) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + ref_output_dict = {"output": tf_preds[0, :, :, :]} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + def test_data_reorganize_cpu_only(self): + self.test_data_reorganize(cpu_only=True) + + def test_depthwise_conv(self, cpu_only=False): + def get_coreml_model_depthwise(X, params, w): + eval = True + mlmodel = None + try: + input_dim = X.shape[2:] + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + # translate weights: (Kh, Kw, kernel_channels, output_channels) == (Kh, Kw, Cin/g, Cout) == (Kh, Kw, 1, channel_multiplier * Cin) + w_e = np.reshape( + w, + ( + params["kernel_size"], + params["kernel_size"], + params["multiplier"] * params["C"], + 1, + ), + ) + w_e = np.transpose(w_e, [0, 1, 3, 2]) + if params["padding"] == "SAME": + pad_mode = "same" + else: + pad_mode = "valid" + builder.add_convolution( + "conv", + kernel_channels=1, + output_channels=params["multiplier"] * params["C"], + height=params["kernel_size"], + width=params["kernel_size"], + stride_height=params["stride"], + stride_width=params["stride"], + border_mode=pad_mode, + groups=params["C"], + W=w_e, + b=None, + has_bias=0, + is_deconv=0, + output_shape=None, + input_name="data", + output_name="output", + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + return mlmodel, eval + + def get_tf_predictions_depthwise(X, params, w): + Cin = params["C"] + Kh = Kw = params["kernel_size"] + channel_multiplier = params["multiplier"] + y = tf.nn.depthwise_conv2d( + X, + w, + strides=[1, params["stride"], params["stride"], 1], + padding=params["padding"], + ) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + C=[1, 4, 7], + H=[11, 16], + stride=[1, 2, 3], + kernel_size=[1, 2, 3, 5], + multiplier=[1, 2, 3, 4], + padding=["SAME", "VALID"], + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in all_candidates: + if pr["padding"] == "VALID": + if np.floor((pr["H"] - pr["kernel_size"]) / pr["stride"]) + 1 <= 0: + continue + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + """ + Test + 
""" + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + # print("=========: ", params) + # if i % 10 == 0: print("======== Testing {}/{}".format(str(i), str(len(valid_params)))) + X = np.random.rand(1, params["C"], params["H"], params["H"]) + w = np.random.rand( + params["kernel_size"], + params["kernel_size"], + params["C"], + params["multiplier"], + ) + tf_preds = get_tf_predictions_depthwise( + np.transpose(X, [0, 2, 3, 1]), params, w + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_depthwise( + np.expand_dims(X, axis=0), params, w + ) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + ref_output_dict = {"output": tf_preds[0, :, :, :]} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + def test_depthwise_conv_cpu_only(self): + self.test_depthwise_conv(cpu_only=True) + + @unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_resize_bilinear(self, cpu_only=False): + def get_coreml_model_resize_bilinear(X, params): + eval = True + mlmodel = None + try: + input_dim = X.shape[2:] + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + if params["align_corners"]: + mode = "STRICT_ALIGN_ENDPOINTS_MODE" + else: + mode = "UPSAMPLE_MODE" + builder.add_resize_bilinear( + "resize", + "data", + "output", + target_height=params["Hnew"], + target_width=params["Wnew"], + mode=mode, + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_resize_bilinear(X, params): + y = tf.compat.v1.image.resize_bilinear( + X, + size=[params["Hnew"], params["Wnew"]], + align_corners=params["align_corners"], + ) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + H=[1, 3, 10], # [1,2,3,10] + W=[1, 3, 10], # [1,2,3,10] + Hnew=[1, 2, 6], # [1,3,6,12,20] + Wnew=[1, 2, 6], # [1,3,6,12,20] + align_corners=[False, True], # [False, True] + ch=[1, 5], # [1,5] + batch=[1, 3], # [1, 3] + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + valid_params = [dict(zip(params_dict.keys(), x)) for x in params] + print("Total params to be tested: {}".format(len(valid_params))) + """ + Test + """ + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + # print("=========: ", params) + if i % 100 == 0: + print( + "======================= Testing {}/{}".format( + str(i), str(len(valid_params)) + ) + ) + X = np.round( + 255 + * np.random.rand( + params["batch"], params["ch"], params["H"], params["W"] + ) + ) + tf_preds = get_tf_predictions_resize_bilinear( + np.transpose(X, [0, 2, 3, 1]), params + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_resize_bilinear( + np.expand_dims(X, axis=0), params + ) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + ref_output_dict = {"output": np.expand_dims(tf_preds, axis=0)} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + 
@unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_resize_bilinear_cpu_only(self): + self.test_resize_bilinear(cpu_only=True) + + @unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_crop_resize(self, cpu_only=False): + def get_coreml_model_crop_resize(params): + eval = True + mlmodel = None + batch, ch, n_roi = params["b_c_n"] + H = params["H"] + W = params["W"] + try: + input_features = [("data", datatypes.Array(ch, H, W))] + input_features.append(("roi", datatypes.Array(4, 1, 1))) + if batch != 1: + input_features.append(("box_ind", datatypes.Array(1, 1, 1))) + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + + if batch != 1: + builder.add_elementwise( + "concat", ["box_ind", "roi"], "roi_out", "CONCAT" + ) + input_names = ["data", "roi_out"] + else: + input_names = ["data", "roi"] + + builder.add_crop_resize( + "resize", + input_names, + "output", + target_height=params["Hnew"], + target_width=params["Wnew"], + mode="ALIGN_ENDPOINTS_MODE", + normalized_roi=True, + box_indices_mode="CORNERS_HEIGHT_FIRST", + spatial_scale=1.0, + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_crop_resize(X, boxes, box_ind, params): + y = tf.image.crop_and_resize( + X, boxes, box_ind, crop_size=[params["Hnew"], params["Wnew"]] + ) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + H=[1, 3, 10], # [1,2,3,6,10] + W=[1, 3, 10], # [1,2,3,6,10] + Hnew=[1, 2, 3, 6], # [1,2,3,6,12,20] + Wnew=[1, 2, 3, 6], # [1,2,3,6,12,20] + b_c_n=[ + (1, 1, 1), + (1, 2, 3), + (3, 2, 1), + (3, 4, 3), + ], # [(1,1,1),(1,2,3),(3,2,1),(3,4,3)] + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + valid_params = [dict(zip(params_dict.keys(), x)) for x in params] + print("Total params to be tested: {}".format(len(valid_params))) + """ + Test + """ + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + batch, ch, n_roi = params["b_c_n"] + X = np.round(255 * np.random.rand(batch, ch, params["H"], params["W"])) + roi = np.zeros((n_roi, 4), dtype=np.float32) + box_ind = np.zeros((n_roi)) + if batch != 1: + box_ind = np.random.randint(low=0, high=batch, size=(n_roi)) + for ii in range(n_roi): + r = np.random.rand(4) + w_start = r[0] + h_start = r[1] + w_end = r[2] * (1 - w_start) + w_start + h_end = r[3] * (1 - h_start) + h_start + roi[ii, :] = [h_start, w_start, h_end, w_end] + roi[ii, :] = np.round(100 * roi[ii, :]) / 100 + assert roi[ii, 0] <= roi[ii, 2] + assert roi[ii, 1] <= roi[ii, 3] + + tf_preds = get_tf_predictions_crop_resize( + np.transpose(X, [0, 2, 3, 1]), roi, box_ind, params + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_crop_resize(params) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + input_dict["roi"] = np.reshape(roi, (n_roi, 1, 4, 1, 1)) + if batch != 1: + input_dict["box_ind"] = np.reshape( + box_ind.astype(np.float32), (n_roi, 1, 1, 1, 1) + ) + ref_output_dict = {"output": np.expand_dims(tf_preds, axis=0)} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + 
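+    # ------------------------------------------------------------------
+    # Illustrative sketch (not part of the upstream test suite): every test
+    # in this class shuttles tensors between Core ML's NCHW layout and
+    # TensorFlow's NHWC layout. The hypothetical helpers below simply name
+    # the two permutations used inline above.
+    @staticmethod
+    def _nchw_to_nhwc(x):
+        # (batch, channels, height, width) -> (batch, height, width, channels)
+        return np.transpose(x, [0, 2, 3, 1])
+
+    @staticmethod
+    def _nhwc_to_nchw(x):
+        # (batch, height, width, channels) -> (batch, channels, height, width)
+        return np.transpose(x, [0, 3, 1, 2])
+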
@unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_crop_resize_cpu_only(self): + self.test_crop_resize(cpu_only=True) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py new file mode 100644 index 00000000..3406d37f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py @@ -0,0 +1,796 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile +import unittest + +import numpy as _np + +import coremltools.models.datatypes as datatypes +from coremltools.models import MLModel +from coremltools.models.neural_network import (AdamParams, + NeuralNetworkBuilder, SgdParams, + quantization_utils) +from coremltools.models.pipeline import PipelineClassifier, PipelineRegressor +from coremltools.models.utils import save_spec + + +class LayerSelector(quantization_utils.QuantizedLayerSelector): + def __init__(self, layer_name): + super(LayerSelector, self).__init__() + self.layer_name = layer_name + + def do_quantize(self, layer, weight_param="bias"): + ret = super(LayerSelector, self).do_quantize(layer) + if not ret or layer.name == self.layer_name: + return False + return True + + +class MLModelUpdatableTest(unittest.TestCase): + @classmethod + def setUpClass(self): + self.model_dir = tempfile.mkdtemp() + + @classmethod + def tearDownClass(self): + if os.path.exists(self.model_dir): + shutil.rmtree(self.model_dir) + + def create_base_builder(self, is_updatable=True): + self.input_features = [("input", datatypes.Array(3))] + self.output_features = [("output", None)] + self.output_names = ["output"] + + builder = NeuralNetworkBuilder(self.input_features, self.output_features) + + W1 = _np.random.uniform(-0.5, 0.5, (3, 3)) + W2 = _np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=W1, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + builder.add_inner_product( + name="ip2", + W=W2, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="hidden", + output_name="output", + ) + + if is_updatable: + builder.make_updatable(["ip1", "ip2"]) + + return builder + + def test_updatable_model_creation_ce_sgd(self): + builder = self.create_base_builder() + + builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="softmax_output" + ) + + builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0)) + builder.set_epochs(20, allowed_set=[10, 20, 30, 40]) + + model_path = 
os.path.join(self.model_dir, "updatable_creation.mlmodel") + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue, + 0, + atol=1e-8, + ) + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue + == 1 + ) + + def test_updatable_model_creation_ce_adam(self): + builder = self.create_base_builder() + + builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="softmax_output" + ) + + adam_params = AdamParams() + adam_params.set_batch(value=10, allowed_set=[10, 20]) + builder.set_adam_optimizer(adam_params) + builder.set_epochs(20) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue, + 0.9, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + 
spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue, + 0.999, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue, + 1e-8, + atol=1e-8, + ) + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values + == [10, 20] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue + == 1 + ) + + self.assertTrue(spec.neuralNetwork.updateParams.epochs.set.values == [20]) + + def test_updatable_model_creation_mse_sgd(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0)) + + builder.set_epochs(20) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue, + 0, + atol=1e-8, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue + == 0 + ) + self.assertTrue( + 
spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue + == 1 + ) + + def test_updatable_model_creation_mse_adam(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue, + 0.9, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue, + 0.999, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue, + 1e-8, + atol=1e-8, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.epochs.set.values == [10, 20, 30] + ) + + def test_nn_set_cce_without_softmax_fail(self): + nn_builder = self.create_base_builder() + + # fails since adding CCE without softmax must raise error + with self.assertRaises(ValueError): + nn_builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="output" + ) + + def test_nn_set_cce_invalid(self): + nn_builder = self.create_base_builder() + nn_builder.add_softmax( + name="softmax", 
input_name="output", output_name="softmax_output" + ) + + # fails since CCE input must be softmax output + with self.assertRaises(ValueError): + nn_builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="output" + ) + + def test_nn_set_softmax_updatable_invalid(self): + nn_builder = self.create_base_builder() + nn_builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + # fails since marking softmax as updatable layer is not allowed + with self.assertRaises(ValueError): + nn_builder.make_updatable(["softmax"]) + + def test_nn_set_training_input(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertEqual(spec.description.trainingInput[0].name, "input") + self.assertEqual( + spec.description.trainingInput[0].type.WhichOneof("Type"), "multiArrayType" + ) + self.assertEqual(spec.description.trainingInput[1].name, "output_true") + self.assertEqual( + spec.description.trainingInput[1].type.WhichOneof("Type"), "multiArrayType" + ) + + def test_nn_builder_with_training_features(self): + input_features = [("input", datatypes.Array(3))] + output_features = [("output", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + + W1 = _np.random.uniform(-0.5, 0.5, (3, 3)) + W2 = _np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=W1, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + builder.add_inner_product( + name="ip2", + W=W2, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="hidden", + output_name="output", + ) + + builder.make_updatable(["ip1", "ip2"]) # or a dict for weightParams + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertEqual(spec.description.trainingInput[0].name, "input") + self.assertEqual( + spec.description.trainingInput[0].type.WhichOneof("Type"), "multiArrayType" + ) + self.assertEqual(spec.description.trainingInput[1].name, "output_true") + self.assertEqual( + spec.description.trainingInput[1].type.WhichOneof("Type"), "multiArrayType" + ) + + def test_nn_fp16_make_updatable_fail(self): + nn_builder = self.create_base_builder(is_updatable=False) + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + save_spec(nn_builder.spec, model_path) + mlmodel = MLModel(model_path) + + quantized_result = quantization_utils.quantize_weights(mlmodel, 16, "linear") + q_nn_builder = NeuralNetworkBuilder(spec=quantized_result._spec) + + # fails since an FP16 model cannot be marked updatable + with 
self.assertRaises(ValueError): + q_nn_builder.make_updatable(["ip1", "ip2"]) + + def test_nn_partial_fp16_make_updatable_fail(self): + nn_builder = self.create_base_builder(is_updatable=False) + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(nn_builder.spec, model_path) + mlmodel = MLModel(model_path) + + selector = LayerSelector(layer_name='ip1') + quantized_model = quantization_utils.quantize_weights(mlmodel, 16, "linear", selector=selector) + + q_nn_builder = NeuralNetworkBuilder(spec=quantized_model._spec) + + # fails since model has a layer with FP16 bias + with self.assertRaises(ValueError): + q_nn_builder.make_updatable(["ip2"]) + + def test_nn_partial_fp16_make_updatable_quantized_layer_fail(self): + nn_builder = self.create_base_builder(is_updatable=False) + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(nn_builder.spec, model_path) + mlmodel = MLModel(model_path) + + selector = LayerSelector(layer_name='ip2') + quantized_result = quantization_utils.quantize_weights(mlmodel, 16, "linear", selector=selector) + quantized_spec = quantized_result._spec + q_nn_builder = NeuralNetworkBuilder(spec=quantized_spec) + + # fails since model has a layer with FP16 bias + with self.assertRaises(ValueError): + q_nn_builder.make_updatable(["ip2"]) + + def test_nn_updatable_quantize_fp16_fail(self): + nn_builder = self.create_base_builder() + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(nn_builder.spec, model_path) + mlmodel = MLModel(model_path) + + # fails since updatable models cannot get quantized to FP16 + with self.assertRaises(Exception): + quantization_utils.quantize_weights(mlmodel, 16, "linear") + + def test_pipeline_regressor_make_updatable(self): + builder = self.create_base_builder() + builder.spec.isUpdatable = False + + training_input = [("input", datatypes.Array(3)), ("target", "Double")] + + # fails due to missing sub-models + p_regressor = PipelineRegressor( + self.input_features, self.output_names, training_input + ) + with self.assertRaises(ValueError): + p_regressor.make_updatable() + self.assertEqual(p_regressor.spec.isUpdatable, False) + + # fails due to sub-model being not updatable + p_regressor.add_model(builder.spec) + with self.assertRaises(ValueError): + p_regressor.make_updatable() + self.assertEqual(p_regressor.spec.isUpdatable, False) + + builder.spec.isUpdatable = True + p_regressor.add_model(builder.spec) + + self.assertEqual(p_regressor.spec.isUpdatable, False) + p_regressor.make_updatable() + self.assertEqual(p_regressor.spec.isUpdatable, True) + self.assertEqual(p_regressor.spec.description.trainingInput[0].name, "input") + self.assertEqual( + p_regressor.spec.description.trainingInput[0].type.WhichOneof("Type"), + "multiArrayType", + ) + self.assertEqual(p_regressor.spec.description.trainingInput[1].name, "target") + self.assertEqual( + p_regressor.spec.description.trainingInput[1].type.WhichOneof("Type"), + "doubleType", + ) + + # fails since once updatable does not allow adding new models + with self.assertRaises(ValueError): + p_regressor.add_model(builder.spec) + self.assertEqual(p_regressor.spec.isUpdatable, True) + + def test_pipeline_classifier_make_updatable(self): + builder = self.create_base_builder() + builder.spec.isUpdatable = False + training_input = [("input", datatypes.Array(3)), ("target", "String")] + + # fails due to missing sub-models + p_classifier = 
PipelineClassifier( + self.input_features, self.output_names, training_features=training_input + ) + with self.assertRaises(ValueError): + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, False) + + # fails due to sub-model being not updatable + p_classifier.add_model(builder.spec) + with self.assertRaises(ValueError): + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, False) + + builder.spec.isUpdatable = True + p_classifier.add_model(builder.spec) + + self.assertEqual(p_classifier.spec.isUpdatable, False) + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, True) + self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input") + self.assertEqual( + p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"), + "multiArrayType", + ) + self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target") + self.assertEqual( + p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"), + "stringType", + ) + + # fails since once updatable does not allow adding new models + with self.assertRaises(ValueError): + p_classifier.add_model(builder.spec) + self.assertEqual(p_classifier.spec.isUpdatable, True) + + def test_pipeline_classifier_set_training_inputs(self): + builder = self.create_base_builder() + builder.spec.isUpdatable = False + training_input = [("input", datatypes.Array(3)), ("target", "String")] + + # fails due to missing sub-models + p_classifier = PipelineClassifier(self.input_features, self.output_names) + p_classifier.set_training_input(training_input) + with self.assertRaises(ValueError): + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, False) + + # fails due to sub-model being not updatable + p_classifier.add_model(builder.spec) + with self.assertRaises(ValueError): + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, False) + + builder.spec.isUpdatable = True + p_classifier.add_model(builder.spec) + + self.assertEqual(p_classifier.spec.isUpdatable, False) + p_classifier.make_updatable() + self.assertEqual(p_classifier.spec.isUpdatable, True) + self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input") + self.assertEqual( + p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"), + "multiArrayType", + ) + self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target") + self.assertEqual( + p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"), + "stringType", + ) + + # fails since once updatable does not allow adding new models + with self.assertRaises(ValueError): + p_classifier.add_model(builder.spec) + self.assertEqual(p_classifier.spec.isUpdatable, True) + + def test_shuffle_on_by_default(self): + builder = self.create_base_builder() + + # base builder already marks two layers as updatable + self.assertTrue( + builder.nn_spec.updateParams.shuffle.defaultValue, + "Shuffle not turned on by default for updatable models", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py new file mode 100644 index 00000000..06d2a6bd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py @@ -0,0 +1,277 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# + # Use of this source code is governed by a BSD-3-clause license that can be + # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + import itertools + import tempfile + import unittest + + import numpy as np + import pytest + + import coremltools as ct + from coremltools._deps import _HAS_LIBSVM, _HAS_SKLEARN + from coremltools.converters.mil.mil import Builder as mb + from coremltools.converters.mil.mil import Function, Program + from coremltools.models.pipeline import PipelineClassifier, PipelineRegressor + + if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import LinearRegression + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters import sklearn as converter + + if _HAS_LIBSVM: + from libsvm import svmutil + + from coremltools.converters import libsvm as libsvm_converter + + + @unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") + @unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") + class LinearRegressionPipelineCreationTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = LinearRegression() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + scikit_spec = converter.convert( + scikit_model, feature_names, "target" + ).get_spec() + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + self.scikit_spec = scikit_spec + + def test_pipeline_regression_creation(self): + + input_names = self.scikit_data.feature_names + output_name = "target" + p_regressor = PipelineRegressor(input_names, "target") + p_regressor.add_model(self.scikit_spec) + + self.assertIsNotNone(p_regressor.spec) + self.assertEqual(len(p_regressor.spec.pipelineRegressor.pipeline.models), 1) + + # Test the model class of the linear regressor model + spec = p_regressor.spec.pipelineRegressor.pipeline.models[0] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + + @unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") + @unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") + class LibSVMPipelineCreationTest(unittest.TestCase): + """ + Unit test class for testing the libsvm converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + if not _HAS_SKLEARN: + return + if not _HAS_LIBSVM: + return + + scikit_data = load_boston() + prob = svmutil.svm_problem( + scikit_data["target"] > scikit_data["target"].mean(), + scikit_data["data"].tolist(), + ) + param = svmutil.svm_parameter() + param.svm_type = svmutil.C_SVC + param.kernel_type = svmutil.LINEAR + param.eps = 1 + + libsvm_model = svmutil.svm_train(prob, param) + libsvm_spec = libsvm_converter.convert( + libsvm_model, scikit_data.feature_names, "target" + ).get_spec() + + # Save the data and the model + self.scikit_data = scikit_data + self.libsvm_spec = libsvm_spec + + def test_pipeline_classifier_creation(self): + + input_names = self.scikit_data.feature_names + p_classifier = PipelineClassifier(input_names, [1, 0]) + p_classifier.add_model(self.libsvm_spec) + + self.assertIsNotNone(p_classifier.spec) + self.assertEqual(len(p_classifier.spec.pipelineClassifier.pipeline.models), 1) + + # Test the model class of the svm model + spec = p_classifier.spec.pipelineClassifier.pipeline.models[0] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class LinearRegressionPipeline(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_SKLEARN: + return + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = Pipeline(steps=[("linear", LinearRegression())]) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_pipeline_regression_creation(self): + input_names = self.scikit_data.feature_names + output_name = "target" + + p_regressor = converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(p_regressor) + self.assertEqual(len(p_regressor.pipelineRegressor.pipeline.models), 2) + + # Test the model class of the linear regressor model + spec = p_regressor.pipelineRegressor.pipeline.models[-1] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + + for input_type in p_regressor.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), + sorted(map(lambda x: x.name, p_regressor.description.input)), + ) + + def test_conversion_bad_inputs(self): + """ + Failure testing for bad conversion. 
+ """ + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = converter.convert(model, "data", "out", "regressor") + + +class TestMakePipeline: + @staticmethod + def _make_model(input_name, input_length, + output_name, output_length, + convert_to): + + weight_tensor = np.arange(input_length * output_length, dtype='float32') + weight_tensor = weight_tensor.reshape(output_length, input_length) + + prog = Program() + func_inputs = {input_name: mb.placeholder(shape=(input_length,))} + with Function(func_inputs) as ssa_fun: + input = ssa_fun.inputs[input_name] + y = mb.linear(x=input, weight=weight_tensor, name=output_name) + ssa_fun.set_outputs([y]) + prog.add_function("main", ssa_fun) + + return ct.convert(prog, convert_to=convert_to) + + + @staticmethod + @pytest.mark.parametrize( + "model1_backend, model2_backend", + itertools.product(["mlprogram", "neuralnetwork"], ["mlprogram", "neuralnetwork"]), + ) + def test_simple(model1_backend, model2_backend): + # Create models + m1 = TestMakePipeline._make_model("x", 20, "y1", 10, model1_backend) + m2 = TestMakePipeline._make_model("y1", 10, "y2", 2, model2_backend) + + # Get non-pipeline result + x = np.random.rand(20) + y1 = m1.predict({"x": x})["y1"] + y2 = m2.predict({"y1": y1}) + + pipeline_model = ct.utils.make_pipeline(m1, m2) + + y_pipeline = pipeline_model.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) + + # Check save/load + with tempfile.TemporaryDirectory() as save_dir: + # Save pipeline + save_path = save_dir + "/test.mlpackage" + pipeline_model.save(save_path) + + # Check loading from a mlpackage path + p2 = ct.models.MLModel(save_path) + y_pipeline = p2.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) + + # Check loading from spec and weight dir + p3 = ct.models.MLModel(p2.get_spec(), weights_dir=p2.weights_dir) + y_pipeline = p3.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py new file mode 100644 index 00000000..2d916802 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py @@ -0,0 +1,309 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import random +import tempfile +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, _SKLEARN_VERSION, + MSG_LIBSVM_NOT_FOUND, MSG_SKLEARN_NOT_FOUND) +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_LIBSVM: + from libsvm import svmutil + from svmutil import svm_predict, svm_train + + from coremltools.converters import libsvm + +if _HAS_SKLEARN: + from distutils.version import StrictVersion + + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import NuSVC + + from coremltools.converters import sklearn as scikit_converter + + +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class NuSvcScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + def _evaluation_test_helper( + self, + class_labels, + use_probability_estimates, + allow_slow, + allowed_prob_delta=0.00001, + ): + # Parameters to test + kernel_parameters = [ + {}, + {"kernel": "rbf", "gamma": 1.2}, + {"kernel": "linear"}, + {"kernel": "poly"}, + {"kernel": "poly", "degree": 2}, + {"kernel": "poly", "gamma": 0.75}, + ] + # sklearn version > 0.22 NuSVC introduced finiteness checks that fail for + # the 'sigmoid' and one 'poly' kernel test cases. Avoid those. + # See https://github.com/scikit-learn/scikit-learn/issues/17925 + if _SKLEARN_VERSION <= StrictVersion("0.22"): + kernel_parameters += [ + {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2}, + {"kernel": "sigmoid"}, + {"kernel": "sigmoid", "gamma": 1.3}, + {"kernel": "sigmoid", "coef0": 0.8}, + {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5}, + ] + + non_kernel_parameters = [ + {}, + {"nu": 0.75}, + {"nu": 0.25, "shrinking": True}, + {"shrinking": False}, + ] + + # Generate some random data + x, y = [], [] + random.seed(42) + for _ in range(50): + x.append( + [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)] + ) + y.append(random.choice(class_labels)) + column_names = ["x1", "x2", "x3"] + # make sure first label is seen first, second is seen second, and so on. 
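+        # (Pinning one sample per label also guarantees that every entry of
+        # class_labels actually appears in y, even for a small random sample;
+        # the ordering rationale above is presumed from the libsvm-style tests.)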
+ for i, val in enumerate(class_labels): + y[i] = val + df = pd.DataFrame(x, columns=column_names) + + # Test + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + cur_params = param1.copy() + cur_params.update(param2) + cur_params["probability"] = use_probability_estimates + cur_params["max_iter"] = 10 # Don't want test to take too long + + cur_model = NuSVC(**cur_params) + cur_model.fit(x, y) + + spec = scikit_converter.convert(cur_model, column_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + if use_probability_estimates: + probability_lists = cur_model.predict_proba(x) + df["classProbability"] = [ + dict(zip(cur_model.classes_, cur_vals)) + for cur_vals in probability_lists + ] + metrics = evaluate_classifier_with_probabilities( + spec, df, probabilities="classProbability" + ) + self.assertEqual(metrics["num_key_mismatch"], 0) + self.assertLess( + metrics["max_probability_error"], allowed_prob_delta + ) + else: + df["target"] = cur_model.predict(x) + metrics = evaluate_classifier(spec, df, verbose=False) + self.assertEqual(metrics["num_errors"], 0) + + if not allow_slow: + break + + if not allow_slow: + break + + @pytest.mark.slow + def test_binary_class_int_label_without_probability_stress_test(self): + self._evaluation_test_helper([1, 3], False, allow_slow=True) + + def test_binary_class_int_label_without_probability(self): + self._evaluation_test_helper([1, 3], False, allow_slow=False) + + @pytest.mark.slow + def test_binary_class_string_label_with_probability_stress_test(self): + # Scikit Learn uses technique to normalize pairwise probabilities even for binary classification. + # This leads to difference in probabilities. + self._evaluation_test_helper( + ["foo", "bar"], True, allow_slow=True, allowed_prob_delta=0.005 + ) + + def test_binary_class_string_label_with_probability(self): + # Scikit Learn uses technique to normalize pairwise probabilities even for binary classification. + # This leads to difference in probabilities. 
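+        # (Likely cause, stated as an assumption: libsvm-based probability
+        # estimates fit a sigmoid
+        #     p = 1 / (1 + exp(A * d + B))
+        # to the decision values d (Platt scaling) and, per the note above,
+        # renormalize pairwise estimates even with two classes, so exact
+        # agreement with Core ML's probabilities is not expected.)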
+ self._evaluation_test_helper( + ["foo", "bar"], True, allow_slow=False, allowed_prob_delta=0.005 + ) + + @pytest.mark.slow + def test_multi_class_int_label_without_probability_stress_test(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=True) + + def test_multi_class_int_label_without_probability(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=False) + + @pytest.mark.slow + def test_multi_class_string_label_with_probability_stress_test(self): + self._evaluation_test_helper(["X", "Y", "z"], True, allow_slow=True) + + def test_multi_class_string_label_with_probability(self): + self._evaluation_test_helper(["X", "Y", "z"], True, allow_slow=False) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = NuSVC() + spec = scikit_converter.convert(model, "data", "out") + + # Check the expected class during conversion + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = scikit_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND) +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class NuSVCLibSVMTest(unittest.TestCase): + # Model parameters for testing + base_param = "-s 1 -q" # model type C-SVC and quiet mode + non_kernel_parameters = ["", "-n 0.6 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "-t 0", # linear kernel + "", + "-t 2 -g 1.2", # rbf kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + + """ + Unit test class for testing the libsvm sklearn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_LIBSVM: + # setUpClass is still called even if class is skipped. + return + + # Generate some random data. + # This unit test should not rely on scikit learn for test data. 
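+        # (The fixed seed below keeps the generated dataset, and therefore the
+        # trained model and the converted spec, reproducible across test runs.)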
+        self.x, self.y = [], []
+        random.seed(42)
+        for _ in range(50):
+            self.x.append([random.gauss(200, 30), random.gauss(-100, 22)])
+            self.y.append(random.choice([1, 2]))
+        self.y[0] = 1  # Make sure 1 is always the first label it sees
+        self.y[1] = 2
+        self.column_names = ["x1", "x2"]
+        self.prob = svmutil.svm_problem(self.y, self.x)
+
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.NU_SVC
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+        param.probability = 1
+
+        # Save the data and the model
+        self.libsvm_model = svmutil.svm_train(self.prob, param)
+
+        self.df = pd.DataFrame(self.x, columns=self.column_names)
+
+    def _test_prob_model(self, param1, param2):
+        probability_param = "-b 1"
+        df = self.df
+
+        param_str = " ".join([self.base_param, param1, param2, probability_param])
+        param = svmutil.svm_parameter(param_str)
+        model = svm_train(self.prob, param)
+
+        # Get predictions with probabilities as dictionaries
+        (df["prediction"], _, probability_lists) = svm_predict(
+            self.y, self.x, model, probability_param + " -q"
+        )
+        probability_dicts = [
+            dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
+        ]
+        df["probabilities"] = probability_dicts
+
+        spec = libsvm.convert(model, self.column_names, "target", "probabilities")
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            metrics = evaluate_classifier_with_probabilities(spec, df, verbose=False)
+            self.assertEqual(metrics["num_key_mismatch"], 0)
+            self.assertLess(metrics["max_probability_error"], 0.00001)
+
+    @pytest.mark.slow
+    def test_binary_classification_with_probability_stress_test(self):
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                self._test_prob_model(param1, param2)
+
+    def test_binary_classification_with_probability(self):
+        param1 = self.non_kernel_parameters[0]
+        param2 = self.kernel_parameters[0]
+        self._test_prob_model(param1, param2)
+
+    @pytest.mark.slow
+    @unittest.skip(
+        "LibSVM's Python library is broken for NuSVC without probabilities. It always segfaults during prediction time."
+    )
+    def test_multi_class_without_probability(self):
+        # Generate some random data.
+        # This unit test should not rely on scikit learn for test data.
+        x, y = [], []
+        for _ in range(50):
+            x.append(
+                [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)]
+            )
+            y.append(random.choice([1, 2, 10, 12]))
+        y[0], y[1], y[2], y[3] = 1, 2, 10, 12
+        column_names = ["x1", "x2", "x3"]
+        prob = svmutil.svm_problem(y, x)
+
+        df = pd.DataFrame(x, columns=column_names)
+
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                param_str = " ".join([self.base_param, param1, param2])
+                param = svmutil.svm_parameter(param_str)
+
+                model = svm_train(prob, param)
+
+                # Get predictions (no probabilities requested here)
+                (df["prediction"], _, _) = svm_predict(y, x, model, " -q")
+
+                spec = libsvm.convert(model, column_names, "target")
+
+                metrics = evaluate_classifier(spec, df, verbose=False)
+                self.assertEqual(metrics["num_errors"], 0)
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        spec = libsvm.convert(libsvm_model_path, "data", "target")
+
+    def test_conversion_bad_inputs(self):
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = libsvm.convert(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py
new file mode 100644
index 00000000..7646abd8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import random
+import tempfile
+import unittest
+
+import pandas as pd
+import pytest
+
+from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, MSG_LIBSVM_NOT_FOUND,
+                               MSG_SKLEARN_NOT_FOUND)
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_regressor)
+
+if _HAS_LIBSVM:
+    from libsvm import svmutil
+    from svmutil import svm_predict, svm_train
+
+    from coremltools.converters import libsvm
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.preprocessing import OneHotEncoder
+    from sklearn.svm import NuSVR
+
+    from coremltools.converters import sklearn as scikit_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class NuSVRScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+
+        self.scikit_model = NuSVR(kernel="linear")
+        self.data = load_boston()
+        self.scikit_model.fit(self.data["data"], self.data["target"])
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = NuSVR()
+            spec = scikit_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = scikit_converter.convert(model, "data", "out")
+
+    @pytest.mark.slow
+    def test_evaluation_stress_test(self):
+        self._test_evaluation(allow_slow=True)
+
+    def test_evaluation(self):
+        self._test_evaluation(allow_slow=False)
+
+    def _test_evaluation(self, allow_slow):
+        """
+        Test that the same predictions are made.
+        """
+
+        # Generate some smallish (some kernels take too long on anything else) random data
+        x, y = [], []
+        for _ in range(50):
+            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
+            x.append([cur_x1, cur_x2])
+            y.append(1 + 2 * cur_x1 + 3 * cur_x2)
+
+        input_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=input_names)
+
+        # Parameters to test
+        kernel_parameters = [
+            {},
+            {"kernel": "rbf", "gamma": 1.2},
+            {"kernel": "linear"},
+            {"kernel": "poly"},
+            {"kernel": "poly", "degree": 2},
+            {"kernel": "poly", "gamma": 0.75},
+            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
+            {"kernel": "sigmoid"},
+            {"kernel": "sigmoid", "gamma": 1.3},
+            {"kernel": "sigmoid", "coef0": 0.8},
+            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
+        ]
+        non_kernel_parameters = [
+            {},
+            {"C": 1},
+            {"C": 1.5, "shrinking": True},
+            {"C": 0.5, "shrinking": False, "nu": 0.9},
+        ]
+
+        # Test
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                cur_params = param1.copy()
+                cur_params.update(param2)
+
+                cur_model = NuSVR(**cur_params)
+                cur_model.fit(x, y)
+                df["target"] = cur_model.predict(x)
+
+                spec = scikit_converter.convert(cur_model, input_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_regressor(spec, df)
+                    self.assertAlmostEqual(metrics["max_error"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+
+@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class NuSVRLibSVMTest(unittest.TestCase):
+    """
+    Unit test class for testing the libsvm converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+        if not _HAS_LIBSVM:
+            return
+
+        scikit_data = load_boston()
+        prob = svmutil.svm_problem(scikit_data["target"], scikit_data["data"].tolist())
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.NU_SVR
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+
+        self.libsvm_model = svmutil.svm_train(prob, param)
+
+    def test_conversion(self):
+        spec = libsvm.convert(self.libsvm_model, "data", "target")
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        spec = libsvm.convert(libsvm_model_path, "data", "target")
+
+    def test_conversion_bad_inputs(self):
+        # Check the expected class during conversion.
+ with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = libsvm.convert(model, "data", "out") + + @pytest.mark.slow + def test_evaluation_stress_test(self): + self._test_evaluation(allow_slow=True) + + def test_evaluation(self): + self._test_evaluation(allow_slow=False) + + def _test_evaluation(self, allow_slow): + """ + Test that the same predictions are made + """ + # Generate some smallish (poly kernels take too long on anything else) random data + x, y = [], [] + for _ in range(50): + cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2) + x.append([cur_x1, cur_x2]) + y.append(1 + 2 * cur_x1 + 3 * cur_x2) + + input_names = ["x1", "x2"] + df = pd.DataFrame(x, columns=input_names) + prob = svmutil.svm_problem(y, x) + + # Parameters + base_param = "-s 4" # model type is nu-SVR + non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "", + "-t 2 -g 1.2", # rbf kernel + "-t 0", # linear kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + param_str = " ".join([base_param, param1, param2]) + param = svmutil.svm_parameter(param_str) + + model = svm_train(prob, param) + (df["target"], _, _) = svm_predict(y, x, model) + + spec = libsvm.convert(model, input_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + if not allow_slow: + break + + if not allow_slow: + break diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py new file mode 100644 index 00000000..749f38a2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py @@ -0,0 +1,369 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import random +import tempfile +import unittest + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_LIBSVM, _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_SKLEARN: + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import SVC + + from coremltools.converters import sklearn as scikit_converter + +if _HAS_LIBSVM: + import svmutil + from svm import svm_parameter + from svmutil import svm_predict, svm_train + + from coremltools.converters import libsvm + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class SvcScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. 
+ """ + + def _evaluation_test_helper( + self, + class_labels, + use_probability_estimates, + allow_slow, + allowed_prob_delta=0.00001, + ): + # Parameters to test + kernel_parameters = [ + {}, + {"kernel": "rbf", "gamma": 1.2}, + {"kernel": "linear"}, + {"kernel": "poly"}, + {"kernel": "poly", "degree": 2}, + {"kernel": "poly", "gamma": 0.75}, + {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2}, + {"kernel": "sigmoid"}, + {"kernel": "sigmoid", "gamma": 1.3}, + {"kernel": "sigmoid", "coef0": 0.8}, + {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5}, + ] + non_kernel_parameters = [ + {}, + {"C": 1}, + {"C": 1.5, "shrinking": True}, + {"C": 0.5, "shrinking": False}, + ] + + # Generate some random data + x, y = [], [] + random.seed(42) + for _ in range(50): + x.append( + [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)] + ) + y.append(random.choice(class_labels)) + column_names = ["x1", "x2", "x3"] + # make sure first label is seen first, second is seen second, and so on. + for i, val in enumerate(class_labels): + y[i] = val + df = pd.DataFrame(x, columns=column_names) + + # Test + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + cur_params = param1.copy() + cur_params.update(param2) + cur_params["probability"] = use_probability_estimates + cur_params["max_iter"] = 10 # Don't want test to take too long + + cur_model = SVC(**cur_params) + cur_model.fit(x, y) + + spec = scikit_converter.convert(cur_model, column_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + if use_probability_estimates: + probability_lists = cur_model.predict_proba(x) + df["classProbability"] = [ + dict(zip(cur_model.classes_, cur_vals)) + for cur_vals in probability_lists + ] + metrics = evaluate_classifier_with_probabilities( + spec, df, probabilities="classProbability", verbose=True + ) + self.assertEqual(metrics["num_key_mismatch"], 0) + self.assertLess( + metrics["max_probability_error"], allowed_prob_delta + ) + else: + df["target"] = cur_model.predict(x) + metrics = evaluate_classifier(spec, df, verbose=False) + self.assertEqual(metrics["num_errors"], 0) + + if not allow_slow: + break + + if not allow_slow: + break + + @pytest.mark.slow + def test_binary_class_string_label_without_probability_stress_test(self): + self._evaluation_test_helper(["A", "B"], False, allow_slow=True) + + def test_binary_class_string_label_without_probability(self): + self._evaluation_test_helper(["A", "B"], False, allow_slow=False) + + @pytest.mark.slow + def test_binary_class_string_label_with_probability_stress_test(self): + # Scikit Learn uses technique to normalize pairwise probabilities even for binary classification. + # This leads to difference in probabilities. + self._evaluation_test_helper( + ["foo", "bar"], True, allow_slow=True, allowed_prob_delta=0.005 + ) + + def test_binary_class_string_label_with_probability(self): + # Scikit Learn uses technique to normalize pairwise probabilities even for binary classification. + # This leads to difference in probabilities. 
+ self._evaluation_test_helper( + ["foo", "bar"], True, allow_slow=False, allowed_prob_delta=0.005 + ) + + @pytest.mark.slow + def test_multi_class_int_label_without_probability_stress_test(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=True) + + def test_multi_class_int_label_without_probability(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=False) + + @pytest.mark.slow + def test_multi_class_int_label_with_probability_stress_test(self): + self._evaluation_test_helper([1, 2, 3], True, allow_slow=True) + + def test_multi_class_int_label_with_probability(self): + self._evaluation_test_helper([1, 2, 3], True, allow_slow=False) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = SVC() + spec = scikit_converter.convert(model, "data", "out") + + # Check the expected class during conversion + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = scikit_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") +class CSVCLibSVMTest(unittest.TestCase): + # Model parameters for testing + base_param = "-s 0 -q " # model type C-SVC and quiet mode + non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "-t 0", # linear kernel + "", + "-t 2 -g 1.2", # rbf kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + # XXX: wi params? + + """ + Unit test class for testing the libsvm converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_LIBSVM: + # setUpClass is still called even if class is skipped. + return + + # Generate some random data. + # This unit test should not rely on scikit learn for test data. 
+ self.x, self.y = [], [] + random.seed(42) + for _ in range(50): + self.x.append([random.gauss(200, 30), random.gauss(-100, 22)]) + self.y.append(random.choice([1, 2])) + self.y[0] = 1 # Make sure 1 is always the first label it sees + self.y[1] = 2 + self.column_names = ["x1", "x2"] + self.prob = svmutil.svm_problem(self.y, self.x) + + param = svmutil.svm_parameter() + param.svm_type = svmutil.C_SVC + param.kernel_type = svmutil.LINEAR + param.eps = 1 + param.probability = 1 + + self.libsvm_model = svmutil.svm_train(self.prob, param) + + def test_default_names(self): + df = pd.DataFrame({"input": self.x}) + df["input"] = df["input"].apply(np.array) + + # Test with probabilities + spec = libsvm.convert(self.libsvm_model).get_spec() + if _is_macos() and _macos_version() >= (10, 13): + (_, _, probability_lists) = svm_predict( + self.y, self.x, self.libsvm_model, "-b 1 -q" + ) + probability_dicts = [ + dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists + ] + df["classProbability"] = probability_dicts + metrics = evaluate_classifier_with_probabilities( + spec, df, verbose=False, probabilities="classProbability" + ) + self.assertLess(metrics["max_probability_error"], 0.00001) + + # Test model without probabilities + no_probability_model = svmutil.svm_train(self.prob, svmutil.svm_parameter()) + spec = libsvm.convert(no_probability_model).get_spec() + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, u"target") + if _is_macos() and _macos_version() >= (10, 13): + (df["target"], _, _) = svm_predict( + self.y, self.x, no_probability_model, " -q" + ) + metrics = evaluate_classifier(spec, df, verbose=False) + self.assertEqual(metrics["num_errors"], 0) + + # LibSVM only supports string labels + @pytest.mark.slow + def test_binary_class_without_probability_stress_test(self): + self._evaluation_test_helper_no_probability([0, 1], allow_slow=True) + + @pytest.mark.slow + def test_binary_class_with_probability_stress_test(self): + self._evaluation_test_helper_with_probability([-1, 90], allow_slow=True) + + @pytest.mark.slow + def test_multi_class_without_probability_stress_test(self): + self._evaluation_test_helper_no_probability([12, 33, 12341], allow_slow=True) + + @pytest.mark.slow + def test_multi_class_with_probability_stress_test(self): + self._evaluation_test_helper_with_probability([1, 2, 3], allow_slow=True) + + # LibSVM only supports string labels + def test_binary_class_without_probability(self): + self._evaluation_test_helper_no_probability([0, 1], allow_slow=False) + + def test_binary_class_with_probability(self): + self._evaluation_test_helper_with_probability([-1, 90], allow_slow=False) + + def test_multi_class_without_probability(self): + self._evaluation_test_helper_no_probability([12, 33, 12341], allow_slow=False) + + def test_multi_class_with_probability(self): + self._evaluation_test_helper_with_probability([1, 2, 3], allow_slow=False) + + def _evaluation_test_helper_with_probability(self, labels, allow_slow): + df = pd.DataFrame(self.x, columns=self.column_names) + y = copy.copy(self.y) + for i, val in enumerate(labels): + y[i] = val + probability_param = "-b 1" + + for param1 in self.non_kernel_parameters: + for param2 in self.kernel_parameters: + param_str = " ".join( + [self.base_param, param1, param2, probability_param] + ) + param = svm_parameter(param_str) + + model = svm_train(self.prob, param) + + # Get predictions with probabilities as dictionaries + (df["target"], _, probability_lists) = svm_predict( + y, self.x, 
model, probability_param + " -q" + ) + probability_dicts = [ + dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists + ] + df["probabilities"] = probability_dicts + + spec = libsvm.convert( + model, self.column_names, "target", "probabilities" + ) + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_classifier_with_probabilities( + spec, df, verbose=False + ) + self.assertEqual(metrics["num_key_mismatch"], 0) + self.assertLess(metrics["max_probability_error"], 0.00001) + + if not allow_slow: + break + + if not allow_slow: + break + + def _evaluation_test_helper_no_probability(self, labels, allow_slow): + # Generate some random data. + # This unit test should not rely on scikit learn for test data. + x, y = [], [] + random.seed(42) + for _ in range(50): + x.append( + [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)] + ) + y.append(random.choice(labels)) + # make sure first label is seen first, second is seen second, and so on. + for i, val in enumerate(labels): + y[i] = val + column_names = ["x1", "x2", "x3"] + prob = svmutil.svm_problem(y, x) + + df = pd.DataFrame(x, columns=column_names) + + for param1 in self.non_kernel_parameters: + for param2 in self.kernel_parameters: + param_str = " ".join([self.base_param, param1, param2]) + param = svm_parameter(param_str) + + model = svm_train(prob, param) + + # Get predictions with probabilities as dictionaries + (df["target"], _, _) = svm_predict(y, x, model, " -q") + + spec = libsvm.convert(model, column_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_classifier(spec, df, verbose=False) + self.assertEqual(metrics["num_errors"], 0) + + if not allow_slow: + break + + if not allow_slow: + break + + def test_conversion_from_filesystem(self): + libsvm_model_path = tempfile.mktemp(suffix="model.libsvm") + svmutil.svm_save_model(libsvm_model_path, self.libsvm_model) + # libsvm's save(...) truncates floating points. So it's not going to match self.libsvm_model any more. + spec = libsvm.convert(libsvm_model_path, self.column_names, "target") + self.assertIsNotNone(spec) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py new file mode 100644 index 00000000..39bee077 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py @@ -0,0 +1,259 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import random +import tempfile +import unittest + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, MSG_LIBSVM_NOT_FOUND, + MSG_SKLEARN_NOT_FOUND) +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_LIBSVM: + import svmutil + + from coremltools.converters import libsvm + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import SVR + + from coremltools.converters import sklearn as sklearn_converter + + +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class SvrScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn sklearn_converter. 
+ """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + scikit_model = SVR(kernel="linear") + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = SVR() + spec = sklearn_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = sklearn_converter.convert(model, "data", "out") + + @pytest.mark.slow + def test_evaluation_stress_test(self): + self._test_evaluation(allow_slow=True) + + def test_evaluation(self): + self._test_evaluation(allow_slow=False) + + def _test_evaluation(self, allow_slow): + """ + Test that the same predictions are made + """ + + # Generate some smallish (some kernels take too long on anything else) random data + x, y = [], [] + for _ in range(50): + cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2) + x.append([cur_x1, cur_x2]) + y.append(1 + 2 * cur_x1 + 3 * cur_x2) + + input_names = ["x1", "x2"] + df = pd.DataFrame(x, columns=input_names) + + # Parameters to test + kernel_parameters = [ + {}, + {"kernel": "rbf", "gamma": 1.2}, + {"kernel": "linear"}, + {"kernel": "poly"}, + {"kernel": "poly", "degree": 2}, + {"kernel": "poly", "gamma": 0.75}, + {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2}, + {"kernel": "sigmoid"}, + {"kernel": "sigmoid", "gamma": 1.3}, + {"kernel": "sigmoid", "coef0": 0.8}, + {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5}, + ] + non_kernel_parameters = [ + {}, + {"C": 1}, + {"C": 1.5, "epsilon": 0.5, "shrinking": True}, + {"C": 0.5, "epsilon": 1.5, "shrinking": False}, + ] + + # Test + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + cur_params = param1.copy() + cur_params.update(param2) + print("cur_params=" + str(cur_params)) + + cur_model = SVR(**cur_params) + cur_model.fit(x, y) + df["target"] = cur_model.predict(x) + + spec = sklearn_converter.convert(cur_model, input_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + if not allow_slow: + break + + if not allow_slow: + break + + +@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND) +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class EpsilonSVRLibSVMTest(unittest.TestCase): + """ + Unit test class for testing the libsvm sklearn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + if not _HAS_SKLEARN: + return + if not _HAS_LIBSVM: + return + + scikit_data = load_boston() + prob = svmutil.svm_problem(scikit_data["target"], scikit_data["data"].tolist()) + param = svmutil.svm_parameter() + param.svm_type = svmutil.EPSILON_SVR + param.kernel_type = svmutil.LINEAR + param.eps = 1 + + self.libsvm_model = svmutil.svm_train(prob, param) + + def test_input_names(self): + data = load_boston() + df = pd.DataFrame({"input": data["data"].tolist()}) + df["input"] = df["input"].apply(np.array) + + # Default values + spec = libsvm.convert(self.libsvm_model) + if _is_macos() and _macos_version() >= (10, 13): + (df["target"], _, _) = svmutil.svm_predict( + data["target"], data["data"].tolist(), self.libsvm_model + ) + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + # One extra parameters. This is legal/possible. + num_inputs = len(data["data"][0]) + spec = libsvm.convert(self.libsvm_model, input_length=num_inputs + 1) + + # Not enought input names. + input_names = ["this", "is", "not", "enought", "names"] + with self.assertRaises(ValueError): + libsvm.convert(self.libsvm_model, input_names=input_names) + with self.assertRaises(ValueError): + libsvm.convert(self.libsvm_model, input_length=num_inputs - 1) + + def test_conversion_from_filesystem(self): + libsvm_model_path = tempfile.mktemp(suffix="model.libsvm") + svmutil.svm_save_model(libsvm_model_path, self.libsvm_model) + spec = libsvm.convert( + libsvm_model_path, input_names="data", target_name="target" + ) + + def test_conversion_bad_inputs(self): + # Check the expected class during covnersion. + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = libsvm.convert(model, "data", "out") + + @pytest.mark.slow + def test_evaluation_stress_test(self): + self._test_evaluation(allow_slow=True) + + def test_evaluation(self): + self._test_evaluation(allow_slow=False) + + def _test_evaluation(self, allow_slow): + """ + Test that the same predictions are made + """ + from svm import svm_parameter, svm_problem + from svmutil import svm_predict, svm_train + + # Generate some smallish (poly kernels take too long on anything else) random data + x, y = [], [] + for _ in range(50): + cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2) + x.append([cur_x1, cur_x2]) + y.append(1 + 2 * cur_x1 + 3 * cur_x2) + + input_names = ["x1", "x2"] + df = pd.DataFrame(x, columns=input_names) + prob = svm_problem(y, x) + + # Parameters + base_param = "-s 3" # model type is epsilon SVR + non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "", + "-t 2 -g 1.2", # rbf kernel + "-t 0", # linear kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + param_str = " ".join([base_param, param1, param2]) + print(param_str) + param = svm_parameter(param_str) + + model = svm_train(prob, param) + (df["target"], _, _) = svm_predict(y, x, model) + + spec = libsvm.convert( + model, input_names=input_names, target_name="target" + ) + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + if not allow_slow: + break + + if not allow_slow: + break diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py 
b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py new file mode 100644 index 00000000..2076c75a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py @@ -0,0 +1,78 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest +from distutils.version import StrictVersion + +import numpy as np + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION + +if _HAS_SKLEARN: + import sklearn + + from coremltools.converters import sklearn as converter + try: + # scikit-learn >= 0.21 + from sklearn.impute import SimpleImputer as Imputer + + sklearn_class = sklearn.impute.SimpleImputer + except ImportError: + # scikit-learn < 0.21 + from sklearn.preprocessing import Imputer + + sklearn_class = sklearn.preprocessing.Imputer + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class ImputerTestCase(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + + scikit_data = load_boston() + # axis parameter deprecated in SimpleImputer >= 0.22. which now imputes + # only along columns as desired here. + if _SKLEARN_VERSION >= StrictVersion("0.22"): + scikit_model = Imputer(strategy="most_frequent") + else: + scikit_model = Imputer(strategy="most_frequent", axis=0) + scikit_data["data"][1, 8] = np.NaN + + input_data = scikit_data["data"][:, 8].reshape(-1, 1) + scikit_model.fit(input_data, scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + spec = converter.convert(self.scikit_model, "data", "out").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface + self.assertTrue(spec.pipeline.models[-1].HasField("imputer")) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = Imputer() + spec = converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + with self.assertRaises(Exception): + from sklearn.linear_model import LinearRegression + + model = LinearRegression() + spec = converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py new file mode 100644 index 00000000..95cdcd56 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py @@ -0,0 +1,85 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest +from distutils.version import StrictVersion + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.converters.sklearn import convert +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor, evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import OneHotEncoder, StandardScaler + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingRegressorBostonHousingScikitNumericTest(unittest.TestCase): + + @unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def test_boston_OHE_plus_normalizer(self): + data = load_boston() + + pl = Pipeline( + [ + ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)), + ("Scaler", StandardScaler()), + ] + ) + + pl.fit(data.data, data.target) + + # Convert the model + spec = convert(pl, data.feature_names, "out") + + if _is_macos() and _macos_version() >= (10, 13): + input_data = [dict(zip(data.feature_names, row)) for row in data.data] + output_data = [{"out": row} for row in pl.transform(data.data)] + + result = evaluate_transformer(spec, input_data, output_data) + assert result["num_errors"] == 0 + + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def _test_boston_OHE_plus_trees(self, loss='ls'): + + data = load_boston() + + pl = Pipeline( + [ + ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)), + ("Trees", GradientBoostingRegressor(random_state=1, loss=loss)), + ] + ) + + pl.fit(data.data, data.target) + + # Convert the model + spec = convert(pl, data.feature_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(data.data, columns=data.feature_names) + df["target"] = pl.predict(data.data) + + # Evaluate it + result = evaluate_regressor(spec, df, "target", verbose=False) + + assert result["max_error"] < 0.0001 + + def test_boston_OHE_plus_trees(self): + self._test_boston_OHE_plus_trees() + + def test_boston_OHE_plus_trees_with_huber_loss(self): + self._test_boston_OHE_plus_trees(loss='huber') diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py new file mode 100644 index 00000000..c0323cbd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py @@ -0,0 +1,102 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as np +import numpy.random as rn +import pandas as pd + +import coremltools +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.feature_extraction import DictVectorizer + from sklearn.linear_model import LogisticRegression + from sklearn.pipeline import Pipeline + + from coremltools.converters import sklearn + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DictVectorizerScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + def _test_conversion(self, data, trained_dict_vectorizer): + + X = trained_dict_vectorizer.transform(data) + + m = sklearn.convert( + trained_dict_vectorizer, + input_features="features", + output_feature_names="output", + ) + + if _is_macos() and _macos_version() >= (10, 13): + ret = evaluate_transformer( + m, + [{"features": row} for row in data], + [{"output": x_r} for x_r in X], + True, + ) + assert ret["num_errors"] == 0 + + + def test_dictvectorizer(self): + D = [ + {"foo": 1, "bar": 3}, + {"bar": 4, "baz": 2}, + {"bar": 1, "quux": 1, "quuux": 2}, + ] + + for sparse in (True, False): + for dtype in (int, np.float32, np.int16): + for sort in (True, False): + v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) + v = v.fit(D) + self._test_conversion(D, v) + + + def test_unseen_or_no_features(self): + D1 = [{"camelot": 0, "spamalot": 1}] + D2 = [{}, {"nothing": 21}] + + for sparse in (True, False): + for dtype in (int, np.float32, np.int16): + for sort in (True, False): + v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) + v = v.fit(D1) + self._test_conversion(D2, v) + + + def test_int_features_in_pipeline(self): + rn.seed(0) + + x_train_dict = [ + dict((rn.randint(100), 1) for i in range(20)) for j in range(100) + ] + y_train = [0, 1] * 50 + + # multi_class default changed in version >= 0.22 from ‘ovr’ to ‘auto’. + # Specify explicitly to match < 0.22 behavior. + pl = Pipeline([("dv", DictVectorizer()), ("lm", LogisticRegression(multi_class='ovr'))]) + pl.fit(x_train_dict, y_train) + + model = coremltools.converters.sklearn.convert( + pl, input_features="features", output_feature_names="target" + ) + + if _is_macos() and _macos_version() >= (10, 13): + x = pd.DataFrame( + {"features": x_train_dict, "target": pl.predict(x_train_dict)} + ) + + cur_eval_metics = evaluate_classifier(model, x) + self.assertEqual(cur_eval_metics["num_errors"], 0) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py new file mode 100644 index 00000000..88b5d47f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py @@ -0,0 +1,30 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import coremltools.models._feature_management as fm +import coremltools.models.datatypes as dt +from coremltools._deps import _HAS_SKLEARN + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.")
+class FeatureManagementTests(unittest.TestCase):
+    def test_all_strings(self):
+        features = ["a", "b", "c"]
+        processed_features = [
+            ("a", dt.Double()),
+            ("b", dt.Double()),
+            ("c", dt.Double()),
+        ]
+        out = fm.process_or_validate_features(features)
+        self.assertEqual(out, processed_features)
+        self.assertTrue(fm.is_valid_feature_list(out))
+
+    def test_single_array(self):
+        self.assertEqual(
+            fm.process_or_validate_features("a", num_dimensions=10),
+            [("a", dt.Array(10))],
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py
new file mode 100644
index 00000000..b912f69b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import random
+import unittest
+
+import pandas as pd
+
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.converters.sklearn import convert
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_classifier,
+                                      evaluate_classifier_with_probabilities)
+
+if _HAS_SKLEARN:
+    from sklearn.linear_model import LogisticRegression
+    from sklearn.svm import LinearSVC
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class GlmClassifierTest(unittest.TestCase):
+    def test_logistic_regression_binary_classification_with_string_labels(self):
+        self._conversion_and_evaluation_helper_for_logistic_regression(["Foo", "Bar"])
+
+    def test_logistic_regression_multiclass_classification_with_int_labels(self):
+        self._conversion_and_evaluation_helper_for_logistic_regression([1, 2, 3, 4])
+
+    @staticmethod
+    def _generate_random_data(labels):
+        random.seed(42)
+
+        # Generate some random data
+        x, y = [], []
+        for _ in range(100):
+            x.append([random.gauss(2, 3), random.gauss(-1, 2)])
+            y.append(random.choice(labels))
+        return x, y
+
+    def _conversion_and_evaluation_helper_for_logistic_regression(self, class_labels):
+        options = {
+            "C": (0.1, 1.0, 2.0),
+            "fit_intercept": (True, False),
+            "class_weight": ("balanced", None),
+            "solver": ("newton-cg", "lbfgs", "liblinear", "sag"),
+        }
+
+        # Generate a list of all combinations of options and the default parameters
+        product = itertools.product(*options.values())
+        args = [{}] + [dict(zip(options.keys(), p)) for p in product]
+
+        x, y = GlmClassifierTest._generate_random_data(class_labels)
+        column_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=column_names)
+
+        for cur_args in args:
+            # The multi_class default changed from 'ovr' to 'auto' in scikit-learn 0.22.
+            # Specify it explicitly to match the <0.22 behavior.
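+            # (With multi_class="auto", newer scikit-learn may pick a multinomial
+            # formulation for non-liblinear solvers, which changes predict_proba
+            # and would break the tight probability comparison below.)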
+            cur_model = LogisticRegression(**cur_args, multi_class='ovr')
+            cur_model.fit(x, y)
+
+            spec = convert(
+                cur_model, input_features=column_names, output_feature_names="target"
+            )
+
+            if _is_macos() and _macos_version() >= (10, 13):
+                probability_lists = cur_model.predict_proba(x)
+                df["classProbability"] = [
+                    dict(zip(cur_model.classes_, cur_vals))
+                    for cur_vals in probability_lists
+                ]
+
+                metrics = evaluate_classifier_with_probabilities(
+                    spec, df, probabilities="classProbability", verbose=False
+                )
+                self.assertEqual(metrics["num_key_mismatch"], 0)
+                self.assertLess(metrics["max_probability_error"], 0.00001)
+
+    def test_linear_svc_binary_classification_with_string_labels(self):
+        self._conversion_and_evaluation_helper_for_linear_svc(["Foo", "Bar"])
+
+    def test_linear_svc_multiclass_classification_with_int_labels(self):
+        self._conversion_and_evaluation_helper_for_linear_svc([1, 2, 3, 4])
+
+    def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):
+        ARGS = [
+            {},
+            {"C": 0.75, "loss": "hinge"},
+            {"penalty": "l1", "dual": False},
+            {"tol": 0.001, "fit_intercept": False},
+            {"intercept_scaling": 1.5},
+        ]
+
+        x, y = GlmClassifierTest._generate_random_data(class_labels)
+        column_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=column_names)
+
+        for cur_args in ARGS:
+            cur_model = LinearSVC(**cur_args)
+            cur_model.fit(x, y)
+
+            spec = convert(
+                cur_model, input_features=column_names, output_feature_names="target"
+            )
+
+            if _is_macos() and _macos_version() >= (10, 13):
+                df["target"] = cur_model.predict(x)
+
+                cur_eval_metrics = evaluate_classifier(spec, df, verbose=False)
+                self.assertEqual(cur_eval_metrics["num_errors"], 0)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py
new file mode 100644
index 00000000..7afef95d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+from distutils.version import StrictVersion
+
+import numpy as np
+import numpy.random as rn
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    import sklearn
+
+    try:
+        # scikit-learn >= 0.21
+        from sklearn.impute import SimpleImputer as Imputer
+
+        sklearn_class = sklearn.impute.SimpleImputer
+    except ImportError:
+        # scikit-learn < 0.21
+        from sklearn.preprocessing import Imputer
+
+        sklearn_class = sklearn.preprocessing.Imputer
+
+    from coremltools.converters import sklearn as converter
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class NumericalImputerTestCase(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+ """ + + def test_conversion_boston(self): + + from sklearn.datasets import load_boston + + scikit_data = load_boston() + + sh = scikit_data.data.shape + + rn.seed(0) + missing_value_indices = [ + (rn.randint(sh[0]), rn.randint(sh[1])) for k in range(sh[0]) + ] + + for strategy in ["mean", "median", "most_frequent"]: + for missing_value in [0, "NaN", -999]: + # SimpleImputer >=0.22 does not accept missing values encoded as NaN. + if _SKLEARN_VERSION >= StrictVersion("0.22"): + if missing_value == "NaN": + continue + + X = np.array(scikit_data.data).copy() + + for i, j in missing_value_indices: + X[i, j] = missing_value + + model = Imputer(missing_values=missing_value, strategy=strategy) + model = model.fit(X) + + tr_X = model.transform(X.copy()) + + spec = converter.convert(model, scikit_data.feature_names, "out") + + input_data = [dict(zip(scikit_data.feature_names, row)) for row in X] + + output_data = [{"out": row} for row in tr_X] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py new file mode 100644 index 00000000..e74b20d9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py @@ -0,0 +1,342 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as np +import PIL.Image + +import coremltools +from coremltools._deps import _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND +from coremltools.models.utils import _is_macos, _macos_version + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor + from sklearn.linear_model import LinearRegression + from sklearn.svm import SVC, SVR + from sklearn.tree import DecisionTreeRegressor + + +def create_model(spec): + """ + Create MLModel with specified types + Parameters + ---------- + spec: Pb spec from 3rd party converted model + + Returns + ------- + MLModel + """ + return coremltools.models.MLModel(spec) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" +) +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class TestIODataTypes(unittest.TestCase): + """ + This class tests for different I/O feature data types for an .mlmodel + It will cover the following areas to test for: + - All features must have a valid type + - Multiarrays must have a valid dataType. Inputs must specify shape. Shape must have >= 0 elements + - Images must have a valid colorspace. 
width & height have to be >= 0 + - Dictionaries must have a valid key type + """ + + @property + def scikit_data(self): + return load_boston() + + def _feature_data_type(self, dtype): + feature_dict = {np.int32: "INT32", np.float32: "FLOAT32", np.float64: "DOUBLE"} + return feature_dict[dtype] + + @property + def number_data_type(self): + return dict( + int8=np.int8, + int16=np.int16, + int32=np.int32, + uint8=np.uint8, + uint16=np.uint16, + uint32=np.uint32, + float=np.float32, + double=np.double, + ) + + def _sklearn_setup(self, model, dtype, data, target): + model.fit(data, target) + spec = coremltools.converters.sklearn.convert( + model, "data", "target" + ).get_spec() + return model, spec + + def _check_tree_model(self, spec, inputType, outputType, n_out): + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), n_out) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual(spec.description.output[0].type.WhichOneof("Type"), outputType) + self.assertEqual(spec.description.input[0].name, "data") + self.assertEqual(spec.description.input[0].type.WhichOneof("Type"), inputType) + + def test_tree_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = DecisionTreeRegressor(random_state=1) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "doubleType", 1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_random_forest_classifier(self): + for dtype in self.number_data_type.keys(): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + scikit_model = RandomForestClassifier(random_state=1, n_estimators=10) + data = self.scikit_data["data"].astype(dtype) + target = ( + self.scikit_data["target"].astype(dtype) + > self.scikit_data["target"].astype(dtype).mean() + ) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "int64Type", 2) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_random_forest_regressor(self): + for dtype in self.number_data_type.keys(): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. 
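+            # (For reference: the RandomForest n_estimators default went from
+            # 10 to 100 in scikit-learn 0.22.)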
+ scikit_model = RandomForestRegressor(random_state=1, n_estimators=10) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "doubleType", 1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_support_vector_classifier(self): + for dtype in self.number_data_type.keys(): + scikit_model = SVC(kernel="rbf", gamma=1.2, C=1) + data = self.scikit_data["data"].astype(dtype) + target = ( + self.scikit_data["target"].astype(dtype) + > self.scikit_data["target"].astype(dtype).mean() + ) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + coreml_model = create_model(spec) + for idx in range(0, 10): + test_data = data[idx].reshape(1, -1) + try: + self.assertEqual( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + bool( + int(coreml_model.predict({"data": test_data})["target"]) + ), + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_support_vector_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = SVR(kernel="rbf") + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + coreml_model = create_model(spec) + try: + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_linear_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = LinearRegression(normalize=True) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. 
".format(dtype)) + + def test_image_output_rgb(self): + input_shape = (3, 10, 20) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "RGB" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("RGBA", coreml_out.mode) + np.testing.assert_equal( + np.uint8(input_data), np.array(coreml_out).transpose(2, 0, 1)[:3, :] + ) + + @unittest.skip("rdar://71638164") + def test_image_output_bgr(self): + input_shape = (3, 15, 25) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "BGR" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("RGBA", coreml_out.mode) + np.testing.assert_equal( + np.uint8(input_data), + np.array(coreml_out)[:, :, ::-1].transpose(2, 0, 1)[1:, :], + ) + + def test_image_output_grayscale(self): + input_shape = (1, 20, 30) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "GRAYSCALE" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("L", coreml_out.mode) + np.testing.assert_equal(np.uint8(input_data)[0], np.array(coreml_out)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py 
b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py new file mode 100644 index 00000000..1781d139 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py @@ -0,0 +1,278 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from scipy import sparse + +from coremltools._deps import _HAS_SKLEARN + +if _HAS_SKLEARN: + from sklearn.datasets import load_iris + from sklearn.neighbors import KNeighborsClassifier + + from coremltools.converters import sklearn + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class KNeighborsClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + print("Setting up KNeighborsClassifier converter tests") + iris_samples = load_iris() + self.iris_X = iris_samples.data + self.iris_y = iris_samples.target + + def test_conversion_unfitted(self): + """Tests conversion failure for an unfitted scikit model.""" + scikit_model = KNeighborsClassifier() + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def test_conversion_brute_algorithm(self): + """Tests conversion of a scikit KNeighborsClassifier using the brute force algorithm.""" + scikit_model = KNeighborsClassifier(algorithm="brute", n_neighbors=42) + scikit_model.fit(self.iris_X, self.iris_y) + + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + + self.assertIsNotNone(coreml_spec) + self.assertTrue(coreml_spec.HasField("kNearestNeighborsClassifier")) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue, 42 + ) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue, 1 + ) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue, + len(self.iris_X), + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting") + ) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions, + len(self.iris_X[0]), + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "linearIndex" + ) + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "squaredEuclideanDistance" + ) + ) + + self.validate_labels(coreml_spec, self.iris_y) + self.validate_float_samples(coreml_spec, self.iris_X) + + def test_conversion_kd_tree_algorithm(self): + """Tests conversion of a scikit KNeighborsClassifier using the kd_tree algorithm.""" + test_leaf_size = 23 + test_n_neighbors = 42 + scikit_model = KNeighborsClassifier( + algorithm="kd_tree", leaf_size=test_leaf_size, n_neighbors=test_n_neighbors + ) + scikit_model.fit(self.iris_X, self.iris_y) + + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + + self.assertIsNotNone(coreml_spec) + self.assertTrue(coreml_spec.HasField("kNearestNeighborsClassifier")) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue, + test_n_neighbors, + ) + 
self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue, 1 + ) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue, + len(self.iris_X), + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting") + ) + self.assertEqual( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions, + len(self.iris_X[0]), + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "singleKdTreeIndex" + ) + ) + self.assertEqual( + test_leaf_size, + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize, + ) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "squaredEuclideanDistance" + ) + ) + + self.validate_labels(coreml_spec, self.iris_y) + self.validate_float_samples(coreml_spec, self.iris_X) + + def test_conversion_auto_algorithm(self): + """Tests conversion of a scikit KNeighborsClassifier using the auto algorithm.""" + test_n_neighbors = 42 + scikit_model = KNeighborsClassifier( + algorithm="auto", n_neighbors=test_n_neighbors + ) + scikit_model.fit(self.iris_X, self.iris_y) + + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + + def test_conversion_unsupported_algorithm(self): + """Test a scikit KNeighborsClassifier with an invalid algorithm.""" + scikit_model = KNeighborsClassifier(algorithm="ball_tree") + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def test_conversion_weight_function_good(self): + scikit_model = KNeighborsClassifier(weights="uniform") + scikit_model.fit(self.iris_X, self.iris_y) + + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting") + ) + + def test_conversion_unsupported_weight_function(self): + scikit_model = KNeighborsClassifier(algorithm="brute", weights="distance") + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def callable_weight_function(): + print("Inside callable_weight_function") + + scikit_model = KNeighborsClassifier( + algorithm="brute", weights=callable_weight_function + ) + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def test_conversion_distance_function_good(self): + """Tests conversion of a scikit KNeighborsClassifier with a valid distance metric.""" + scikit_model = KNeighborsClassifier(algorithm="brute", metric="euclidean") + scikit_model.fit(self.iris_X, self.iris_y) + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "squaredEuclideanDistance" + ) + ) + + # Minkowski metric with p=2 is equivalent to the squared Euclidean distance + scikit_model = KNeighborsClassifier(algorithm="brute", metric="minkowski", p=2) + scikit_model.fit(self.iris_X, self.iris_y) + coreml_model = sklearn.convert(scikit_model, "single_input", "single_output") + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + self.assertTrue( + coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField( + "squaredEuclideanDistance" + ) + ) + + def
test_conversion_unsupported_distance_function(self): + """Tests conversion of a scikit KNeighborsClassifier with an invalid distance metric.""" + # There are many possible distance functions for a brute force neighbors function, but these 3 should give us + # coverage over the converter code. + scikit_model = KNeighborsClassifier(algorithm="brute", metric="manhattan") + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + scikit_model = KNeighborsClassifier(algorithm="kd_tree", metric="chebyshev") + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + scikit_model = KNeighborsClassifier(algorithm="brute", metric="minkowski", p=3) + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def callable_distance_function(): + print("Inside callable_distance_function") + + scikit_model = KNeighborsClassifier( + algorithm="brute", metric=callable_distance_function + ) + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def test_conversion_with_sparse_X(self): + """Tests conversion of a model that's fitted with sparse data.""" + num_samples = 100 + num_dims = 64 + sparse_X = sparse.rand( + num_samples, num_dims, format="csr" + ) # KNeighborsClassifier only supports CSR format + y = self.iris_y[ + 0:num_samples + ] # the labels themselves don't matter - just use 100 of the Iris ones + + sklearn_model = KNeighborsClassifier(algorithm="brute") + sklearn_model.fit(sparse_X, y) + + coreml_model = sklearn.convert(sklearn_model) + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + + def test_conversion_with_sparse_y(self): + """Tests conversion of a model that's fitted with y values in a sparse format.""" + from sklearn.model_selection import train_test_split + + X_train, X_test, y_train, y_test = train_test_split( + self.iris_X, self.iris_y, test_size=0.2, train_size=0.8 + ) + + from sklearn import preprocessing + + lb = preprocessing.LabelBinarizer(sparse_output=True) + binarized_y = lb.fit_transform(y_train) + + sklearn_model = KNeighborsClassifier(algorithm="brute") + sklearn_model.fit(X_train, binarized_y) + + self.assertRaises(ValueError, sklearn.convert, sklearn_model) + + def validate_labels(self, spec, expected): + """Validate the labels returned from the converted scikit KNeighborsClassifier""" + self.assertTrue(spec.kNearestNeighborsClassifier.HasField("int64ClassLabels")) + for index, label in enumerate( + spec.kNearestNeighborsClassifier.int64ClassLabels.vector + ): + self.assertEqual(label, expected[index]) + + def validate_float_samples(self, spec, expected): + """Validate the float samples returned from the converted scikit KNeighborsClassifier""" + num_dimensions = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions + ) + for index, sample in enumerate( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples + ): + for dim in range(0, num_dimensions): + self.assertAlmostEqual( + sample.vector[dim], expected[index][dim], places=6 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py new file mode 100644 index 00000000..e94fc595 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py @@ 
-0,0 +1,134 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import LinearRegression + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import LinearSVR + + from coremltools.converters.sklearn import convert + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class LinearRegressionScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + scikit_data = load_boston() + scikit_model = LinearRegression() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + spec = convert(self.scikit_model, input_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + self.assertTrue( + spec.pipelineRegressor.pipeline.models[-1].HasField("glmRegressor") + ) + lr = spec.pipelineRegressor.pipeline.models[-1].glmRegressor + self.assertEqual(lr.offset, self.scikit_model.intercept_) + self.assertEqual(len(lr.weights), 1) + self.assertEqual(len(lr.weights[0].value), 13) + for i, w in enumerate(lr.weights[0].value): + self.assertAlmostEqual(w, self.scikit_model.coef_[i]) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = LinearRegression() + spec = convert(model, "data", "out") + + # Check the expected class during conversion.
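+ # (OneHotEncoder is a transformer rather than a regressor, so the converter is expected to reject it.)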
+ with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = convert(model, "data", "out") + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_linear_regression_evaluation(self): + """ + Check that the evaluation results are the same in scikit learn and coremltools + """ + input_names = self.scikit_data.feature_names + df = pd.DataFrame(self.scikit_data.data, columns=input_names) + + for normalize_value in (True, False): + cur_model = LinearRegression(normalize=normalize_value) + cur_model.fit(self.scikit_data["data"], self.scikit_data["target"]) + spec = convert(cur_model, input_names, "target") + + df["target"] = cur_model.predict(self.scikit_data.data) + + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_linear_svr_evaluation(self): + """ + Check that the evaluation results are the same in scikit learn and coremltools + """ + ARGS = [ + {}, + {"C": 0.5, "epsilon": 0.25}, + {"dual": False, "loss": "squared_epsilon_insensitive"}, + {"tol": 0.005}, + {"fit_intercept": False}, + {"intercept_scaling": 1.5}, + ] + + input_names = self.scikit_data.feature_names + df = pd.DataFrame(self.scikit_data.data, columns=input_names) + + for cur_args in ARGS: + cur_model = LinearSVR(**cur_args) + cur_model.fit(self.scikit_data["data"], self.scikit_data["target"]) + spec = convert(cur_model, input_names, "target") + + df["target"] = cur_model.predict(self.scikit_data.data) + + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py new file mode 100644 index 00000000..7fb34d4e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py @@ -0,0 +1,418 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import unittest + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models import MLModel +from coremltools.models.nearest_neighbors import \ + KNearestNeighborsClassifierBuilder +from coremltools.models.utils import _is_macos + +if _HAS_SKLEARN: + from sklearn.datasets import load_iris + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class NearestNeighborsBuilderTest(unittest.TestCase): + """ + Unit tests for the nearest neighbors builder class. 
+ """ + + def setUp(self): + iris_samples = load_iris() + self.iris_X = iris_samples.data + self.iris_y = iris_samples.target + self.training_X = self.iris_X[-30:] + self.training_y = self.iris_y[-30:] + + def tearDown(self): + # Do any cleanup here + pass + + def create_builder(self, default_class_label="default_label"): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=4, + default_class_label=default_class_label, + ) + return builder + + def test_builder_output_types(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("stringClassLabels") + ) + + builder = self.create_builder(default_class_label=12) + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels") + ) + + with self.assertRaises(TypeError): + bad_default_label = float(21.32) + self.create_builder(default_class_label=bad_default_label) + + def test_builder_training_input(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("stringClassLabels") + ) + + self.assertEqual(builder.spec.description.trainingInput[0].name, "input") + self.assertEqual( + builder.spec.description.trainingInput[0].type.WhichOneof("Type"), + "multiArrayType", + ) + self.assertEqual(builder.spec.description.trainingInput[1].name, "output") + self.assertEqual( + builder.spec.description.trainingInput[1].type.WhichOneof("Type"), + "stringType", + ) + + def test_make_updatable(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertTrue(builder.spec.isUpdatable) + builder.is_updatable = False + self.assertFalse(builder.spec.isUpdatable) + builder.is_updatable = True + self.assertTrue(builder.spec.isUpdatable) + + def test_author(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.spec.description.metadata.author, "") + builder.author = "John Doe" + self.assertEqual(builder.author, "John Doe") + self.assertEqual(builder.spec.description.metadata.author, "John Doe") + + def test_description(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.spec.description.metadata.shortDescription, "") + builder.description = "This is a description" + self.assertEqual(builder.description, "This is a description") + self.assertEqual( + builder.spec.description.metadata.shortDescription, "This is a description" + ) + + def test_weighting_scheme(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.weighting_scheme = "uniform" + self.assertEqual(builder.weighting_scheme, "uniform") + + builder.weighting_scheme = "inverse_distance" + self.assertEqual(builder.weighting_scheme, "inverse_distance") + + builder.weighting_scheme = "unIfOrM" + self.assertEqual(builder.weighting_scheme, "uniform") + + builder.weighting_scheme = "InVerSE_DISTance" + self.assertEqual(builder.weighting_scheme, "inverse_distance") + + with self.assertRaises(TypeError): + builder.weighting_scheme = "test" + + def test_index_type(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("kd_tree") + self.assertEqual(builder.index_type, "kd_tree") # test 
default value + self.assertEqual(builder.leaf_size, 30) + + builder.set_index_type("linear") + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("kd_tree", leaf_size=45) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 45) + + builder.set_index_type("linear", leaf_size=37) + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("KD_TrEe", leaf_size=22) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 22) + + builder.set_index_type("linEAR") + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + with self.assertRaises(TypeError): + builder.set_index_type("unsupported_index") + + with self.assertRaises(TypeError): + builder.set_index_type("kd_tree", -10) + + with self.assertRaises(TypeError): + builder.set_index_type("kd_tree", 0) + + def test_leaf_size(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.set_index_type("kd_tree", leaf_size=45) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 45) + + builder.leaf_size = 12 + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 12) + + def test_set_number_of_neighbors_with_bounds(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.number_of_neighbors, 5) + (min_value, max_value) = builder.number_of_neighbors_allowed_range() + self.assertEqual(min_value, 1) + self.assertEqual(max_value, 1000) + + builder.set_number_of_neighbors_with_bounds(12, allowed_range=(2, 24)) + (min_value, max_value) = builder.number_of_neighbors_allowed_range() + self.assertEqual(builder.number_of_neighbors, 12) + self.assertEqual(min_value, 2) + self.assertEqual(max_value, 24) + allowed_values = builder.number_of_neighbors_allowed_set() + self.assertIsNone(allowed_values) + + test_set = {3, 5, 7, 9} + builder.set_number_of_neighbors_with_bounds(7, allowed_set=test_set) + self.assertEqual(builder.number_of_neighbors, 7) + allowed_values = builder.number_of_neighbors_allowed_set() + self.assertIsNotNone(allowed_values) + self.assertEqual(allowed_values, test_set) + + def test_set_number_of_neighbors_with_bounds_error_conditions(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3) + + test_range = (3, 15) + test_set = {1, 3, 5} + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds( + 3, allowed_range=test_range, allowed_set=test_set + ) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3, allowed_range=(-5, 5)) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3, allowed_range=(5, 1)) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds( + 3, allowed_range=test_range, allowed_set=test_set + ) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(2, allowed_range=test_range) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(5, allowed_set={5, -3, 7}) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + + with self.assertRaises(ValueError): + 
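+ # (4 is absent from the allowed set {1, 3, 5}, so this repeated check should raise as well.)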
builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(2, allowed_set=[1, 2, 3]) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range={2, 200}) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range=(2, 10, 20)) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=set()) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range=[]) + + def test_set_number_of_neighbors(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.set_number_of_neighbors_with_bounds(12, allowed_range=(2, 24)) + self.assertEqual(builder.number_of_neighbors, 12) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(1, allowed_range=(2, 24)) + builder.set_number_of_neighbors_with_bounds(4, allowed_range=(2, 24)) + self.assertEqual(builder.number_of_neighbors, 4) + + test_set = {3, 5, 7, 9} + builder.set_number_of_neighbors_with_bounds(7, allowed_set=test_set) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + builder.set_number_of_neighbors_with_bounds(5, allowed_set=test_set) + self.assertEqual(builder.number_of_neighbors, 5) + + def test_add_samples_invalid_data(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + invalid_X = [[1.0, 2.4]] + with self.assertRaises(TypeError): + builder.add_samples(invalid_X, self.training_y) + + with self.assertRaises(TypeError): + builder.add_samples(self.training_X, self.training_y[:3]) + + with self.assertRaises(TypeError): + builder.add_samples([], self.training_y) + + with self.assertRaises(TypeError): + builder.add_samples(self.training_X, []) + + def test_add_samples_int_labels(self): + builder = self.create_builder(default_class_label=12) + self.assertIsNotNone(builder) + + some_X = self.training_X[:10] + some_y = self.training_y[:10] + builder.add_samples(some_X, some_y) + self._validate_samples(builder.spec, some_X, some_y) + + addl_X = self.training_X[10:20] + addl_y = self.training_y[10:20] + builder.add_samples(addl_X, addl_y) + self._validate_samples(builder.spec, self.training_X[:20], self.training_y[:20]) + + def test_add_samples_string_labels(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + + some_X = self.training_X[:3] + some_y = ["one", "two", "three"] + builder.add_samples(some_X, some_y) + self._validate_samples(builder.spec, some_X, some_y) + + addl_X = self.training_X[3:6] + addl_y = ["four", "five", "six"] + builder.add_samples(addl_X, addl_y) + self._validate_samples(builder.spec, self.training_X[0:6], some_y + addl_y) + + def test_add_samples_invalid_label_types(self): + builder_int_labels = self.create_builder(default_class_label=42) + self.assertIsNotNone(builder_int_labels) + + some_X = self.training_X[:3] + invalid_int_y = [0, "one", 2] + with self.assertRaises(TypeError): + builder_int_labels.add_samples(some_X, invalid_int_y) + + builder_string_labels = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder_string_labels) + + invalid_string_y = ["zero", "one", 2] + with self.assertRaises(TypeError): + builder_string_labels.add_samples(some_X, invalid_string_y) + + @unittest.skipUnless(_is_macos(), "Only supported on MacOS platform.") + def 
test_can_init_and_save_model_from_builder_with_updated_spec(self): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=10, + default_class_label="defaultLabel", + k=3, + weighting_scheme="inverse_distance", + index_type="kd_tree", + leaf_size=50, + ) + builder.author = "CoreML Team" + builder.license = "MIT" + builder.description = "test_builder_with_validation" + + # Save the updated spec + coreml_model = MLModel(builder.spec) + self.assertIsNotNone(coreml_model) + coreml_model_path = "/tmp/__test_builder_with_validation.mlmodel" + + try: + coreml_model.save(coreml_model_path) + self.assertTrue(os.path.isfile(coreml_model_path)) + finally: + self._delete_mlmodel_and_mlmodelc(coreml_model_path) + + @unittest.skipUnless(_is_macos(), "Only supported on MacOS platform.") + def test_can_init_and_save_model_from_builder_default_parameters(self): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=4, + default_class_label="defaultLabel", + ) + + # Save the updated spec + coreml_model = MLModel(builder.spec) + self.assertIsNotNone(coreml_model) + coreml_model_path = "/tmp/__test_builder_with_validation.mlmodel" + + try: + coreml_model.save(coreml_model_path) + self.assertTrue(os.path.isfile(coreml_model_path)) + finally: + self._delete_mlmodel_and_mlmodelc(coreml_model_path) + + def _validate_samples(self, spec, expected_X, expected_y): + """Validate the float samples returned from the converted scikit KNeighborsClassifier""" + num_dimensions = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions + ) + for index, sample in enumerate( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples + ): + for dim in range(0, num_dimensions): + self.assertAlmostEqual( + sample.vector[dim], expected_X[index][dim], places=6 + ) + + if spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"): + for index, label in enumerate( + spec.kNearestNeighborsClassifier.int64ClassLabels.vector + ): + self.assertEqual(label, expected_y[index]) + + elif spec.kNearestNeighborsClassifier.HasField("stringClassLabels"): + for index, label in enumerate( + spec.kNearestNeighborsClassifier.stringClassLabels.vector + ): + self.assertEqual(label, expected_y[index]) + + @staticmethod + def _delete_mlmodel_and_mlmodelc(path_to_mlmodel): + """Delete the .mlmodel and .mlmodelc for the given .mlmodel.""" + if os.path.exists(path_to_mlmodel): + os.remove(path_to_mlmodel) + path_to_mlmodelc = "{}c".format(path_to_mlmodel) + if os.path.exists(path_to_mlmodelc): + shutil.rmtree(path_to_mlmodelc) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py new file mode 100644 index 00000000..b396a21b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py @@ -0,0 +1,60 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as _np + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.preprocessing import Normalizer + + from coremltools.converters import sklearn as converter + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" +) +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class NormalizerScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + def test_random(self): + # Generate some random data + X = _np.random.random(size=(50, 3)) + + for param in ("l1", "l2", "max"): + cur_model = Normalizer(norm=param) + + output = cur_model.fit_transform(X) + + spec = converter.convert(cur_model, ["a", "b", "c"], "out") + + evaluate_transformer( + spec, + [dict(zip(["a", "b", "c"], row)) for row in X], + [{"out": row} for row in output], + ) + + def test_boston(self): + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = Normalizer(norm="l2").fit(scikit_data.data) + + spec = converter.convert(scikit_model, scikit_data.feature_names, "out") + + input_data = [ + dict(zip(scikit_data.feature_names, row)) for row in scikit_data.data + ] + + output_data = [{"out": row} for row in scikit_model.transform(scikit_data.data)] + + evaluate_transformer(spec, input_data, output_data) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py new file mode 100644 index 00000000..93be1124 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py @@ -0,0 +1,290 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest +from copy import copy +from distutils.version import StrictVersion + +import numpy as np + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import Normalizer, OneHotEncoder + + from coremltools.converters import sklearn + from coremltools.models.datatypes import Array + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class OneHotEncoderScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model.
+ """ + scikit_data = [[0], [1], [2], [4], [3], [2], [4], [5], [6], [7]] + scikit_data_multiple_cols = [[0, 1], [1, 0], [2, 2], [3, 3], [4, 4]] + scikit_model = OneHotEncoder() + scikit_model.fit(scikit_data) + + # Save the data and the model + self.scikit_data = np.asarray(scikit_data, dtype="d") + self.scikit_data_multiple_cols = np.asarray( + scikit_data_multiple_cols, dtype="d" + ) + self.scikit_model = scikit_model + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_one_column(self): + # Fit a single OHE + scikit_model = OneHotEncoder() + scikit_model.fit(self.scikit_data) + spec = sklearn.convert(scikit_model, "single_feature", "out").get_spec() + + test_data = [{"single_feature": row} for row in self.scikit_data] + scikit_output = [ + {"out": row} for row in scikit_model.transform(self.scikit_data).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_many_columns(self): + scikit_model = OneHotEncoder() + scikit_model.fit(self.scikit_data_multiple_cols) + spec = sklearn.convert( + scikit_model, ["feature_1", "feature_2"], "out" + ).get_spec() + + test_data = [ + {"feature_1": row[0], "feature_2": row[1]} + for row in self.scikit_data_multiple_cols + ] + scikit_output = [ + {"out": row} + for row in scikit_model.transform(self.scikit_data_multiple_cols).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_one_column_of_several(self): + if _SKLEARN_VERSION >= StrictVersion("0.22"): + scikit_model = OneHotEncoder() + else: + scikit_model = OneHotEncoder(categorical_features=[0]) + + scikit_model.fit(copy(self.scikit_data_multiple_cols)) + spec = sklearn.convert( + scikit_model, ["feature_1", "feature_2"], "out" + ).get_spec() + + test_data = [ + {"feature_1": row[0], "feature_2": row[1]} + for row in self.scikit_data_multiple_cols + ] + scikit_output = [ + {"out": row} + for row in scikit_model.transform(self.scikit_data_multiple_cols).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." 
+ ) + def test_boston_OHE(self): + data = load_boston() + + for categorical_features in [[3], [8], [3, 8], [8, 3]]: + model = OneHotEncoder( + categorical_features=categorical_features, sparse=False + ) + model.fit(data.data, data.target) + + # Convert the model + spec = sklearn.convert(model, data.feature_names, "out").get_spec() + + input_data = [dict(zip(data.feature_names, row)) for row in data.data] + output_data = [{"out": row} for row in model.transform(data.data)] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def test_boston_OHE_pipeline(self): + data = load_boston() + + for categorical_features in [[3], [8], [3, 8], [8, 3]]: + # Put it in a pipeline so that we can test whether the output dimension + # handling is correct. + + model = Pipeline( + [ + ("OHE", OneHotEncoder(categorical_features=categorical_features)), + ("Normalizer", Normalizer()), + ] + ) + + model.fit(data.data.copy(), data.target) + + # Convert the model + spec = sklearn.convert(model, data.feature_names, "out").get_spec() + + input_data = [dict(zip(data.feature_names, row)) for row in data.data] + output_data = [{"out": row} for row in model.transform(data.data.copy())] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def test_random_sparse_data(self): + + n_columns = 8 + n_categories = 20 + + import numpy.random as rn + + rn.seed(0) + categories = rn.randint(50000, size=(n_columns, n_categories)) + + for dt in ["int32", "float32", "float64"]: + + _X = np.array( + [ + [categories[j, rn.randint(n_categories)] for j in range(n_columns)] + for i in range(100) + ], + dtype=dt, + ) + + # Test this data on a bunch of possible inputs. + for sparse in (True, False): + for categorical_features in [ + "all", + [3], + [4], + range(2, 8), + range(0, 4), + range(0, 8), + ]: + X = _X.copy() + + # This appears to be the only type now working. 
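+ # (Sanity check: the array construction above should have preserved the requested dtype.)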
+ assert X.dtype == np.dtype(dt) + + model = OneHotEncoder( + categorical_features=categorical_features, sparse=sparse + ) + model.fit(X) + + # Convert the model + spec = sklearn.convert(model, [("data", Array(n_columns))], "out") + + X_out = model.transform(X) + if sparse: + X_out = X_out.todense() + + input_data = [{"data": row} for row in X] + output_data = [{"out": row} for row in X_out] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 + + # Test normal data inside a pipeline + for sparse in (True, False): + for categorical_features in [ + "all", + [3], + [4], + range(2, 8), + range(0, 4), + range(0, 8), + ]: + X = _X.copy() + + model = Pipeline( + [ + ( + "OHE", + OneHotEncoder( + categorical_features=categorical_features, + sparse=sparse, + ), + ), + ("Normalizer", Normalizer()), + ] + ) + + model.fit(X) + + # Convert the model + spec = sklearn.convert( + model, [("data", Array(n_columns))], "out" + ).get_spec() + + X_out = model.transform(X) + if sparse: + X_out = X_out.todense() + + input_data = [{"data": row} for row in X] + output_data = [{"out": row} for row in X_out] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = sklearn.convert(model, "data", "out") + + # Check the expected class during conversion. + with self.assertRaises(TypeError): + from sklearn.linear_model import LinearRegression + + model = LinearRegression() + spec = sklearn.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py new file mode 100644 index 00000000..198dee9d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py @@ -0,0 +1,168 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from coremltools._deps import _HAS_SKLEARN + +if _HAS_SKLEARN: + from sklearn.ensemble import RandomForestClassifier + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestBinaryClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestClassifier + + scikit_data = load_boston() + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
+ scikit_model = RandomForestClassifier(random_state=1, n_estimators=10) + target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + scikit_model.fit(scikit_data["data"], target) + + self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count, + scikit_model.estimators_)) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2) + tr = spec.pipelineClassifier.pipeline.models[ + -1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + model = RandomForestClassifier(n_estimators=10) + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during conversion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestMultiClassClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + import numpy as np + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestClassifier + + scikit_data = load_boston() + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
+ scikit_model = RandomForestClassifier(random_state=1, n_estimators=10) + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + scikit_model.fit(scikit_data.data, target) + + self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count, + scikit_model.estimators_)) + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2) + tr = spec.pipelineClassifier.pipeline.models[ + -1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + model = RandomForestClassifier(n_estimators=10) + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during conversion. + with self.assertRaises(Exception): + from sklearn.preprocessing import OneHotEncoder + + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py new file mode 100644 index 00000000..b1be9b54 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py @@ -0,0 +1,141 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest +from distutils.version import StrictVersion + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestClassifier + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestClassificationBostonHousingScikitNumericTest(unittest.TestCase): + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s.
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = RandomForestClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df, verbose=False) + self._check_metrics(metrics, scikit_params) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestBinaryClassifierBostonHousingScikitNumericTest( + RandomForestClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + self.scikit_data = scikit_data + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert(max_depth=13) + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + + options = dict( + n_estimators=[1, 5, 10], + max_depth=[1, 5, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + ) + + if _SKLEARN_VERSION >= StrictVersion("0.19"): + options["min_impurity_decrease"] = [1e-07, 0.1] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestMultiClassClassificationBostonHousingScikitNumericTest( + RandomForestClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + num_classes = 3 + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + n_estimators=[1, 5, 10], + max_depth=[1, 5, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + ) + + if _SKLEARN_VERSION >= StrictVersion("0.19"): + options["min_impurity_decrease"] = [1e-07, 0.1] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. 
This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py new file mode 100644 index 00000000..0c263585 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py @@ -0,0 +1,88 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from coremltools._deps import _HAS_SKLEARN + +if _HAS_SKLEARN: + from sklearn.ensemble import RandomForestRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class RandomForestRegressorScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestRegressor + + scikit_data = load_boston() + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + scikit_model = RandomForestRegressor(random_state=1, n_estimators=10) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count, + scikit_model.estimators_)) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + self.assertEqual(len(spec.pipelineRegressor.pipeline.models), 2) + tr = spec.pipelineRegressor.pipeline.models[ + -1 + ].treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + model = RandomForestRegressor(n_estimators=10) + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. 
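+        # OneHotEncoder is a transformer, not a regressor, so the converter
+        # is expected to reject it.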
+ from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py new file mode 100644 index 00000000..cdb9aed8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py @@ -0,0 +1,107 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestRegressorBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = scikit_data.target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + 0.0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0.0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + scikit_model = RandomForestRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert() + + def test_boston_housing_float_double_corner_case(self): + self._train_convert_evaluate_assert(max_depth=13) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + ## These are all the options in decision tree regression of scikit-learn + options = dict( + criterion=["mse"], + n_estimators=[1, 5, 10], + max_depth=[1, 5], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + min_impurity_decrease=[1e-07, 0.1, 0.0], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py new file mode 100644 index 00000000..6eabce89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py @@ -0,0 +1,106 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import Ridge + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters.sklearn import convert + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikitlearn. Skipping tests.") +class RidgeRegressionScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
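+        Fits a plain Ridge model on the Boston housing data; test_conversion
+        below checks the fitted intercept and coefficients against the
+        converted Core ML spec.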
+        """
+        scikit_data = load_boston()
+        scikit_model = Ridge()
+        scikit_model.fit(scikit_data["data"], scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        input_names = self.scikit_data.feature_names
+        spec = convert(self.scikit_model, input_names, "target").get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 1)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "doubleType"
+        )
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        # Test the ridge regression parameters.
+        self.assertTrue(
+            spec.pipelineRegressor.pipeline.models[-1].HasField("glmRegressor")
+        )
+        lr = spec.pipelineRegressor.pipeline.models[-1].glmRegressor
+        self.assertEqual(lr.offset, self.scikit_model.intercept_)
+        self.assertEqual(len(lr.weights), 1)
+        self.assertEqual(len(lr.weights[0].value), 13)
+        for i, w in enumerate(lr.weights[0].value):
+            self.assertAlmostEqual(w, self.scikit_model.coef_[i])
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = Ridge()
+            spec = convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = convert(model, "data", "out")
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    def test_ridge_regression_evaluation(self):
+        """
+        Check that the evaluation results are the same in scikit-learn and coremltools
+        """
+        input_names = self.scikit_data.feature_names
+        df = pd.DataFrame(self.scikit_data.data, columns=input_names)
+
+        # NOTE: both iterations fit identical models; Ridge's "normalize"
+        # option was removed from scikit-learn, so the loop value is unused.
+        for _ in (True, False):
+            cur_model = Ridge()
+            cur_model.fit(self.scikit_data["data"], self.scikit_data["target"])
+            spec = convert(cur_model, input_names, "target")
+
+            df["target"] = cur_model.predict(self.scikit_data.data)
+
+            metrics = evaluate_regressor(spec, df)
+            self.assertAlmostEqual(metrics["max_error"], 0)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py
new file mode 100644
index 00000000..834a6ce3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as _np + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.preprocessing import StandardScaler + + from coremltools.converters import sklearn as converter + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" +) +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class StandardScalerTestCase(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + def test_random(self): + # Generate some random data + X = _np.random.random(size=(50, 3)) + + cur_model = StandardScaler() + + output = cur_model.fit_transform(X) + + spec = converter.convert(cur_model, ["a", "b", "c"], "out").get_spec() + + metrics = evaluate_transformer( + spec, + [dict(zip(["a", "b", "c"], row)) for row in X], + [{"out": row} for row in output], + ) + + assert metrics["num_errors"] == 0 + + def test_boston(self): + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = StandardScaler().fit(scikit_data.data) + + spec = converter.convert( + scikit_model, scikit_data.feature_names, "out" + ).get_spec() + + input_data = [ + dict(zip(scikit_data.feature_names, row)) for row in scikit_data.data + ] + + output_data = [{"out": row} for row in scikit_model.transform(scikit_data.data)] + + metrics = evaluate_transformer(spec, input_data, output_data) + + assert metrics["num_errors"] == 0 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py new file mode 100644 index 00000000..11db0456 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py @@ -0,0 +1,49 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models import MLModel +from coremltools.models.utils import _is_macos, _macos_version, rename_feature + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import LinearRegression + + from coremltools.converters import sklearn as converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. 
Skipping tests.") +class PipeLineRenameTests(unittest.TestCase): + @classmethod + def setUpClass(self): + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = LinearRegression() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + @unittest.skip("rdar://71638164") + def test_pipeline_rename(self): + # Convert + scikit_spec = converter.convert(self.scikit_model).get_spec() + model = MLModel(scikit_spec) + sample_data = self.scikit_data.data[0] + + # Rename + rename_feature(scikit_spec, "input", "renamed_input") + renamed_model = MLModel(scikit_spec) + + # Check the predictions + if _is_macos() and _macos_version() >= (10, 13): + out_dict = model.predict({"input": sample_data}) + out_dict_renamed = renamed_model.predict({"renamed_input": sample_data}) + self.assertAlmostEqual(list(out_dict.keys()), list(out_dict_renamed.keys())) + self.assertAlmostEqual( + list(out_dict.values()), list(out_dict_renamed.values()) + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py new file mode 100644 index 00000000..08efb96c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py @@ -0,0 +1,342 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import json +import tempfile +import unittest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.converters import sklearn as skl_converter +from coremltools.models.utils import _macos_version + +if _HAS_SKLEARN: + from sklearn.ensemble import GradientBoostingClassifier + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingBinaryClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
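+        The Boston housing target is binarized at its mean, and the total
+        node count across all boosting stages is recorded for comparison
+        with the converted tree ensemble.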
+ """ + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = GradientBoostingClassifier(random_state=1) + target = scikit_data["target"] > scikit_data["target"].mean() + scikit_model.fit(scikit_data["data"], target) + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + self.scikit_model_node_count = s + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + tr = spec.pipelineClassifier.pipeline.models[ + 1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingClassifier() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + +class GradientBoostingMulticlassClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
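+        The continuous target is bucketed into discrete classes with
+        np.digitize over a histogram of the target values.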
+ """ + import numpy as np + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = GradientBoostingClassifier(random_state=1) + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + scikit_model.fit(scikit_data.data, target) + self.target = target + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + self.scikit_model_node_count = s + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2) + tr = spec.pipelineClassifier.pipeline.models[ + -1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingClassifier() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class GradientBoostingBinaryClassifierXGboostTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
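+        An xgboost.XGBClassifier is fit against the mean-binarized Boston
+        housing target.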
+ """ + from sklearn.datasets import load_boston + + scikit_data = load_boston() + self.xgb_model = xgboost.XGBClassifier() + target = scikit_data["target"] > scikit_data["target"].mean() + self.xgb_model.fit(scikit_data["data"], target) + + # Save the data and the model + self.scikit_data = scikit_data + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert( + self.xgb_model, input_names, output_name, mode="classifier" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = xgboost.XGBClassifier() + spec = xgb_converter.convert(model, "data", "out", mode="classifier") + + # Check the expected class during covnersion. + with self.assertRaises(Exception): + model = xgboost.XGBRegressor() + spec = xgb_converter.convert(model, "data", "out", mode="classifier") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class GradientBoostingMulticlassClassifierXGboostTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + import numpy as np + from sklearn.datasets import load_boston + + scikit_data = load_boston() + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + dtrain = xgboost.DMatrix( + scikit_data.data, label=target, feature_names=scikit_data.feature_names + ) + self.xgb_model = xgboost.train({}, dtrain) + self.target = target + + # Save the data and the model + self.scikit_data = scikit_data + self.n_classes = len(np.unique(self.target)) + + def test_conversion(self): + + input_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert( + self.xgb_model, + input_names, + output_name, + mode="classifier", + n_classes=self.n_classes, + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. 
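+        # The converted classifier exposes its trees via
+        # treeEnsembleClassifier.treeEnsemble.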
+ tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + + def test_conversion_from_file(self): + import numpy as np + + output_name = "target" + feature_names = self.scikit_data.feature_names + + xgb_model_json = tempfile.mktemp("xgb_tree_model_classifier.json") + xgb_json_out = self.xgb_model.get_dump(with_stats=True, dump_format="json") + with open(xgb_model_json, "w") as f: + json.dump(xgb_json_out, f) + spec = xgb_converter.convert( + xgb_model_json, + feature_names, + output_name, + mode="classifier", + n_classes=self.n_classes, + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.scikit_data.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py new file mode 100644 index 00000000..c246f3fc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py @@ -0,0 +1,264 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingClassifier + + from coremltools.converters import sklearn as skl_converter + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + scikit_model = GradientBoostingClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if hasattr(scikit_model, '_init_decision_function') and scikit_model.n_classes_ > 2: + # fix initial default prediction for multiclass classification + # https://github.com/scikit-learn/scikit-learn/pull/12983 + assert hasattr(scikit_model, 'init_') + assert hasattr(scikit_model.init_, 'priors') + scikit_model.init_.priors = np.log(scikit_model.init_.priors) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df) + self._check_metrics(metrics) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeBinaryClassificationBostonHousingScikitNumericTest( + BoostedTreeClassificationBostonHousingScikitNumericTest +): + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeMultiClassClassificationBostonHousingScikitNumericTest( + BoostedTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeClassificationBostonHousingXGboostNumericTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter and running both models + """ + + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s. Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **xgboost_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + xgb_model = xgboost.XGBClassifier(**xgboost_params) + xgb_model.fit(self.X, self.target) + + # Convert the model + spec = xgb_converter.convert( + xgb_model, self.feature_names, self.output_name, mode="classifier" + ) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + probabilities = xgb_model.predict_proba(self.X) + df["classProbability"] = [ + dict(zip(xgb_model.classes_, cur_vals)) for cur_vals in probabilities + ] + metrics = evaluate_classifier_with_probabilities( + spec, df, probabilities="classProbability", verbose=False + ) + self.assertEqual(metrics["num_key_mismatch"], 0) + self.assertLess(metrics["max_probability_error"], 1e-3) + + def _classifier_stress_test(self): + options = dict( + max_depth=[1, 10], min_child_weight=[2, 0.5], max_delta_step=[1, 5], + ) + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(_macos_version() >= (10, 16), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeBinaryClassificationBostonHousingXGboostNumericTest( + BoostedTreeClassificationBostonHousingXGboostNumericTest +): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + self._classifier_stress_test() + + +@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeMultiClassClassificationBostonHousingXGboostNumericTest( + BoostedTreeClassificationBostonHousingXGboostNumericTest +): + @classmethod + def setUpClass(self): + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + self._classifier_stress_test() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py new file mode 100644 index 00000000..2ed9fa32 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py @@ -0,0 +1,218 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import json +import tempfile +import unittest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import _macos_version + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters import sklearn as skl_converter + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class GradientBoostingRegressorScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(cls): + """ + Set up the unit test by loading the dataset and training a model. 
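+        The node counts of all estimators across boosting stages are summed
+        so the converted tree ensemble can be checked for completeness.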
+ """ + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + scikit_model = GradientBoostingRegressor(random_state=1) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + cls.scikit_model_node_count = s + + # Save the data and the model + cls.scikit_data = scikit_data + cls.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + tr = spec.pipelineRegressor.pipeline.models[ + -1 + ].treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingRegressor() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + + +@unittest.skipIf(_macos_version() >= (10, 16), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeRegressorXGboostTest(unittest.TestCase): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
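+        A Booster is trained for a single round with xgboost.train on a
+        DMatrix built from the Boston housing data.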
+ """ + if not _HAS_XGBOOST: + return + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + dtrain = xgboost.DMatrix( + scikit_data.data, + label=scikit_data.target, + feature_names=scikit_data.feature_names, + ) + xgb_model = xgboost.train({}, dtrain, 1) + + # Save the data and the model + self.scikit_data = scikit_data + self.xgb_model = xgb_model + self.feature_names = self.scikit_data.feature_names + + def test_conversion(self): + + feature_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert(self.xgb_model, feature_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), 23) + + def test_conversion_from_file(self): + + output_name = "target" + feature_names = self.feature_names + + xgb_model_json = tempfile.mktemp("tree_model.json") + xgb_json_out = self.xgb_model.get_dump(dump_format="json") + with open(xgb_model_json, "w") as f: + json.dump(xgb_json_out, f) + spec = xgb_converter.convert(xgb_model_json, feature_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. 
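+        # A single boosting round on this dataset produces a 23-node ensemble.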
+ tr = spec.treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), 23) + + def test_unsupported_conversion(self): + + feature_names = self.scikit_data.feature_names + output_name = "target" + xgb_model = xgboost.XGBRegressor(objective="reg:gamma") + xgb_model.fit(self.scikit_data.data, self.scikit_data.target) + with self.assertRaises(ValueError): + spec = xgb_converter.convert(xgb_model, feature_names, "target") + + xgb_model = xgboost.XGBRegressor(objective="reg:tweedie") + xgb_model.fit(self.scikit_data.data, self.scikit_data.target) + with self.assertRaises(ValueError): + spec = xgb_converter.convert(xgb_model, feature_names, "target") + + def test_conversion_bad_inputs(self): + + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = GradientBoostingRegressor() + spec = xgb_converter.convert(model, "data", "out") + + # Check the expected class during conversion + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = xgb_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py new file mode 100644 index 00000000..98ea3022 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py @@ -0,0 +1,309 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingRegressorBostonHousingScikitNumericTest(unittest.TestCase): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data["data"] + self.target = scikit_data["target"] + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + self.assertAlmostEqual( + metrics["rmse"], + 0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = GradientBoostingRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, "target", verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_XGBOOST, "Missing xgboost. Skipping") +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class XgboostBoosterBostonHousingNumericTest(unittest.TestCase): + @classmethod + def setUpClass(self): + if not _HAS_XGBOOST: + return + if not _HAS_SKLEARN: + return + + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype("d") + self.dtrain = xgboost.DMatrix( + scikit_data.data, + label=scikit_data.target, + feature_names=scikit_data.feature_names, + ) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, allowed_error={}, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + allowed_error.get("rmse", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + allowed_error.get("max_error", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, bt_params={}, allowed_error={}, **params): + """ + Set up the unit test by loading the dataset and training a model. 
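+        Trains a Booster with the given parameters, converts it, and, on
+        macOS 10.13+, checks the Core ML predictions against xgboost within
+        the allowed error.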
+ """ + # Train a model + xgb_model = xgboost.train(bt_params, self.dtrain, **params) + + # Convert the model + spec = xgb_converter.convert( + xgb_model, self.feature_names, self.output_name, force_32bit_float=False + ) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = xgb_model.predict(self.dtrain) + + # Evaluate it + metrics = evaluate_regressor(spec, df, target="target", verbose=False) + self._check_metrics(metrics, allowed_error, bt_params) + + def test_boston_housing_simple_decision_tree_regression(self): + self._train_convert_evaluate_assert(num_boost_round=1) + + def test_boston_housing_simple_boosted_tree_regression(self): + self._train_convert_evaluate_assert(num_boost_round=10) + + def test_boston_housing_simple_random_forest_regression(self): + self._train_convert_evaluate_assert(bt_params={"subsample": 0.5}, + allowed_error={"rmse": 0.004, "max_error": 0.09}) + + def test_boston_housing_float_double_corner_case(self): + self._train_convert_evaluate_assert( + { + "colsample_bytree": 1, + "colsample_bylevel": 1, + "scale_pos_weight": 1, + "learning_rate": 0.5, + "max_delta_step": 0, + "min_child_weight": 1, + "n_estimators": 1, + "subsample": 0.5, + "objective": "reg:linear", + "max_depth": 5, + }, + num_boost_round=2, + ) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + options = dict( + max_depth=[1, 5], + learning_rate=[0.1, 0.5], + n_estimators=[1, 10], + min_child_weight=[1, 2], + max_delta_step=[0, 0.1], + colsample_bytree=[1, 0.5], + colsample_bylevel=[1, 0.5], + scale_pos_weight=[1], + objective=["reg:linear"], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(arg) + + +@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_XGBOOST, "Missing xgboost. Skipping") +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class XGboostRegressorBostonHousingNumericTest(unittest.TestCase): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + + # Load data and train model + scikit_data = load_boston() + + self.X = scikit_data.data + self.scikit_data = self.X + self.target = scikit_data.target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}, allowed_error={}): + self.assertAlmostEqual( + metrics["rmse"], + allowed_error.get("rmse", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + allowed_error.get("max_error", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, bt_params={}, allowed_error={}, **params): + """ + Set up the unit test by loading the dataset and training a model. 
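+        Fits an xgboost.XGBRegressor with the given parameters, converts it,
+        and, on macOS 10.13+, compares predictions within the allowed error.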
+ """ + # Train a model + xgb_model = xgboost.XGBRegressor(**params) + xgb_model.fit(self.X, self.target) + + # Convert the model (feature_names can't be given because of XGboost) + spec = xgb_converter.convert( + xgb_model, self.feature_names, self.output_name, force_32bit_float=False + ) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = xgb_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, target="target", verbose=False) + self._check_metrics(metrics, bt_params, allowed_error) + + def test_boston_housing_simple_boosted_tree_regression(self): + self._train_convert_evaluate_assert() + + def test_boston_housing_simple_random_forest_regression(self): + self._train_convert_evaluate_assert( + allowed_error={"rmse": 0.05, "max_error": 0.81}, subsample=0.5 + ) + + def test_boston_housing_simple_decision_tree_regression(self): + self._train_convert_evaluate_assert(n_estimators=1) + + def test_boston_housing_float_double_corner_case(self): + self._train_convert_evaluate_assert( + { + "colsample_bytree": 1, + "colsample_bylevel": 1, + "scale_pos_weight": 1, + "learning_rate": 0.1, + "max_delta_step": 0, + "min_child_weight": 1, + "n_estimators": 10, + "subsample": 0.3, + "objective": "reg:linear", + "max_depth": 1, + } + ) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + options = dict( + max_depth=[1, 5], + learning_rate=[0.1, 0.5], + n_estimators=[1, 10], + objective=["reg:linear"], + min_child_weight=[1, 2], + max_delta_step=[0, 0.1], + subsample=[1, 0.5, 0.3], + colsample_bytree=[1, 0.5], + colsample_bylevel=[1, 0.5], + scale_pos_weight=[1], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py new file mode 100644 index 00000000..8df139e6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py @@ -0,0 +1,150 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST + +if _HAS_SKLEARN: + from sklearn.tree import DecisionTreeClassifier + + from coremltools.converters.sklearn import convert as skl_converter + +if _HAS_XGBOOST: + pass + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class DecisionTreeBinaryClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
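+        A single DecisionTreeClassifier is fit on the mean-binarized Boston
+        housing target.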
+ """ + from sklearn.datasets import load_boston + from sklearn.tree import DecisionTreeClassifier + + scikit_data = load_boston() + scikit_model = DecisionTreeClassifier(random_state=1) + target = scikit_data["target"] > scikit_data["target"].mean() + scikit_model.fit(scikit_data["data"], target) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + output_name = "target" + spec = skl_converter(self.scikit_model, "data", "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + self.assertEqual(len(spec.description.input), 1) + + input_type = spec.description.input[0] + + self.assertEqual(input_type.type.WhichOneof("Type"), "multiArrayType") + self.assertEqual(input_type.name, "data") + + # Test the linear regression parameters. + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = DecisionTreeClassifier() + spec = skl_converter(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter(model, "data", "out") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class DecisionTreeMultiClassClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
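+        The target is digitized into histogram buckets to produce a
+        multiclass problem for a single decision tree.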
+ """ + import numpy as np + from sklearn.datasets import load_boston + from sklearn.tree import DecisionTreeClassifier + + scikit_data = load_boston() + scikit_model = DecisionTreeClassifier(random_state=1) + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + scikit_model.fit(scikit_data.data, target) + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.scikit_model = scikit_model + + def test_conversion(self): + output_name = "target" + spec = skl_converter(self.scikit_model, "data", "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + self.assertEqual(spec.description.input[0].name, "data") + self.assertEqual( + spec.description.input[0].type.WhichOneof("Type"), "multiArrayType" + ) + + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = DecisionTreeClassifier() + spec = skl_converter(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py new file mode 100644 index 00000000..bc507b2e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py @@ -0,0 +1,137 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest +from distutils.version import StrictVersion + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.tree import DecisionTreeClassifier + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase): + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = DecisionTreeClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df) + self._check_metrics(metrics, scikit_params) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeBinaryClassificationBostonHousingScikitNumericTest( + DecisionTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + options = dict( + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeMultiClassClassificationBostonHousingScikitNumericTest( + DecisionTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. 
 This could take a while" % len(args))
+        for it, arg in enumerate(args):
+            self._train_convert_evaluate_assert(**arg)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py
new file mode 100644
index 00000000..c8d9e2b5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST
+
+if _HAS_XGBOOST:
+    pass
+
+if _HAS_SKLEARN:
+    from sklearn.tree import DecisionTreeRegressor
+
+    from coremltools.converters import sklearn as skl_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class DecisionTreeRegressorScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing the scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        from sklearn.datasets import load_boston
+        from sklearn.tree import DecisionTreeRegressor
+
+        scikit_data = load_boston()
+        scikit_model = DecisionTreeRegressor(random_state=1)
+        scikit_model.fit(scikit_data["data"], scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        feature_names = self.scikit_data.feature_names
+        output_name = "target"
+        spec = skl_converter.convert(
+            self.scikit_model, feature_names, "target"
+        ).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+        self.assertIsNotNone(spec.treeEnsembleRegressor)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 1)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "doubleType"
+        )
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(feature_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        # Test the tree ensemble parameters.
+        tr = spec.pipelineRegressor.pipeline.models[
+            1
+        ].treeEnsembleRegressor.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            model = DecisionTreeRegressor()
+            spec = skl_converter.convert(model, "data", "out")
+
+        # Check that conversion errors out on an unexpected model class.
+ from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py new file mode 100644 index 00000000..eb3e5a37 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py @@ -0,0 +1,106 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest +from distutils.version import StrictVersion + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeRegressorBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data["data"] + self.target = scikit_data["target"] + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + 0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + from sklearn.tree import DecisionTreeRegressor + + from coremltools.converters import sklearn as skl_converter + + scikit_model = DecisionTreeRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, target="target", verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert(max_depth=20) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + ## These are all the options in decision tree regression of scikit-learn + options = dict( + criterion=["mse"], + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + min_impurity_decrease=[0.0, 1e-07, 0.1], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/version.py b/__packaged__/coreml/.python_dependencies/coremltools/version.py new file mode 100644 index 00000000..b1b16114 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/version.py @@ -0,0 +1,7 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +__version__ = "6.3.0" # VERSION_STRING diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md new file mode 100644 index 00000000..76bb5d96 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md @@ -0,0 +1,39 @@ +Copyright (C) 2022 Apple Inc. All Rights Reserved. + +IMPORTANT: This Apple software is supplied to you by Apple +Inc. ("Apple") in consideration of your agreement to the following +terms, and your use, installation, modification or redistribution of +this Apple software constitutes acceptance of these terms. 
If you do +not agree with these terms, please do not use, install, modify or +redistribute this Apple software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, Apple grants you a personal, non-exclusive +license, under Apple's copyrights in this original Apple software (the +"Apple Software"), to use, reproduce, modify and redistribute the Apple +Software, with or without modifications, in source and/or binary forms; +provided that if you redistribute the Apple Software in its entirety and +without modifications, you must retain this notice and the following +text and disclaimers in all such redistributions of the Apple Software. +Neither the name, trademarks, service marks or logos of Apple Inc. may +be used to endorse or promote products derived from the Apple Software +without specific prior written permission from Apple. Except as +expressly stated in this notice, no other rights or licenses, express or +implied, are granted by Apple herein, including but not limited to any +patent rights that may be infringed by your derivative works or by other +works in which the Apple Software may be incorporated. + +The Apple Software is provided by Apple on an "AS IS" basis. APPLE +MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION +THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND +OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS. + +IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, +MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED +AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE), +STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA new file mode 100644 index 00000000..1dddbb0d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA @@ -0,0 +1,486 @@ +Metadata-Version: 2.1 +Name: python-coreml-stable-diffusion +Version: 0.1.0 +Summary: Run Stable Diffusion on Apple Silicon with Core ML (Python and Swift) +Home-page: https://github.com/apple/ml-stable-diffusion +Author: Apple Inc. 
+Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Description-Content-Type: text/markdown +License-File: LICENSE.md +Requires-Dist: coremltools (>=6.1) +Requires-Dist: diffusers[torch] +Requires-Dist: torch +Requires-Dist: transformers +Requires-Dist: huggingface-hub +Requires-Dist: scipy +Requires-Dist: numpy (<1.24) + +# Core ML Stable Diffusion + +Run Stable Diffusion on Apple Silicon with Core ML + + + +This repository comprises: + +- `python_coreml_stable_diffusion`, a Python package for converting PyTorch models to Core ML format and performing image generation with Hugging Face [diffusers](https://github.com/huggingface/diffusers) in Python +- `StableDiffusion`, a Swift package that developers can add to their Xcode projects as a dependency to deploy image generation capabilities in their apps. The Swift package relies on the Core ML model files generated by `python_coreml_stable_diffusion` + +If you run into issues during installation or runtime, please refer to the [FAQ](#faq) section. Please refer to the [System Requirements](#system-requirements) section before getting started. + + +## Example Results + +There are numerous versions of Stable Diffusion available on the [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion). Here are example results from three of those models: + +`--model-version` | [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) | [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) | [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) | +:------:|:------:|:------:|:------: +Output | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_11_computeUnit_CPU_AND_GPU_modelVersion_stabilityai_stable-diffusion-2-base.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_13_computeUnit_CPU_AND_NE_modelVersion_CompVis_stable-diffusion-v1-4.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_NE_modelVersion_runwayml_stable-diffusion-v1-5.png) +M1 iPad Pro 8GB Latency (s) | 29 | 38 | 38 | +M1 MacBook Pro 16GB Latency (s) | 24 | 35 | 35 | +M2 MacBook Air 8GB Latency (s) | 18 | 23 | 23 | + +Please see [Important Notes on Performance Benchmarks](#important-notes-on-performance-benchmarks) section for details. + +## System Requirements + +The following is recommended to use all the functionality in this repository: + + Python | macOS | Xcode | iPadOS, iOS | +:------:|:-----:|:-----:|:-----------:| + 3.8 | 13.1 | 14.3 | 16.2 | + +## Using Ready-made Core ML Models from Hugging Face Hub + +
+ Click to expand + +🤗 Hugging Face ran the [conversion procedure](#converting-models-to-coreml) on the following models and made the Core ML weights publicly available on the Hub. If you would like to convert a version of Stable Diffusion that is not already available on the Hub, please refer to the [Converting Models to Core ML](#converting-models-to-core-ml). + +* [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/apple/coreml-stable-diffusion-v1-4) +* [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/apple/coreml-stable-diffusion-v1-5) +* [`stabilityai/stable-diffusion-2-base`](https://huggingface.co/apple/coreml-stable-diffusion-2-base) + +If you want to use any of those models you may download the weights and proceed to [generate images with Python](#image-generation-with-python) or [Swift](#image-generation-with-swift). + +There are several variants in each model repository. You may clone the whole repos using `git` and `git lfs` to download all variants, or selectively download the ones you need. + +To clone the repos using `git`, please follow this process: + +**Step 1:** Install the `git lfs` extension for your system. + +`git lfs` stores large files outside the main git repo, and it downloads them from the appropriate server after you clone or checkout. It is available in most package managers, check [the installation page](https://git-lfs.com) for details. + +**Step 2:** Enable `git lfs` by running this command once: + +```bash +git lfs install +``` + +**Step 3:** Use `git clone` to download a copy of the repo that includes all model variants. For Stable Diffusion version 1.4, you'd issue the following command in your terminal: + +```bash +git clone https://huggingface.co/apple/coreml-stable-diffusion-v1-4 +``` + +If you prefer to download specific variants instead of cloning the repos, you can use the `huggingface_hub` Python library. For example, to do generation in Python using the `ORIGINAL` attention implementation (read [this section](#converting-models-to-core-ml) for details), you could use the following helper code: + +```Python +from huggingface_hub import snapshot_download +from huggingface_hub.file_download import repo_folder_name +from pathlib import Path +import shutil + +repo_id = "apple/coreml-stable-diffusion-v1-4" +variant = "original/packages" + +def download_model(repo_id, variant, output_dir): + destination = Path(output_dir) / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) + if destination.exists(): + raise Exception(f"Model already exists at {destination}") + + # Download and copy without symlinks + downloaded = snapshot_download(repo_id, allow_patterns=f"{variant}/*", cache_dir=output_dir) + downloaded_bundle = Path(downloaded) / variant + shutil.copytree(downloaded_bundle, destination) + + # Remove all downloaded files + cache_folder = Path(output_dir) / repo_folder_name(repo_id=repo_id, repo_type="model") + shutil.rmtree(cache_folder) + return destination + +model_path = download_model(repo_id, variant, output_dir="./models") +print(f"Model downloaded at {model_path}") +``` + +`model_path` would be the path in your local filesystem where the checkpoint was saved. Please, refer to [this post](https://huggingface.co/blog/diffusers-coreml) for additional details. + +
+ +## Converting Models to Core ML + +
+  Click to expand
+
+**Step 1:** Create a Python environment and install dependencies:
+
+```bash
+conda create -n coreml_stable_diffusion python=3.8 -y
+conda activate coreml_stable_diffusion
+cd /path/to/cloned/ml-stable-diffusion/repository
+pip install -e .
+```
+
+**Step 2:** Log in to or register for your [Hugging Face account](https://huggingface.co), generate a [User Access Token](https://huggingface.co/settings/tokens) and use this token to set up Hugging Face API access by running `huggingface-cli login` in a Terminal window.
+
+**Step 3:** Navigate to the version of Stable Diffusion that you would like to use on [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion) and accept its Terms of Use. The default model version is [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4). The model version may be changed by the user as described in the next step.
+
+**Step 4:** Execute the following command from the Terminal to generate Core ML model files (`.mlpackage`):
+
+```shell
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet --convert-text-encoder --convert-vae-decoder --convert-safety-checker -o <output-mlpackages-directory>
+```
+
+**WARNING:** This command will download several GB worth of PyTorch checkpoints from Hugging Face. Please ensure that you are on Wi-Fi and have enough disk space.
+
+This generally takes 15-20 minutes on an M1 MacBook Pro. Upon successful execution, the 4 neural network models that comprise Stable Diffusion will have been converted from PyTorch to Core ML (`.mlpackage`) and saved into the specified `<output-mlpackages-directory>`. Some additional notable arguments:
+
+- `--model-version`: The model version defaults to [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4). Developers may specify other versions that are available on [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion), e.g. [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) & [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5).
+
+- `--bundle-resources-for-swift-cli`: Compiles all 4 models and bundles them along with necessary resources for text tokenization into `<output-mlpackages-directory>/Resources`, which should be provided as input to the Swift package. This flag is not necessary for the diffusers-based Python pipeline.
+
+- `--chunk-unet`: Splits the Unet model into two approximately equal chunks (each with less than 1GB of weights) for mobile-friendly deployment. This is **required** for Neural Engine deployment on iOS and iPadOS. It is not required for macOS. The Swift CLI is able to consume both the chunked and regular versions of the Unet model but prioritizes the former. Note that the chunked Unet is not compatible with the Python pipeline because the Python pipeline is intended for macOS only; chunking is for on-device deployment with Swift only.
+
+- `--attention-implementation`: Defaults to `SPLIT_EINSUM`, which is the implementation described in [Deploying Transformers on the Apple Neural Engine](https://machinelearning.apple.com/research/neural-engine-transformers). `--attention-implementation ORIGINAL` will switch to an alternative that should be used for CPU or GPU deployment. Please refer to the [Performance Benchmark](#performance-benchmark) section for further guidance.
+
+- `--check-output-correctness`: Compares the original PyTorch model's outputs to the final Core ML model's outputs.
 This flag increases RAM consumption significantly, so it is recommended only for debugging purposes.
+
+- `--convert-controlnet`: Converts the ControlNet models specified after this option. You may also convert multiple models at once, e.g. `--convert-controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth`.
+
+- `--unet-support-controlnet`: Enables the converted UNet model to receive additional inputs from ControlNet. This is required for image generation with ControlNet. The resulting model is saved under a different name, `*_control-unet.mlpackage`, distinct from the normal UNet, and it cannot run without ControlNet; use the normal UNet for plain txt2img (see the example below).
+
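+For example, a single conversion run that produces both ControlNet models and a ControlNet-capable UNet might look like the following sketch (the model versions are the ones mentioned above; the output directory is a placeholder):
+
+```shell
+python -m python_coreml_stable_diffusion.torch2coreml \
+    --convert-unet --unet-support-controlnet \
+    --convert-controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth \
+    -o <output-mlpackages-directory>
+```
+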
+ +## Image Generation with Python + +
+  Click to expand
+
+Run text-to-image generation using the example Python pipeline based on [diffusers](https://github.com/huggingface/diffusers):
+
+```shell
+python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" -i <output-mlpackages-directory> -o <output-dir> --compute-unit ALL --seed 93
+```
+Please refer to the help menu for all available arguments: `python -m python_coreml_stable_diffusion.pipeline -h`. Some notable arguments:
+
+- `-i`: Should point to the `-o` directory from Step 4 of the [Converting Models to Core ML](#converting-models-to-core-ml) section above.
+- `--model-version`: If you overrode the default model version while converting models to Core ML, you will need to specify the same model version here.
+- `--compute-unit`: Note that the most performant compute unit for this particular implementation may differ across different hardware. `CPU_AND_GPU` or `CPU_AND_NE` may be faster than `ALL`. Please refer to the [Performance Benchmark](#performance-benchmark) section for further guidance.
+- `--scheduler`: If you would like to experiment with different schedulers, you may specify one here. For available options, please see the help menu. You may also specify a custom number of inference steps with `--num-inference-steps`, which defaults to 50.
+- `--controlnet`: ControlNet models specified with this option are used in image generation. Use this option in the format `--controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth` and make sure to use `--controlnet-inputs` in conjunction.
+- `--controlnet-inputs`: Image inputs corresponding to each ControlNet model. Please provide image paths in the same order as the models in `--controlnet`, for example: `--controlnet-inputs image_mlsd image_depth` (see the example below).
+
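+As a sketch combining the two ControlNet options above (paths and image names are placeholders):
+
+```shell
+python -m python_coreml_stable_diffusion.pipeline \
+    --prompt "a high quality photo of a surfing dog" \
+    -i <output-mlpackages-directory> -o <output-dir> --seed 93 \
+    --controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth \
+    --controlnet-inputs image_mlsd image_depth
+```
+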
+ +## Image Generation with Swift + +
+  Click to expand
+
+### System Requirements
+
+**Building** (minimum):
+
+- Xcode 14.3
+- Command Line Tools for Xcode 14.3
+
+Check [developer.apple.com](https://developer.apple.com/download/all/?q=xcode) for the latest versions.
+
+**Running** (minimum):
+
+| Mac        | iPad\*      | iPhone\*      |
+|:----------:|:-----------:|:-------------:|
+| macOS 13.1 | iPadOS 16.2 | iOS 16.2      |
+| M1         | M1          | iPhone 12 Pro |
+
+You will also need the resources generated by the `--bundle-resources-for-swift-cli` option described in [Converting Models to Core ML](#converting-models-to-core-ml).
+
+\* Please see [FAQ](#faq) [Q6](#q-mobile-app) regarding deploying on iPad and iPhone.
+
+### Example CLI Usage
+```shell
+swift run StableDiffusionSample "a photo of an astronaut riding a horse on mars" --resource-path <output-mlpackages-directory>/Resources/ --seed 93 --output-path <output-dir>
+```
+The output will be named based on the prompt and random seed:
+e.g. `<output-dir>/a_photo_of_an_astronaut_riding_a_horse_on_mars.93.final.png`
+
+Please use the `--help` flag to learn about batched generation and more.
+
+### Example Library Usage
+
+```swift
+import StableDiffusion
+...
+let pipeline = try StableDiffusionPipeline(resourcesAt: resourceURL)
+pipeline.loadResources()
+let image = try pipeline.generateImages(prompt: prompt, seed: seed).first
+```
+On iOS, the `reduceMemory` option should be set to `true` when constructing `StableDiffusionPipeline`.
+
+### Swift Package Details
+
+This Swift package contains two products:
+
+- `StableDiffusion` library
+- `StableDiffusionSample` command-line tool
+
+Both of these products require the Core ML models and tokenization resources to be supplied. When specifying resources via a directory path, that directory must contain the following:
+
+- `TextEncoder.mlmodelc` (text embedding model)
+- `Unet.mlmodelc` or `UnetChunk1.mlmodelc` & `UnetChunk2.mlmodelc` (denoising autoencoder model)
+- `VAEDecoder.mlmodelc` (image decoder model)
+- `vocab.json` (tokenizer vocabulary file)
+- `merges.txt` (merges for byte pair encoding file)
+
+Optionally, for image2image, in-painting, or similar:
+
+- `VAEEncoder.mlmodelc` (image encoder model)
+
+Optionally, it may also include the safety checker model that some versions of Stable Diffusion include:
+
+- `SafetyChecker.mlmodelc`
+
+Optionally, for ControlNet:
+
+- `ControlledUNet.mlmodelc` or `ControlledUnetChunk1.mlmodelc` & `ControlledUnetChunk2.mlmodelc` (enabled to receive ControlNet values)
+- `controlnet/` (directory containing ControlNet models)
+  - `LllyasvielSdControlnetMlsd.mlmodelc` (for example, from lllyasviel/sd-controlnet-mlsd)
+  - `LllyasvielSdControlnetDepth.mlmodelc` (for example, from lllyasviel/sd-controlnet-depth)
+  - Other models you converted
+
+Note that the chunked version of the Unet is checked for first. Only if it is not present will the full `Unet.mlmodelc` be loaded. Chunking is required for iOS and iPadOS and not necessary for macOS.
+
+ +## Example Swift App + +
+ Click to expand + +🤗 Hugging Face created an [open-source demo app](https://github.com/huggingface/swift-coreml-diffusers) on top of this library. It's written in native Swift and Swift UI, and runs on macOS, iOS and iPadOS. You can use the code as a starting point for your app, or to see how to integrate this library in your own projects. + +Hugging Face has made the app [available in the Mac App Store](https://apps.apple.com/app/diffusers/id1666309574?mt=12). + +
+ +## Performance Benchmark + +
+ Click to expand + +Standard [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) Benchmark + +| Device | `--compute-unit`| `--attention-implementation` | Latency (seconds) | +| ---------------------------------- | -------------- | ---------------------------- | ----------------- | +| Mac Studio (M1 Ultra, 64-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 9 | +| Mac Studio (M1 Ultra, 48-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 13 | +| MacBook Pro (M1 Max, 32-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 18 | +| MacBook Pro (M1 Max, 24-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 20 | +| MacBook Pro (M1 Pro, 16-core GPU) | `ALL` | `SPLIT_EINSUM (default)` | 26 | +| MacBook Pro (M2) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 23 | +| MacBook Pro (M1) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 35 | +| iPad Pro (5th gen, M1) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 38 | + + +Please see [Important Notes on Performance Benchmarks](#important-notes-on-performance-benchmarks) section for details. + +
+ +## Important Notes on Performance Benchmarks + +
+  Click to expand
+
+- This benchmark was conducted by Apple using public beta versions of iOS 16.2, iPadOS 16.2 and macOS 13.1 in November 2022.
+- The executed program is `python_coreml_stable_diffusion.pipeline` for macOS devices and a minimal Swift test app built on the `StableDiffusion` Swift package for iOS and iPadOS devices.
+- The median value across 3 end-to-end executions is reported.
+- Performance may materially differ across different versions of Stable Diffusion due to architecture changes in the model itself. Each reported number is specific to the model version mentioned in that context.
+- The image generation procedure follows the standard configuration: 50 inference steps, 512x512 output image resolution, 77 text token sequence length, classifier-free guidance (batch size of 2 for unet).
+- The actual prompt length does not impact performance because the Core ML model is converted with a static shape that computes the forward pass for all of the 77 elements (`tokenizer.model_max_length`) in the text token sequence regardless of the actual length of the input text.
+- Pipelining across the 4 models is not optimized and these performance numbers are subject to variance under increased system load from other applications. Given these factors, we do not report sub-second variance in latency.
+- Weights and activations are in float16 precision for both the GPU and the Neural Engine.
+- The Swift CLI program consumes a peak memory of approximately 2.6GB (without the safety checker), 2.1GB of which is model weights in float16 precision. We applied [8-bit weight quantization](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-affine-quantization) to reduce peak memory consumption by approximately 1GB. However, we observed that it had an adverse effect on generated image quality and we rolled it back. We encourage developers to experiment with other advanced weight compression techniques such as [palettization](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-a-lookup-table) and/or [pruning](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-sparse-representation) which may yield better results (see the sketch after this list).
+- In the [benchmark table](#performance-benchmark), we report the best performing `--compute-unit` and `--attention-implementation` values per device. The former does not modify the Core ML model and can be applied during runtime. The latter modifies the Core ML model. Note that the best performing compute unit is model version and hardware-specific.
+
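+As an illustration of the weight-compression experiments mentioned in the list above, here is a minimal sketch assuming the coremltools 6 `compression_utils` module described in the linked documentation (the model path is a placeholder):
+
+```python
+import coremltools as ct
+from coremltools.compression_utils import palettize_weights
+
+# Load a previously converted Core ML model (placeholder path).
+model = ct.models.MLModel("Unet.mlpackage")
+
+# Cluster the float16 weights into a 2**6-entry lookup table to shrink them.
+# As noted above, aggressive compression can degrade generated image quality.
+compressed = palettize_weights(model, nbits=6)
+compressed.save("Unet_palettized.mlpackage")
+```
+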
+ + +## Results with Different Compute Units + +
+ Click to expand + +It is highly probable that there will be slight differences across generated images using different compute units. + +The following images were generated on an M1 MacBook Pro and macOS 13.1 with the prompt *"a photo of an astronaut riding a horse on mars"* using the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) model version. The random seed was set to 93: + + CPU_AND_NE | CPU_AND_GPU | ALL | +:------------:|:-------------:|:------: +![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_NE_modelVersion_runwayml_stable-diffusion-v1-5.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_GPU_modelVersion_runwayml_stable-diffusion-v1-5.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_ALL_modelVersion_runwayml_stable-diffusion-v1-5.png) | + +Differences may be less or more pronounced for different inputs. Please see the [FAQ](#faq) Q8 for a detailed explanation. + +
+ +## Results with ControlNet + +
+ Click to expand + +[ControlNet](https://huggingface.co/lllyasviel/ControlNet) allows users to condition image generation with Stable Diffusion on signals such as edge maps, depth maps, segmentation maps, scribbles and pose. Thanks to [@ryu38's contribution](https://github.com/apple/ml-stable-diffusion/pull/153), both the Python CLI and the Swift package support ControlNet models. Please refer to CLI arguments in previous sections to exercise this new feature. + +Example results using the prompt "a high quality photo of a surfing dog" conditioned on the scribble (leftmost): + + + +
+ + +## FAQ + +
+ Click to expand +
+ + + Q1: ERROR: Failed building wheel for tokenizers or error: can't find Rust compiler + + A1: Please review this [potential solution](https://github.com/huggingface/transformers/issues/2831#issuecomment-592724471). +
+ + +
+ Q2: RuntimeError: {NSLocalizedDescription = "Error computing NN outputs." + + A2: There are many potential causes for this error. In this context, it is highly likely to be encountered when your system is under increased memory pressure from other applications. Reducing memory utilization of other applications is likely to help alleviate the issue. +
+ +
+  Q3: My Mac has 8GB RAM and I am converting models to Core ML using the example command. The process is getting killed because of memory issues. How do I fix this issue?
+
+  A3: In order to minimize the memory impact of the model conversion process, please execute the following commands instead, so that each model is converted in a separate process:
+
+```bash
+python -m python_coreml_stable_diffusion.torch2coreml --convert-vae-encoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-vae-decoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-text-encoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-safety-checker -o <output-mlpackages-directory>
+```
+
+If you need `--chunk-unet`, you may do so in yet another independent command, which will reuse the previously exported Unet model and simply chunk it in place:
+
+```bash
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet --chunk-unet -o <output-mlpackages-directory>
+```
+
+ +
+ Q4: My Mac has 8GB RAM, should image generation work on my machine? + + A4: Yes! Especially the `--compute-unit CPU_AND_NE` option should work under reasonable system load from other applications. Note that part of the [Example Results](#example-results) were generated using an M2 MacBook Air with 8GB RAM. +
+ +
+ Q5: Every time I generate an image using the Python pipeline, loading all the Core ML models takes 2-3 minutes. Is this expected? + + A5: Yes and using the Swift library reduces this to just a few seconds. The reason is that `coremltools` loads Core ML models (`.mlpackage`) and each model is compiled to be run on the requested compute unit during load time. Because of the size and number of operations of the unet model, it takes around 2-3 minutes to compile it for Neural Engine execution. Other models should take at most a few seconds. Note that `coremltools` does not cache the compiled model for later loads so each load takes equally long. In order to benefit from compilation caching, `StableDiffusion` Swift package by default relies on compiled Core ML models (`.mlmodelc`) which will be compiled down for the requested compute unit upon first load but then the cache will be reused on subsequent loads until it is purged due to lack of use. + +If you intend to use the Python pipeline in an application, we recommend initializing the pipeline once so that the load time is only incurred once. Afterwards, generating images using different prompts and random seeds will not incur the load time for the current session of your application. + +
+ + +
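+A rough way to observe (and amortize) this one-time cost from Python is to time the model load directly with `coremltools` (a sketch; the model path is a placeholder):
+
+```python
+import time
+
+import coremltools as ct
+
+start = time.time()
+# Loading an .mlpackage compiles it for the requested compute unit;
+# for the unet this is the slow, one-time step described above.
+unet = ct.models.MLModel("Unet.mlpackage",
+                         compute_units=ct.ComputeUnit.CPU_AND_NE)
+print(f"Load + compile took {time.time() - start:.1f}s")
+# Keep the loaded object (or your pipeline) alive: subsequent predict()
+# calls reuse the compiled model and skip the multi-minute compile step.
+```
+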
+  Q6: I want to deploy StableDiffusion, the Swift package, in my mobile app. What should I be aware of?
+
+  A6: The [Image Generation with Swift](#image-gen-swift) section describes the minimum SDK and OS versions as well as the device models supported by this package. We recommend carefully testing the package on the device with the least amount of RAM available among your deployment targets.
+
+The image generation process in `StableDiffusion` can yield over 2 GB of peak memory during runtime depending on the compute units selected. On iPadOS, we recommend using `.cpuAndNeuralEngine` in your configuration and the `reduceMemory` option when constructing a `StableDiffusionPipeline` to minimize memory pressure.
+
+If your app crashes during image generation, consider adding the [Increased Memory Limit](https://developer.apple.com/documentation/bundleresources/entitlements/com_apple_developer_kernel_increased-memory-limit) capability to inform the system that some of your app’s core features may perform better by exceeding the default app memory limit on supported devices.
+
+On iOS, depending on the iPhone model, Stable Diffusion model versions, selected compute units, system load and design of your app, this may still not be sufficient to keep your app’s peak memory under the limit. Please remember, because the device shares memory between apps and iOS processes, one app using too much memory can compromise the user experience across the whole device.
+ +
+ Q7: How do I generate images with different resolutions using the same Core ML models? + + A7: The current version of `python_coreml_stable_diffusion` does not support single-model multi-resolution out of the box. However, developers may fork this project and leverage the [flexible shapes](https://coremltools.readme.io/docs/flexible-inputs) support from coremltools to extend the `torch2coreml` script by using `coremltools.EnumeratedShapes`. Note that, while the `text_encoder` is agnostic to the image resolution, the inputs and outputs of `vae_decoder` and `unet` models are dependent on the desired image resolution. +
+ +
+  Q8: Are the Core ML and PyTorch generated images going to be identical?
+
+  A8: If desired, the generated images across PyTorch and Core ML can be made approximately identical. However, it is not guaranteed by default. There are several factors that might lead to different images across PyTorch and Core ML:
+
+  1. Random Number Generator Behavior
+
+  The main source of potentially different results across PyTorch and Core ML is the Random Number Generator ([RNG](https://en.wikipedia.org/wiki/Random_number_generation)) behavior. PyTorch and Numpy have different sources of randomness. `python_coreml_stable_diffusion` generally relies on Numpy for RNG (e.g. latents initialization) and the `StableDiffusion` Swift Library reproduces this RNG behavior by default. However, PyTorch-based pipelines such as Hugging Face `diffusers` rely on PyTorch's RNG behavior (see the short illustration after this list). Thanks to @liuliu's [contribution](https://github.com/apple/ml-stable-diffusion/pull/124), one can match the PyTorch (CPU) RNG behavior in Swift by specifying `--rng torch`, which selects the `torchRNG` mode.
+
+  2. PyTorch
+
+  *"Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds."* ([source](https://pytorch.org/docs/stable/notes/randomness.html#reproducibility)).
+
+  3. Model Function Drift During Conversion
+
+  The difference in outputs across corresponding PyTorch and Core ML models is another potential cause. The signal integrity is tested during the conversion process (enabled via the `--check-output-correctness` argument to `python_coreml_stable_diffusion.torch2coreml`) and it is verified to be above a minimum [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) value as tested on random inputs. Note that this is simply a sanity check and does not guarantee this minimum PSNR across all possible inputs. Furthermore, the results are not guaranteed to be identical when executing the same Core ML models across different compute units. This is not expected to be a major source of difference, as the sample visual results indicate in [this section](#results-with-different-compute-units).
+
+  4. Weights and Activations Data Type
+
+  When quantizing models from float32 to lower-precision data types such as float16, the generated images are [known to vary slightly](https://lambdalabs.com/blog/inference-benchmark-stable-diffusion) in semantics even when using the same PyTorch model. Core ML models generated by coremltools have float16 weights and activations by default [unless explicitly overridden](https://github.com/apple/coremltools/blob/main/coremltools/converters/_converters_entry.py#L256). This is not expected to be a major source of difference.
+
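+A minimal illustration of point 1 (the latent shape below assumes the standard 512x512 configuration):
+
+```python
+import numpy as np
+import torch
+
+seed = 93
+# Same seed, different RNG implementations: the initial latents differ,
+# so the generated images will differ as well.
+np_latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
+generator = torch.Generator().manual_seed(seed)
+torch_latents = torch.randn((1, 4, 64, 64), generator=generator)
+print(np.allclose(np_latents, torch_latents.numpy()))  # False
+```
+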
+ +
+ Q9: The model files are very large, how do I avoid a large binary for my App? + + A9: The recommended option is to prompt the user to download these assets upon first launch of the app. This keeps the app binary size independent of the Core ML models being deployed. Disclosing the size of the download to the user is extremely important as there could be data charges or storage impact that the user might not be comfortable with. + +
+ +
+ Q10: `Could not initialize NNPACK! Reason: Unsupported hardware` + + A10: This warning is safe to ignore in the context of this repository. + +
+ +
+ Q11: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect + + A11: This warning is safe to ignore in the context of this repository. +
+ +
+ Q12: UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown + + A12: If this warning is printed right after zsh: killed python -m python_coreml_stable_diffusion.torch2coreml ... , then it is highly likely that your Mac has run out of memory while converting models to Core ML. Please see [Q3](#low-mem-conversion) from above for the solution. + +
+ +
+ + diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD new file mode 100644 index 00000000..53a8ba8c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD @@ -0,0 +1,30 @@ +python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md,sha256=JIQZkbAux5If54HQ603cmYQpkWgaZ6Rs-FlRuv2gYsg,2316 +python_coreml_stable_diffusion-0.1.0.dist-info/METADATA,sha256=_lUpsIqDtVlF7G8X0iz6VmrwwWbw1bQl1SQgWHn2puU,33120 +python_coreml_stable_diffusion-0.1.0.dist-info/RECORD,, +python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json,sha256=sSoaLV-iELt5XHJbVI1L2cR5thhycX_nVjy_DEpeaUE,170 +python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt,sha256=c-I_5aLPp9EFhGmVoH3pIdzaf99seReL3bpJMpmOuac,37 +python_coreml_stable_diffusion/__init__.py,sha256=ZygAIkX6Nbjag1czWdQa-yP-GM1mBE_9ss21Xh__JFc,34 +python_coreml_stable_diffusion/__pycache__/__init__.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/_version.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/chunk_mlprogram.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/controlnet.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/coreml_model.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/layer_norm.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/pipeline.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/torch2coreml.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/unet.cpython-310.pyc,, +python_coreml_stable_diffusion/_version.py,sha256=QTYqXqSTHFRkM9TEgpDFcHvwLbvqHDqvqfQ9EiXkcAM,23 +python_coreml_stable_diffusion/chunk_mlprogram.py,sha256=ozrlOyq1919a6K29E1lw51DiWfPLKmoVmhQdG6iUvMc,12168 +python_coreml_stable_diffusion/controlnet.py,sha256=ccviLEpauaPZlnyNDRcLLjygaWcD7h4v281fc1vzwTk,8976 +python_coreml_stable_diffusion/coreml_model.py,sha256=Z115OM7t70TXf-CL9w8O7yh2NlU68K4urfHEFezGNR0,3918 +python_coreml_stable_diffusion/layer_norm.py,sha256=78mpmGHnQBO_jjyvrt3m0D8A6eb-uK5hr0wzzZFNsjw,3001 +python_coreml_stable_diffusion/pipeline.py,sha256=wo7dDCyh9gd4bmD18hHd1t7B9t8Ryk5IWsjuI-7iMYk,25751 +python_coreml_stable_diffusion/torch2coreml.py,sha256=KgeiBuUoU0egVBLKEtPMbVK-9hdy1WszbmLjdWqtRiU,54327 +python_coreml_stable_diffusion/unet.py,sha256=Zv72BBiC5GID13GZzwcPBpOqD2OxYhqDWXqtvDwWD-8,38574 +tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tests/__pycache__/__init__.cpython-310.pyc,, +tests/__pycache__/test_stable_diffusion.cpython-310.pyc,, +tests/test_stable_diffusion.py,sha256=bRZUK3joxhgSoPKAbwMWn6GG_G-V4smOrPzbKozIfcw,15965 diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL new file mode 100644 
index 00000000..57e3d840 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json new file mode 100644 index 00000000..5f116baa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json @@ -0,0 +1 @@ +{"url": "https://github.com/apple/ml-stable-diffusion", "vcs_info": {"commit_id": "940dba02ee6dbdd0ae1238dcdef6cd259b345603", "requested_revision": "main", "vcs": "git"}} \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt new file mode 100644 index 00000000..6d9c39a4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +python_coreml_stable_diffusion +tests diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py new file mode 100644 index 00000000..8dee4bf8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py @@ -0,0 +1 @@ +from ._version import __version__ diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py new file mode 100644 index 00000000..3f5c4a7d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py new file mode 100644 index 00000000..1aef76c6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py @@ -0,0 +1,337 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+# + +import argparse +from collections import OrderedDict + +import coremltools as ct +from coremltools.converters.mil import Block, Program, Var +from coremltools.converters.mil.frontend.milproto.load import load as _milproto_to_pymil +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Placeholder +from coremltools.converters.mil.mil import types as types +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import random_gen_input_feature_type + +import gc + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np +import os +from python_coreml_stable_diffusion import torch2coreml +import shutil +import time + + +def _verify_output_correctness_of_chunks(full_model, first_chunk_model, + second_chunk_model): + """ Verifies the end-to-end output correctness of full (original) model versus chunked models + """ + # Generate inputs for first chunk and full model + input_dict = {} + for input_desc in full_model._spec.description.input: + input_dict[input_desc.name] = random_gen_input_feature_type(input_desc) + + # Generate outputs for first chunk and full model + outputs_from_full_model = full_model.predict(input_dict) + outputs_from_first_chunk_model = first_chunk_model.predict(input_dict) + + # Prepare inputs for second chunk model from first chunk's outputs and regular inputs + second_chunk_input_dict = {} + for input_desc in second_chunk_model._spec.description.input: + if input_desc.name in outputs_from_first_chunk_model: + second_chunk_input_dict[ + input_desc.name] = outputs_from_first_chunk_model[ + input_desc.name] + else: + second_chunk_input_dict[input_desc.name] = input_dict[ + input_desc.name] + + # Generate output for second chunk model + outputs_from_second_chunk_model = second_chunk_model.predict( + second_chunk_input_dict) + + # Verify correctness across all outputs from second chunk and full model + for out_name in outputs_from_full_model.keys(): + torch2coreml.report_correctness( + original_outputs=outputs_from_full_model[out_name], + final_outputs=outputs_from_second_chunk_model[out_name], + log_prefix=f"{out_name}") + + +def _load_prog_from_mlmodel(model): + """ Load MIL Program from an MLModel + """ + model_spec = model.get_spec() + start_ = time.time() + logger.info( + "Loading MLModel object into a MIL Program object (including the weights).." 
+    )
+    prog = _milproto_to_pymil(
+        model_spec=model_spec,
+        specification_version=model_spec.specificationVersion,
+        file_weights_dir=model.weights_dir,
+    )
+    logger.info(f"Program loaded in {time.time() - start_:.1f} seconds")
+
+    return prog
+
+
+def _get_op_idx_split_location(prog: Program):
+    """ Find the op that approximately bisects the graph as measured by the weights size on each side
+    """
+    main_block = prog.functions["main"]
+    total_size_in_mb = 0
+
+    for op in main_block.operations:
+        if op.op_type == "const" and isinstance(op.val.val, np.ndarray):
+            size_in_mb = op.val.val.size * op.val.val.itemsize / (1024 * 1024)
+            total_size_in_mb += size_in_mb
+    half_size = total_size_in_mb / 2
+
+    # Find the first non-const op (with a single child) at which the cumulative
+    # size exceeds half of the total size
+    cumulative_size_in_mb = 0
+    for op in main_block.operations:
+        if op.op_type == "const" and isinstance(op.val.val, np.ndarray):
+            size_in_mb = op.val.val.size * op.val.val.itemsize / (1024 * 1024)
+            cumulative_size_in_mb += size_in_mb
+
+        if (cumulative_size_in_mb > half_size and op.op_type != "const"
+                and len(op.outputs) == 1
+                and len(op.outputs[0].child_ops) == 1):
+            op_idx = main_block.operations.index(op)
+            return op_idx, cumulative_size_in_mb, total_size_in_mb
+
+
+def _get_first_chunk_outputs(block, op_idx):
+    # Get the list of all vars that cross from the first program (all ops from
+    # 0 to op_idx, inclusive) to the second program (all ops from op_idx+1 till
+    # the end). All of these vars need to be made outputs of the first program
+    # and inputs of the second program
+    boundary_vars = set()
+    for i in range(op_idx + 1):
+        op = block.operations[i]
+        for var in op.outputs:
+            if var.val is None:  # only consider non-const vars
+                for child_op in var.child_ops:
+                    child_op_idx = block.operations.index(child_op)
+                    if child_op_idx > op_idx:
+                        boundary_vars.add(var)
+    return list(boundary_vars)
+
+
+@block_context_manager
+def _add_fp32_casts(block, boundary_vars):
+    new_boundary_vars = []
+    for var in boundary_vars:
+        if var.dtype != types.fp16:
+            new_boundary_vars.append(var)
+        else:
+            fp32_var = mb.cast(x=var, dtype="fp32", name=var.name)
+            new_boundary_vars.append(fp32_var)
+    return new_boundary_vars
+
+
+def _make_first_chunk_prog(prog, op_idx):
+    """ Build the first chunk by declaring early outputs and removing the unused subgraph
+    """
+    block = prog.functions["main"]
+    boundary_vars = _get_first_chunk_outputs(block, op_idx)
+
+    # Due to possible numerical issues, cast any fp16 var to fp32
+    new_boundary_vars = _add_fp32_casts(block, boundary_vars)
+
+    block.outputs.clear()
+    block.set_outputs(new_boundary_vars)
+    PASS_REGISTRY["common::dead_code_elimination"](prog)
+    return prog
+
+
+def _make_second_chunk_prog(prog, op_idx):
+    """ Build the second chunk by rebuilding a pristine MIL Program from the MLModel
+    """
+    block = prog.functions["main"]
+    block.opset_version = ct.target.iOS16
+
+    # First chunk outputs are second chunk inputs (e.g. skip connections)
+    boundary_vars = _get_first_chunk_outputs(block, op_idx)
+
+    # This op will not be included in this program. Its output var will be
+    # made into an input
+def _make_second_chunk_prog(prog, op_idx):
+    """ Build the second chunk by rebuilding a pristine MIL Program from the MLModel
+    """
+    block = prog.functions["main"]
+    block.opset_version = ct.target.iOS16
+
+    # First chunk outputs are second chunk inputs (e.g. skip connections)
+    boundary_vars = _get_first_chunk_outputs(block, op_idx)
+
+    # This op will not be included in this program. Its output var will be made into an input
+    boundary_op = block.operations[op_idx]
+
+    # Add all boundary vars as inputs
+    with block:
+        for var in boundary_vars:
+            new_placeholder = Placeholder(
+                sym_shape=var.shape,
+                dtype=var.dtype if var.dtype != types.fp16 else types.fp32,
+                name=var.name,
+            )
+
+            block._input_dict[
+                new_placeholder.outputs[0].name] = new_placeholder.outputs[0]
+
+            block.function_inputs = tuple(block._input_dict.values())
+            new_var = None
+            if var.dtype == types.fp16:
+                new_var = mb.cast(x=new_placeholder.outputs[0],
+                                  dtype="fp16",
+                                  before_op=var.op)
+            else:
+                new_var = new_placeholder.outputs[0]
+
+            block.replace_uses_of_var_after_op(
+                anchor_op=boundary_op,
+                old_var=var,
+                new_var=new_var,
+            )
+
+    PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    # Remove any unused inputs
+    new_input_dict = OrderedDict()
+    for k, v in block._input_dict.items():
+        if len(v.child_ops) > 0:
+            new_input_dict[k] = v
+    block._input_dict = new_input_dict
+    block.function_inputs = tuple(block._input_dict.values())
+
+    return prog
+
+
+def main(args):
+    os.makedirs(args.o, exist_ok=True)
+
+    # Check filename extension
+    mlpackage_name = os.path.basename(args.mlpackage_path)
+    name, ext = os.path.splitext(mlpackage_name)
+    assert ext == ".mlpackage", f"`--mlpackage-path` ({args.mlpackage_path}) is not an .mlpackage file"
+
+    # Load the Core ML model
+    logger.info("Loading model from {}".format(args.mlpackage_path))
+    start_ = time.time()
+    model = ct.models.MLModel(
+        args.mlpackage_path,
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+    )
+    logger.info(
+        f"Loading {args.mlpackage_path} took {time.time() - start_:.1f} seconds"
+    )
+
+    # Load the MIL Program from the MLModel
+    prog = _load_prog_from_mlmodel(model)
+
+    # Compute the incision point by bisecting the program based on weights size
+    op_idx, first_chunk_weights_size, total_weights_size = _get_op_idx_split_location(
+        prog)
+    main_block = prog.functions["main"]
+    incision_op = main_block.operations[op_idx]
+    logger.info(f"{args.mlpackage_path} will be chunked into two pieces.")
+    logger.info(
+        f"The incision op: name={incision_op.name}, type={incision_op.op_type}, index={op_idx}/{len(main_block.operations)}"
+    )
+    logger.info(f"First chunk size = {first_chunk_weights_size:.2f} MB")
+    logger.info(
+        f"Second chunk size = {total_weights_size - first_chunk_weights_size:.2f} MB"
+    )
+
+    # Build the first chunk (modifies prog in place by declaring early exits and removing the unused subgraph)
+    prog_chunk1 = _make_first_chunk_prog(prog, op_idx)
+
+    # Build the second chunk
+    prog_chunk2 = _make_second_chunk_prog(_load_prog_from_mlmodel(model),
+                                          op_idx)
+
+    if not args.check_output_correctness:
+        # Original model no longer needed in memory
+        del model
+        gc.collect()
+
+    # Convert the MIL Program objects into MLModels
+    logger.info("Converting the two programs")
+    model_chunk1 = ct.convert(
+        prog_chunk1,
+        convert_to="mlprogram",
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+        minimum_deployment_target=ct.target.iOS16,
+    )
+    del prog_chunk1
+    gc.collect()
+    logger.info("Conversion of first chunk done.")
+
+    model_chunk2 = ct.convert(
+        prog_chunk2,
+        convert_to="mlprogram",
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+        minimum_deployment_target=ct.target.iOS16,
+    )
+    del prog_chunk2
+    gc.collect()
+    logger.info("Conversion of second chunk done.")
+
+    # Verify output correctness
+    if args.check_output_correctness:
+        logger.info("Verifying output correctness of chunks")
+        _verify_output_correctness_of_chunks(
+            full_model=model,
+            first_chunk_model=model_chunk1,
+            second_chunk_model=model_chunk2,
+        )
+
+    # Save the chunked models to disk
+    out_path_chunk1 = os.path.join(args.o, name + "_chunk1.mlpackage")
+    out_path_chunk2 = os.path.join(args.o, name + "_chunk2.mlpackage")
+
+    model_chunk1.save(out_path_chunk1)
+    model_chunk2.save(out_path_chunk2)
+    logger.info(
+        f"Saved chunks in {args.o} with the suffixes _chunk1.mlpackage and _chunk2.mlpackage"
+    )
+
+    # Remove the original (non-chunked) model if requested, now that both chunks are on disk
+    if args.remove_original:
+        logger.info(
+            f"Removing original (non-chunked) model at {args.mlpackage_path}")
+        shutil.rmtree(args.mlpackage_path)
+
+    logger.info("Done.")
+
+
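+# Example invocation (paths are hypothetical), mirroring the arguments parsed below:
+#
+#     python -m python_coreml_stable_diffusion.chunk_mlprogram \
+#         --mlpackage-path Stable_Diffusion_version_CompVis_stable-diffusion-v1-4_unet.mlpackage \
+#         -o ./chunked \
+#         --check-output-correctness
+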
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--mlpackage-path",
+        required=True,
+        help=
+        "Path to the .mlpackage file to be split into two mlpackages of approximately the same file size.",
+    )
+    parser.add_argument(
+        "-o",
+        required=True,
+        help=
+        "Path to the output directory where the two model chunks should be saved.",
+    )
+    parser.add_argument(
+        "--remove-original",
+        action="store_true",
+        help=
+        "If specified, removes the original (non-chunked) model to avoid duplicating storage."
+    )
+    parser.add_argument(
+        "--check-output-correctness",
+        action="store_true",
+        help=
+        ("If specified, compares the outputs of the original Core ML model with those of the pipelined Core ML model chunks and reports PSNR in dB. "
+         "Enabling this feature uses more memory. Disable it if your machine runs out of memory."
+         ))
+
+    args = parser.parse_args()
+    main(args)
diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py
new file mode 100644
index 00000000..4482e7bf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py
@@ -0,0 +1,244 @@
+#
+# For licensing see accompanying LICENSE.md file.
+# Copyright (C) 2022 Apple Inc. All Rights Reserved.
+# + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers import ModelMixin + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .unet import Timesteps, TimestepEmbedding, get_down_block, UNetMidBlock2DCrossAttn, linear_to_conv2d_map + +class ControlNetConditioningEmbedding(nn.Module): + + def __init__( + self, + conditioning_embedding_channels, + conditioning_channels=3, + block_out_channels=(16, 32, 96, 256), + ): + super().__init__() + + self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) + + self.blocks = nn.ModuleList([]) + + for i in range(len(block_out_channels) - 1): + channel_in = block_out_channels[i] + channel_out = block_out_channels[i + 1] + self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) + self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) + + self.conv_out = nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) + + def forward(self, conditioning): + embedding = self.conv_in(conditioning) + embedding = F.silu(embedding) + + for block in self.blocks: + embedding = block(embedding) + embedding = F.silu(embedding) + + embedding = self.conv_out(embedding) + + return embedding + +class ControlNetModel(ModelMixin, ConfigMixin): + + @register_to_config + def __init__( + self, + in_channels=4, + flip_sin_to_cos=True, + freq_shift=0, + down_block_types=( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + only_cross_attention=False, + block_out_channels=(320, 640, 1280, 1280), + layers_per_block=2, + downsample_padding=1, + mid_block_scale_factor=1, + act_fn="silu", + norm_num_groups=32, + norm_eps=1e-5, + cross_attention_dim=1280, + attention_head_dim=8, + use_linear_projection=False, + upcast_attention=False, + resnet_time_scale_shift="default", + conditioning_embedding_out_channels=(16, 32, 96, 256), + **kwargs, + ): + super().__init__() + + # Check inputs + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." 
+ ) + + self._register_load_state_dict_pre_hook(linear_to_conv2d_map) + + # input + conv_in_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + ) + + # control net conditioning embedding + self.controlnet_cond_embedding = ControlNetConditioningEmbedding( + conditioning_embedding_channels=block_out_channels[0], + block_out_channels=conditioning_embedding_out_channels, + ) + + self.down_blocks = nn.ModuleList([]) + self.controlnet_down_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[i], + downsample_padding=downsample_padding, + ) + self.down_blocks.append(down_block) + + for _ in range(layers_per_block): + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + if not is_final_block: + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + # mid + mid_block_channel = block_out_channels[-1] + + controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) + self.controlnet_mid_block = controlnet_block + + self.mid_block = UNetMidBlock2DCrossAttn( + in_channels=mid_block_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + + def get_num_residuals(self): + num_res = 2 # initial sample + mid block + for down_block in self.down_blocks: + num_res += len(down_block.resnets) + if hasattr(down_block, "downsamplers") and down_block.downsamplers is not None: + num_res += len(down_block.downsamplers) + return num_res + + def forward( + self, + sample, + timestep, + encoder_hidden_states, + controlnet_cond, + ): + # 1. time + t_emb = self.time_proj(timestep) + emb = self.time_embedding(t_emb) + + # 2. pre-process + sample = self.conv_in(sample) + + controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) + + sample += controlnet_cond + + # 3. 
down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "attentions") and downsample_block.attentions is not None: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + ) + + # 5. Control net blocks + controlnet_down_block_res_samples = () + + for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): + down_block_res_sample = controlnet_block(down_block_res_sample) + controlnet_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = controlnet_down_block_res_samples + + mid_block_res_sample = self.controlnet_mid_block(sample) + + return down_block_res_samples, mid_block_res_sample \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py new file mode 100644 index 00000000..ce0375b9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py @@ -0,0 +1,119 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. +# + +import coremltools as ct + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np + +import os +import time + + +class CoreMLModel: + """ Wrapper for running CoreML models using coremltools + """ + + def __init__(self, model_path, compute_unit): + assert os.path.exists(model_path) and model_path.endswith(".mlpackage") + + logger.info(f"Loading {model_path}") + + start = time.time() + self.model = ct.models.MLModel( + model_path, compute_units=ct.ComputeUnit[compute_unit]) + load_time = time.time() - start + logger.info(f"Done. Took {load_time:.1f} seconds.") + + if load_time > LOAD_TIME_INFO_MSG_TRIGGER: + logger.info( + "Loading a CoreML model through coremltools triggers compilation every time. " + "The Swift package we provide uses precompiled Core ML models (.mlmodelc) to avoid compile-on-load." 
+            )
+
+        # Core ML proto dtype codes (ArrayFeatureType.ArrayDataType):
+        # 0x10000 | bits for float types, 0x20000 | bits for int types
+        DTYPE_MAP = {
+            65552: np.float16,   # FLOAT16
+            65568: np.float32,   # FLOAT32
+            131104: np.int32,    # INT32
+        }
+
+        self.expected_inputs = {
+            input_tensor.name: {
+                "shape": tuple(input_tensor.type.multiArrayType.shape),
+                "dtype": DTYPE_MAP[input_tensor.type.multiArrayType.dataType],
+            }
+            for input_tensor in self.model._spec.description.input
+        }
+
+    def _verify_inputs(self, **kwargs):
+        for k, v in kwargs.items():
+            if k in self.expected_inputs:
+                if not isinstance(v, np.ndarray):
+                    raise TypeError(
+                        f"Expected numpy.ndarray, got {v} for input: {k}")
+
+                expected_dtype = self.expected_inputs[k]["dtype"]
+                if not v.dtype == expected_dtype:
+                    raise TypeError(
+                        f"Expected dtype {expected_dtype}, got {v.dtype} for input: {k}"
+                    )
+
+                expected_shape = self.expected_inputs[k]["shape"]
+                if not v.shape == expected_shape:
+                    raise TypeError(
+                        f"Expected shape {expected_shape}, got {v.shape} for input: {k}"
+                    )
+            else:
+                raise ValueError(f"Received unexpected input kwarg: {k}")
+
+    def __call__(self, **kwargs):
+        self._verify_inputs(**kwargs)
+        return self.model.predict(kwargs)
+
+
+LOAD_TIME_INFO_MSG_TRIGGER = 10  # seconds
+
+
+def _load_mlpackage(submodule_name, mlpackages_dir, model_version,
+                    compute_unit):
+    """ Load Core ML (mlpackage) models from disk (as exported by torch2coreml.py)
+    """
+    logger.info(f"Loading {submodule_name} mlpackage")
+
+    fname = f"Stable_Diffusion_version_{model_version}_{submodule_name}.mlpackage".replace(
+        "/", "_")
+    mlpackage_path = os.path.join(mlpackages_dir, fname)
+
+    if not os.path.exists(mlpackage_path):
+        raise FileNotFoundError(
+            f"{submodule_name} Core ML model doesn't exist at {mlpackage_path}")
+
+    return CoreMLModel(mlpackage_path, compute_unit)
+
+
+def _load_mlpackage_controlnet(mlpackages_dir, model_version, compute_unit):
+    """ Load Core ML (mlpackage) ControlNet models from disk (as exported by torch2coreml.py)
+    """
+    model_name = model_version.replace("/", "_")
+
+    logger.info(f"Loading controlnet_{model_name} mlpackage")
+
+    fname = f"ControlNet_{model_name}.mlpackage"
+
+    mlpackage_path = os.path.join(mlpackages_dir, fname)
+
+    if not os.path.exists(mlpackage_path):
+        raise FileNotFoundError(
+            f"controlnet_{model_name} Core ML model doesn't exist at {mlpackage_path}")
+
+    return CoreMLModel(mlpackage_path, compute_unit)
+
+
+def get_available_compute_units():
+    return tuple(cu for cu in ct.ComputeUnit._member_names_)
diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py
new file mode 100644
index 00000000..677758e1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py
@@ -0,0 +1,80 @@
+#
+# For licensing see accompanying LICENSE.md file.
+# Copyright (C) 2022 Apple Inc. All Rights Reserved.
+#
+
+import torch
+import torch.nn as nn
+
+
+# Reference: https://github.com/apple/ml-ane-transformers/blob/main/ane_transformers/reference/layer_norm.py
+class LayerNormANE(nn.Module):
+    """ LayerNorm optimized for Apple Neural Engine (ANE) execution
+
+    Note: This layer only supports normalization over the final dim. It expects `num_channels`
+    as an argument and not `normalized_shape` which is used by `torch.nn.LayerNorm`.
+    """
+
+    def __init__(self,
+                 num_channels,
+                 clip_mag=None,
+                 eps=1e-5,
+                 elementwise_affine=True):
+        """
+        Args:
+            num_channels: Number of channels (C) where the expected input data format is BC1S. S stands for sequence length.
+ clip_mag: Optional float value to use for clamping the input range before layer norm is applied. + If specified, helps reduce risk of overflow. + eps: Small value to avoid dividing by zero + elementwise_affine: If true, adds learnable channel-wise shift (bias) and scale (weight) parameters + """ + super().__init__() + # Principle 1: Picking the Right Data Format (machinelearning.apple.com/research/apple-neural-engine) + self.expected_rank = len("BC1S") + + self.num_channels = num_channels + self.eps = eps + self.clip_mag = clip_mag + self.elementwise_affine = elementwise_affine + + if self.elementwise_affine: + self.weight = nn.Parameter(torch.Tensor(num_channels)) + self.bias = nn.Parameter(torch.Tensor(num_channels)) + + self._reset_parameters() + + def _reset_parameters(self): + if self.elementwise_affine: + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, inputs): + input_rank = len(inputs.size()) + + # Principle 1: Picking the Right Data Format (machinelearning.apple.com/research/apple-neural-engine) + # Migrate the data format from BSC to BC1S (most conducive to ANE) + if input_rank == 3 and inputs.size(2) == self.num_channels: + inputs = inputs.transpose(1, 2).unsqueeze(2) + input_rank = len(inputs.size()) + + assert input_rank == self.expected_rank + assert inputs.size(1) == self.num_channels + + if self.clip_mag is not None: + inputs.clamp_(-self.clip_mag, self.clip_mag) + + channels_mean = inputs.mean(dim=1, keepdims=True) + + zero_mean = inputs - channels_mean + + zero_mean_sq = zero_mean * zero_mean + + denom = (zero_mean_sq.mean(dim=1, keepdims=True) + self.eps).rsqrt() + + out = zero_mean * denom + + if self.elementwise_affine: + out = (out + self.bias.view(1, self.num_channels, 1, 1) + ) * self.weight.view(1, self.num_channels, 1, 1) + + return out diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py new file mode 100644 index 00000000..6a5a47bc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py @@ -0,0 +1,656 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+#
+
+import argparse
+
+from diffusers.pipeline_utils import DiffusionPipeline
+from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+from diffusers.schedulers import (
+    DDIMScheduler,
+    DPMSolverMultistepScheduler,
+    EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
+    LMSDiscreteScheduler,
+    PNDMScheduler,
+)
+from diffusers.schedulers.scheduling_utils import SchedulerMixin
+
+import gc
+import inspect
+
+import logging
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+import numpy as np
+import os
+
+from python_coreml_stable_diffusion.coreml_model import (
+    CoreMLModel,
+    _load_mlpackage,
+    _load_mlpackage_controlnet,
+    get_available_compute_units,
+)
+
+import time
+import torch  # Only used for `torch.from_numpy` in `pipe.scheduler.step()`
+from transformers import CLIPFeatureExtractor, CLIPTokenizer
+from typing import List, Optional, Union
+from PIL import Image
+
+
+class CoreMLStableDiffusionPipeline(DiffusionPipeline):
+    """ Core ML version of
+    `diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline`
+    """
+
+    def __init__(
+        self,
+        text_encoder: CoreMLModel,
+        unet: CoreMLModel,
+        vae_decoder: CoreMLModel,
+        feature_extractor: CLIPFeatureExtractor,
+        safety_checker: Optional[CoreMLModel],
+        scheduler: Union[DDIMScheduler,
+                         DPMSolverMultistepScheduler,
+                         EulerAncestralDiscreteScheduler,
+                         EulerDiscreteScheduler,
+                         LMSDiscreteScheduler,
+                         PNDMScheduler],
+        tokenizer: CLIPTokenizer,
+        controlnet: Optional[List[CoreMLModel]],
+    ):
+        super().__init__()
+
+        # Register the non-Core ML components of the pipeline, as in the original pipeline
+        self.register_modules(
+            tokenizer=tokenizer,
+            scheduler=scheduler,
+            feature_extractor=feature_extractor,
+        )
+
+        if safety_checker is None:
+            # Reproduce original warning:
+            # https://github.com/huggingface/diffusers/blob/v0.9.0/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L119
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        # Register the Core ML components of the pipeline
+        self.safety_checker = safety_checker
+        self.text_encoder = text_encoder
+        self.unet = unet
+        self.unet.in_channels = self.unet.expected_inputs["sample"]["shape"][1]
+
+        self.controlnet = controlnet
+
+        self.vae_decoder = vae_decoder
+
+        VAE_DECODER_UPSAMPLE_FACTOR = 8
+
+        # In PyTorch, users can determine the tensor shapes dynamically by default
+        # In Core ML, tensors have static shapes unless flexible shapes were used during export
+        # See https://coremltools.readme.io/docs/flexible-inputs
+        latent_h, latent_w = self.unet.expected_inputs["sample"]["shape"][2:]
+        self.height = latent_h * VAE_DECODER_UPSAMPLE_FACTOR
+        self.width = latent_w * VAE_DECODER_UPSAMPLE_FACTOR
+        # e.g. a 64x64 latent yields a 512x512 output image (64 * 8 = 512)
+
+        logger.info(
+            f"Stable Diffusion configured to generate {self.height}x{self.width} images"
+        )
+
+    def _encode_prompt(self, prompt, num_images_per_prompt,
+                       do_classifier_free_guidance, negative_prompt):
+        batch_size = len(prompt) if isinstance(prompt, list) else 1
+
+        text_inputs = self.tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.tokenizer.model_max_length,
+            return_tensors="np",
+        )
+        text_input_ids = text_inputs.input_ids
+
+        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
+            removed_text = self.tokenizer.batch_decode(
+                text_input_ids[:, self.tokenizer.model_max_length:])
+            logger.warning(
+                "The following part of your input was truncated because CLIP can only handle sequences up to"
+                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
+            text_input_ids = text_input_ids[:, :self.tokenizer.
+                                            model_max_length]
+
+        text_embeddings = self.text_encoder(
+            input_ids=text_input_ids.astype(np.float32))["last_hidden_state"]
+
+        if do_classifier_free_guidance:
+            uncond_tokens: List[str]
+            if negative_prompt is None:
+                uncond_tokens = [""] * batch_size
+            elif type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}.")
+            elif isinstance(negative_prompt, str):
+                uncond_tokens = [negative_prompt] * batch_size
+            elif batch_size != len(negative_prompt):
+                raise ValueError(
+                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                    " the batch size of `prompt`.")
+            else:
+                uncond_tokens = negative_prompt
+
+            max_length = text_input_ids.shape[-1]
+            uncond_input = self.tokenizer(
+                uncond_tokens,
+                padding="max_length",
+                max_length=max_length,
+                truncation=True,
+                return_tensors="np",
+            )
+
+            uncond_embeddings = self.text_encoder(
+                input_ids=uncond_input.input_ids.astype(
+                    np.float32))["last_hidden_state"]
+
+            # For classifier free guidance, we need to do two forward passes.
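+            # The two batched passes are recombined later in the denoising loop as:
+            #   noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)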
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = np.concatenate( + [uncond_embeddings, text_embeddings]) + + text_embeddings = text_embeddings.transpose(0, 2, 1)[:, :, None, :] + + return text_embeddings + + def run_controlnet(self, + sample, + timestep, + encoder_hidden_states, + controlnet_cond, + output_dtype=np.float16): + if not self.controlnet: + raise ValueError( + "Conditions for controlnet are given but the pipeline has no controlnet modules") + + for i, (module, cond) in enumerate(zip(self.controlnet, controlnet_cond)): + module_outputs = module( + sample=sample.astype(np.float16), + timestep=timestep.astype(np.float16), + encoder_hidden_states=encoder_hidden_states.astype(np.float16), + controlnet_cond=cond.astype(np.float16), + ) + if i == 0: + outputs = module_outputs + else: + for key in outputs.keys(): + outputs[key] += module_outputs[key] + + outputs = {k: v.astype(output_dtype) for k, v in outputs.items()} + + return outputs + + def run_safety_checker(self, image): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), + return_tensors="np", + ) + + safety_checker_outputs = self.safety_checker( + clip_input=safety_checker_input.pixel_values.astype( + np.float16), + images=image.astype(np.float16), + adjustment=np.array([0.]).astype( + np.float16), # defaults to 0 in original pipeline + ) + + # Unpack dict + has_nsfw_concept = safety_checker_outputs["has_nsfw_concepts"] + image = safety_checker_outputs["filtered_images"] + concept_scores = safety_checker_outputs["concept_scores"] + + logger.info( + f"Generated image has nsfw concept={has_nsfw_concept.any()}") + else: + has_nsfw_concept = None + + return image, has_nsfw_concept + + def decode_latents(self, latents): + latents = 1 / 0.18215 * latents + image = self.vae_decoder(z=latents.astype(np.float16))["image"] + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + return image + + def prepare_latents(self, + batch_size, + num_channels_latents, + height, + width, + latents=None): + latents_shape = (batch_size, num_channels_latents, self.height // 8, + self.width // 8) + if latents is None: + latents = np.random.randn(*latents_shape).astype(np.float16) + elif latents.shape != latents_shape: + raise ValueError( + f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" + ) + + latents = latents * self.scheduler.init_noise_sigma + + return latents + + def prepare_control_cond(self, + controlnet_cond, + do_classifier_free_guidance, + batch_size, + num_images_per_prompt): + processed_cond_list = [] + for cond in controlnet_cond: + cond = np.stack([cond] * batch_size * num_images_per_prompt) + if do_classifier_free_guidance: + cond = np.concatenate([cond] * 2) + processed_cond_list.append(cond) + return processed_cond_list + + def check_inputs(self, prompt, height, width, callback_steps): + if height != self.height or width != self.width: + logger.warning( + "`height` and `width` dimensions (of the output image tensor) are fixed when exporting the Core ML models " \ + "unless flexible shapes are used during export (https://coremltools.readme.io/docs/flexible-inputs). 
" \ + "This pipeline was provided with Core ML models that generate {self.height}x{self.width} images (user requested {height}x{width})" + ) + + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if (callback_steps is None) or (callback_steps is not None and + (not isinstance(callback_steps, int) + or callback_steps <= 0)): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}.") + + def prepare_extra_step_kwargs(self, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + return extra_step_kwargs + + def __call__( + self, + prompt, + height=512, + width=512, + num_inference_steps=50, + guidance_scale=7.5, + negative_prompt=None, + num_images_per_prompt=1, + eta=0.0, + latents=None, + output_type="pil", + return_dict=True, + callback=None, + callback_steps=1, + controlnet_cond=None, + **kwargs, + ): + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + if batch_size > 1 or num_images_per_prompt > 1: + raise NotImplementedError( + "For batched generation of multiple images and/or multiple prompts, please refer to the Swift package." + ) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_embeddings = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables and controlnet cond + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + latents, + ) + + if controlnet_cond: + controlnet_cond = self.prepare_control_cond( + controlnet_cond, + do_classifier_free_guidance, + batch_size, + num_images_per_prompt, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + # 7. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate( + [latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t) + + # controlnet + if controlnet_cond: + additional_residuals = self.run_controlnet( + sample=latent_model_input, + timestep=np.array([t, t]), + encoder_hidden_states=text_embeddings, + controlnet_cond=controlnet_cond, + ) + else: + additional_residuals = {} + + # predict the noise residual + noise_pred = self.unet( + sample=latent_model_input.astype(np.float16), + timestep=np.array([t, t], np.float16), + encoder_hidden_states=text_embeddings.astype(np.float16), + **additional_residuals, + )["noise_pred"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(torch.from_numpy(noise_pred), + t, + torch.from_numpy(latents), + **extra_step_kwargs, + ).prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput( + images=image, nsfw_content_detected=has_nsfw_concept) + + +def get_available_schedulers(): + schedulers = {} + for scheduler in [DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler]: + schedulers[scheduler().__class__.__name__.replace("Scheduler", "")] = scheduler + return schedulers + +SCHEDULER_MAP = get_available_schedulers() + +def get_coreml_pipe(pytorch_pipe, + mlpackages_dir, + model_version, + compute_unit, + delete_original_pipe=True, + scheduler_override=None, + controlnet_models=None): + """ Initializes and returns a `CoreMLStableDiffusionPipeline` from an original + diffusers PyTorch pipeline + """ + # Ensure `scheduler_override` object is of correct type if specified + if scheduler_override is not None: + assert isinstance(scheduler_override, SchedulerMixin) + logger.warning( + "Overriding scheduler in pipeline: " + f"Default={pytorch_pipe.scheduler}, Override={scheduler_override}") + + # Gather configured tokenizer and scheduler attributes from the original pipe + coreml_pipe_kwargs = { + "tokenizer": pytorch_pipe.tokenizer, + "scheduler": pytorch_pipe.scheduler if scheduler_override is None else scheduler_override, + "feature_extractor": pytorch_pipe.feature_extractor, + } + + model_names_to_load = ["text_encoder", "unet", "vae_decoder"] + if getattr(pytorch_pipe, "safety_checker", None) is not None: + model_names_to_load.append("safety_checker") + else: + logger.warning( + f"Original diffusers pipeline for {model_version} does not have a safety_checker, " + "Core ML pipeline will mirror this behavior.") + coreml_pipe_kwargs["safety_checker"] = None + + if delete_original_pipe: + del pytorch_pipe + gc.collect() + logger.info("Removed PyTorch pipe to reduce peak memory consumption") + + if controlnet_models: + 
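+        # The ControlNet path swaps in the "control-unet" variant, which is exported
+        # with extra inputs for the additive residuals computed by each ControlNet
+        # model (see `run_controlnet` above and `unet_support_controlnet` in torch2coreml.py)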
model_names_to_load.remove("unet") + coreml_pipe_kwargs["unet"] = _load_mlpackage( + "control-unet", + mlpackages_dir, + model_version, + compute_unit, + ) + coreml_pipe_kwargs["controlnet"] = [_load_mlpackage_controlnet( + mlpackages_dir, + model_version, + compute_unit, + ) for model_version in controlnet_models] + else: + coreml_pipe_kwargs["controlnet"] = None + + # Load Core ML models + logger.info(f"Loading Core ML models in memory from {mlpackages_dir}") + coreml_pipe_kwargs.update({ + model_name: _load_mlpackage( + model_name, + mlpackages_dir, + model_version, + compute_unit, + ) + for model_name in model_names_to_load + }) + logger.info("Done.") + + logger.info("Initializing Core ML pipe for image generation") + coreml_pipe = CoreMLStableDiffusionPipeline(**coreml_pipe_kwargs) + logger.info("Done.") + + return coreml_pipe + + +def get_image_path(args, **override_kwargs): + """ mkdir output folder and encode metadata in the filename + """ + out_folder = os.path.join(args.o, "_".join(args.prompt.replace("/", "_").rsplit(" "))) + os.makedirs(out_folder, exist_ok=True) + + out_fname = f"randomSeed_{override_kwargs.get('seed', None) or args.seed}" + out_fname += f"_computeUnit_{override_kwargs.get('compute_unit', None) or args.compute_unit}" + out_fname += f"_modelVersion_{override_kwargs.get('model_version', None) or args.model_version.replace('/', '_')}" + + if args.scheduler is not None: + out_fname += f"_customScheduler_{override_kwargs.get('scheduler', None) or args.scheduler}" + out_fname += f"_numInferenceSteps{override_kwargs.get('num_inference_steps', None) or args.num_inference_steps}" + + return os.path.join(out_folder, out_fname + ".png") + +def prepare_controlnet_cond(image_path, height, width): + image = Image.open(image_path).convert("RGB") + image = image.resize((height, width), resample=Image.LANCZOS) + image = np.array(image).transpose(2, 0, 1) / 255.0 + return image + +def main(args): + logger.info(f"Setting random seed to {args.seed}") + np.random.seed(args.seed) + + logger.info("Initializing PyTorch pipe for reference configuration") + from diffusers import StableDiffusionPipeline + pytorch_pipe = StableDiffusionPipeline.from_pretrained(args.model_version, + use_auth_token=True) + + user_specified_scheduler = None + if args.scheduler is not None: + user_specified_scheduler = SCHEDULER_MAP[ + args.scheduler].from_config(pytorch_pipe.scheduler.config) + + coreml_pipe = get_coreml_pipe(pytorch_pipe=pytorch_pipe, + mlpackages_dir=args.i, + model_version=args.model_version, + compute_unit=args.compute_unit, + scheduler_override=user_specified_scheduler, + controlnet_models=args.controlnet) + + if args.controlnet: + controlnet_cond = [] + for i, _ in enumerate(args.controlnet): + image_path = args.controlnet_inputs[i] + image = prepare_controlnet_cond(image_path, coreml_pipe.height, coreml_pipe.width) + controlnet_cond.append(image) + else: + controlnet_cond = None + + logger.info("Beginning image generation.") + image = coreml_pipe( + prompt=args.prompt, + height=coreml_pipe.height, + width=coreml_pipe.width, + num_inference_steps=args.num_inference_steps, + guidance_scale=args.guidance_scale, + controlnet_cond=controlnet_cond, + negative_prompt=args.negative_prompt, + ) + + out_path = get_image_path(args) + logger.info(f"Saving generated image to {out_path}") + image["images"][0].save(out_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--prompt", + required=True, + help="The text prompt to be used for 
text-to-image generation.") + parser.add_argument( + "-i", + required=True, + help=("Path to input directory with the .mlpackage files generated by " + "python_coreml_stable_diffusion.torch2coreml")) + parser.add_argument("-o", required=True) + parser.add_argument("--seed", + "-s", + default=93, + type=int, + help="Random seed to be able to reproduce results") + parser.add_argument( + "--model-version", + default="CompVis/stable-diffusion-v1-4", + help= + ("The pre-trained model checkpoint and configuration to restore. " + "For available versions: https://huggingface.co/models?search=stable-diffusion" + )) + parser.add_argument( + "--compute-unit", + choices=get_available_compute_units(), + default="ALL", + help=("The compute units to be used when executing Core ML models. " + f"Options: {get_available_compute_units()}")) + parser.add_argument( + "--scheduler", + choices=tuple(SCHEDULER_MAP.keys()), + default=None, + help=("The scheduler to use for running the reverse diffusion process. " + "If not specified, the default scheduler from the diffusers pipeline is utilized")) + parser.add_argument( + "--num-inference-steps", + default=50, + type=int, + help="The number of iterations the unet model will be executed throughout the reverse diffusion process") + parser.add_argument( + "--guidance-scale", + default=7.5, + type=float, + help="Controls the influence of the text prompt on sampling process (0=random images)") + parser.add_argument( + "--controlnet", + nargs="*", + type=str, + help=("Enables ControlNet and use control-unet instead of unet for additional inputs. " + "For Multi-Controlnet, provide the model names separated by spaces.")) + parser.add_argument( + "--controlnet-inputs", + nargs="*", + type=str, + help=("Image paths for ControlNet inputs. " + "Please enter images corresponding to each controlnet provided at --controlnet option in same order.")) + parser.add_argument( + "--negative-prompt", + default=None, + help="The negative text prompt to be used for text-to-image generation.") + + args = parser.parse_args() + main(args) diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py new file mode 100644 index 00000000..89b9f212 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py @@ -0,0 +1,1311 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+# + +from python_coreml_stable_diffusion import unet, controlnet + +import argparse +from collections import OrderedDict, defaultdict +from copy import deepcopy +import coremltools as ct +from diffusers import StableDiffusionPipeline, ControlNetModel +import gc + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np +import os +from python_coreml_stable_diffusion import chunk_mlprogram +import requests +import shutil +import time +import re +import pathlib + +import torch +import torch.nn as nn +import torch.nn.functional as F + +torch.set_grad_enabled(False) + +from types import MethodType + + +def _get_coreml_inputs(sample_inputs, args): + return [ + ct.TensorType( + name=k, + shape=v.shape, + dtype=v.numpy().dtype if isinstance(v, torch.Tensor) else v.dtype, + ) for k, v in sample_inputs.items() + ] + + +def compute_psnr(a, b): + """ Compute Peak-Signal-to-Noise-Ratio across two numpy.ndarray objects + """ + max_b = np.abs(b).max() + sumdeltasq = 0.0 + + sumdeltasq = ((a - b) * (a - b)).sum() + + sumdeltasq /= b.size + sumdeltasq = np.sqrt(sumdeltasq) + + eps = 1e-5 + eps2 = 1e-10 + psnr = 20 * np.log10((max_b + eps) / (sumdeltasq + eps2)) + + return psnr + + +ABSOLUTE_MIN_PSNR = 35 + + +def report_correctness(original_outputs, final_outputs, log_prefix): + """ Report PSNR values across two compatible tensors + """ + original_psnr = compute_psnr(original_outputs, original_outputs) + final_psnr = compute_psnr(original_outputs, final_outputs) + + dB_change = final_psnr - original_psnr + logger.info( + f"{log_prefix}: PSNR changed by {dB_change:.1f} dB ({original_psnr:.1f} -> {final_psnr:.1f})" + ) + + if final_psnr < ABSOLUTE_MIN_PSNR: + raise ValueError(f"{final_psnr:.1f} dB is too low!") + else: + logger.info( + f"{final_psnr:.1f} dB > {ABSOLUTE_MIN_PSNR} dB (minimum allowed) parity check passed" + ) + return final_psnr + +def _get_out_path(args, submodule_name): + fname = f"Stable_Diffusion_version_{args.model_version}_{submodule_name}.mlpackage" + fname = fname.replace("/", "_") + return os.path.join(args.o, fname) + + +# https://github.com/apple/coremltools/issues/1680 +def _save_mlpackage(model, output_path): + # First recreate MLModel object using its in memory spec, then save + ct.models.MLModel(model._spec, + weights_dir=model._weights_dir, + is_temp_package=True).save(output_path) + + +def _convert_to_coreml(submodule_name, torchscript_module, sample_inputs, + output_names, args, out_path=None): + + if out_path is None: + out_path = _get_out_path(args, submodule_name) + + if os.path.exists(out_path): + logger.info(f"Skipping export because {out_path} already exists") + logger.info(f"Loading model from {out_path}") + + start = time.time() + # Note: Note that each model load will trigger a model compilation which takes up to a few minutes. + # The Swifty CLI we provide uses precompiled Core ML models (.mlmodelc) which incurs compilation only + # upon first load and mitigates the load time in subsequent runs. 
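+        # `args.compute_unit` is an enum member name from ct.ComputeUnit,
+        # e.g. "ALL", "CPU_ONLY", "CPU_AND_GPU" or "CPU_AND_NE"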
+ coreml_model = ct.models.MLModel( + out_path, compute_units=ct.ComputeUnit[args.compute_unit]) + logger.info( + f"Loading {out_path} took {time.time() - start:.1f} seconds") + + coreml_model.compute_unit = ct.ComputeUnit[args.compute_unit] + else: + logger.info(f"Converting {submodule_name} to CoreML..") + coreml_model = ct.convert( + torchscript_module, + convert_to="mlprogram", + minimum_deployment_target=ct.target.macOS13, + inputs=_get_coreml_inputs(sample_inputs, args), + outputs=[ct.TensorType(name=name) for name in output_names], + compute_units=ct.ComputeUnit[args.compute_unit], + # skip_model_load=True, + ) + + del torchscript_module + gc.collect() + + coreml_model.save(out_path) + logger.info(f"Saved {submodule_name} model to {out_path}") + + return coreml_model, out_path + + +def quantize_weights_to_8bits(args): + for model_name in [ + "text_encoder", "vae_decoder", "vae_encoder", "unet", "unet_chunk1", "unet_chunk2", + "control-unet", "control-unet_chunk1", "control-unet_chunk2", "safety_checker" + ]: + out_path = _get_out_path(args, model_name) + _quantize_and_save_8bits_model(out_path, model_name) + + if args.convert_controlnet: + for controlnet_model_version in args.convert_controlnet: + controlnet_model_name = controlnet_model_version.replace("/", "_") + fname = f"ControlNet_{controlnet_model_name}.mlpackage" + out_path = os.path.join(args.o, fname) + _quantize_and_save_8bits_model(out_path, controlnet_model_name) + + +def _quantize_and_save_8bits_model(out_path, model_name): + if os.path.exists(out_path): + logger.info(f"Quantizing {model_name}") + mlmodel = ct.models.MLModel(out_path, + compute_units=ct.ComputeUnit.CPU_ONLY) + mlmodel = ct.compression_utils.affine_quantize_weights( + mlmodel, mode="linear") + mlmodel.save(out_path) + logger.info("Done") + else: + logger.info( + f"Skipped quantizing {model_name} (Not found at {out_path})") + + +def _compile_coreml_model(source_model_path, output_dir, final_name): + """ Compiles Core ML models using the coremlcompiler utility from Xcode toolchain + """ + target_path = os.path.join(output_dir, f"{final_name}.mlmodelc") + if os.path.exists(target_path): + logger.warning( + f"Found existing compiled model at {target_path}! 
Skipping..") + return target_path + + logger.info(f"Compiling {source_model_path}") + source_model_name = os.path.basename( + os.path.splitext(source_model_path)[0]) + + os.system(f"xcrun coremlcompiler compile {source_model_path} {output_dir}") + compiled_output = os.path.join(output_dir, f"{source_model_name}.mlmodelc") + shutil.move(compiled_output, target_path) + + return target_path + + +def bundle_resources_for_swift_cli(args): + """ + - Compiles Core ML models from mlpackage into mlmodelc format + - Download tokenizer resources for the text encoder + """ + resources_dir = os.path.join(args.o, "Resources") + if not os.path.exists(resources_dir): + os.makedirs(resources_dir, exist_ok=True) + logger.info(f"Created {resources_dir} for Swift CLI assets") + + # Compile model using coremlcompiler (Significantly reduces the load time for unet) + for source_name, target_name in [("text_encoder", "TextEncoder"), + ("vae_decoder", "VAEDecoder"), + ("vae_encoder", "VAEEncoder"), + ("unet", "Unet"), + ("unet_chunk1", "UnetChunk1"), + ("unet_chunk2", "UnetChunk2"), + ("control-unet", "ControlledUnet"), + ("control-unet_chunk1", "ControlledUnetChunk1"), + ("control-unet_chunk2", "ControlledUnetChunk2"), + ("safety_checker", "SafetyChecker")]: + source_path = _get_out_path(args, source_name) + if os.path.exists(source_path): + target_path = _compile_coreml_model(source_path, resources_dir, + target_name) + logger.info(f"Compiled {source_path} to {target_path}") + else: + logger.warning( + f"{source_path} not found, skipping compilation to {target_name}.mlmodelc" + ) + + if args.convert_controlnet: + for controlnet_model_version in args.convert_controlnet: + controlnet_model_name = controlnet_model_version.replace("/", "_") + fname = f"ControlNet_{controlnet_model_name}.mlpackage" + source_path = os.path.join(args.o, fname) + controlnet_dir = os.path.join(resources_dir, "controlnet") + target_name = "".join([word.title() for word in re.split('_|-', controlnet_model_name)]) + + if os.path.exists(source_path): + target_path = _compile_coreml_model(source_path, controlnet_dir, + target_name) + logger.info(f"Compiled {source_path} to {target_path}") + else: + logger.warning( + f"{source_path} not found, skipping compilation to {target_name}.mlmodelc" + ) + + # Fetch and save vocabulary JSON file for text tokenizer + logger.info("Downloading and saving tokenizer vocab.json") + with open(os.path.join(resources_dir, "vocab.json"), "wb") as f: + f.write(requests.get(args.text_encoder_vocabulary_url).content) + logger.info("Done") + + # Fetch and save merged pairs JSON file for text tokenizer + logger.info("Downloading and saving tokenizer merges.txt") + with open(os.path.join(resources_dir, "merges.txt"), "wb") as f: + f.write(requests.get(args.text_encoder_merges_url).content) + logger.info("Done") + + return resources_dir + + +def convert_text_encoder(pipe, args): + """ Converts the text encoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "text_encoder") + if os.path.exists(out_path): + logger.info( + f"`text_encoder` already exists at {out_path}, skipping conversion." 
+ ) + return + + # Create sample inputs for tracing, conversion and correctness verification + text_encoder_sequence_length = pipe.tokenizer.model_max_length + text_encoder_hidden_size = pipe.text_encoder.config.hidden_size + + sample_text_encoder_inputs = { + "input_ids": + torch.randint( + pipe.text_encoder.config.vocab_size, + (1, text_encoder_sequence_length), + # https://github.com/apple/coremltools/issues/1423 + dtype=torch.float32, + ) + } + sample_text_encoder_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_text_encoder_inputs.items() + } + logger.info(f"Sample inputs spec: {sample_text_encoder_inputs_spec}") + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + mask = torch.ones((bsz, seq_len, seq_len), dtype=dtype) * -1e4 + mask.triu_(1) + mask = mask.unsqueeze(1) + return mask + + class TextEncoder(nn.Module): + + def __init__(self): + super().__init__() + self.text_encoder = pipe.text_encoder + setattr( + self.text_encoder.text_model, "_build_causal_attention_mask", + MethodType(_build_causal_attention_mask, + self.text_encoder.text_model)) + + def forward(self, input_ids): + return self.text_encoder(input_ids, return_dict=False) + + reference_text_encoder = TextEncoder().eval() + + logger.info("JIT tracing text_encoder..") + reference_text_encoder = torch.jit.trace( + reference_text_encoder, + (sample_text_encoder_inputs["input_ids"].to(torch.int32), ), + ) + logger.info("Done.") + + coreml_text_encoder, out_path = _convert_to_coreml( + "text_encoder", reference_text_encoder, sample_text_encoder_inputs, + ["last_hidden_state", "pooled_outputs"], args) + + # Set model metadata + coreml_text_encoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_text_encoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_text_encoder.version = args.model_version + coreml_text_encoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_text_encoder.input_description[ + "input_ids"] = "The token ids that represent the input text" + + # Set the output descriptions + coreml_text_encoder.output_description[ + "last_hidden_state"] = "The token embeddings as encoded by the Transformer model" + coreml_text_encoder.output_description[ + "pooled_outputs"] = "The version of the `last_hidden_state` output after pooling" + + _save_mlpackage(coreml_text_encoder, out_path) + + logger.info(f"Saved text_encoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = pipe.text_encoder( + sample_text_encoder_inputs["input_ids"].to(torch.int32), + return_dict=False, + )[1].numpy() + + coreml_out = list( + coreml_text_encoder.predict( + {k: v.numpy() + for k, v in sample_text_encoder_inputs.items()}).values())[0] + report_correctness( + baseline_out, coreml_out, + "text_encoder baseline PyTorch to reference CoreML") + + del reference_text_encoder, coreml_text_encoder, pipe.text_encoder + gc.collect() + + +def modify_coremltools_torch_frontend_badbmm(): + """ + Modifies coremltools torch frontend for baddbmm to be robust to the `beta` argument being of non-float dtype: + e.g. 
https://github.com/huggingface/diffusers/blob/v0.8.1/src/diffusers/models/attention.py#L315 + """ + from coremltools.converters.mil import register_torch_op + from coremltools.converters.mil.mil import Builder as mb + from coremltools.converters.mil.frontend.torch.ops import _get_inputs + from coremltools.converters.mil.frontend.torch.torch_op_registry import _TORCH_OPS_REGISTRY + if "baddbmm" in _TORCH_OPS_REGISTRY: + del _TORCH_OPS_REGISTRY["baddbmm"] + + @register_torch_op + def baddbmm(context, node): + """ + baddbmm(Tensor input, Tensor batch1, Tensor batch2, Scalar beta=1, Scalar alpha=1) + output = beta * input + alpha * batch1 * batch2 + Notice that batch1 and batch2 must be 3-D tensors each containing the same number of matrices. + If batch1 is a (b×n×m) tensor, batch2 is a (b×m×p) tensor, then input must be broadcastable with a (b×n×p) tensor + and out will be a (b×n×p) tensor. + """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=5) + bias, batch1, batch2, beta, alpha = inputs + + if beta.val != 1.0: + # Apply scaling factor beta to the bias. + if beta.val.dtype == np.int32: + beta = mb.cast(x=beta, dtype="fp32") + logger.warning( + f"Casted the `beta`(value={beta.val}) argument of `baddbmm` op " + "from int32 to float32 dtype for conversion!") + bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled") + + context.add(bias) + + if alpha.val != 1.0: + # Apply scaling factor alpha to the input. + batch1 = mb.mul(x=alpha, y=batch1, name=batch1.name + "_scaled") + context.add(batch1) + + bmm_node = mb.matmul(x=batch1, y=batch2, name=node.name + "_bmm") + context.add(bmm_node) + + baddbmm_node = mb.add(x=bias, y=bmm_node, name=node.name) + context.add(baddbmm_node) + + +def convert_vae_decoder(pipe, args): + """ Converts the VAE Decoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "vae_decoder") + if os.path.exists(out_path): + logger.info( + f"`vae_decoder` already exists at {out_path}, skipping conversion." + ) + return + + if not hasattr(pipe, "unet"): + raise RuntimeError( + "convert_unet() deletes pipe.unet to save RAM. 
" + "Please use convert_vae_decoder() before convert_unet()") + + z_shape = ( + 1, # B + pipe.vae.config.latent_channels, # C + args.latent_h or pipe.unet.config.sample_size, # H + args.latent_w or pipe.unet.config.sample_size, # w + ) + + sample_vae_decoder_inputs = { + "z": torch.rand(*z_shape, dtype=torch.float16) + } + + class VAEDecoder(nn.Module): + """ Wrapper nn.Module wrapper for pipe.decode() method + """ + + def __init__(self): + super().__init__() + self.post_quant_conv = pipe.vae.post_quant_conv + self.decoder = pipe.vae.decoder + # Disable torch 2.0 scaled dot-product attention: https://github.com/apple/coremltools/issues/1823 + self.decoder.mid_block.attentions[0]._use_2_0_attn = False + + def forward(self, z): + return self.decoder(self.post_quant_conv(z)) + + baseline_decoder = VAEDecoder().eval() + + # No optimization needed for the VAE Decoder as it is a pure ConvNet + traced_vae_decoder = torch.jit.trace( + baseline_decoder, (sample_vae_decoder_inputs["z"].to(torch.float32), )) + + modify_coremltools_torch_frontend_badbmm() + coreml_vae_decoder, out_path = _convert_to_coreml( + "vae_decoder", traced_vae_decoder, sample_vae_decoder_inputs, + ["image"], args) + + # Set model metadata + coreml_vae_decoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_vae_decoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_vae_decoder.version = args.model_version + coreml_vae_decoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_vae_decoder.input_description["z"] = \ + "The denoised latent embeddings from the unet model after the last step of reverse diffusion" + + # Set the output descriptions + coreml_vae_decoder.output_description[ + "image"] = "Generated image normalized to range [-1, 1]" + + _save_mlpackage(coreml_vae_decoder, out_path) + + logger.info(f"Saved vae_decoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = baseline_decoder( + z=sample_vae_decoder_inputs["z"].to(torch.float32)).numpy() + coreml_out = list( + coreml_vae_decoder.predict( + {k: v.numpy() + for k, v in sample_vae_decoder_inputs.items()}).values())[0] + report_correctness(baseline_out, coreml_out, + "vae_decoder baseline PyTorch to baseline CoreML") + + del traced_vae_decoder, pipe.vae.decoder, coreml_vae_decoder + gc.collect() + + +def convert_vae_encoder(pipe, args): + """ Converts the VAE Encoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "vae_encoder") + if os.path.exists(out_path): + logger.info( + f"`vae_encoder` already exists at {out_path}, skipping conversion." + ) + return + + if not hasattr(pipe, "unet"): + raise RuntimeError( + "convert_unet() deletes pipe.unet to save RAM. 
" + "Please use convert_vae_encoder() before convert_unet()") + + height = (args.latent_h or pipe.unet.config.sample_size) * 8 + width = (args.latent_w or pipe.unet.config.sample_size) * 8 + + z_shape = ( + 1, # B + 3, # C (RGB range from -1 to 1) + height, # H + width, # w + ) + + sample_vae_encoder_inputs = { + "z": torch.rand(*z_shape, dtype=torch.float16) + } + + class VAEEncoder(nn.Module): + """ Wrapper nn.Module wrapper for pipe.encode() method + """ + + def __init__(self): + super().__init__() + self.quant_conv = pipe.vae.quant_conv + self.encoder = pipe.vae.encoder + # Disable torch 2.0 scaled dot-product attention: https://github.com/apple/coremltools/issues/1823 + self.encoder.mid_block.attentions[0]._use_2_0_attn = False + + def forward(self, z): + return self.quant_conv(self.encoder(z)) + + baseline_encoder = VAEEncoder().eval() + + # No optimization needed for the VAE Encoder as it is a pure ConvNet + traced_vae_encoder = torch.jit.trace( + baseline_encoder, (sample_vae_encoder_inputs["z"].to(torch.float32), )) + + modify_coremltools_torch_frontend_badbmm() + coreml_vae_encoder, out_path = _convert_to_coreml( + "vae_encoder", traced_vae_encoder, sample_vae_encoder_inputs, + ["latent"], args) + + # Set model metadata + coreml_vae_encoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_vae_encoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_vae_encoder.version = args.model_version + coreml_vae_encoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_vae_encoder.input_description["z"] = \ + "The input image to base the initial latents on normalized to range [-1, 1]" + + # Set the output descriptions + coreml_vae_encoder.output_description["latent"] = "The latent embeddings from the unet model from the input image." 
+ + _save_mlpackage(coreml_vae_encoder, out_path) + + logger.info(f"Saved vae_encoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = baseline_encoder( + z=sample_vae_encoder_inputs["z"].to(torch.float32)).numpy() + coreml_out = list( + coreml_vae_encoder.predict( + {k: v.numpy() + for k, v in sample_vae_encoder_inputs.items()}).values())[0] + report_correctness(baseline_out, coreml_out, + "vae_encoder baseline PyTorch to baseline CoreML") + + del traced_vae_encoder, pipe.vae.encoder, coreml_vae_encoder + gc.collect() + + +def convert_unet(pipe, args): + """ Converts the UNet component of Stable Diffusion + """ + if args.unet_support_controlnet: + unet_name = "control-unet" + else: + unet_name = "unet" + + out_path = _get_out_path(args, unet_name) + + # Check if Unet was previously exported and then chunked + unet_chunks_exist = all( + os.path.exists( + out_path.replace(".mlpackage", f"_chunk{idx+1}.mlpackage")) + for idx in range(2)) + + if args.chunk_unet and unet_chunks_exist: + logger.info("`unet` chunks already exist, skipping conversion.") + del pipe.unet + gc.collect() + return + + # If original Unet does not exist, export it from PyTorch+diffusers + elif not os.path.exists(out_path): + # Prepare sample input shapes and values + batch_size = 2 # for classifier-free guidance + sample_shape = ( + batch_size, # B + pipe.unet.config.in_channels, # C + args.latent_h or pipe.unet.config.sample_size, # H + args.latent_w or pipe.unet.config.sample_size, # W + ) + + if not hasattr(pipe, "text_encoder"): + raise RuntimeError( + "convert_text_encoder() deletes pipe.text_encoder to save RAM. " + "Please use convert_unet() before convert_text_encoder()") + + encoder_hidden_states_shape = ( + batch_size, + pipe.text_encoder.config.hidden_size, + 1, + pipe.text_encoder.config.max_position_embeddings, + ) + + # Create the scheduled timesteps for downstream use + DEFAULT_NUM_INFERENCE_STEPS = 50 + pipe.scheduler.set_timesteps(DEFAULT_NUM_INFERENCE_STEPS) + + sample_unet_inputs = OrderedDict([ + ("sample", torch.rand(*sample_shape)), + ("timestep", + torch.tensor([pipe.scheduler.timesteps[0].item()] * + (batch_size)).to(torch.float32)), + ("encoder_hidden_states", torch.rand(*encoder_hidden_states_shape)) + ]) + + # Prepare inputs + baseline_sample_unet_inputs = deepcopy(sample_unet_inputs) + baseline_sample_unet_inputs[ + "encoder_hidden_states"] = baseline_sample_unet_inputs[ + "encoder_hidden_states"].squeeze(2).transpose(1, 2) + + # Initialize reference unet + reference_unet = unet.UNet2DConditionModel(**pipe.unet.config).eval() + load_state_dict_summary = reference_unet.load_state_dict( + pipe.unet.state_dict()) + + if args.unet_support_controlnet: + from .unet import calculate_conv2d_output_shape + additional_residuals_shapes = [] + + # conv_in + out_h, out_w = calculate_conv2d_output_shape( + (args.latent_h or pipe.unet.config.sample_size), + (args.latent_w or pipe.unet.config.sample_size), + reference_unet.conv_in, + ) + additional_residuals_shapes.append( + (batch_size, reference_unet.conv_in.out_channels, out_h, out_w)) + + # down_blocks + for down_block in reference_unet.down_blocks: + additional_residuals_shapes += [ + (batch_size, resnet.out_channels, out_h, out_w) for resnet in down_block.resnets + ] + if hasattr(down_block, "downsamplers") and down_block.downsamplers is not None: + for downsampler in down_block.downsamplers: + out_h, out_w = calculate_conv2d_output_shape(out_h, out_w, downsampler.conv) + 
                    additional_residuals_shapes.append(
+                        (batch_size, down_block.downsamplers[-1].conv.out_channels, out_h, out_w))
+
+            # mid_block
+            additional_residuals_shapes.append(
+                (batch_size, reference_unet.mid_block.resnets[-1].out_channels, out_h, out_w)
+            )
+
+            baseline_sample_unet_inputs["down_block_additional_residuals"] = ()
+            for i, shape in enumerate(additional_residuals_shapes):
+                sample_residual_input = torch.rand(*shape)
+                sample_unet_inputs[f"additional_residual_{i}"] = sample_residual_input
+                if i == len(additional_residuals_shapes) - 1:
+                    baseline_sample_unet_inputs["mid_block_additional_residual"] = sample_residual_input
+                else:
+                    baseline_sample_unet_inputs["down_block_additional_residuals"] += (sample_residual_input, )
+
+        sample_unet_inputs_spec = {
+            k: (v.shape, v.dtype)
+            for k, v in sample_unet_inputs.items()
+        }
+        logger.info(f"Sample UNet inputs spec: {sample_unet_inputs_spec}")
+
+        # JIT trace
+        logger.info("JIT tracing..")
+        reference_unet = torch.jit.trace(reference_unet,
+                                         list(sample_unet_inputs.values()))
+        logger.info("Done.")
+
+        if args.check_output_correctness:
+            baseline_out = pipe.unet(**baseline_sample_unet_inputs,
+                                     return_dict=False)[0].numpy()
+            reference_out = reference_unet(*sample_unet_inputs.values())[0].numpy()
+            report_correctness(baseline_out, reference_out,
+                               "unet baseline to reference PyTorch")
+
+        del pipe.unet
+        gc.collect()
+
+        coreml_sample_unet_inputs = {
+            k: v.numpy().astype(np.float16)
+            for k, v in sample_unet_inputs.items()
+        }
+
+        coreml_unet, out_path = _convert_to_coreml(unet_name, reference_unet,
+                                                   coreml_sample_unet_inputs,
+                                                   ["noise_pred"], args)
+        del reference_unet
+        gc.collect()
+
+        # Set model metadata
+        coreml_unet.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}"
+        coreml_unet.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)"
+        coreml_unet.version = args.model_version
+        coreml_unet.short_description = \
+            "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \
+            "Please refer to https://arxiv.org/abs/2112.10752 for details."
+
+        # Set the input descriptions
+        coreml_unet.input_description["sample"] = \
+            "The low resolution latent feature maps being denoised through reverse diffusion"
+        coreml_unet.input_description["timestep"] = \
+            "A value emitted by the associated scheduler object to condition the model on a given noise schedule"
+        coreml_unet.input_description["encoder_hidden_states"] = \
+            "Output embeddings from the associated text_encoder model to condition the generated image on text. " \
+            "A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. " \
+            "Shorter text does not reduce computation."
+
+        # Set the output descriptions
+        coreml_unet.output_description["noise_pred"] = \
+            "Same shape and dtype as the `sample` input. 
" \ + "The predicted noise to facilitate the reverse diffusion (denoising) process" + + _save_mlpackage(coreml_unet, out_path) + logger.info(f"Saved unet into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + coreml_out = list( + coreml_unet.predict(coreml_sample_unet_inputs).values())[0] + report_correctness(baseline_out, coreml_out, + "unet baseline PyTorch to reference CoreML") + + del coreml_unet + gc.collect() + else: + del pipe.unet + gc.collect() + logger.info( + f"`unet` already exists at {out_path}, skipping conversion.") + + if args.chunk_unet and not unet_chunks_exist: + logger.info("Chunking unet in two approximately equal MLModels") + args.mlpackage_path = out_path + args.remove_original = False + chunk_mlprogram.main(args) + + +def convert_safety_checker(pipe, args): + """ Converts the Safety Checker component of Stable Diffusion + """ + if pipe.safety_checker is None: + logger.warning( + f"diffusers pipeline for {args.model_version} does not have a `safety_checker` module! " \ + "`--convert-safety-checker` will be ignored." + ) + return + + out_path = _get_out_path(args, "safety_checker") + if os.path.exists(out_path): + logger.info( + f"`safety_checker` already exists at {out_path}, skipping conversion." + ) + return + + im_h = pipe.vae.config.sample_size + im_w = pipe.vae.config.sample_size + + if args.latent_h is not None: + im_h = args.latent_h * 8 + + if args.latent_w is not None: + im_w = args.latent_w * 8 + + sample_image = np.random.randn( + 1, # B + im_h, # H + im_w, # w + 3 # C + ).astype(np.float32) + + # Note that pipe.feature_extractor is not an ML model. It simply + # preprocesses data for the pipe.safety_checker module. + safety_checker_input = pipe.feature_extractor( + pipe.numpy_to_pil(sample_image), + return_tensors="pt", + ).pixel_values.to(torch.float32) + + sample_safety_checker_inputs = OrderedDict([ + ("clip_input", safety_checker_input), + ("images", torch.from_numpy(sample_image)), + ("adjustment", torch.tensor([0]).to(torch.float32)), + ]) + + sample_safety_checker_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_safety_checker_inputs.items() + } + logger.info(f"Sample inputs spec: {sample_safety_checker_inputs_spec}") + + # Patch safety_checker's forward pass to be vectorized and avoid conditional blocks + # (similar to pipe.safety_checker.forward_onnx) + from diffusers.pipelines.stable_diffusion import safety_checker + + def forward_coreml(self, clip_input, images, adjustment): + """ Forward pass implementation for safety_checker + """ + + def cosine_distance(image_embeds, text_embeds): + return F.normalize(image_embeds) @ F.normalize( + text_embeds).transpose(0, 1) + + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, + self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + special_care = special_scores.gt(0).float().sum(dim=1).gt(0).float() + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand( + -1, cos_dist.shape[1]) + + concept_scores = (cos_dist - + self.concept_embeds_weights) + special_adjustment + has_nsfw_concepts = concept_scores.gt(0).float().sum(dim=1).gt(0)[:, + None, + None, + None] + + has_nsfw_concepts_inds, _ = torch.broadcast_tensors( + has_nsfw_concepts, images) + 
images[has_nsfw_concepts_inds] = 0.0 # black image + + return images, has_nsfw_concepts.float(), concept_scores + + baseline_safety_checker = deepcopy(pipe.safety_checker.eval()) + setattr(baseline_safety_checker, "forward", + MethodType(forward_coreml, baseline_safety_checker)) + + # In order to parity check the actual signal, we need to override the forward pass to return `concept_scores` which is the + # output before thresholding + # Reference: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L100 + def forward_extended_return(self, clip_input, images, adjustment): + + def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = F.normalize(image_embeds) + normalized_text_embeds = F.normalize(text_embeds) + return torch.mm(normalized_image_embeds, + normalized_text_embeds.t()) + + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, + self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand( + -1, cos_dist.shape[1]) + + concept_scores = (cos_dist - + self.concept_embeds_weights) + special_adjustment + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + images[has_nsfw_concepts] = 0.0 + + return images, has_nsfw_concepts, concept_scores + + setattr(pipe.safety_checker, "forward", + MethodType(forward_extended_return, pipe.safety_checker)) + + # Trace the safety_checker model + logger.info("JIT tracing..") + traced_safety_checker = torch.jit.trace( + baseline_safety_checker, list(sample_safety_checker_inputs.values())) + logger.info("Done.") + del baseline_safety_checker + gc.collect() + + # Cast all inputs to float16 + coreml_sample_safety_checker_inputs = { + k: v.numpy().astype(np.float16) + for k, v in sample_safety_checker_inputs.items() + } + + # Convert safety_checker model to Core ML + coreml_safety_checker, out_path = _convert_to_coreml( + "safety_checker", traced_safety_checker, + coreml_sample_safety_checker_inputs, + ["filtered_images", "has_nsfw_concepts", "concept_scores"], args) + + # Set model metadata + coreml_safety_checker.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_safety_checker.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_safety_checker.version = args.model_version + coreml_safety_checker.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." 
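+
+    # Sketch (illustrative, mirroring the thresholding in forward_coreml
+    # above): `has_nsfw_concepts` is `concept_scores` thresholded at zero, so
+    # the `adjustment` input shifts the recall/precision operating point:
+    #
+    #   import numpy as np
+    #   concept_scores = np.array([[-0.2, 0.1], [-0.5, -0.3]])
+    #   has_nsfw_concepts = (concept_scores > 0).any(axis=1)  # [True, False]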
+
+    # Set the input descriptions
+    coreml_safety_checker.input_description["clip_input"] = \
+        "The normalized image input tensor resized to (224x224) in channels-first (BCHW) format"
+    coreml_safety_checker.input_description["images"] = \
+        f"Output of the vae_decoder ({pipe.vae.config.sample_size}x{pipe.vae.config.sample_size}) in channels-last (BHWC) format"
+    coreml_safety_checker.input_description["adjustment"] = \
+        "Bias added to the concept scores to trade off increased recall for reduced precision in the safety checker classifier"
+
+    # Set the output descriptions
+    coreml_safety_checker.output_description["filtered_images"] = \
+        "Identical to the input `images`. If the safety checker detected any sensitive content, " \
+        "the corresponding image is replaced with a blank image (zeros)"
+    coreml_safety_checker.output_description["has_nsfw_concepts"] = \
+        "Indicates whether the safety checker model found any sensitive content in the given image"
+    coreml_safety_checker.output_description["concept_scores"] = \
+        "Concept scores are the raw scores which, when thresholded at zero, yield the `has_nsfw_concepts` output. " \
+        "These scores can be used to tune the `adjustment` input"
+
+    _save_mlpackage(coreml_safety_checker, out_path)
+
+    if args.check_output_correctness:
+        baseline_out = pipe.safety_checker(
+            **sample_safety_checker_inputs)[2].numpy()
+        coreml_out = coreml_safety_checker.predict(
+            coreml_sample_safety_checker_inputs)["concept_scores"]
+        report_correctness(
+            baseline_out, coreml_out,
+            "safety_checker baseline PyTorch to reference CoreML")
+
+    del traced_safety_checker, coreml_safety_checker, pipe.safety_checker
+    gc.collect()
+
+def _get_controlnet_base_model(controlnet_model_version):
+    from huggingface_hub import model_info
+    info = model_info(controlnet_model_version)
+    return info.cardData.get("base_model", None)
+
+def convert_controlnet(pipe, args):
+    """ Converts each ControlNet for Stable Diffusion
+    """
+    if not hasattr(pipe, "unet"):
+        raise RuntimeError(
+            "convert_unet() deletes pipe.unet to save RAM. "
+            "Please use convert_controlnet() before convert_unet()")
+
+    if not hasattr(pipe, "text_encoder"):
+        raise RuntimeError(
+            "convert_text_encoder() deletes pipe.text_encoder to save RAM. "
+            "Please use convert_controlnet() before convert_text_encoder()")
+
+    for i, controlnet_model_version in enumerate(args.convert_controlnet):
+        base_model = _get_controlnet_base_model(controlnet_model_version)
+
+        if base_model is None and args.model_version != "runwayml/stable-diffusion-v1-5":
+            logger.warning(
+                "The original ControlNet models were trained using Stable Diffusion v1.5. "
+                f"It is possible that model {args.model_version} is not compatible with ControlNet.")
+        if base_model is not None and base_model != args.model_version:
+            raise RuntimeError(
+                f"ControlNet model {controlnet_model_version} was trained using "
+                f"Stable Diffusion model {base_model}.\nHowever, you specified "
+                f"version {args.model_version} on the command line. Please use "
+                f"--model-version {base_model} to convert this model.")
+
+        controlnet_model_name = controlnet_model_version.replace("/", "_")
+        fname = f"ControlNet_{controlnet_model_name}.mlpackage"
+        out_path = os.path.join(args.o, fname)
+
+        if os.path.exists(out_path):
+            logger.info(
+                f"`controlnet_{controlnet_model_name}` already exists at {out_path}, skipping conversion."
+ ) + continue + + if i == 0: + batch_size = 2 # for classifier-free guidance + sample_shape = ( + batch_size, # B + pipe.unet.config.in_channels, # C + (args.latent_h or pipe.unet.config.sample_size), # H + (args.latent_w or pipe.unet.config.sample_size), # W + ) + + encoder_hidden_states_shape = ( + batch_size, + pipe.text_encoder.config.hidden_size, + 1, + pipe.text_encoder.config.max_position_embeddings, + ) + + controlnet_cond_shape = ( + batch_size, # B + 3, # C + (args.latent_h or pipe.unet.config.sample_size) * 8, # H + (args.latent_w or pipe.unet.config.sample_size) * 8, # w + ) + + # Create the scheduled timesteps for downstream use + DEFAULT_NUM_INFERENCE_STEPS = 50 + pipe.scheduler.set_timesteps(DEFAULT_NUM_INFERENCE_STEPS) + + # Prepare inputs + sample_controlnet_inputs = OrderedDict([ + ("sample", torch.rand(*sample_shape)), + ("timestep", + torch.tensor([pipe.scheduler.timesteps[0].item()] * + (batch_size)).to(torch.float32)), + ("encoder_hidden_states", torch.rand(*encoder_hidden_states_shape)), + ("controlnet_cond", torch.rand(*controlnet_cond_shape)), + ]) + sample_controlnet_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_controlnet_inputs.items() + } + logger.info( + f"Sample ControlNet inputs spec: {sample_controlnet_inputs_spec}") + + baseline_sample_controlnet_inputs = deepcopy(sample_controlnet_inputs) + baseline_sample_controlnet_inputs[ + "encoder_hidden_states"] = baseline_sample_controlnet_inputs[ + "encoder_hidden_states"].squeeze(2).transpose(1, 2) + + # Import controlnet model and initialize reference controlnet + original_controlnet = ControlNetModel.from_pretrained( + controlnet_model_version, + use_auth_token=True + ) + reference_controlnet = controlnet.ControlNetModel(**original_controlnet.config).eval() + load_state_dict_summary = reference_controlnet.load_state_dict( + original_controlnet.state_dict()) + + num_residuals = reference_controlnet.get_num_residuals() + output_keys = [f"additional_residual_{i}" for i in range(num_residuals)] + + # JIT trace + logger.info("JIT tracing..") + reference_controlnet = torch.jit.trace(reference_controlnet, + list(sample_controlnet_inputs.values())) + logger.info("Done.") + + if args.check_output_correctness: + baseline_out = original_controlnet(**baseline_sample_controlnet_inputs, + return_dict=False) + reference_out = reference_controlnet(*sample_controlnet_inputs.values()) + + baseline_down_residuals, baseline_mid_residuals = baseline_out + baseline_out = baseline_down_residuals + (baseline_mid_residuals,) + reference_down_residuals, reference_mid_residuals = reference_out + reference_out = reference_down_residuals +(reference_mid_residuals,) + + for key, b_out, r_out in zip(output_keys, baseline_out, reference_out): + b_out = b_out.numpy() + r_out = r_out.numpy() + logger.info(f"Check {key} correctness") + report_correctness(b_out, r_out, + f"controlnet({controlnet_model_name}) baseline to reference PyTorch") + + del original_controlnet + gc.collect() + + coreml_sample_controlnet_inputs = { + k: v.numpy().astype(np.float16) + for k, v in sample_controlnet_inputs.items() + } + + coreml_controlnet, out_path = _convert_to_coreml(f"controlnet_{controlnet_model_name}", reference_controlnet, + coreml_sample_controlnet_inputs, + output_keys, args, + out_path=out_path) + + del reference_controlnet + gc.collect() + + coreml_controlnet.author = f"Please refer to the Model Card available at huggingface.co/{controlnet_model_version}" + coreml_controlnet.license = "OpenRAIL 
(https://huggingface.co/spaces/CompVis/stable-diffusion-license)"
+        coreml_controlnet.version = controlnet_model_version
+        coreml_controlnet.short_description = \
+            "ControlNet is a neural network structure to control diffusion models by adding extra conditions. " \
+            "Please refer to https://arxiv.org/abs/2302.05543 for details."
+
+        # Set the input descriptions
+        coreml_controlnet.input_description["sample"] = \
+            "The low resolution latent feature maps being denoised through reverse diffusion"
+        coreml_controlnet.input_description["timestep"] = \
+            "A value emitted by the associated scheduler object to condition the model on a given noise schedule"
+        coreml_controlnet.input_description["encoder_hidden_states"] = \
+            "Output embeddings from the associated text_encoder model to condition the generated image on text. " \
+            "A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. " \
+            "Shorter text does not reduce computation."
+        coreml_controlnet.input_description["controlnet_cond"] = \
+            "An additional input image for ControlNet to condition the generated images."
+
+        # Set the output descriptions
+        for i in range(num_residuals):
+            coreml_controlnet.output_description[f"additional_residual_{i}"] = \
+                "One of the outputs of each downsampling block in ControlNet. " \
+                "This value is added to the corresponding resnet output in the UNet."
+
+        _save_mlpackage(coreml_controlnet, out_path)
+        logger.info(f"Saved controlnet into {out_path}")
+
+        # Parity check PyTorch vs CoreML
+        if args.check_output_correctness:
+            coreml_out = coreml_controlnet.predict(coreml_sample_controlnet_inputs)
+            for key, b_out in zip(output_keys, baseline_out):
+                b_out = b_out.numpy()
+                logger.info(f"Check {key} correctness")
+                report_correctness(b_out, coreml_out[key],
+                                   "controlnet baseline PyTorch to reference CoreML")
+
+        del coreml_controlnet
+        gc.collect()
+
+
+def main(args):
+    os.makedirs(args.o, exist_ok=True)
+
+    # Instantiate diffusers pipe as reference
+    logger.info(
+        f"Initializing StableDiffusionPipeline with {args.model_version}..")
+    pipe = StableDiffusionPipeline.from_pretrained(args.model_version,
+                                                   use_auth_token=True)
+    logger.info("Done.")
+
+    # Register the selected attention implementation globally
+    unet.ATTENTION_IMPLEMENTATION_IN_EFFECT = unet.AttentionImplementations[
+        args.attention_implementation]
+    logger.info(
+        f"Attention implementation in effect: {unet.ATTENTION_IMPLEMENTATION_IN_EFFECT}"
+    )
+
+    # Convert models
+    if args.convert_vae_decoder:
+        logger.info("Converting vae_decoder")
+        convert_vae_decoder(pipe, args)
+        logger.info("Converted vae_decoder")
+
+    if args.convert_vae_encoder:
+        logger.info("Converting vae_encoder")
+        convert_vae_encoder(pipe, args)
+        logger.info("Converted vae_encoder")
+
+    if args.convert_controlnet:
+        logger.info("Converting controlnet")
+        convert_controlnet(pipe, args)
+        logger.info("Converted controlnet")
+
+    if args.convert_unet:
+        logger.info("Converting unet")
+        convert_unet(pipe, args)
+        logger.info("Converted unet")
+
+    if args.convert_text_encoder:
+        logger.info("Converting text_encoder")
+        convert_text_encoder(pipe, args)
+        logger.info("Converted text_encoder")
+
+    if args.convert_safety_checker:
+        logger.info("Converting safety_checker")
+        convert_safety_checker(pipe, args)
+        logger.info("Converted safety_checker")
+
+    if args.bundle_resources_for_swift_cli:
+        logger.info("Bundling resources for the Swift CLI")
+        bundle_resources_for_swift_cli(args)
+        logger.info("Bundled resources for the Swift CLI")
+
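+    # Note: a typical end-to-end invocation of this converter looks like the
+    # following (the module path is an assumption based on this package's
+    # layout; adjust to wherever this script is installed):
+    #
+    #   python -m python_coreml_stable_diffusion.torch2coreml \
+    #       --convert-unet --convert-text-encoder --convert-vae-decoder \
+    #       --convert-safety-checker \
+    #       --model-version CompVis/stable-diffusion-v1-4 -o ./models
+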
+    if args.quantize_weights_to_8bits:
+        # Note: Not recommended, significantly degrades generated image quality
+        logger.info("Quantizing weights to 8-bit precision")
+        quantize_weights_to_8bits(args)
+        logger.info("Quantized weights to 8-bit precision")
+
+
+def parser_spec():
+    parser = argparse.ArgumentParser()
+
+    # Select which models to export (all are needed for the text-to-image pipeline to function)
+    parser.add_argument("--convert-text-encoder", action="store_true")
+    parser.add_argument("--convert-vae-decoder", action="store_true")
+    parser.add_argument("--convert-vae-encoder", action="store_true")
+    parser.add_argument("--convert-unet", action="store_true")
+    parser.add_argument("--convert-safety-checker", action="store_true")
+    parser.add_argument(
+        "--convert-controlnet",
+        nargs="*",
+        type=str,
+        help=
+        "Converts a ControlNet model hosted on HuggingFace to Core ML format. "
+        "To convert multiple models, provide their names separated by spaces.",
+    )
+    parser.add_argument(
+        "--model-version",
+        default="CompVis/stable-diffusion-v1-4",
+        help=
+        ("The pre-trained model checkpoint and configuration to restore. "
+         "For available versions: https://huggingface.co/models?search=stable-diffusion"
+         ))
+    parser.add_argument("--compute-unit",
+                        choices=tuple(cu
+                                      for cu in ct.ComputeUnit._member_names_),
+                        default="ALL")
+
+    parser.add_argument(
+        "--latent-h",
+        type=int,
+        default=None,
+        help=
+        "The spatial resolution (number of rows) of the latent space. Defaults to pipe.unet.config.sample_size",
+    )
+    parser.add_argument(
+        "--latent-w",
+        type=int,
+        default=None,
+        help=
+        "The spatial resolution (number of cols) of the latent space. Defaults to pipe.unet.config.sample_size",
+    )
+    parser.add_argument(
+        "--attention-implementation",
+        choices=tuple(ai
+                      for ai in unet.AttentionImplementations._member_names_),
+        default=unet.ATTENTION_IMPLEMENTATION_IN_EFFECT.name,
+        help=
+        "The enumerated implementations trade off between ANE and GPU performance",
+    )
+    parser.add_argument(
+        "-o",
+        default=os.getcwd(),
+        help="The resulting mlpackages will be saved into this directory")
+    parser.add_argument(
+        "--check-output-correctness",
+        action="store_true",
+        help=
+        "If specified, compares the outputs of the original PyTorch and final CoreML models and reports PSNR in dB. "
+        "Enabling this feature uses more memory. Disable it if your machine runs out of memory."
+    )
+    parser.add_argument(
+        "--chunk-unet",
+        action="store_true",
+        help=
+        "If specified, generates two mlpackages out of the unet model with approximately equal weight sizes. "
+        "This is required for ANE deployment on iOS and iPadOS. Not required for macOS."
+    )
+    parser.add_argument(
+        "--quantize-weights-to-8bits",
+        action="store_true",
+        help=
+        "If specified, quantizes 16-bit weights to 8-bit weights in-place for all models. "
+        "Not recommended, as the generated image quality degrades significantly after 8-bit weight quantization."
+    )
+    parser.add_argument(
+        "--unet-support-controlnet",
+        action="store_true",
+        help=
+        "If specified, enables the unet to receive additional inputs from controlnet. "
+        "Each input is added to the corresponding resnet output."
+    )
+
" + "It compiles all four models and adds them to a StableDiffusionResources directory " + "along with a `vocab.json` and `merges.txt` for the text tokenizer") + parser.add_argument( + "--text-encoder-vocabulary-url", + default= + "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json", + help="The URL to the vocabulary file use by the text tokenizer") + parser.add_argument( + "--text-encoder-merges-url", + default= + "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt", + help="The URL to the merged pairs used in by the text tokenizer.") + + return parser + + +if __name__ == "__main__": + parser = parser_spec() + args = parser.parse_args() + + main(args) diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py new file mode 100644 index 00000000..cf5cdb39 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py @@ -0,0 +1,1104 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. +# + +from python_coreml_stable_diffusion.layer_norm import LayerNormANE + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers import ModelMixin + +from enum import Enum + +import logging + +logger = logging.getLogger(__name__) +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +# Ensure minimum macOS version requirement is met for this particular model +from coremltools.models.utils import _macos_version +if not _macos_version() >= (13, 1): + logger.warning( + "!!! macOS 13.1 and newer or iOS/iPadOS 16.2 and newer is required for best performance !!!" + ) + + +class AttentionImplementations(Enum): + ORIGINAL = "ORIGINAL" + SPLIT_EINSUM = "SPLIT_EINSUM" + + +ATTENTION_IMPLEMENTATION_IN_EFFECT = AttentionImplementations.SPLIT_EINSUM + +WARN_MSG = \ + "This `nn.Module` is intended for Apple Silicon deployment only. 
" \ + "PyTorch-specific optimizations and training is disabled" + +class CrossAttention(nn.Module): + """ Apple Silicon friendly version of `diffusers.models.attention.CrossAttention` + """ + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64): + super().__init__() + inner_dim = dim_head * heads + context_dim = context_dim if context_dim is not None else query_dim + + self.scale = dim_head**-0.5 + self.heads = heads + self.dim_head = dim_head + + self.to_q = nn.Conv2d(query_dim, inner_dim, kernel_size=1, bias=False) + self.to_k = nn.Conv2d(context_dim, + inner_dim, + kernel_size=1, + bias=False) + self.to_v = nn.Conv2d(context_dim, + inner_dim, + kernel_size=1, + bias=False) + self.to_out = nn.Sequential( + nn.Conv2d(inner_dim, query_dim, kernel_size=1, bias=True)) + + def forward(self, hidden_states, context=None, mask=None): + if self.training: + raise NotImplementedError(WARN_MSG) + + batch_size, dim, _, sequence_length = hidden_states.shape + + q = self.to_q(hidden_states) + context = context if context is not None else hidden_states + k = self.to_k(context) + v = self.to_v(context) + + # Validate mask + if mask is not None: + expected_mask_shape = [batch_size, sequence_length, 1, 1] + if mask.dtype == torch.bool: + mask = mask.logical_not().float() * -1e4 + elif mask.dtype == torch.int64: + mask = (1 - mask).float() * -1e4 + elif mask.dtype != torch.float32: + raise TypeError(f"Unexpected dtype for mask: {mask.dtype}") + + if len(mask.size()) == 2: + mask = mask.unsqueeze(2).unsqueeze(2) + + if list(mask.size()) != expected_mask_shape: + raise RuntimeError( + f"Invalid shape for `mask` (Expected {expected_mask_shape}, got {list(mask.size())}" + ) + + if ATTENTION_IMPLEMENTATION_IN_EFFECT == AttentionImplementations.ORIGINAL: + # This version of the attention function is recommended for high GPU core count + # devices such as the M1 Max and M1 Ultra + bs = q.size(0) + mh_q = q.view(bs, self.heads, self.dim_head, -1) + mh_k = k.view(bs, self.heads, self.dim_head, -1) + mh_v = v.view(bs, self.heads, self.dim_head, -1) + + attn_weights = torch.einsum("bhcq,bhck->bhqk", [mh_q, mh_k]) + attn_weights.mul_(self.scale) + + if mask is not None: + attn_weights = attn_weights + mask + + attn_weights = attn_weights.softmax(dim=3) + + attn = torch.einsum("bhqk,bhck->bhcq", [attn_weights, mh_v]) + attn = attn.contiguous().view(bs, self.heads * self.dim_head, 1, + -1) + + elif ATTENTION_IMPLEMENTATION_IN_EFFECT == AttentionImplementations.SPLIT_EINSUM: + # The split attention and einsum from https://machinelearning.apple.com/research/neural-engine-transformers + # are utilized to build an ANE implementation. 
This version is marginally slower on the GPU engine and is + # not recommended for Max and Ultra Mac variants + mh_q = [ + q[:, head_idx * self.dim_head:(head_idx + 1) * + self.dim_head, :, :] for head_idx in range(self.heads) + ] # (bs, dim_head, 1, max_seq_length) * heads + + k = k.transpose(1, 3) + mh_k = [ + k[:, :, :, + head_idx * self.dim_head:(head_idx + 1) * self.dim_head] + for head_idx in range(self.heads) + ] # (bs, max_seq_length, 1, dim_head) * heads + + mh_v = [ + v[:, head_idx * self.dim_head:(head_idx + 1) * + self.dim_head, :, :] for head_idx in range(self.heads) + ] # (bs, dim_head, 1, max_seq_length) * heads + + attn_weights = [ + torch.einsum("bchq,bkhc->bkhq", [qi, ki]) * self.scale + for qi, ki in zip(mh_q, mh_k) + ] # (bs, max_seq_length, 1, max_seq_length) * heads + + if mask is not None: + for head_idx in range(self.heads): + attn_weights[head_idx] = attn_weights[head_idx] + mask + + attn_weights = [ + aw.softmax(dim=1) for aw in attn_weights + ] # (bs, max_seq_length, 1, max_seq_length) * heads + attn = [ + torch.einsum("bkhq,bchk->bchq", wi, vi) + for wi, vi in zip(attn_weights, mh_v) + ] # (bs, dim_head, 1, max_seq_length) * heads + + attn = torch.cat(attn, dim=1) # (bs, dim, 1, max_seq_length) + + else: + raise ValueError(ATTENTION_IMPLEMENTATION_IN_EFFECT) + + return self.to_out(attn) + + +def linear_to_conv2d_map(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """ Unsqueeze twice to map nn.Linear weights to nn.Conv2d weights + """ + for k in state_dict: + is_internal_proj = all(substr in k for substr in ["to_", ".weight"]) + is_ff_proj = all(substr in k for substr in ["ff.", ".weight"]) + is_temb_proj = all(substr in k for substr in ["time_emb", ".weight"]) + is_proj_in = "proj_in.weight" in k + is_proj_out = "proj_out.weight" in k + + if is_internal_proj or is_ff_proj or is_temb_proj or is_proj_in or is_proj_out: + if len(state_dict[k].shape) == 2: + state_dict[k] = state_dict[k][:, :, None, None] + +# Note: torch.nn.LayerNorm and ane_transformers.reference.layer_norm.LayerNormANE +# apply scale and bias terms in opposite orders. In order to accurately restore a +# state_dict trained using the former into the the latter, we adjust the bias term +def correct_for_bias_scale_order_inversion(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs): + state_dict[prefix + + "bias"] = state_dict[prefix + "bias"] / state_dict[prefix + + "weight"] + return state_dict + + +class LayerNormANE(LayerNormANE): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._register_load_state_dict_pre_hook( + correct_for_bias_scale_order_inversion) + + +# Reference: https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py +# (modified, e.g. 
the attention implementation) +class CrossAttnUpBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + attn_num_head_channels=1, + cross_attention_dim=768, + attention_type="default", + output_scale_factor=1.0, + downsample_padding=1, + add_upsample=True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.attention_type = attention_type + self.attn_num_head_channels = attn_num_head_channels + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - + 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + attentions.append( + SpatialTransformer( + out_channels, + attn_num_head_channels, + out_channels // attn_num_head_channels, + depth=1, + context_dim=cross_attention_dim, + )) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.upsamplers = None + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels)]) + + def forward(self, + hidden_states, + res_hidden_states_tuple, + temb=None, + encoder_hidden_states=None): + for resnet, attn in zip(self.resnets, self.attentions): + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], + dim=1) + + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states, context=encoder_hidden_states) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class UpBlock2D(nn.Module): + + def __init__( + self, + in_channels, + prev_output_channel, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + add_upsample=True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - + 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + + self.resnets = nn.ModuleList(resnets) + self.upsamplers = None + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels)]) + + def forward(self, hidden_states, res_hidden_states_tuple, temb=None): + for resnet in self.resnets: + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], + dim=1) + + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class CrossAttnDownBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + 
resnet_act_fn="swish", + resnet_groups=32, + attn_num_head_channels=1, + cross_attention_dim=768, + attention_type="default", + output_scale_factor=1.0, + downsample_padding=1, + add_downsample=True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.attention_type = attention_type + self.attn_num_head_channels = attn_num_head_channels + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + attentions.append( + SpatialTransformer( + out_channels, + attn_num_head_channels, + out_channels // attn_num_head_channels, + depth=1, + context_dim=cross_attention_dim, + )) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList([Downsample2D(out_channels)]) + else: + self.downsamplers = None + + def forward(self, hidden_states, temb=None, encoder_hidden_states=None): + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states, context=encoder_hidden_states) + output_states += (hidden_states, ) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states += (hidden_states, ) + + return hidden_states, output_states + + +class DownBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states, temb=None): + output_states = () + + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb) + output_states += (hidden_states, ) + + return hidden_states, output_states + + +class ResnetBlock2D(nn.Module): + + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + temb_channels=512, + groups=32, + groups_out=None, + eps=1e-6, + time_embedding_norm="default", + use_nin_shortcut=None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + + if groups_out is None: + groups_out = groups + + self.norm1 = torch.nn.GroupNorm(num_groups=groups, + num_channels=in_channels, + eps=eps, + affine=True) + + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + if temb_channels is not None: + self.time_emb_proj = torch.nn.Conv2d(temb_channels, + out_channels, + kernel_size=1) + else: + self.time_emb_proj = None + + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, + num_channels=out_channels, + eps=eps, + affine=True) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + self.nonlinearity = nn.SiLU() + + 
self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut + + self.conv_shortcut = None + if self.use_nin_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + hidden_states = x + hidden_states = self.norm1(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv1(hidden_states) + + if temb is not None: + temb = self.time_emb_proj(self.nonlinearity(temb)) + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + x = self.conv_shortcut(x) + + out = (x + hidden_states) + + return out + + +class Upsample2D(nn.Module): + + def __init__(self, channels): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 3, padding=1) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode="nearest") + return self.conv(x) + + +class Downsample2D(nn.Module): + + def __init__(self, channels): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 3, stride=2, padding=1) + + def forward(self, x): + return self.conv(x) + + +class SpatialTransformer(nn.Module): + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + context_dim=None, + ): + super().__init__() + self.n_heads = n_heads + self.d_head = d_head + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = torch.nn.GroupNorm(num_groups=32, + num_channels=in_channels, + eps=1e-6, + affine=True) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList([ + BasicTransformerBlock(inner_dim, + n_heads, + d_head, + context_dim=context_dim) + for d in range(depth) + ]) + + self.proj_out = nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, hidden_states, context=None): + batch, channel, height, weight = hidden_states.shape + residual = hidden_states + hidden_states = self.norm(hidden_states) + hidden_states = self.proj_in(hidden_states) + hidden_states = hidden_states.view(batch, channel, 1, height * weight) + for block in self.transformer_blocks: + hidden_states = block(hidden_states, context=context) + hidden_states = hidden_states.view(batch, channel, height, weight) + hidden_states = self.proj_out(hidden_states) + return hidden_states + residual + + +class BasicTransformerBlock(nn.Module): + + def __init__(self, dim, n_heads, d_head, context_dim=None, gated_ff=True): + super().__init__() + self.attn1 = CrossAttention( + query_dim=dim, + heads=n_heads, + dim_head=d_head, + ) + self.ff = FeedForward(dim, glu=gated_ff) + self.attn2 = CrossAttention( + query_dim=dim, + context_dim=context_dim, + heads=n_heads, + dim_head=d_head, + ) + self.norm1 = LayerNormANE(dim) + self.norm2 = LayerNormANE(dim) + self.norm3 = LayerNormANE(dim) + + def forward(self, hidden_states, context=None): + hidden_states = self.attn1(self.norm1(hidden_states)) + hidden_states + hidden_states = self.attn2(self.norm2(hidden_states), + context=context) + hidden_states + hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states + return hidden_states + + +class FeedForward(nn.Module): + + def __init__(self, dim, dim_out=None, mult=4, glu=False): + super().__init__() + inner_dim = int(dim * mult) + self.net = nn.Sequential( + 
GEGLU(dim_in=dim, dim_out=inner_dim), nn.Identity(), + nn.Conv2d(inner_dim, + dim_out if dim_out is not None else dim, + kernel_size=1)) + + def forward(self, hidden_states): + return self.net(hidden_states) + + +class GEGLU(nn.Module): + + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Conv2d(dim_in, dim_out * 2, kernel_size=1) + + def forward(self, hidden_states): + hidden_states, gate = self.proj(hidden_states).chunk(2, dim=1) + return hidden_states * F.gelu(gate) + + +class TimestepEmbedding(nn.Module): + + def __init__(self, channel, time_embed_dim, act_fn="silu"): + super().__init__() + + self.linear_1 = nn.Conv2d(channel, time_embed_dim, kernel_size=1) + self.act = None + if act_fn == "silu": + self.act = nn.SiLU() + self.linear_2 = nn.Conv2d(time_embed_dim, + time_embed_dim, + kernel_size=1) + + def forward(self, sample): + if len(sample.shape) == 2: + sample = sample.unsqueeze(-1).unsqueeze(-1) + sample = self.linear_1(sample) + + if self.act is not None: + sample = self.act(sample) + + sample = self.linear_2(sample) + return sample + + +class Timesteps(nn.Module): + + def __init__(self, num_channels, flip_sin_to_cos, downscale_freq_shift): + super().__init__() + self.num_channels = num_channels + self.flip_sin_to_cos = flip_sin_to_cos + self.downscale_freq_shift = downscale_freq_shift + + def forward(self, timesteps): + t_emb = get_timestep_embedding( + timesteps, + self.num_channels, + flip_sin_to_cos=self.flip_sin_to_cos, + downscale_freq_shift=self.downscale_freq_shift, + ) + return t_emb + + +def get_timestep_embedding( + timesteps, + embedding_dim, + flip_sin_to_cos=False, + downscale_freq_shift=1, + scale=1, + max_period=10000, +): + assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" + + half_dim = embedding_dim // 2 + exponent = -math.log(max_period) * torch.arange( + start=0, end=half_dim, dtype=torch.float32) + exponent = exponent / (half_dim - downscale_freq_shift) + + emb = torch.exp(exponent).to(device=timesteps.device) + emb = timesteps[:, None].float() * emb[None, :] + emb = scale * emb + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) + + if flip_sin_to_cos: + emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) + + if embedding_dim % 2 == 1: + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +class UNetMidBlock2DCrossAttn(nn.Module): + + def __init__( + self, + in_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + attn_num_head_channels=1, + attention_type="default", + cross_attention_dim=768, + **kwargs, + ): + super().__init__() + + self.attention_type = attention_type + self.attn_num_head_channels = attn_num_head_channels + resnet_groups = resnet_groups if resnet_groups is not None else min( + in_channels // 4, 32) + + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + ) + ] + attentions = [] + + for _ in range(num_layers): + attentions.append( + SpatialTransformer( + in_channels, + attn_num_head_channels, + in_channels // attn_num_head_channels, + depth=1, + context_dim=cross_attention_dim, + )) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + + self.attentions = 
nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states, temb=None, encoder_hidden_states=None): + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + hidden_states = attn(hidden_states, encoder_hidden_states) + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class UNet2DConditionModel(ModelMixin, ConfigMixin): + + @register_to_config + def __init__( + self, + sample_size=None, + in_channels=4, + out_channels=4, + center_input_sample=False, + flip_sin_to_cos=True, + freq_shift=0, + down_block_types=( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + up_block_types=("UpBlock2D", "CrossAttnUpBlock2D", + "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + only_cross_attention=False, + block_out_channels=(320, 640, 1280, 1280), + layers_per_block=2, + downsample_padding=1, + mid_block_scale_factor=1, + act_fn="silu", + norm_num_groups=32, + norm_eps=1e-5, + cross_attention_dim=768, + attention_head_dim=8, + **kwargs, + ): + if kwargs.get("dual_cross_attention", None): + raise NotImplementedError + if kwargs.get("num_classs_embeds", None): + raise NotImplementedError + if only_cross_attention: + raise NotImplementedError + if kwargs.get("use_linear_projection", None): + logger.warning("`use_linear_projection=True` is ignored!") + + super().__init__() + self._register_load_state_dict_pre_hook(linear_to_conv2d_map) + + self.sample_size = sample_size + time_embed_dim = block_out_channels[0] * 4 + + # input + self.conv_in = nn.Conv2d(in_channels, + block_out_channels[0], + kernel_size=3, + padding=(1, 1)) + + # time + time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, + freq_shift) + timestep_input_dim = block_out_channels[0] + time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + + self.time_proj = time_proj + self.time_embedding = time_embedding + + self.down_blocks = nn.ModuleList([]) + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[i], + downsample_padding=downsample_padding, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlock2DCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift="default", + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[i], + resnet_groups=norm_num_groups, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_attention_head_dim = list(reversed(attention_head_dim)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in 
enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min( + i + 1, + len(block_out_channels) - 1)] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=reversed_attention_head_dim[i], + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], + num_groups=norm_num_groups, + eps=norm_eps) + self.conv_act = nn.SiLU() + self.conv_out = nn.Conv2d(block_out_channels[0], + out_channels, + 3, + padding=1) + + def forward( + self, + sample, + timestep, + encoder_hidden_states, + *additional_residuals, + ): + # 0. Project (or look-up) time embeddings + t_emb = self.time_proj(timestep) + emb = self.time_embedding(t_emb) + + # 1. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 2. pre-process + sample = self.conv_in(sample) + + # 3. down + down_block_res_samples = (sample, ) + for downsample_block in self.down_blocks: + if hasattr( + downsample_block, + "attentions") and downsample_block.attentions is not None: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states) + else: + sample, res_samples = downsample_block(hidden_states=sample, + temb=emb) + + down_block_res_samples += res_samples + + if additional_residuals: + new_down_block_res_samples = () + for i, down_block_res_sample in enumerate(down_block_res_samples): + down_block_res_sample = down_block_res_sample + additional_residuals[i] + new_down_block_res_samples += (down_block_res_sample,) + down_block_res_samples = new_down_block_res_samples + + # 4. mid + sample = self.mid_block(sample, + emb, + encoder_hidden_states=encoder_hidden_states) + + if additional_residuals: + sample = sample + additional_residuals[-1] + + # 5. up + for upsample_block in self.up_blocks: + res_samples = down_block_res_samples[-len(upsample_block.resnets):] + down_block_res_samples = down_block_res_samples[:-len( + upsample_block.resnets)] + + if hasattr(upsample_block, + "attentions") and upsample_block.attentions is not None: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + ) + else: + sample = upsample_block(hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples) + + # 6. 
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt
new file mode 100644
index 00000000..601c3f4b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt
@@ -0,0 +1,819 @@
+Copyright (c) 2001-2002 Enthought, Inc. 2003-2022, SciPy Developers.
+All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +This binary distribution of SciPy also bundles the following software: + + +Name: GCC runtime library +Files: .dylibs/* +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. 
+ +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA new file mode 100644 index 00000000..99d9ea2b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA @@ -0,0 +1,952 @@ +Metadata-Version: 2.1 +Name: scipy +Version: 1.10.1 +Summary: Fundamental algorithms for scientific computing in Python +Home-page: https://scipy.org/ +Maintainer-Email: SciPy Developers +License: Copyright (c) 2001-2002 Enthought, Inc. 2003-2022, SciPy Developers. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---- + + This binary distribution of SciPy also bundles the following software: + + + Name: GCC runtime library + Files: .dylibs/* + Description: dynamically linked to files compiled with gcc + Availability: https://gcc.gnu.org/viewcvs/gcc/ + License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
+ + ---- + + Full text of license texts referred to above follows (that they are + listed below does not necessarily imply the conditions apply to the + present binary release): + + ---- + + GCC RUNTIME LIBRARY EXCEPTION + + Version 3.1, 31 March 2009 + + Copyright (C) 2009 Free Software Foundation, Inc. + + Everyone is permitted to copy and distribute verbatim copies of this + license document, but changing it is not allowed. + + This GCC Runtime Library Exception ("Exception") is an additional + permission under section 7 of the GNU General Public License, version + 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that + bears a notice placed by the copyright holder of the file stating that + the file is governed by GPLv3 along with this Exception. + + When you use GCC to compile a program, GCC may combine portions of + certain GCC header files and runtime libraries with the compiled + program. The purpose of this Exception is to allow compilation of + non-GPL (including proprietary) programs to use, in this way, the + header files and runtime libraries covered by this Exception. + + 0. Definitions. + + A file is an "Independent Module" if it either requires the Runtime + Library for execution after a Compilation Process, or makes use of an + interface provided by the Runtime Library, but is not otherwise based + on the Runtime Library. + + "GCC" means a version of the GNU Compiler Collection, with or without + modifications, governed by version 3 (or a specified later version) of + the GNU General Public License (GPL) with the option of using any + subsequent versions published by the FSF. + + "GPL-compatible Software" is software whose conditions of propagation, + modification and use would permit combination with GCC in accord with + the license of GCC. + + "Target Code" refers to output from any compiler for a real or virtual + target processor architecture, in executable form or suitable for + input to an assembler, loader, linker and/or execution + phase. Notwithstanding that, Target Code does not include data in any + format that is used as a compiler intermediate representation, or used + for producing a compiler intermediate representation. + + The "Compilation Process" transforms code entirely represented in + non-intermediate languages designed for human-written code, and/or in + Java Virtual Machine byte code, into Target Code. Thus, for example, + use of source code generators and preprocessors need not be considered + part of the Compilation Process, since the Compilation Process can be + understood as starting with the output of the generators or + preprocessors. + + A Compilation Process is "Eligible" if it is done using GCC, alone or + with other GPL-compatible software, or if it is done without using any + work based on GCC. For example, using non-GPL-compatible Software to + optimize any GCC intermediate representations would not qualify as an + Eligible Compilation Process. + + 1. Grant of Additional Permission. + + You have permission to propagate a work of Target Code formed by + combining the Runtime Library with Independent Modules, even if such + propagation would otherwise violate the terms of GPLv3, provided that + all Target Code was generated by Eligible Compilation Processes. You + may then convey such a combination under terms of your choice, + consistent with the licensing of the Independent Modules. + + 2. No Weakening of GCC Copyleft. 
+ + The availability of this Exception does not imply any general + presumption that third-party software is unaffected by the copyleft + requirements of the license of GCC. + + ---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for + software and other kinds of works. + + The licenses for most software and other practical works are designed + to take away your freedom to share and change the works. By contrast, + the GNU General Public License is intended to guarantee your freedom to + share and change all versions of a program--to make sure it remains free + software for all its users. We, the Free Software Foundation, use the + GNU General Public License for most of our software; it applies also to + any other work released this way by its authors. You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + them if you wish), that you receive source code or can get it if you + want it, that you can change the software or use pieces of it in new + free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you + these rights or asking you to surrender the rights. Therefore, you have + certain responsibilities if you distribute copies of the software, or if + you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must pass on to the recipients the same + freedoms that you received. You must make sure that they, too, receive + or can get the source code. And you must show them these terms so they + know their rights. + + Developers that use the GNU GPL protect your rights with two steps: + (1) assert copyright on the software, and (2) offer you this License + giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains + that there is no warranty for this free software. For both users' and + authors' sake, the GPL requires that modified versions be marked as + changed, so that their problems will not be attributed erroneously to + authors of previous versions. + + Some devices are designed to deny users access to install or run + modified versions of the software inside them, although the manufacturer + can do so. This is fundamentally incompatible with the aim of + protecting users' freedom to change the software. The systematic + pattern of such abuse occurs in the area of products for individuals to + use, which is precisely where it is most unacceptable. Therefore, we + have designed this version of the GPL to prohibit the practice for those + products. If such problems arise substantially in other domains, we + stand ready to extend this provision to those domains in future versions + of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+ States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. 
+ + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. 
If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). 
To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. + + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Scientific/Engineering +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: Homepage, https://scipy.org/ +Project-URL: Documentation, https://docs.scipy.org/doc/scipy/ +Project-URL: Source, https://github.com/scipy/scipy +Project-URL: Download, https://github.com/scipy/scipy/releases +Project-URL: Tracker, https://github.com/scipy/scipy/issues +Requires-Python: <3.12,>=3.8 +Requires-Dist: numpy<1.27.0,>=1.19.5 +Requires-Dist: pytest; extra == "test" +Requires-Dist: pytest-cov; extra == "test" +Requires-Dist: pytest-timeout; extra == "test" +Requires-Dist: pytest-xdist; extra == "test" +Requires-Dist: asv; extra == "test" +Requires-Dist: mpmath; extra == "test" +Requires-Dist: gmpy2; extra == "test" +Requires-Dist: threadpoolctl; extra == "test" +Requires-Dist: scikit-umfpack; extra == "test" +Requires-Dist: pooch; extra == "test" +Requires-Dist: sphinx!=4.1.0; extra == "doc" +Requires-Dist: pydata-sphinx-theme==0.9.0; extra == "doc" +Requires-Dist: sphinx-design>=0.2.0; extra == "doc" +Requires-Dist: matplotlib>2; extra == "doc" +Requires-Dist: numpydoc; extra == "doc" +Requires-Dist: mypy; extra == "dev" +Requires-Dist: typing_extensions; extra == "dev" +Requires-Dist: pycodestyle; extra == "dev" +Requires-Dist: flake8; extra == "dev" +Requires-Dist: rich-click; extra == "dev" +Requires-Dist: click; extra == "dev" +Requires-Dist: doit>=0.36.0; extra == "dev" +Requires-Dist: pydevtool; extra == "dev" +Provides-Extra: test +Provides-Extra: doc +Provides-Extra: dev +Description-Content-Type: text/x-rst + +.. image:: https://github.com/scipy/scipy/blob/main/doc/source/_static/logo.svg + :target: https://scipy.org + :width: 110 + :height: 110 + :align: left + +.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A + :target: https://numfocus.org + +.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads + :target: https://pypi.org/project/scipy/ + +.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads + :target: https://anaconda.org/conda-forge/scipy + +.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg + :target: https://stackoverflow.com/questions/tagged/scipy + +.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue + :target: https://www.nature.com/articles/s41592-019-0686-2 + +SciPy (pronounced "Sigh Pie") is an open-source software for mathematics, +science, and engineering. It includes modules for statistics, optimization, +integration, linear algebra, Fourier transforms, signal and image processing, +ODE solvers, and more. 
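+
+As a minimal sketch of what these routines look like in use (here,
+``scipy.integrate.quad`` for numerical integration)::
+
+    >>> from scipy import integrate
+    >>> # numerically integrate x**2 over [0, 1];
+    >>> # quad returns (value, estimated absolute error)
+    >>> result, error = integrate.quad(lambda x: x**2, 0, 1)
+    >>> round(result, 6)
+    0.333333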
+
+- **Website:** https://scipy.org
+- **Documentation:** https://docs.scipy.org/doc/scipy/
+- **Development version of the documentation:** https://scipy.github.io/devdocs
+- **Mailing list:** https://mail.python.org/mailman3/lists/scipy-dev.python.org/
+- **Source code:** https://github.com/scipy/scipy
+- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
+- **Bug reports:** https://github.com/scipy/scipy/issues
+- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
+- **Report a security vulnerability:** https://tidelift.com/docs/security
+- **Citing in your work:** https://www.scipy.org/citing-scipy/
+
+SciPy is built to work with
+NumPy arrays, and provides many user-friendly and efficient numerical routines,
+such as routines for numerical integration and optimization. Together, they
+run on all popular operating systems, are quick to install, and are free of
+charge. NumPy and SciPy are easy to use, but powerful enough to be depended
+upon by some of the world's leading scientists and engineers. If you need to
+manipulate numbers on a computer and display or publish the results, give
+SciPy a try!
+
+For the installation instructions, see `our install
+guide <https://scipy.github.io/devdocs/getting_started.html>`__.
+
+
+Call for Contributions
+----------------------
+
+We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
+first issue" may be a good starting point. Have a look at `our contributing
+guide <https://scipy.github.io/devdocs/dev/index.html>`__.
+
+Writing code isn’t the only way to contribute to SciPy. You can also:
+
+- review pull requests
+- triage issues
+- develop tutorials, presentations, and other educational materials
+- maintain and improve `our website <https://github.com/scipy/scipy.org>`__
+- develop graphic design for our brand assets and promotional materials
+- help with outreach and onboard new contributors
+- write grant proposals and help with other fundraising efforts
+
+If you’re unsure where to start or how your skills fit in, reach out! You can
+ask on the mailing list or here, on GitHub, by leaving a
+comment on a relevant issue that is already open.
+
+If you are new to contributing to open source, `this
+guide <https://opensource.guide/how-to-contribute/>`__ helps explain why, what,
+and how to get involved.
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD new file mode 100644 index 00000000..86ae4d87 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD @@ -0,0 +1,2037 @@ +scipy-1.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +scipy-1.10.1.dist-info/LICENSE.txt,sha256=90B8ODojKU86elJLmtrzM1tNG5GKEJNxQOYjZuA5YFc,41429 +scipy-1.10.1.dist-info/METADATA,sha256=O0g8a_iszEmeFEN7kfAIlMqS2kLICR8Aph6B9yimsd0,53928 +scipy-1.10.1.dist-info/RECORD,, +scipy-1.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy-1.10.1.dist-info/WHEEL,sha256=Ep2SgF6yV45nIBdxm49hl_xnVh0uKnFXRsk2oguDYE8,93 +scipy/.dylibs/libgcc_s.1.1.dylib,sha256=eMgpcUIhwd2iIQii-PWOGKdIDUZJcAtg58Jc-vx5beM,156896 +scipy/.dylibs/libgfortran.5.dylib,sha256=H3N-g4-YnOnXuaH7Q4xEHh9tfzyoY-uTeEwvDeRIPkA,1846176 +scipy/.dylibs/libopenblas.0.dylib,sha256=Lao87Mh3f70q32BxFVJXIeXOGgkbRzbBDHfiX0o8ugw,20952288 +scipy/.dylibs/libquadmath.0.dylib,sha256=w2NvO_dk1WG46nm6G9H0tB8wFqp25V_PrPvGOUQLxt8,349408 +scipy/__config__.py,sha256=QjarxZy9mtRzzblgt5H4Ep-6mr8oFrYEUqzo9kVTrY4,4409 +scipy/__init__.py,sha256=e1Kwf9PJyelCWzxN7d2m-2fGSq5OCPEiIkhn0Odf3a8,7110 +scipy/__pycache__/__config__.cpython-310.pyc,, +scipy/__pycache__/__init__.cpython-310.pyc,, +scipy/__pycache__/_distributor_init.cpython-310.pyc,, +scipy/__pycache__/conftest.cpython-310.pyc,, +scipy/__pycache__/version.cpython-310.pyc,, +scipy/_distributor_init.py,sha256=2LDC4c2QoxdDkay0RO61CkHdMYLo-TdsihTtkbjt7XA,331 +scipy/_lib/__init__.py,sha256=CXrH_YBpZ-HImHHrqXIhQt_vevp4P5NXClp7hnFMVLM,353 +scipy/_lib/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/__pycache__/_bunch.cpython-310.pyc,, +scipy/_lib/__pycache__/_ccallback.cpython-310.pyc,, +scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc,, +scipy/_lib/__pycache__/_docscrape.cpython-310.pyc,, +scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc,, +scipy/_lib/__pycache__/_gcutils.cpython-310.pyc,, +scipy/_lib/__pycache__/_pep440.cpython-310.pyc,, +scipy/_lib/__pycache__/_testutils.cpython-310.pyc,, +scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc,, +scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc,, +scipy/_lib/__pycache__/_util.cpython-310.pyc,, +scipy/_lib/__pycache__/decorator.cpython-310.pyc,, +scipy/_lib/__pycache__/deprecation.cpython-310.pyc,, +scipy/_lib/__pycache__/doccer.cpython-310.pyc,, +scipy/_lib/__pycache__/uarray.cpython-310.pyc,, +scipy/_lib/_bunch.py,sha256=r3I77OVyYCaMtQy_kNPEYvpfdKB_8YOeuTjHCVuGYQI,8117 +scipy/_lib/_ccallback.py,sha256=_PGPShGcultXRpGR7HuATDHMR6kJpWcxeQPzL0gOh0k,6213 +scipy/_lib/_ccallback_c.cpython-310-darwin.so,sha256=48CkXHcHYazqA5g2OV52Hl8_BI_i8gY_BbNRtAwsyps,106367 +scipy/_lib/_disjoint_set.py,sha256=NeAK884rqiJRghFrwT5s5bVij9WHv65R6CAN0CIW4F4,5483 +scipy/_lib/_docscrape.py,sha256=qRbjNYTP_efI3r3w1aVBnj0PbglqsepL07EbObgiXOo,21584 +scipy/_lib/_finite_differences.py,sha256=llaIPvCOxpE4VA8O8EycPEU8i6LHJyOD-y7Y9OvQHt0,4172 +scipy/_lib/_fpumode.cpython-310-darwin.so,sha256=O9wYQjvaHpDafKOG4YKIVm-2wOpsOAihOfggfajLgCg,50203 +scipy/_lib/_gcutils.py,sha256=hajQd-HUw9ckK7QeBaqXVRpmnxPgyXO3QqqniEh7tRk,2669 +scipy/_lib/_pep440.py,sha256=Vr7B3QsijR5p6h8YAz2LjNGUyzHUJ5gZ4v26NpZAKDc,14069 +scipy/_lib/_test_ccallback.cpython-310-darwin.so,sha256=E2691ASrwEjo6Z8ogUyKx1kuCs9JoX52sI-nIYMIDFM,53218 
+scipy/_lib/_test_deprecation_call.cpython-310-darwin.so,sha256=K-dLwjR81Pd1cnmkcI3un2TPAc8zzTwjJP0OGX5m6YA,55577 +scipy/_lib/_test_deprecation_def.cpython-310-darwin.so,sha256=gUpmabNKudY8hov3_21avYvgWi7OgjRiCyDA9FO08DI,56360 +scipy/_lib/_testutils.py,sha256=SRljev7z4zQOvviYWKL3ZMV9xq-evelO_-kU57IZ8Wc,6841 +scipy/_lib/_threadsafety.py,sha256=xuVqUS2jv46fOOQf7bcrhiYtnPVygqmrIVJc-7_LlI8,1455 +scipy/_lib/_tmpdirs.py,sha256=z3IYpzACnWdN_BMjOvqYbkTvYyUbfbQvfehq7idENSo,2374 +scipy/_lib/_uarray/LICENSE,sha256=yAw5tfzga6SJfhTgsKiLVEWDNNlR6xNhQC_60s-4Y7Q,1514 +scipy/_lib/_uarray/__init__.py,sha256=JLZP3pTSOy4i3Usw4odj4P9dtImMNFrxT4_A9dcgzQU,4493 +scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc,, +scipy/_lib/_uarray/_backend.py,sha256=E1Hzup7YGgJVpb9OVo97zx72a6wgcngxsD12sjylcD4,20498 +scipy/_lib/_uarray/_uarray.cpython-310-darwin.so,sha256=kPml_lt8I8KEOWZ3TVRtOZEokqCtbTbfbkW_gpBEgv8,121706 +scipy/_lib/_util.py,sha256=CFxZxCjAIo2fZzefbaieCRd2aJz4B63JdcpXlSETKZE,24486 +scipy/_lib/decorator.py,sha256=hxTEDkl4BUscis3hwGs8HswgI3ojTml1-UIsLlgdHHY,15059 +scipy/_lib/deprecation.py,sha256=PKadEK4--UfW_-IzAJgwW-olBYTUDsTaHSZlEanpIWg,3176 +scipy/_lib/doccer.py,sha256=shdWIi3u7QBN5CyyKwqWW99qOEsiFewB8eH10FWhYLM,8362 +scipy/_lib/messagestream.cpython-310-darwin.so,sha256=aqLLG76n3MLzcQarXsfYYUK1F3IpiSTd6wiqoldQkl0,79728 +scipy/_lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc,, +scipy/_lib/tests/test__gcutils.py,sha256=OdEmx9K4QVSokt0sV8VF4Uxp4bnSyJyjdFLi2F7nYcg,3416 +scipy/_lib/tests/test__pep440.py,sha256=u9hPoolK4AoIIS-Rq74Du5SJu5og2RxMwgaAvGgWvRo,2277 +scipy/_lib/tests/test__testutils.py,sha256=P4WDJpUgy19wD9tknQSjIivuQvZF7YUBGSBWlur2QRA,800 +scipy/_lib/tests/test__threadsafety.py,sha256=qSfCF5OG_5lbnSl-grmDN_QCU4QLe-fS3sqnwL04pf8,1322 +scipy/_lib/tests/test__util.py,sha256=G5lSPfcPxs7erNfBZjIn9fsUEgHrEnfAEv90zqgBrmU,13325 +scipy/_lib/tests/test_bunch.py,sha256=cQoJPEalhaGSOiwvGwMtNyv4IAVxnjJJxDiBluiS-SY,6169 +scipy/_lib/tests/test_ccallback.py,sha256=mvo9OeGktIqO-vfLLU1FPAfFwxPzX0wcYh_Lnwby7ik,5995 +scipy/_lib/tests/test_deprecation.py,sha256=a_3r_9pFx1sxJXeFgiTSV9DXYnktc4fio1hR0ITPywA,364 +scipy/_lib/tests/test_import_cycles.py,sha256=3MoMy-2qdOw1UOcjQMStTIbyoM6fdSdp6BF_Ie0lzec,1306 +scipy/_lib/tests/test_public_api.py,sha256=tRZud6CKVSFu20f_c79YIXOEAlzGTUcejmLL7E0Aa1g,9942 +scipy/_lib/tests/test_scipy_version.py,sha256=jgo-2YhCkBksXHM6xKiN_iJJZkqz0CvXqn2jVxx1djA,606 +scipy/_lib/tests/test_tmpdirs.py,sha256=jusM--qpUMscMAdbgNGkmCU23UGhytuqZM1gX76oWcE,1242 +scipy/_lib/tests/test_warnings.py,sha256=FIn3ndQMNIiR-o_CjvX5D2ZnAYFKWi8aqunLgkIappU,4295 
+scipy/_lib/uarray.py,sha256=wmH9RAWa-jXxiokMHx-nv0dazCR0UoPlitauJCWspQs,773 +scipy/cluster/__init__.py,sha256=Sco_jwP4eqTtUfElVvmub0n5Ue75puxGtCXYIbt7ZKI,871 +scipy/cluster/__pycache__/__init__.cpython-310.pyc,, +scipy/cluster/__pycache__/hierarchy.cpython-310.pyc,, +scipy/cluster/__pycache__/vq.cpython-310.pyc,, +scipy/cluster/_hierarchy.cpython-310-darwin.so,sha256=zmL_sZd-R3bgEmnHy90h2PtWh16w6JMZJy2gQe4pFU8,340013 +scipy/cluster/_optimal_leaf_ordering.cpython-310-darwin.so,sha256=-JIo2jp88ehM_Hp22IL_blEsLMqknzAHm4jI-G8mgfE,252425 +scipy/cluster/_vq.cpython-310-darwin.so,sha256=u1oAw36G-ECJ96qLHk81Qpw3vaauT_smdddtvick2OI,121718 +scipy/cluster/hierarchy.py,sha256=8xdDObqJN983cw1rOfgF3S-5oGxC60WXHi7e4bAjnSU,148455 +scipy/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc,, +scipy/cluster/tests/hierarchy_test_data.py,sha256=7syUYdIaDVr7hgvMliX0CW4386utjBJn1DOgX0USXls,6850 +scipy/cluster/tests/test_disjoint_set.py,sha256=UXzuhddiuQIu9kgftWBvqOg6MCSej3NHJCdEhvcl0cY,5469 +scipy/cluster/tests/test_hierarchy.py,sha256=OScakHr3N2uDXNZ12g8WAzAPD6NtSr83aRLoodqUfSo,43629 +scipy/cluster/tests/test_vq.py,sha256=NZQd3jloOCPjMnhZ5x5nuMIZ38eX_Nkwgx0ZMjOEd34,13434 +scipy/cluster/vq.py,sha256=M6Sf9qSV4-2y6m-Dgi0FHDRNy3GcFlTfNZh2hdp8avA,29222 +scipy/conftest.py,sha256=DsaVf3409ME_kaiWcxChA2bS5f1CCzbxgPFznQBuyJM,3478 +scipy/constants/__init__.py,sha256=sRLD0haSgEtXA4TphGkVv2p2qQxTyeyVlmpzZSq0Ygg,12297 +scipy/constants/__pycache__/__init__.cpython-310.pyc,, +scipy/constants/__pycache__/_codata.cpython-310.pyc,, +scipy/constants/__pycache__/_constants.cpython-310.pyc,, +scipy/constants/__pycache__/codata.cpython-310.pyc,, +scipy/constants/__pycache__/constants.cpython-310.pyc,, +scipy/constants/_codata.py,sha256=7zEUms7rnmSfBppsh38PNx6p2c-k0SVCcVpea3QZK5k,155898 +scipy/constants/_constants.py,sha256=V9mm4Dp35Vun9RjG_DGoVywCvE2N2mWk8XfeamBVgjM,10275 +scipy/constants/codata.py,sha256=F87N9rObCx8B3y_wcoPEzFWGhZmdXJ6B0Nll7IUEfv8,1015 +scipy/constants/constants.py,sha256=k8IODtGkknZ44clDFEihVparvjJFwEDG454V2of4BpQ,2477 +scipy/constants/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/constants/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc,, +scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc,, +scipy/constants/tests/test_codata.py,sha256=ToO_lhQOsusJlP3QjrYqa1vw7x6wTCuKH17fg87tH08,1959 +scipy/constants/tests/test_constants.py,sha256=PY1oy6bbM2zoPAPgUeBqVThnVRuu4lBt_uMmxm7Ct38,1632 +scipy/datasets/__init__.py,sha256=lO6WMYM5CbayWGLjzgcJdZoxQHUYijYbfzyHxo9Bbt0,2816 +scipy/datasets/__pycache__/__init__.cpython-310.pyc,, +scipy/datasets/__pycache__/_download_all.cpython-310.pyc,, +scipy/datasets/__pycache__/_fetchers.cpython-310.pyc,, +scipy/datasets/__pycache__/_registry.cpython-310.pyc,, +scipy/datasets/__pycache__/_utils.cpython-310.pyc,, +scipy/datasets/_download_all.py,sha256=iRPR2IUk6C3B5u2q77yOhac449MRSoRaTlCy2oCIknE,1701 +scipy/datasets/_fetchers.py,sha256=Ef8RxSZkB0KIjmF-wFoW_QX8wbXHAgOzSAp1zFgE2QU,6759 +scipy/datasets/_registry.py,sha256=br0KfyalEbh5yrQLznQ_QvBtmN4rMsm0UxOjnsJp4OQ,1072 
+scipy/datasets/_utils.py,sha256=0uGnuXK3KyLzUV4cGL76mGqQzrtEpSP8NAQxWuaw4cU,2914 +scipy/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc,, +scipy/datasets/tests/test_data.py,sha256=GelFTF2yZqiiQkgTv8ukv8sKTJBdmpsyK5fr0G6z7Ls,4064 +scipy/fft/__init__.py,sha256=aihIkaW0Nr76Ct84OInhv-8AjbV8Z9ah44KiDEYEFSM,3567 +scipy/fft/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/__pycache__/_backend.cpython-310.pyc,, +scipy/fft/__pycache__/_basic.cpython-310.pyc,, +scipy/fft/__pycache__/_debug_backends.cpython-310.pyc,, +scipy/fft/__pycache__/_fftlog.cpython-310.pyc,, +scipy/fft/__pycache__/_fftlog_multimethods.cpython-310.pyc,, +scipy/fft/__pycache__/_helper.cpython-310.pyc,, +scipy/fft/__pycache__/_realtransforms.cpython-310.pyc,, +scipy/fft/_backend.py,sha256=7a3Gx0WLclcFFSeQA1RdARuC8QJ4oLpI3V9pqJvGQv4,6396 +scipy/fft/_basic.py,sha256=KXnf-LBgrD0XToSowdJ64Uvg5f0jyoinJV3-UwuklqA,62991 +scipy/fft/_debug_backends.py,sha256=RlvyunZNqaDDsI3-I6QH6GSBz_faT6EN4OONWsvMtR8,598 +scipy/fft/_fftlog.py,sha256=WVibBtg6iin0R-hBahHS-VnUxTlNZj3KTyL510D2MmE,11879 +scipy/fft/_fftlog_multimethods.py,sha256=wFwqCnjY_DH6_XVm6cQ4pIlu0LpCp76c5GeXWvOvTH0,575 +scipy/fft/_helper.py,sha256=L1WGWDlBmD0gTBpfyGWag7StqYI0KcQ5LMuNbVVDV3c,3416 +scipy/fft/_pocketfft/LICENSE.md,sha256=wlSytf0wrjyJ02ugYXMFY7l2D8oE8bdGobLDFX2ix4k,1498 +scipy/fft/_pocketfft/__init__.py,sha256=dROVDi9kRvkbSdynd3L09tp9_exzQ4QqG3xnNx78JeU,207 +scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc,, +scipy/fft/_pocketfft/basic.py,sha256=Wn-qvc2r1lfrU1df-rBVjhq1m0kGssOGCQWrGB2etG0,9845 +scipy/fft/_pocketfft/helper.py,sha256=CWvGbhM_ZtVz1U1y1U0-kY16Mya4lesliJFfik5jCC8,5725 +scipy/fft/_pocketfft/pypocketfft.cpython-310-darwin.so,sha256=Uh-CUyqObhE3X9nVVNOSLa7SB8Ta7Zz4M5-alfUX_9w,788350 +scipy/fft/_pocketfft/realtransforms.py,sha256=zv9ABJnBOIthx-5-eXjve3SsR3i2TgMSx_IEaiBjNxQ,3379 +scipy/fft/_pocketfft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/test_basic.py,sha256=zwwMq-1QTwIhQXC8AqKc1bO7lNaVddGc58Dd7RPalwo,35706 +scipy/fft/_pocketfft/tests/test_real_transforms.py,sha256=a4uH-yIEXgOzNeI7NSTnB6gCSxywBxJQ0M3ojQ1xl7c,16426 +scipy/fft/_realtransforms.py,sha256=y4PJZkRhuwnJTy4-J2U7SP-Soj69dtnYXSK073Ur06Y,25280 +scipy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fft/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_fft_function.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_numpy.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fft/tests/mock_backend.py,sha256=00ZsBjrauFbGgMKB9-vh-CBvJPLsRFPVDp015PbiWjk,1769 
+scipy/fft/tests/test_backend.py,sha256=29ZzhDK9ySCXfqgazIgBfMtp1fUpQXl0xTS0IE-ccoc,4256 +scipy/fft/tests/test_fft_function.py,sha256=ZVK0wunPrwE-LkgQOxp3B4sgqcD6aLmyWcpytKvDBWE,1048 +scipy/fft/tests/test_fftlog.py,sha256=gcPRfbarV_rijIIbcU_oQuY2Y1J7s6CIShZqK8rxvQk,5819 +scipy/fft/tests/test_helper.py,sha256=STdMQCUMckqAcIIW6T1Wv2th3M8nfBvTKkPWbEfWQCE,9807 +scipy/fft/tests/test_multithreading.py,sha256=Ub0qD3_iSApPT9E71i0dvKnsKrctLiwMq95y3370POE,2132 +scipy/fft/tests/test_numpy.py,sha256=kI1Y5jjZdLXHhklOFHDtDN2FGq4xKmbl5e-lceK5Zhw,14432 +scipy/fft/tests/test_real_transforms.py,sha256=0zZDdJ0xVI1d7MwZEKg6iaoM0vc0Zm4kG_BpFlGqLbI,7592 +scipy/fftpack/__init__.py,sha256=QuxHBvLU1MJt2nMuZ7n9AuXoHVxhseHaOmxfb2G5JFU,3200 +scipy/fftpack/__pycache__/__init__.cpython-310.pyc,, +scipy/fftpack/__pycache__/_basic.cpython-310.pyc,, +scipy/fftpack/__pycache__/_helper.cpython-310.pyc,, +scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc,, +scipy/fftpack/__pycache__/basic.cpython-310.pyc,, +scipy/fftpack/__pycache__/helper.cpython-310.pyc,, +scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc,, +scipy/fftpack/_basic.py,sha256=Sk_gfswmWKb3za6wrU_mIrRVBl69qjzAu9ltznbDCKs,13098 +scipy/fftpack/_helper.py,sha256=6oIZ6ErA0Bt61s460_WjQfwmpENR0NnjNmPlO3ImhXo,3354 +scipy/fftpack/_pseudo_diffs.py,sha256=eCln0ZImNYr-wUWpOZ-SmKKIbhJsV8VBLmwT_C79RsQ,14200 +scipy/fftpack/_realtransforms.py,sha256=ledb21L13ofGnOU4pkx8uWuARCxsh3IFQrHctxTgzzw,19214 +scipy/fftpack/basic.py,sha256=DMX__JJaJK_FEPw5LhxVaiwqM8ive616PGZ1uzXBLNM,790 +scipy/fftpack/convolve.cpython-310-darwin.so,sha256=EWQFwh3d4aEix7IvqzqSyaWFUaafTqfR9DVpvYNyzww,208555 +scipy/fftpack/helper.py,sha256=RWzRMKNW8K5M2jHGRwWB7CtvYVEoWdP63LISGcGgMaI,795 +scipy/fftpack/pseudo_diffs.py,sha256=gWafKeFKkbnvaxQAtgj7Vzj_q60xwLR3ghZn3ttO3wU,901 +scipy/fftpack/realtransforms.py,sha256=79A6XfPab3kR0KN4XfkDrTzTZH41LQmW4AcMYYTnpyY,826 +scipy/fftpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fftpack/tests/fftw_double_ref.npz,sha256=pgxklBW2RSI5JNg0LMxcCXgByGkBKHo2nlP8kln17E4,162120 +scipy/fftpack/tests/fftw_longdouble_ref.npz,sha256=pAbL1NrQTQxZ3Tj1RBb7SUJMgiKcGgdLakTsDN4gAOM,296072 +scipy/fftpack/tests/fftw_single_ref.npz,sha256=J2qRQTGOb8NuSrb_VKYbZAVO-ISbZg8XNZ5fVBtDxSY,95144 +scipy/fftpack/tests/test.npz,sha256=Nt6ASiLY_eoFRZDOSd3zyFmDi32JGTxWs7y2YMv0N5c,11968 +scipy/fftpack/tests/test_basic.py,sha256=3dFa77VxVp-DeN6y1ofmfPfqsG6Qdx3INu4PHu-lBQ4,30373 +scipy/fftpack/tests/test_helper.py,sha256=8JaPSJOwsk5XXOf1zFahJ_ktUTfNGSk2-k3R6e420XI,1675 +scipy/fftpack/tests/test_import.py,sha256=X_rM3ncYPlL78G_xvHyYU48IBhy7YuG6mU4-uoo3978,1129 +scipy/fftpack/tests/test_pseudo_diffs.py,sha256=SEVPHPDdSxDSUCC8qkwuKD7mIX8rFIx9puxGzBYd1uk,13389 +scipy/fftpack/tests/test_real_transforms.py,sha256=YoN1b8ZhPbJTK0ww6U8ZxWXL52a1-HqiY45fN9LvQgI,23941 +scipy/integrate/__init__.py,sha256=83lQ1LAVQqQGGcVn-u5BecQN99D3pyRwpsg9BvGT_W4,4003 +scipy/integrate/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/__pycache__/_bvp.cpython-310.pyc,, 
+scipy/integrate/__pycache__/_ode.cpython-310.pyc,, +scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc,, +scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc,, +scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc,, +scipy/integrate/__pycache__/_quadrature.cpython-310.pyc,, +scipy/integrate/__pycache__/dop.cpython-310.pyc,, +scipy/integrate/__pycache__/lsoda.cpython-310.pyc,, +scipy/integrate/__pycache__/odepack.cpython-310.pyc,, +scipy/integrate/__pycache__/quadpack.cpython-310.pyc,, +scipy/integrate/__pycache__/vode.cpython-310.pyc,, +scipy/integrate/_bvp.py,sha256=fOUu518B3bcRpitEQWNM2DfvMvszELAbJt2n8wQD5HE,41083 +scipy/integrate/_dop.cpython-310-darwin.so,sha256=nfDz8wpiHtTEaucHgBR93AzjTNR1ZCg8P6rEGoQYlKM,126000 +scipy/integrate/_ivp/__init__.py,sha256=gKFR_pPjr8fRLgAGY5sOzYKGUFu2nGX8x1RrXT-GZZc,256 +scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc,, +scipy/integrate/_ivp/base.py,sha256=lLM1Oj1nuUUY3m_YDZkecSjAY9ovcFCG9N14lkLzkys,9550 +scipy/integrate/_ivp/bdf.py,sha256=niJPOk99OdWyeqWnuzhW4dV_eo5wcBXrzXhY7aIK_SQ,17161 +scipy/integrate/_ivp/common.py,sha256=xFX5sqbjrNEvv7bjcOVknh48K1TeJ8aOqjvkZPoiXmo,14780 +scipy/integrate/_ivp/dop853_coefficients.py,sha256=OrYvW0Hu6X7sOh37FU58gNkgC77KVpYclewv_ARGMAE,7237 +scipy/integrate/_ivp/ivp.py,sha256=_CvCn29i1deNn-LNYftbJpZ-YMXNV9a92luZYlFvGVM,28282 +scipy/integrate/_ivp/lsoda.py,sha256=dIVlRXlOcNSvYCXKbT9YYS47cTe3i3QPHww38Y3bTYk,8298 +scipy/integrate/_ivp/radau.py,sha256=flnYZBLAW0-ZRdZTdhz0oZwj6wpqlU8L5Qwd8GEVPew,19383 +scipy/integrate/_ivp/rk.py,sha256=SlUEo9QM5WN205QAT1CAGUJlyhSFWxBq3aPaQX2wcCs,22244 +scipy/integrate/_ivp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc,, +scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc,, +scipy/integrate/_ivp/tests/test_ivp.py,sha256=amcGZPZT8rWRTU8zvhbtfyX1BqjWcT4Bo-u1h__Suk4,34762 +scipy/integrate/_ivp/tests/test_rk.py,sha256=K9UxZghBzSL2BzmgLndPJcWOWV4Nr530TGKWakpsoeM,1326 +scipy/integrate/_lsoda.cpython-310-darwin.so,sha256=1cdq1L_l6rpmblDfSFEYYrWvdBJEJHK70akSnT2taJ0,127120 +scipy/integrate/_ode.py,sha256=bMLLW6Z2T_-kKpzDyZeoQvwcjhbyDStwvLNWn4kYIHY,47945 +scipy/integrate/_odepack.cpython-310-darwin.so,sha256=BB9F9idSg_MZw8DwJUYlnpqo6XE3FRBreqe1rFbi-xU,105808 +scipy/integrate/_odepack_py.py,sha256=aTlpXCI0qvXG0Z8ibHqiWyHTo2Q16RN8MyolfiMIVB8,10769 +scipy/integrate/_quad_vec.py,sha256=1Gu10Jyj6BKODMVlq5fQGKuJl-pJgdKGOTlAzo4nhnQ,21194 +scipy/integrate/_quadpack.cpython-310-darwin.so,sha256=GVmzTMOxVcpw4rQL824E1105M6s7Dp6YSQehQVQNN6Y,123632 +scipy/integrate/_quadpack_py.py,sha256=UB42Y4nPnIObv_nwznM3VT3Cl12S3UkMU0XjZfEX4Gg,52346 +scipy/integrate/_quadrature.py,sha256=lE3Lvo6sglaI41DzhQaT3K8dpGmdYm7rhTXRQKBK0BU,45913 +scipy/integrate/_test_multivariate.cpython-310-darwin.so,sha256=kc01VVc7Y9Uq8XkZYKmaP4-b8JRm59tsBSzqPG0iGiw,50693 +scipy/integrate/_test_odeint_banded.cpython-310-darwin.so,sha256=CxcILmKWbeQk0ARC0NxSeoUvjD_PiUQbHBJdkqgdNIg,127088 
+scipy/integrate/_vode.cpython-310-darwin.so,sha256=jQ7EmKKxFjRhhIwRQltdufo3-lEfWk64VFT8kpCpQGQ,161680 +scipy/integrate/dop.py,sha256=yx0rG-U_s77y6_cRKkuIo27IFepKhow6VnXQmYHq6vk,622 +scipy/integrate/lsoda.py,sha256=I4nTMQz101vjwrDVjO1eR7mZjwP7CJW1P5aA_Qo3394,610 +scipy/integrate/odepack.py,sha256=bGHp-nnd-dVQHYxy_PogCiY8CODz6pok9adiUtgq7zI,771 +scipy/integrate/quadpack.py,sha256=fy0Vz51sZkG5Cjdp_EXGEDfHlFGjLdOqz8EtnXdMwSY,845 +scipy/integrate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc,, +scipy/integrate/tests/test__quad_vec.py,sha256=HzFkvaykp7RSen7XMBP3NKL9qMfl9FkSuGjG4T-va6M,6219 +scipy/integrate/tests/test_banded_ode_solvers.py,sha256=kJWirYckJ7k4tfweg1ds-Tozp3GEhxTbuXfgSdeJw7k,6687 +scipy/integrate/tests/test_bvp.py,sha256=xWrQWFzmrpt4sq9XOFrquxh4yp57ydJMJoQ7ST5qgSU,20159 +scipy/integrate/tests/test_integrate.py,sha256=y2kHglNbcpvE6oRdGCi9UjLMV-uz5OqzEGztuO3bMVY,24335 +scipy/integrate/tests/test_odeint_jac.py,sha256=VW63bDRP3uOg6uzm-3787qJl-UQ5Wsht3Ttc6YRybnE,1820 +scipy/integrate/tests/test_quadpack.py,sha256=8MRf70VLUdg7m5Y9G45m7Aq614ondwOjs4ZNvtFWzAA,27946 +scipy/integrate/tests/test_quadrature.py,sha256=NBf9Ok9wim8HOSF487sGkBNvUHsNFnAsT6gzCYe884Y,15317 +scipy/integrate/vode.py,sha256=xv-9AX3Yh1T0w-YoIPRrpQwavFTnoak81AsWiH_HsGA,625 +scipy/interpolate/__init__.py,sha256=zdsw-3YouJ38EFdizaPWGjsaq_G0nWRd2i5vVvHqNZs,3483 +scipy/interpolate/__pycache__/__init__.cpython-310.pyc,, +scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc,, +scipy/interpolate/__pycache__/_cubic.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc,, +scipy/interpolate/__pycache__/_interpnd_info.cpython-310.pyc,, +scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc,, +scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc,, +scipy/interpolate/__pycache__/_pade.cpython-310.pyc,, +scipy/interpolate/__pycache__/_polyint.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rbf.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rgi.cpython-310.pyc,, +scipy/interpolate/__pycache__/fitpack.cpython-310.pyc,, +scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc,, +scipy/interpolate/__pycache__/interpolate.cpython-310.pyc,, +scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc,, +scipy/interpolate/__pycache__/polyint.cpython-310.pyc,, +scipy/interpolate/__pycache__/rbf.cpython-310.pyc,, +scipy/interpolate/_bspl.cpython-310-darwin.so,sha256=hUckd5kz90BSl2vEONBiPuz48os5KuaBBCpuulc_egE,290888 +scipy/interpolate/_bsplines.py,sha256=5Bbuvj9ICltuswGfT-SnfIJdItJk0HSZKilFYbU09qo,69165 +scipy/interpolate/_cubic.py,sha256=VK-OgwTY_qnhP2tXkj3CrusiiZrhbTGME5VJqdQOIRA,33771 +scipy/interpolate/_fitpack.cpython-310-darwin.so,sha256=ENTXD-AhaphLEMgxkA3Bo3M_g1V6JpaT82sz5l3P--Y,138576 
+scipy/interpolate/_fitpack2.py,sha256=ke_ZIEQHfCxk8TVO-btWA8XDH1AYfzvCQGSM80xPnF0,81559 +scipy/interpolate/_fitpack_impl.py,sha256=qupBfm0JYWvQ5-A3d68tUkeRzOD4McOFjvVt2Pd8BVI,46808 +scipy/interpolate/_fitpack_py.py,sha256=qAPP7AIvJMdo3mqbL_BoJIpp5cbj6WJihqSixvPrX_Y,27540 +scipy/interpolate/_interpnd_info.py,sha256=B0E0S3ozMrYkGSJ_XTX_Qj6U9vle0U59i8dlqpTCd4g,869 +scipy/interpolate/_interpolate.py,sha256=clz2h9p7TU3FtkMgyK1mAX7bvqzz4Fe2o6GaVExDow4,87752 +scipy/interpolate/_ndgriddata.py,sha256=v8yn1kC3OkZf7hWij6lclUhAjSJ6UjS4T02Zrpxt88I,9087 +scipy/interpolate/_pade.py,sha256=OBorKWc3vCSGlsWrajoF1_7WeNd9QtdbX0wOHLdRI2A,1827 +scipy/interpolate/_polyint.py,sha256=cd1FXF4VGMe-vabBZ_PigaFrId8yRTrdQYf5vKuj2zY,25992 +scipy/interpolate/_ppoly.cpython-310-darwin.so,sha256=M5sGSDwlNNpr09AJGI2qihqHApWoPVLmUPMwrWEQiks,331241 +scipy/interpolate/_rbf.py,sha256=Ck3PXK7IK6Ac7kx5qY-DP64gLFKxVxjisW8y4PClH_I,11663 +scipy/interpolate/_rbfinterp.py,sha256=xcSiwrp4i46ZiRejBcfJYwFM45ZZRVnELYuRJwE-imk,19380 +scipy/interpolate/_rbfinterp_pythran.cpython-310-darwin.so,sha256=WqvZeJWnaWjqB3p_CG6Aa5pf2VzCadNgxl3-5WnyHPw,272789 +scipy/interpolate/_rgi.py,sha256=NaypNbXpDoD6EwioYCqzCYkTvaXWR0_nr-1wlNL9gGI,26925 +scipy/interpolate/_rgi_cython.cpython-310-darwin.so,sha256=vG0sXeIiOkjUFk1xTVz56lBLD9u342hgavmxgx_YT-s,228894 +scipy/interpolate/dfitpack.cpython-310-darwin.so,sha256=DwkYNNXdXH-KB0EompfFxjzA3sHoeTQdFTCLoLfNWiM,279264 +scipy/interpolate/fitpack.py,sha256=w__c8vjFPORQpgpmWIi6MN_PpsJGBeDPYxoxpkUOdRQ,948 +scipy/interpolate/fitpack2.py,sha256=ivHIjzk8VqI33aDkUo58-pl5eOmDrhInm8CMhgd0lJs,1195 +scipy/interpolate/interpnd.cpython-310-darwin.so,sha256=hucI-BTy1c3J2_R1pzxCLYXX6xZ4bTzmpK9vbQ_rJwc,340731 +scipy/interpolate/interpolate.py,sha256=iz2Yifiy51N7r3tgsAdiSt1swa7C4kQOkbZWPBp_9GM,1180 +scipy/interpolate/ndgriddata.py,sha256=AXW0AnerFGis7MyHWVvYBrnde7g5rBg3FeYV_NY-Xb0,912 +scipy/interpolate/polyint.py,sha256=24_OrluWJYXC0hIuLf6O7h3B0Te364bTPhqKXsV5N3M,941 +scipy/interpolate/rbf.py,sha256=X_dHEfyyCI_XSRmK1d1vnkkWwPlbx7kSHhORv9WByPk,818 +scipy/interpolate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc,, +scipy/interpolate/tests/data/bug-1310.npz,sha256=jWgDwLOY8nBMI28dG56OXt4GvRZaCrsPIoKBq71FWuk,2648 +scipy/interpolate/tests/data/estimate_gradients_hang.npy,sha256=QGwQhXQX_16pjYzSiUXJ0OT1wk-SpIrQ6Pq5Vb8kd_E,35680 +scipy/interpolate/tests/data/gcvspl.npz,sha256=A86BVabLoMG_CiRBoQwigZH5Ft7DbLggcjQpgRKWu6g,3138 +scipy/interpolate/tests/test_bsplines.py,sha256=E9OaU0hxyAx73hGrBxX2Z33csh13dCCrFNgV8xnBq8E,59651 +scipy/interpolate/tests/test_fitpack.py,sha256=HV5vP86olfXEgl0uDAKsk-2vB_cTAcVnNMgt6M360Nw,14533 
+scipy/interpolate/tests/test_fitpack2.py,sha256=JrAx37unJ8nhICmovLeSX7fGonVKB390WVkjcTBS44E,58477 +scipy/interpolate/tests/test_gil.py,sha256=wt92CaxUlVgRGB-Wl2EuQxveqdARU8rZucD9IKl-pUE,1874 +scipy/interpolate/tests/test_interpnd.py,sha256=RVc-0onUWODNoc_S3_21pAGDF5U0COyEXZqfLhGwPEw,13627 +scipy/interpolate/tests/test_interpolate.py,sha256=YsknISeWB2pd648RFCiNgwMw3JSH95Yy-zOS3H4ERnE,95734 +scipy/interpolate/tests/test_ndgriddata.py,sha256=lBMoAFWMMZcKLkZlvT2g4laNH3KsWIGpB3TQOaOvMjE,9445 +scipy/interpolate/tests/test_pade.py,sha256=x5VyACjEgqIsz5e5vIOoCaIVb-ToZsFw6baxLQjRFZQ,3786 +scipy/interpolate/tests/test_polyint.py,sha256=8HuqeZIBzmHxmcNUwMJdFYZn73GsLHsLnOKDDTYqSzU,30292 +scipy/interpolate/tests/test_rbf.py,sha256=d0RoNqSChlizopvy5x6vQxhiGkc-LekNbnCYFIN_Z1g,6547 +scipy/interpolate/tests/test_rbfinterp.py,sha256=5gnAZ2c8OFiT5AV_B160XzXNmVwNoMhoGX_iDt0OQLQ,18127 +scipy/interpolate/tests/test_rgi.py,sha256=wxVGOX8sVtpg2CS1wRuV7DL333U4Mj0VV6ncVwGBCkY,41234 +scipy/io/__init__.py,sha256=71Eyk9AluJQpbnqeJq1YWU_43nTUU4AW2lT6p_JPTw4,2746 +scipy/io/__pycache__/__init__.cpython-310.pyc,, +scipy/io/__pycache__/_fortran.cpython-310.pyc,, +scipy/io/__pycache__/_idl.cpython-310.pyc,, +scipy/io/__pycache__/_mmio.cpython-310.pyc,, +scipy/io/__pycache__/_netcdf.cpython-310.pyc,, +scipy/io/__pycache__/harwell_boeing.cpython-310.pyc,, +scipy/io/__pycache__/idl.cpython-310.pyc,, +scipy/io/__pycache__/mmio.cpython-310.pyc,, +scipy/io/__pycache__/netcdf.cpython-310.pyc,, +scipy/io/__pycache__/wavfile.cpython-310.pyc,, +scipy/io/_fortran.py,sha256=3Pa-LQ2iDECm1ADv_QtjhmMZwgR_WzUEmZdd_sM9lao,10903 +scipy/io/_harwell_boeing/__init__.py,sha256=2iVxlj6ZquU8_XPA37npOdeHCXe8XbQrmMZO7k6Bzxs,574 +scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc,, +scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc,, +scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc,, +scipy/io/_harwell_boeing/_fortran_format_parser.py,sha256=8F5psqkhiR1M4JzOWOGH1PoQAsZUjbctTlibFpXMAFA,8922 +scipy/io/_harwell_boeing/hb.py,sha256=Y1NLBc2yoADFy28_Vx2SzI1fyhoUF2sq84RFbwB6jUw,19167 +scipy/io/_harwell_boeing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/test_fortran_format.py,sha256=0LxOjUewBj1Fwf7EOxMWZG_PdzMbVrFYMUeGgs23VII,2360 +scipy/io/_harwell_boeing/tests/test_hb.py,sha256=3eLwxTSg_Ebt2pjBLvZhpq8WUMjkFhM1lsTu_mgvDTI,2284 +scipy/io/_idl.py,sha256=cTTwTYp-ukhGKr9vQZaohOAfNSEmvl2bKvfsxDCKUzM,26930 +scipy/io/_mmio.py,sha256=yFRIpdGC8lyRf2ZMvGPUX_scTpiUZDyok8dPr8CX3Qw,33165 +scipy/io/_netcdf.py,sha256=4j56RRusPvC3TAx4gKj927ab3LqWmCLcRk0aAWX3LxM,39085 +scipy/io/_test_fortran.cpython-310-darwin.so,sha256=8f_SU_Cs25WOT2TomV2Yre4TDoE5jwD0JKKteaw0xls,91856 +scipy/io/arff/__init__.py,sha256=czaV8hvY6JnmEn2qyU3_fzcy_P55aXVT09OzGnhJT9I,805 +scipy/io/arff/__pycache__/__init__.cpython-310.pyc,, +scipy/io/arff/__pycache__/_arffread.cpython-310.pyc,, +scipy/io/arff/__pycache__/arffread.cpython-310.pyc,, +scipy/io/arff/_arffread.py,sha256=eLnxbFfejH5pmbWQCo8aXExNL26F4jYzHk3X2_E2ynU,26373 +scipy/io/arff/arffread.py,sha256=2_W-Wt0drknNg734xtup-U1AeuqGMYKQUzCE3I3CW0k,1364 +scipy/io/arff/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc,, 
+scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc,, +scipy/io/arff/tests/data/iris.arff,sha256=fTS6VWSX6dwoM16mYoo30dvLoJChriDcLenHAy0ZkVM,7486 +scipy/io/arff/tests/data/missing.arff,sha256=ga__Te95i1Yf-yu2kmYDBVTz0xpSTemz7jS74_OfI4I,120 +scipy/io/arff/tests/data/nodata.arff,sha256=DBXdnIe28vrbf4C-ar7ZgeFIa0kGD4pDBJ4YP-z4QHQ,229 +scipy/io/arff/tests/data/quoted_nominal.arff,sha256=01mPSc-_OpcjXFy3EoIzKdHCmzWSag4oK1Ek2tUc6_U,286 +scipy/io/arff/tests/data/quoted_nominal_spaces.arff,sha256=bcMOl-E0I5uTT27E7bDTbW2mYOp9jS8Yrj0NfFjQdKU,292 +scipy/io/arff/tests/data/test1.arff,sha256=nUFDXUbV3sIkur55rL4qvvBdqUTbzSRrTiIPwmtmG8I,191 +scipy/io/arff/tests/data/test10.arff,sha256=va7cXiWX_AnHf-_yz25ychD8hOgf7-sEMJITGwQla30,199009 +scipy/io/arff/tests/data/test11.arff,sha256=G-cbOUUxuc3859vVkRDNjcLRSnUu8-T-Y8n0dSpvweo,241 +scipy/io/arff/tests/data/test2.arff,sha256=COGWCYV9peOGLqlYWhqG4ANT2UqlAtoVehbJLW6fxHw,300 +scipy/io/arff/tests/data/test3.arff,sha256=jUTWGaZbzoeGBneCmKu6V6RwsRPp9_0sJaSCdBg6tyI,72 +scipy/io/arff/tests/data/test4.arff,sha256=mtyuSFKUeiRR2o3mNlwvDCxWq4DsHEBHj_8IthNzp-M,238 +scipy/io/arff/tests/data/test5.arff,sha256=2Q_prOBCfM_ggsGRavlOaJ_qnWPFf2akFXJFz0NtTIE,365 +scipy/io/arff/tests/data/test6.arff,sha256=V8FNv-WUdurutFXKTOq8DADtNDrzfW65gyOlv-lquOU,195 +scipy/io/arff/tests/data/test7.arff,sha256=rxsqdev8WeqC_nKJNwetjVYXA1-qCzWmaHlMvSaVRGk,559 +scipy/io/arff/tests/data/test8.arff,sha256=c34srlkU8hkXYpdKXVozEutiPryR8bf_5qEmiGQBoG4,429 +scipy/io/arff/tests/data/test9.arff,sha256=ZuXQQzprgmTXxENW7we3wBJTpByBlpakrvRgG8n7fUk,311 +scipy/io/arff/tests/test_arffread.py,sha256=RaecyT864asNEteY0kiRZ2FqA_LcCMBL4zXh6htC0t8,13098 +scipy/io/harwell_boeing.py,sha256=Wdd3nB8n1bxmvbjYBUBa1_ZmWbsPaIy3AJBZt2JJQmQ,898 +scipy/io/idl.py,sha256=YhznpLgDnxrm9bwG7PP8zb6volg9oofEXYBTL86X7E0,794 +scipy/io/matlab/__init__.py,sha256=uBmpYjqjkLRikI2im0mk6SOa13aAuQpSqwHY79RsoSE,2022 +scipy/io/matlab/__pycache__/__init__.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc,, +scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio4.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc,, +scipy/io/matlab/__pycache__/miobase.cpython-310.pyc,, +scipy/io/matlab/__pycache__/streams.cpython-310.pyc,, +scipy/io/matlab/_byteordercodes.py,sha256=8RmsRKpJt_48P6bWbQw4HmwwP6g6uZhmWM3DX_dIAok,1902 +scipy/io/matlab/_mio.py,sha256=Rr89q5cFlKwH4svZy_VzHXFZ48PlXjSzcP9TpAr0MA0,12799 +scipy/io/matlab/_mio4.py,sha256=9gZ9pV_Esuh63jDaFc1w7cH0zu6_V6ee24YKPJa3ryk,20612 +scipy/io/matlab/_mio5.py,sha256=zCXqiLNVRNFHNIsKDsOkGj2RS_0EQBSlOpuXgswT0jg,33426 +scipy/io/matlab/_mio5_params.py,sha256=2NBQ0IEVRQS5GQ7_AoKY3Dl_CqzaA3kltnw8-_D1tXU,8199 +scipy/io/matlab/_mio5_utils.cpython-310-darwin.so,sha256=j3HGVZYyWSExXgNbkqZGvEbhv4u2VzbZuCd52kEMrbQ,218430 +scipy/io/matlab/_mio_utils.cpython-310-darwin.so,sha256=v4dPYSipx5Xf50x0-4pp9R7uoMdwSlrsxt7YU6OjZS8,77869 +scipy/io/matlab/_miobase.py,sha256=_3woLhBtEIy8ezRSNVK9RA65JWq9TmcxKgCP27A4vvY,12908 
+scipy/io/matlab/_streams.cpython-310-darwin.so,sha256=fmOf7YSU7xqNGRcoJ0ccUwPmcGBVVs2KzMDV62Xdv3c,128075 +scipy/io/matlab/byteordercodes.py,sha256=SjReEJ2PzTMsU5fNeZ2m3i05uX6LiJ_GLsFi-PVKXyE,849 +scipy/io/matlab/mio.py,sha256=HQSGsh4b1F6KoHWV8uEdPIgu2nBjclubz0ZaE5mwup0,894 +scipy/io/matlab/mio4.py,sha256=gsMWD_lpymj7BLAh0dwVHXMervPkdLu_79PZtABjcCM,1201 +scipy/io/matlab/mio5.py,sha256=3dgxKJjhjooruN_ch9UxlAIN1_Re_to8I5v-x_PB7TE,1435 +scipy/io/matlab/mio5_params.py,sha256=g3Jk-weBAqKSwV9IqtB-cf0DkuYrKcxsO4cojGRFwPk,1526 +scipy/io/matlab/mio5_utils.py,sha256=K_ILFiIcD5EispmZtCidJJD69_ygB4OaFI6-fiiJ9oo,899 +scipy/io/matlab/mio_utils.py,sha256=jthSqDwKuvQaNYuKx-02atSoNiQ5PD9uAVzlyWZeRIo,786 +scipy/io/matlab/miobase.py,sha256=l6sTgtB3-CGjin4L_6Xbf-gnCOGFnBvh0yCEG3_U354,988 +scipy/io/matlab/streams.py,sha256=wgX5MSEUPdAhxK0DHw9iQsLzwnHU9GIrxIDm6JdWMGg,809 +scipy/io/matlab/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/matlab/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_byteordercodes.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio5_utils.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio_funcs.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio_utils.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_miobase.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_pathological.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_streams.cpython-310.pyc,, +scipy/io/matlab/tests/data/bad_miuint32.mat,sha256=CVkYHp_U4jxYKRRHSuZ5fREop4tJjnZcQ02DKfObkRA,272 +scipy/io/matlab/tests/data/bad_miutf8_array_name.mat,sha256=V-jfVMkYyy8qRGcOIsNGcoO0GCgTxchrsQUBGBnfWHE,208 +scipy/io/matlab/tests/data/big_endian.mat,sha256=2ttpiaH2B6nmHnq-gsFeMvZ2ZSLOlpzt0IJiqBTcc8M,273 +scipy/io/matlab/tests/data/broken_utf8.mat,sha256=nm8aotRl6NIxlM3IgPegKR3EeevYZoJCrYpV4Sa1T5I,216 +scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat,sha256=X4dvE7K9DmGEF3D6I-48hC86W41jB54H7bD8KTXjtYA,276 +scipy/io/matlab/tests/data/corrupted_zlib_data.mat,sha256=DfE1YBH-pYw-dAaEeKA6wZcyKeo9GlEfrzZtql-fO_w,3451 +scipy/io/matlab/tests/data/japanese_utf8.txt,sha256=rgxiBH7xmEKF91ZkB3oMLrqABBXINEMHPXDKdZXNBEY,270 +scipy/io/matlab/tests/data/little_endian.mat,sha256=FQP_2MNod-FFF-JefN7ZxovQ6QLCdHQ0DPL_qBCP44Y,265 +scipy/io/matlab/tests/data/logical_sparse.mat,sha256=qujUUpYewaNsFKAwGpYS05z7kdUv9TQZTHV5_lWhRrs,208 +scipy/io/matlab/tests/data/malformed1.mat,sha256=DTuTr1-IzpLMBf8u5DPb3HXmw9xJo1aWfayA5S_3zUI,2208 +scipy/io/matlab/tests/data/miuint32_for_miint32.mat,sha256=romrBP_BS46Sl2-pKWsUnxYDad2wehyjq4wwLaVqums,272 +scipy/io/matlab/tests/data/miutf8_array_name.mat,sha256=Vo8JptFr-Kg2f2cEoDg8LtELSjVNyccdJY74WP_kqtc,208 +scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat,sha256=bvdmj6zDDUIpOfIP8J4Klo107RYCDd5VK5gtOYx3GsU,8168 +scipy/io/matlab/tests/data/one_by_zero_char.mat,sha256=Z3QdZjTlOojjUpS0cfBP4XfNQI3GTjqU0n_pnAzgQhU,184 +scipy/io/matlab/tests/data/parabola.mat,sha256=ENWuWX_uwo4Av16dIGOwnbMReAMrShDhalkq8QUI8Rg,729 +scipy/io/matlab/tests/data/single_empty_string.mat,sha256=4uTmX0oydTjmtnhxqi9SyPWCG2I24gj_5LarS80bPik,171 +scipy/io/matlab/tests/data/some_functions.mat,sha256=JA736oG3s8PPdKhdsYK-BndLUsGrJCJAIRBseSIEZtM,1397 +scipy/io/matlab/tests/data/sqr.mat,sha256=3DtGl_V4wABKCDQ0P3He5qfOzpUTC-mINdK73MKS7AM,679 +scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat,sha256=-odiBIQAbOLERg0Vg682QHGfs7C8MaA_gY77OWR8x78,232 
+scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat,sha256=G5siwvZ-7Uv5KJ6h7AA3OHL6eiFsd8Lnjx4IcoByzCU,232 +scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat,sha256=EVj1wPnoyWGIdTpkSj3YAwqzTAm27eqZNxCaJAs3pwU,213 +scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat,sha256=S_Sd3sxorDd8tZ5CxD5_J8vXbfcksLWzhUQY5b82L9g,213 +scipy/io/matlab/tests/data/test_empty_struct.mat,sha256=WoC7g7TyXqNr2T0d5xE3IUq5PRzatE0mxXjqoHX5Xec,173 +scipy/io/matlab/tests/data/test_mat4_le_floats.mat,sha256=2xvn3Cg4039shJl62T-bH-VeVP_bKtwdqvGfIxv8FJ4,38 +scipy/io/matlab/tests/data/test_skip_variable.mat,sha256=pJLVpdrdEb-9SMZxaDu-uryShlIi90l5LfXhvpVipJ0,20225 +scipy/io/matlab/tests/data/testbool_8_WIN64.mat,sha256=_xBw_2oZA7u9Xs6GJItUpSIEV4jVdfdcwzmLNFWM6ow,185 +scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat,sha256=OWOBzNpWTyAHIcZABRytVMcABiRYgEoMyF9gDaIkFe4,536 +scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat,sha256=7111TN_sh1uMHmYx-bjd_v9uaAnWhJMhrQFAtAw6Nvk,536 +scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat,sha256=62p6LRW6PbM-Y16aUeGVhclTVqS5IxPUtsohe7MjrYo,283 +scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat,sha256=NkTA8UW98hIQ0t5hGx_leG-MzNroDelYwqx8MPnO63Q,283 +scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat,sha256=AeNaog8HUDCVrIuGICAXYu9SGDsvV6qeGjgvWHrVQho,568 +scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat,sha256=Gl4QA0yYwGxjiajjgWS939WVAM-W2ahNIm9wwMaT5oc,568 +scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat,sha256=CUGtkwIU9CBa0Slx13mbaM67_ec0p-unZdu8Z4YYM3c,228 +scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat,sha256=TeTk5yjl5j_bcnmIkpzuYHxGGQXNu-rK6xOsN4t6lX8,228 +scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat,sha256=WOwauWInSVUFBuOJ1Bo3spmUQ3UWUIlsIe4tYGlrU7o,176 +scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat,sha256=GpAEccizI8WvlrBPdvlKUv6uKbZOo_cjUK3WVVb2lo4,352 +scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat,sha256=3MEbf0zJdQGAO7x-pzFCup2QptfYJHQG59z0vVOdxl4,352 +scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat,sha256=VNHV2AIEkvPuhae1kKIqt5t8AMgUyr0L_CAp-ykLxt4,247 +scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat,sha256=8rWGf5bqY7_2mcd5w5gTYgMkXVePlLL8qT7lh8kApn0,247 +scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat,sha256=MzT7OYPEUXHYNPBrVkyKEaG5Cas2aOA0xvrO7l4YTrQ,103 +scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat,sha256=DpB-mVKx1gsjl-3IbxfxHNuzU5dnuku-MDQCA8kALVI,272 +scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat,sha256=4hY5VEubavNEv5KvcqQnd7MWWvFUzHXXpYIqUuUt-50,272 +scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat,sha256=N2QOOIXPyy0zPZZ_qY7xIDaodMGrTq3oXNBEHZEscw0,232 +scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat,sha256=TrkJ4Xx_dC9YrPdewlsOvYs_xag7gT3cN4HkDsJmT8I,232 +scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat,sha256=g96Vh9FpNhkiWKsRm4U6KqeKd1hNAEyYSD7IVzdzwsU,472 +scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat,sha256=2Zw-cMv-Mjbs2HkSl0ubmh_htFUEpkn7XVHG8iM32o0,472 +scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat,sha256=t5Ar8EgjZ7fkTUHIVpdXg-yYWo_MBaigMDJUGWEIrmU,218 +scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat,sha256=5PPvfOoL-_Q5ou_2nIzIrHgeaOZGFXGxAFdYzCQuwEQ,218 +scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat,sha256=ScTKftENe78imbMc0I5ouBlIMcEEmZgu8HVKWAMNr58,381 +scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat,sha256=ZoVbGk38_MCppZ0LRr6OE07HL8ZB4rHXgMj9LwUBgGg,4168 +scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat,sha256=14YMiKAN9JCPTqSDXxa58BK6Un7EM4hEoSGAUuwKWGQ,151 
+scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat,sha256=ZdjNbcIE75V5Aht5EVBvJX26aabvNqbUH0Q9VBnxBS4,216 +scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat,sha256=OB82QgB6SwtsxT4t453OVSj-B777XrHGEGOMgMD1XGc,216 +scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat,sha256=-TYB0kREY7i7gt5x15fOYjXi410pXuDWUFxPYuMwywI,193 +scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat,sha256=l9psDc5K1bpxNeuFlyYIYauswLnOB6dTX6-jvelW0kU,193 +scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat,sha256=2914WYQajPc9-Guy3jDOLU3YkuE4OXC_63FUSDzJzX0,38 +scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat,sha256=2X2fZKomz0ktBvibj7jvHbEvt2HRA8D6hN9qA1IDicw,200 +scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat,sha256=i364SgUCLSYRjQsyygvY1ArjEaO5uLip3HyU-R7zaLo,200 +scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat,sha256=gtYNC9_TciYdq8X9IwyGEjiw2f1uCVTGgiOPFOiQbJc,184 +scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat,sha256=eXcoTM8vKuh4tQnl92lwdDaqssGB6G9boSHh3FOCkng,184 +scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat,sha256=Zhyu2KCsseSJ5NARdS00uwddCs4wmjcWNP2LJFns2-Q,240 +scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat,sha256=KI3H58BVj6k6MFsj8icSbjy_0Z-jOesWN5cafStLPG8,276 +scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat,sha256=Yr4YKCP27yMWlK5UOK3BAEOAyMr-m0yYGcj8v1tCx-I,276 +scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat,sha256=kzLxy_1o1HclPXWyA-SX5gl6LsG1ioHuN4eS6x5iZio,800 +scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat,sha256=dq_6_n0v7cUz9YziXn-gZFNc9xYtNxZ8exTsziWIM7s,672 +scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat,sha256=3z-boFw0SC5142YPOLo2JqdusPItVzjCFMhXAQNaQUQ,306 +scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat,sha256=5OwLTMgCBlxsDfiEUzlVjqcSbVQG-X5mIw5JfW3wQXA,306 +scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat,sha256=BCvppGhO19-j-vxAvbdsORIiyuJqzCuQog9Ao8V1lvA,40 +scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat,sha256=ThppTHGJFrUfal5tewS70DL00dSwk1otazuVdJrTioE,200 +scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat,sha256=SBfN6e7Vz1rAdi8HLguYXcHUHk1viaXTYccdEyhhob4,200 +scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat,sha256=m8W9GqvflfAsizkhgAfT0lLcxuegZIWCLNuHVX69Jac,184 +scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat,sha256=t9ObKZOLy3vufnER8TlvQcUkd_wmXbJSdQoG4f3rVKY,184 +scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat,sha256=5LX9sLH7Y6h_N_a1XRN2GuMgp_P7ECpPsXGDOypAJg0,194 +scipy/io/matlab/tests/data/testsimplecell.mat,sha256=Aoeh0PX2yiLDTwkxMEyZ_CNX2mJHZvyfuFJl817pA1c,220 +scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat,sha256=dFUcB1gunfWqexgR4YDZ_Ec0w0HffM1DUE1C5PVfDDc,223 +scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat,sha256=9Sgd_SPkGNim7ZL0xgD71qml3DK0yDHYC7VSNLNQEXA,280 +scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat,sha256=jp1ILNxLyV6XmCCGxAz529XoZ9dhCqGEO-ExPH70_Pg,328 +scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat,sha256=k8QuQ_4Zu7FWTzHjRnHCVZ9Yu5vwNP0WyNzu6TuiY-4,229 +scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat,sha256=QbZOCqIvnaK0XOH3kaSXBe-m_1_Rb33psq8E-WMSBTU,229 +scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat,sha256=QMVoBXVyl9RBGvAjLoiW85kAXYJ-hHprUMegEG69A5w,294 +scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat,sha256=WfEroAT5YF4HGAKq3jTJxlFrKaTCh3rwlSlKu__VjwA,304 +scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat,sha256=e0s6cyoKJeYMArdceHpnKDvtCVcw7XuB44OBDHpoa6U,400 +scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat,sha256=kgHcuq-deI2y8hfkGwlMOkW7lntexdPHfuz0ar6b3jo,241 
+scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat,sha256=rYCaWNLXK7f_jjMc6_UvZz6ZDuMCuVRmJV5RyeXiDm8,241 +scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat,sha256=hnNV6GZazEeqTXuA9vcOUo4xam_UnKRYGYH9PUGTLv8,219 +scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat,sha256=cAhec51DlqIYfDXXGaumOE3Hqb3cFWM1UsUK3K_lDP8,375 +scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat,sha256=ciFzNGMO7gjYecony-E8vtOwBY4vXIUhyug6Euaz3Kg,288 +scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat,sha256=yrJrpLiwLvU_LI1D6rw1Pk1qJK1YlC7Cmw7lwyJVLtw,288 +scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat,sha256=zo7sh-8dMpGqhoNxLEnfz3Oc7RonxiY5j0B3lxk0e8o,224 +scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat,sha256=igL_CvtAcNEa1nxunDjQZY5wS0rJOlzsUkBiDreJssk,224 +scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat,sha256=pRldk-R0ig1k3ouvaR9oVtBwZsQcDW_b4RBEDYu1-Vk,156 +scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat,sha256=B9IdaSsyb0wxjyYyHOj_GDO0laAeWDEJhoEhC9xdm1E,232 +scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat,sha256=t4tKGJg2NEg_Ar5MkOjCoQb2hVL8Q_Jdh9FF4TPL_4g,232 +scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat,sha256=lpYkBZX8K-c4FO5z0P9DMfYc7Y-yzyg11J6m-19uYTU,203 +scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat,sha256=lG-c7U-5Bo8j8xZLpd0JAsMYwewT6cAw4eJCZH5xf6E,203 +scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat,sha256=3GJbA4O7LP57J6IYzmJqTPeSJrEaiNSk-rg7h0ANR1w,608 +scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat,sha256=fRbqAnzTeOU3dTQx7O24MfMVFr6pM5u594FRrPPkYJE,552 +scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat,sha256=mCtI_Yot08NazvWHvehOZbTV4bW_I4-D5jBgJ6T9EbI,314 +scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat,sha256=52qaF4HRCtPl1jE6ljbkEl2mofZVAPpmBxrm-J5OTTI,314 +scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat,sha256=vneCpWBwApBGfeKzdZcybyajxjR-ZYf64j0l08_hU84,528 +scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat,sha256=gqhRpSfNNB5SR9sCp-wWrvokr5VV_heGnvco6dmfOvY,472 +scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat,sha256=6VDU0mtTBEG0bBHqKP1p8xq846eMhSZ_WvBZv8MzE7M,246 +scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat,sha256=ejtyxeeX_W1a2rNrEUUiG9txPW8_UtSgt8IaDOxE2pg,246 +scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat,sha256=sbi0wUwOrbU-gBq3lyDwhAbvchdtOJkflOR_MU7uGKA,496 +scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat,sha256=uTkKtrYBTuz4kICVisEaG7V5C2nJDKjy92mPDswTLPE,416 +scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat,sha256=o4F2jOhYyNpJCo-BMg6v_ITZQvjenXfXHLq94e7iwRo,252 +scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat,sha256=CNXO12O6tedEuMG0jNma4qfbTgCswAbHwh49a3uE3Yk,252 +scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat,sha256=KV97FCW-1XZiXrwXJoZPbgyAht79oIFHa917W1KFLwE,357 +scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat,sha256=9-8xzACZleBkMjZnbr8t4Ncs9B6mbzrONDblPnteBPU,357 +scipy/io/matlab/tests/data/testvec_4_GLNX86.mat,sha256=GQzR3mBVS266_NBfrRC9X0dLgmeu8Jl4r4ZYMOrn1V0,93 +scipy/io/matlab/tests/test_byteordercodes.py,sha256=FCHBAxeQZlhvTXw-AO-ukwTWvpN7NzmncBEDJ1P4de4,938 +scipy/io/matlab/tests/test_mio.py,sha256=61iYn2ELM4i7YXQZzlLr-ifMetmOdHwVCs38kBW6YQY,43337 +scipy/io/matlab/tests/test_mio5_utils.py,sha256=4uKkvA7p6pc8ybktQGAdGZaNFzNT4yan0dyCs4ruC4A,5419 +scipy/io/matlab/tests/test_mio_funcs.py,sha256=fSDaeVPvCRBFzqjWtXR5xIv9UQ_yv6Y_Nl5D5u0HIGo,1392 +scipy/io/matlab/tests/test_mio_utils.py,sha256=GX85RuLqr2HxS5_f7ZgrxbhswJy2GPQQoQbiQYg0s14,1594 
+scipy/io/matlab/tests/test_miobase.py,sha256=xH4ZOR_b25TJLyIGqYQdeSASpTi8j-oIkRcO4D-R4us,1464 +scipy/io/matlab/tests/test_pathological.py,sha256=qir2euvFJnsXQYDzg0xAy5VUsNqCJPvI19IOLPj2T0A,1060 +scipy/io/matlab/tests/test_streams.py,sha256=-Yf5bbmFQnEdyW_zmQstHdMBkn95RYVxCzg-Cfdg9Qs,7319 +scipy/io/mmio.py,sha256=TkHUGo7h8JCkFI5se5T_rSC3Wc_Ojkb-yLhp99cmV-M,779 +scipy/io/netcdf.py,sha256=A5jSFgdrJGZHgeoFHvLuEHMFi0ZYZt76eyOErVHy04Q,1080 +scipy/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_idl.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_paths.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc,, +scipy/io/tests/data/Transparent Busy.ani,sha256=vwoK3ysYo87-TwzvjerHjFjSPIGpw83jjiMDXcHPWjA,4362 +scipy/io/tests/data/array_float32_1d.sav,sha256=A_xXWkfS1sQCxP4ONezeEZvlKEXwZ1TPG2rCCFdmBNM,2628 +scipy/io/tests/data/array_float32_2d.sav,sha256=qJmN94pywXznXMHzt-L6DJgaIq_FfruVKJl_LMaI8UU,3192 +scipy/io/tests/data/array_float32_3d.sav,sha256=U7P6As7Nw6LdBY1pTOaW9C-O_NlXLXZwSgbT3H8Z8uk,13752 +scipy/io/tests/data/array_float32_4d.sav,sha256=Tl6erEw_Zq3dwVbVyPXRWqB83u_o4wkIVFOe3wQrSro,6616 +scipy/io/tests/data/array_float32_5d.sav,sha256=VmaBgCD854swYyLouDMHJf4LL6iUNgajEOQf0pUjHjg,7896 +scipy/io/tests/data/array_float32_6d.sav,sha256=lb7modI0OQDweJWbDxEV2OddffKgMgq1tvCy5EK6sOU,19416 +scipy/io/tests/data/array_float32_7d.sav,sha256=pqLWIoxev9sLCs9LLwxFlM4RCFwxHC4Q0dEEz578mpI,3288 +scipy/io/tests/data/array_float32_8d.sav,sha256=R8A004f9XLWvF6eKMNEqIrC6PGP1vLZr9sFqawqM8ZA,13656 +scipy/io/tests/data/array_float32_pointer_1d.sav,sha256=sV7qFNwHK-prG5vODa7m5HYK7HlH_lqdfsI5Y1RWDyg,2692 +scipy/io/tests/data/array_float32_pointer_2d.sav,sha256=b0brvK6xQeezoRuujmEcJNw2v6bfASLM3FSY9u5dMSg,3256 +scipy/io/tests/data/array_float32_pointer_3d.sav,sha256=a_Iyg1YjPBRh6B-N_n_BGIVjFje4K-EPibKV-bPbF7E,13816 +scipy/io/tests/data/array_float32_pointer_4d.sav,sha256=cXrkHHlPyoYstDL_OJ15-55sZOOeDNW2OJ3KWhBv-Kk,6680 +scipy/io/tests/data/array_float32_pointer_5d.sav,sha256=gRVAZ6jeqFZyIQI9JVBHed9Y0sjS-W4bLseb01rIcGs,7960 +scipy/io/tests/data/array_float32_pointer_6d.sav,sha256=9yic-CQiS0YR_ow2yUA2Nix0Nb_YCKMUsIgPhgcJT1c,19480 +scipy/io/tests/data/array_float32_pointer_7d.sav,sha256=Rp1s8RbW8eoEIRTqxba4opAyY0uhTuyy3YkwRlNspQU,3352 +scipy/io/tests/data/array_float32_pointer_8d.sav,sha256=Wk3Dd2ClAwWprXLKZon3blY7aMvMrJqz_NXzK0J5MFY,13720 +scipy/io/tests/data/example_1.nc,sha256=EkfC57dWXeljgXy5sidrJHJG12D1gmQUyPDK18WzlT4,1736 +scipy/io/tests/data/example_2.nc,sha256=wywMDspJ2QT431_sJUr_5DHqG3pt9VTvDJzfR9jeWCk,272 +scipy/io/tests/data/example_3_maskedvals.nc,sha256=P9N92jCJgKJo9VmNd7FeeJSvl4yUUFwBy6JpR4MeuME,1424 +scipy/io/tests/data/fortran-3x3d-2i.dat,sha256=oYCXgtY6qqIqLAhoh_46ob_RVQRcV4uu333pOiLKgRM,451 +scipy/io/tests/data/fortran-mixed.dat,sha256=zTi7RLEnyAat_DdC3iSEcSbyDtAu0aTKwUT-tExjasw,40 +scipy/io/tests/data/fortran-sf8-11x1x10.dat,sha256=KwaOrZOAe-wRhuxvmHIK-Wr59us40MmiA9QyWtIAUaA,888 +scipy/io/tests/data/fortran-sf8-15x10x22.dat,sha256=5ohvjjOUcIsGimSqDhpUUKwflyhVsfwKL5ElQe_SU0I,26408 +scipy/io/tests/data/fortran-sf8-1x1x1.dat,sha256=Djmoip8zn-UcxWGUPKV5wzKOYOf7pbU5L7HaR3BYlec,16 +scipy/io/tests/data/fortran-sf8-1x1x5.dat,sha256=Btgavm3w3c9md_5yFfq6Veo_5IK9KtlLF1JEPeHhZoU,48 
+scipy/io/tests/data/fortran-sf8-1x1x7.dat,sha256=L0r9yAEMbfMwYQytzYsS45COqaVk-o_hi6zRY3yIiO4,64 +scipy/io/tests/data/fortran-sf8-1x3x5.dat,sha256=c2LTocHclwTIeaR1Pm3mVMyf5Pl_imfjIFwi4Lpv0Xs,128 +scipy/io/tests/data/fortran-si4-11x1x10.dat,sha256=OesvSIGsZjpKZlZsV74PNwy0Co0KH8-3gxL9-DWoa08,448 +scipy/io/tests/data/fortran-si4-15x10x22.dat,sha256=OJcKyw-GZmhHb8REXMsHDn7W5VP5bhmxgVPIAYG-Fj4,13208 +scipy/io/tests/data/fortran-si4-1x1x1.dat,sha256=1Lbx01wZPCOJHwg99MBDuc6QZKdMnccxNgICt4omfFM,12 +scipy/io/tests/data/fortran-si4-1x1x5.dat,sha256=L1St4yiHTA3v91JjnndYfUrdKfT1bWxckwnnrscEZXc,28 +scipy/io/tests/data/fortran-si4-1x1x7.dat,sha256=Dmqt-tD1v2DiPZkghGGZ9Ss-nJGfei-3yFXPO5Acpk4,36 +scipy/io/tests/data/fortran-si4-1x3x5.dat,sha256=3vl6q93m25jEcZVKD0CuKNHmhZwZKp-rv0tfHoPVP88,68 +scipy/io/tests/data/invalid_pointer.sav,sha256=JmgoISXC4r5fSmI5FqyapvmzQ4qpYLf-9N7_Et1p1HQ,1280 +scipy/io/tests/data/null_pointer.sav,sha256=P_3a_sU614F3InwM82jSMtWycSZkvqRn1apwd8XxbtE,2180 +scipy/io/tests/data/scalar_byte.sav,sha256=dNJbcE5OVDY_wHwN_UBUtfIRd13Oqu-RBEO74g5SsBA,2076 +scipy/io/tests/data/scalar_byte_descr.sav,sha256=DNTmDgDWOuzlQnrceER6YJ0NutUUwZ9tozVMBWQmuuY,2124 +scipy/io/tests/data/scalar_complex32.sav,sha256=NGd-EvmFZgt8Ko5MP3T_TLwyby6yS0BXM_OW8197hpU,2076 +scipy/io/tests/data/scalar_complex64.sav,sha256=gFBWtxuAajazupGFSbvlWUPDYK-JdWgZcEWih2-7IYU,2084 +scipy/io/tests/data/scalar_float32.sav,sha256=EwWQw2JTwq99CHVpDAh4R20R0jWaynXABaE2aTRmXrs,2072 +scipy/io/tests/data/scalar_float64.sav,sha256=iPcDlgF1t0HoabvNLWCbSiTPIa9rvVEbOGGmE_3Ilsk,2076 +scipy/io/tests/data/scalar_heap_pointer.sav,sha256=JXZbPmntXILsNOuLIKL8qdu8gDJekYrlN9DQxAWve0E,2204 +scipy/io/tests/data/scalar_int16.sav,sha256=kDBLbPYGo2pzmZDhyl8rlDv0l6TMEWLIoLtmgJXDMkk,2072 +scipy/io/tests/data/scalar_int32.sav,sha256=IzJwLvEoqWLO5JRaHp8qChfptlauU-ll3rb0TfDDM8Y,2072 +scipy/io/tests/data/scalar_int64.sav,sha256=-aSHQRiaE3wjAxINwuLX33_8qmWl4GUkTH45elTkA-8,2076 +scipy/io/tests/data/scalar_string.sav,sha256=AQ7iZ8dKk9QfnLdP9idKv1ojz0M_SwpL7XAUmbHodDQ,2124 +scipy/io/tests/data/scalar_uint16.sav,sha256=928fmxLsQM83ue4eUS3IEnsLSEzmHBklDA59JAUvGK8,2072 +scipy/io/tests/data/scalar_uint32.sav,sha256=X3RbPhS6_e-u-1S1gMyF7s9ys7oV6ZNwPrJqJ6zIJsk,2072 +scipy/io/tests/data/scalar_uint64.sav,sha256=ffVyS2oKn9PDtWjJdOjSRT2KZzy6Mscgd4u540MPHC4,2076 +scipy/io/tests/data/struct_arrays.sav,sha256=TzH-Gf0JgbP_OgeKYbV8ZbJXvWt1VetdUr6C_ziUlzg,2580 +scipy/io/tests/data/struct_arrays_byte_idl80.sav,sha256=oOmhTnmKlE60-JMJRRMv_zfFs4zqioMN8QA0ldlgQZo,1388 +scipy/io/tests/data/struct_arrays_replicated.sav,sha256=kXU8j9QI2Q8D22DVboH9fwwDQSLVvuWMJl3iIOhUAH8,2936 +scipy/io/tests/data/struct_arrays_replicated_3d.sav,sha256=s3ZUwhT6TfiVfk4AGBSyxYR4FRzo4sZQkTxFCJbIQMI,4608 +scipy/io/tests/data/struct_inherit.sav,sha256=4YajBZcIjqMQ4CI0lRUjXpYDY3rI5vzJJzOYpjWqOJk,2404 +scipy/io/tests/data/struct_pointer_arrays.sav,sha256=fkldO6-RO2uAN_AI9hM6SEaBPrBf8TfiodFGJpViaqg,2408 +scipy/io/tests/data/struct_pointer_arrays_replicated.sav,sha256=eKVerR0LoD9CuNlpwoBcn7BIdj3-8x56VNg--Qn7Hgc,2492 +scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav,sha256=vsqhGpn3YkZEYjQuI-GoX8Jg5Dv8A2uRtP0kzQkq4lg,2872 +scipy/io/tests/data/struct_pointers.sav,sha256=Zq6d5V9ZijpocxJpimrdFTQG827GADBkMB_-6AweDYI,2268 +scipy/io/tests/data/struct_pointers_replicated.sav,sha256=aIXPBIXTfPmd4IaLpYD5W_HUoIOdL5Y3Hj7WOeRM2sA,2304 +scipy/io/tests/data/struct_pointers_replicated_3d.sav,sha256=t1jhVXmhW6VotQMNZ0fv0sDO2pkN4EutGsx5No4VJQs,2456 
+scipy/io/tests/data/struct_scalars.sav,sha256=LYICjERzGJ_VvYgtwJ_Up2svQTv8wBzNcVD3nsd_OPg,2316 +scipy/io/tests/data/struct_scalars_replicated.sav,sha256=lw3fC4kppi6BUWAd4n81h8_KgoUdiJl5UIt3CvJIuBs,2480 +scipy/io/tests/data/struct_scalars_replicated_3d.sav,sha256=xVAup6f1dSV_IsSwBQC3KVs0eLEZ6-o5EaZT9yUoDZI,3240 +scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav,sha256=gjv__ng9xH_sm34hyxCbCgO4AP--PZAfDOArH5omkjM,3586 +scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav,sha256=H0LLyv2lc2guzYGnx4DWXU6vB57JrRX-G9Dd4qGh0hM,3586 +scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav,sha256=KKz9SXv_R3gX_AVeED2vyhYnj4BvD1uyDiKpCT3ulZ0,17720 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav,sha256=YX1g8qdCOAG16vX9G6q4SsfCj2ZVk199jzDQ8S0zWYI,72 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav,sha256=bFrsRqw0QXmsaDtjD6TFP8hZ5jEYMyaCmt-ka_C6GNk,1024 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav,sha256=zMnhvZvrP4kyOWKVKfbBneyv03xvzgqXYhHNxsAxDJ4,13 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav,sha256=9qTCvpgdz3raecVN1ViggHPnQjBf47xmXod9iCDsEik,17720 +scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav,sha256=EqYBnEgTxTKvaTAtdA5HIl47CCFIje93y4hawR6Pyu0,7792 +scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav,sha256=hGYchxQFjrtvZCBo0ULi-xdZ8krqXcKdTl3NSUfqe8k,90 +scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav,sha256=h8CXsW5_ShKR197t_d-TUTlgDqOZ-7wK_EcVGucR-aY,74 +scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav,sha256=BoUCDct3GiY_JJV_HoghF3mzAebT18j02c-MOn19KxU,70 +scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav,sha256=R6EJshvQp5YVR4GB9u4Khn5HM1VMfJUj082i8tkBIJ8,1644 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav,sha256=t2Mgri3h6JLQDekrwIhDBOaG46OUzHynUz0pKbvOpNU,90 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav,sha256=yCv0uh-ux_skJsxeOjzog0YBk3ZQO_kw5HJHMqtVyI0,90 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav,sha256=oiMVsQV9-qGBz_ZwsfAkgA9BZXNjXbH4zxCGvvdT0RY,120 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav,sha256=e97XoPrPGJDIh8nO6mii__ViY5yVlmt4OnPQoDN1djs,134 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav,sha256=wbonKlzvzQ_bQYyBsj-GwnihZOhn0uxfKhL_nENCGNc,150 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav,sha256=Uu5QPQcbtnFlnxOd4zFGxpiTC4wgdp6JOoYJ2VMZIU0,164 +scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav,sha256=1F67h8tr2xz0C5K21T9y9gspcGA0qnSOzsl2vjArAMs,116 +scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav,sha256=TJvGU7GpgXdCrdrjzMlDtpieDMnDK-lWMMqlWjT23BY,89 +scipy/io/tests/data/various_compressed.sav,sha256=H-7pc-RCQx5y6_IbHk1hB6OfnhvuPyW6EJq4EwI9iMc,1015 +scipy/io/tests/test_fortran.py,sha256=2NZb7RoXsoH5pqh1WHCH6j0PTf4q_Lnee_vmgcmU1Xs,7572 +scipy/io/tests/test_idl.py,sha256=rQd2IH7BwOzS1X1sO6dlLax85_i3OWgjRGzZqJOyI2w,19874 +scipy/io/tests/test_mmio.py,sha256=oFKsQi8sGfj3KGPqOQ9Hz0scj-Ih0P7NZpqdN5OOHxg,26585 +scipy/io/tests/test_netcdf.py,sha256=5RMWHfw349f7Gjp-RLNTSxLpYaRlnxIjXHaa-z46M0g,19317 +scipy/io/tests/test_paths.py,sha256=3ewh_1yXujx3NIZ3deUjepFJgJDa5IHIugxupLDhHoU,3178 +scipy/io/tests/test_wavfile.py,sha256=UluHY_ZPAbAaot_5ykV2aArBmwMRlKhEdZHiTzj-JLc,15303 +scipy/io/wavfile.py,sha256=CXcu2wq38iAExx-bBeGHeYbStPxF9uhss3nA9lgyUow,26642 +scipy/linalg.pxd,sha256=M28Y_hLKRSlomUNFNm0LbL9lhYINd7mgo7maa_WiHmw,48 +scipy/linalg/__init__.py,sha256=rQCI6sMFRogHgCboA0NsCvLrnKFH0iEoDeZE72BHbS4,7604 +scipy/linalg/__pycache__/__init__.cpython-310.pyc,, +scipy/linalg/__pycache__/_basic.cpython-310.pyc,, 
+scipy/linalg/__pycache__/_decomp.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_cholesky.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_ldl.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_polar.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_schur.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_svd.cpython-310.pyc,, +scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc,, +scipy/linalg/__pycache__/_flinalg_py.cpython-310.pyc,, +scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc,, +scipy/linalg/__pycache__/_misc.cpython-310.pyc,, +scipy/linalg/__pycache__/_procrustes.cpython-310.pyc,, +scipy/linalg/__pycache__/_sketches.cpython-310.pyc,, +scipy/linalg/__pycache__/_solvers.cpython-310.pyc,, +scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc,, +scipy/linalg/__pycache__/_testutils.cpython-310.pyc,, +scipy/linalg/__pycache__/basic.cpython-310.pyc,, +scipy/linalg/__pycache__/blas.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc,, +scipy/linalg/__pycache__/flinalg.cpython-310.pyc,, +scipy/linalg/__pycache__/interpolative.cpython-310.pyc,, +scipy/linalg/__pycache__/lapack.cpython-310.pyc,, +scipy/linalg/__pycache__/matfuncs.cpython-310.pyc,, +scipy/linalg/__pycache__/misc.cpython-310.pyc,, +scipy/linalg/__pycache__/special_matrices.cpython-310.pyc,, +scipy/linalg/_basic.py,sha256=Sv96ztCBTc7u9hfO2Rq0nWUA3d0f1RhtX9z8YEVRVmE,64381 +scipy/linalg/_blas_subroutine_wrappers.f,sha256=pnqlE8yxj0Uh8HGug6v0JsD76QbNdRE-_5ErKUXAOxs,7757 +scipy/linalg/_blas_subroutines.h,sha256=iodn74tn1PwQFzOX-cbqOus6LjAx43ETe5YhndHhxs4,19068 +scipy/linalg/_cythonized_array_utils.cpython-310-darwin.so,sha256=kTQ6egcfye3bGd-L-gqcCA6PKix7vvH_ZbWqXT79WDU,460026 +scipy/linalg/_cythonized_array_utils.pxd,sha256=iFr-x1jLaNiZroQEErGwaMYZU09DI26lWf1h-cbhVMQ,861 +scipy/linalg/_cythonized_array_utils.pyi,sha256=7SHh1oIR-ET5or3WkUfCnwP2dGTpdaUwIsgGaQqrMYg,346 +scipy/linalg/_decomp.py,sha256=966DxNaiIrwXzsHI-zPDV0TO6znvTRvIibCtHPfIKqE,61405 +scipy/linalg/_decomp_cholesky.py,sha256=iCRl5kCijw__9VXbrE5Fdy_X1yUAAaP4vi1XBtZH9nA,11903 +scipy/linalg/_decomp_cossin.py,sha256=vtCgl-6E8AT3iwoBcMTMmVAhDvLyNXPOkg-xPRdmIsQ,9136 +scipy/linalg/_decomp_ldl.py,sha256=dUj9QPKS1o1jjKWNr37MAU3MGQE_00XyQdIIxR6lL_g,12516 +scipy/linalg/_decomp_lu.py,sha256=MUbpsSQpQtXve7y-yPnkhrVMaKSa7lhj4_LQmoKUn-c,6922 +scipy/linalg/_decomp_polar.py,sha256=arzJ40FP1-TFsRvXPCP1qdNTsT60lkBcKBHfhB2JxxY,3578 +scipy/linalg/_decomp_qr.py,sha256=E-ibpl1QdMu8HnllINd81j-FufmWCCbL35ImOoOiWAA,13727 +scipy/linalg/_decomp_qz.py,sha256=6lgUlMUdnWjJbyfzbgYaxzjFPixiyRSV-iJFpIBEmxc,16333 +scipy/linalg/_decomp_schur.py,sha256=_X7GVtoRpGB4BuBbBWUZWV0QYFpadrCl-q0CTocR3Mo,10272 +scipy/linalg/_decomp_svd.py,sha256=IqMNuweROqy1NYqj-2R3tuSf4JQbQyuI6T5bzmmFKIM,14907 
+scipy/linalg/_decomp_update.cpython-310-darwin.so,sha256=-aP-iW80NI_7FzQoh11urXRmHK_d5PP3QVpbTM5MVco,273873 +scipy/linalg/_expm_frechet.py,sha256=gJQcBbSQ_Q6OORSvHNPokB5ahvXt9LC48zA5t3jftB8,12326 +scipy/linalg/_fblas.cpython-310-darwin.so,sha256=FQS7CG33SGlzbYWv-KLoHxG4VgbYfrg0v0lPqBMDvJk,564256 +scipy/linalg/_flapack.cpython-310-darwin.so,sha256=YXWupmpwncuWL76Qpw3LSb_bQWuJ7Ge3ofDupYDnUxk,1719648 +scipy/linalg/_flinalg.cpython-310-darwin.so,sha256=y6cgkk0xh7mf5EW8ENLJVPQd2WOiPFZYuU7wfTOL-A8,109296 +scipy/linalg/_flinalg_py.py,sha256=qSJJm0OCTEkswqZyyTjQ3-WalPnECw2jgDxSymdbaSM,1658 +scipy/linalg/_interpolative.cpython-310-darwin.so,sha256=yxlmxVgTRs5eKJVy-gU_p0y6KXC-PQmwQGGV0N7mUQo,378912 +scipy/linalg/_interpolative_backend.py,sha256=yycf_ceX0dgf7Usjvtaxmkm_cT-2jmEMBuWY6tJST2g,45192 +scipy/linalg/_lapack_subroutine_wrappers.f,sha256=lSvEytuOblN5KOmcHlyfj7MVfn5CyyTllZQAp7i_woM,34384 +scipy/linalg/_lapack_subroutines.h,sha256=WOzLcinUl8EqEGuYUMDwOvEal_sviBjztpLIrTp3eZc,246836 +scipy/linalg/_matfuncs.py,sha256=bvz-QzmziCuXBHqQoSEydk6RogSnesUy13toGJx6scI,25062 +scipy/linalg/_matfuncs_expm.cpython-310-darwin.so,sha256=tfmapOGU1IkJrQsE7bCx8qQCH3tAJ_jPH7sdEvIokRU,366929 +scipy/linalg/_matfuncs_expm.pyi,sha256=zpJd5n0J2JAlPSUKSumo46MN8X3hjJZcRMkD1msZKI0,194 +scipy/linalg/_matfuncs_inv_ssq.py,sha256=ddfyRK1AB1Toub3qnM9pyhfmmYwAiYadvddSrQeLkmM,28038 +scipy/linalg/_matfuncs_sqrtm.py,sha256=VnF1JNk4h7N9bfTF66ab6mKHGoFrDLcVDNrywzs1JSY,6637 +scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-darwin.so,sha256=dn5zJRQPU-50H5xtDBn3NvJqvmfUhzIBwAVSyq8tvvQ,209383 +scipy/linalg/_misc.py,sha256=3IPq-LIQcxV7ELbtcgZK8Ri60YWbhpN_y7UYe6BKEgA,6283 +scipy/linalg/_procrustes.py,sha256=bdr2I5Lcw68IPsupyRYdSC1_8-INJ2dlNtf34j73pwA,2745 +scipy/linalg/_sketches.py,sha256=n6PEJILrFpzWhdf-sKFgGN-0elEwqvBlI0Z3H54tk0c,6145 +scipy/linalg/_solve_toeplitz.cpython-310-darwin.so,sha256=C115lmElXcyZp35dWLtJ5Xd-37tZxBiLhZEmnstE8B4,231570 +scipy/linalg/_solvers.py,sha256=Q4sTmL0pn6_MlnAztXisB04dHgS3j500chK8AurpCvI,28379 +scipy/linalg/_special_matrices.py,sha256=VAGDaA1T36-YcxYb6ii73mNtwMYbyQF11eiIMlQRXG4,40052 +scipy/linalg/_testutils.py,sha256=Bbc3qTyBt3M0beyXqaqF8OI9fDeiZlXom6pL_DdWE8A,1732 +scipy/linalg/basic.py,sha256=Bm9qs8IDccT4i5ZYP148BRMRDXM5ltzS7acZ3gJwg6s,1026 +scipy/linalg/blas.py,sha256=9BFJUhgB6DkYFyfLyYMsUIm72icLVJPFm9h2e-b860M,11683 +scipy/linalg/cython_blas.cpython-310-darwin.so,sha256=iRbhuXkUbvHORUaNXUG6whHJ4EWHoH2n393fso43SEo,256288 +scipy/linalg/cython_blas.pxd,sha256=AlS8WsmUysG87D5T-hOuLiBsakmGMani_L8D_h_lzPs,14403 +scipy/linalg/cython_blas.pyx,sha256=nS4d8mvgs1Z8dMh3HMyY_wKYy2pAXxmG4omyFvrjo2I,63232 +scipy/linalg/cython_lapack.cpython-310-darwin.so,sha256=xi5S91EbrGY-Tdv4i7076iekSKPoQA5pJr-UGiGhXms,675328 +scipy/linalg/cython_lapack.pxd,sha256=P3BMEOCHBOopT4ijb3NtNXJMyYXBp_j5LiNnXAmGKZw,192579 +scipy/linalg/cython_lapack.pyx,sha256=Zkb13Sh2Yq98ul6EPQNPOdpcfk73No53Xge2JpSmtn4,688160 +scipy/linalg/decomp.py,sha256=2GO63DouH59OirrwuOdp1OsunrRrQqpUp_rh9cpzxAg,1057 +scipy/linalg/decomp_cholesky.py,sha256=GqKhTklJCBpycKC_hSTwMJ73t6HS4GdOItHbXzjLRb8,917 +scipy/linalg/decomp_lu.py,sha256=FS7TGi24g8Q33UBJagevgpbXB7i887Yk65VbKMLOF0U,856 +scipy/linalg/decomp_qr.py,sha256=wwe2u4Fn6IAOqO50_WTCQrwPKrhPzIpbxkqfPER4WpI,796 +scipy/linalg/decomp_schur.py,sha256=qTcDsoWVD4viq7eySdcIGaixEPuyvoLh0LESLzrzazI,882 +scipy/linalg/decomp_svd.py,sha256=KoBb6aFnHzFkHTi_DIbD5L3rhBFvwcZ6Tb5ROHayfhA,850 +scipy/linalg/flinalg.py,sha256=q4wlBcwHjfEYhUJ-qjI0FSpnb2-rjLPTqKFDu2fMGNw,677 
+scipy/linalg/interpolative.py,sha256=tqgLvf-x0TeAX0Nd2ipyS8fEMcXXscHXkQAuaChNmCY,32091 +scipy/linalg/lapack.py,sha256=B-sC0kfbRO7zON2Iji4WlSBzuuRpuIbWJJjiROHOBRA,15626 +scipy/linalg/matfuncs.py,sha256=yj2Xh_u2Re1ytR3kuwK5L2o590JZANtEv_10Z92loa0,1098 +scipy/linalg/misc.py,sha256=YY_fRbg979uqbgyYrsjsDTnhkyLc1MjNjKNLDHBNeCs,799 +scipy/linalg/special_matrices.py,sha256=qaR-sECUZCHGI2G4tO4OOsC0kGs_C_AOW9YWnbBWCjo,1026 +scipy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc,, +scipy/linalg/tests/data/carex_15_data.npz,sha256=E_PhSRqHa79Z1-oQrSnB-bWZaiq5khbzHVv81lkBLB4,34462 +scipy/linalg/tests/data/carex_18_data.npz,sha256=Wfg5Rn8nUrffb7bUCUOW7dMqWSm3ZPf_oeZmZDHmysY,161487 +scipy/linalg/tests/data/carex_19_data.npz,sha256=OOj8ewQd8LI9flyhXq0aBl5kZ2Ee-ahIzH25P4Ct_Yc,34050 +scipy/linalg/tests/data/carex_20_data.npz,sha256=FOIi00pxGMcoShZ1xv7O7ne4TflRpca6Kl7p_zBU-h0,31231 +scipy/linalg/tests/data/carex_6_data.npz,sha256=GyoHNrVB6_XEubTADW2rKB5zyfuZE8biWBp4Gze2Avk,15878 +scipy/linalg/tests/data/gendare_20170120_data.npz,sha256=o9-rRR2dXCAkPg7YXNi2yWV2afuaD4O1vhZVhXg9VbU,2164 +scipy/linalg/tests/test_basic.py,sha256=WKjiUj-WFzjujT7eGvGAHdMjmhYX4cNG-OQgkIAjxYQ,64797 +scipy/linalg/tests/test_blas.py,sha256=o6BEfT7IQLvhciT3waCSZbTZCKoIWqf51QTemINUe14,40206 +scipy/linalg/tests/test_cython_blas.py,sha256=lj8hm4wptSgUVe5969QH0AsRLxAkHox36kd3y9WPksg,4217 +scipy/linalg/tests/test_cython_lapack.py,sha256=EDhd6pmXxX0U4xxl5buBGH2ZjHj-J7LGq6rw6CZKA0k,574 +scipy/linalg/tests/test_cythonized_array_utils.py,sha256=O1EKWxsYt6k1zMWjFlQhTndQVOhHsJlSm-bHfPMny1U,3840 +scipy/linalg/tests/test_decomp.py,sha256=g19D_YQ895IaiXdnBapptZM6twDQ5ZYNoSxCS-NAElA,108164 +scipy/linalg/tests/test_decomp_cholesky.py,sha256=O8kkqod4sj46DtNpeyuZrKQfMmJeU5sSRANXuUyP6PM,7265 +scipy/linalg/tests/test_decomp_cossin.py,sha256=5PF6FGd-WisBFeWLJqKmgbbIdWedJ-skZ9NevCM5x1k,5772 +scipy/linalg/tests/test_decomp_ldl.py,sha256=8bbKacJFfH_x7RJm6AhfS434c3Plu1xI7ElW_j0uLi4,4979 
+scipy/linalg/tests/test_decomp_polar.py,sha256=5x5vz9rJE2U2nvo0kx6xMX5Z9OcnqxayPZvAd4dwsUQ,2646 +scipy/linalg/tests/test_decomp_update.py,sha256=U1333Q_d13QnUeiXcQkJsE_rBJq6olHXu-6K3nUmEHg,68486 +scipy/linalg/tests/test_fblas.py,sha256=TIdXGmuvQ_na6eMlq7k4UdCELNWWDa7VG4imiyrSC0I,18685 +scipy/linalg/tests/test_interpolative.py,sha256=cAx8lJhE9YH-mXgC-Ltf4xv4nDhq0m0jq65tRkIut1g,8956 +scipy/linalg/tests/test_lapack.py,sha256=LsvtBfQIDf_pWe_aa1J8CSfFkgMLsc1u7roYEPxI6A8,125052 +scipy/linalg/tests/test_matfuncs.py,sha256=SLToErp3Mj1ujgg9QY5JKATMtyMFvy_u0yLE1ZkMqm0,38696 +scipy/linalg/tests/test_matmul_toeplitz.py,sha256=Wd9T03zZRwX3M3ppkhYJiJbkWZ_xop4VKj57TjeozUs,3870 +scipy/linalg/tests/test_misc.py,sha256=HP9jfKohbJIaKVcBqov9hAOHYk5dZck497-V5DMHe6E,76 +scipy/linalg/tests/test_procrustes.py,sha256=WkNNarBf69izBmlOhu4-u0eWdzkSzYHQuDZh-w89fOU,6758 +scipy/linalg/tests/test_sketches.py,sha256=FVEcNV43JteZZU7GDdBjtl-_alYDimxnjgKvpmtzVsI,3960 +scipy/linalg/tests/test_solve_toeplitz.py,sha256=KuTAYh-8MRWjaHclgQuIaBBx8IBTGEzXgZnhM_gjWxo,4010 +scipy/linalg/tests/test_solvers.py,sha256=2OkwSyCRE8Z-K6UgNlNIR6n95935DA8GXsVUYw3K2kw,31074 +scipy/linalg/tests/test_special_matrices.py,sha256=9cRvhLdK5udWkN_WIYmJ5rawGIFV_vSIYwO0dSEB4AE,26710 +scipy/misc/__init__.py,sha256=CdX9k6HUYu_cqVF4l2X5h1eqd9xUCuKafO_0aIY5RNE,1726 +scipy/misc/__pycache__/__init__.cpython-310.pyc,, +scipy/misc/__pycache__/_common.cpython-310.pyc,, +scipy/misc/__pycache__/common.cpython-310.pyc,, +scipy/misc/__pycache__/doccer.cpython-310.pyc,, +scipy/misc/_common.py,sha256=ndBktpW3llbZYf6IwS3lT7wBZIqV7AZygw2m9UTqoTA,11120 +scipy/misc/ascent.dat,sha256=6KhJOUhEY6uAUa7cW0CqJiqzOpHWRYps0TxqHK1aAj0,527630 +scipy/misc/common.py,sha256=BM-V8TKsvDKG_EtDRE4iIw8Of1q4U6JOwl7WSj6-1GI,869 +scipy/misc/doccer.py,sha256=D-G2jEalH4nXXlDEfZ59Ao9aj1_1t2SIb5ZlW9NHONE,766 +scipy/misc/ecg.dat,sha256=8grTNl-5t_hF0OXEi2_mcIE3fuRmw6Igt_afNciVi68,119035 +scipy/misc/face.dat,sha256=nYsLTQgTE-K0hXSMdwRy5ale0XOBRog9hMcDBJPoKIY,1581821 +scipy/misc/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/misc/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_common.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_config.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc,, +scipy/misc/tests/test_common.py,sha256=0h_qT7hwQnqx4Oc6ccvM-U79EkbXPq5LNlC3QSvR88M,833 +scipy/misc/tests/test_config.py,sha256=j1Ppp6DCZy9wMxTmBEGxq4MScvsQXTQk7268EnNnPFQ,1244 +scipy/misc/tests/test_doccer.py,sha256=V1B5Z-XfIQFiSyRNo3PXG-AQfToFmoQ1oOBGjxK2zmo,3738 +scipy/ndimage/__init__.py,sha256=WKSnd4UmzibmbEZV-Sw31c9u7qDOa6WDqB7KMVcRIOU,5155 +scipy/ndimage/__pycache__/__init__.cpython-310.pyc,, +scipy/ndimage/__pycache__/_filters.cpython-310.pyc,, +scipy/ndimage/__pycache__/_fourier.cpython-310.pyc,, +scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc,, +scipy/ndimage/__pycache__/_measurements.cpython-310.pyc,, +scipy/ndimage/__pycache__/_morphology.cpython-310.pyc,, +scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc,, +scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc,, +scipy/ndimage/__pycache__/filters.cpython-310.pyc,, +scipy/ndimage/__pycache__/fourier.cpython-310.pyc,, +scipy/ndimage/__pycache__/interpolation.cpython-310.pyc,, +scipy/ndimage/__pycache__/measurements.cpython-310.pyc,, +scipy/ndimage/__pycache__/morphology.cpython-310.pyc,, +scipy/ndimage/_ctest.cpython-310-darwin.so,sha256=jKqB8CqimxdJPdIZMp9Bq6wlh6z5Ya1JbLYtOdQsisc,51081 
+scipy/ndimage/_cytest.cpython-310-darwin.so,sha256=HyLcwCbF6pg1kx5l80ejTSWy1S1NjRB8X3BAmT76_0Q,80266 +scipy/ndimage/_filters.py,sha256=R4A5u_fxFvH_6x_Y7Sj0yNtxlX-p9tXHrPE-USD9XDs,57442 +scipy/ndimage/_fourier.py,sha256=57ONJoo_8CmvhP5vCL7ijVvaK5U-gvp7LM0fL3YZ55o,11390 +scipy/ndimage/_interpolation.py,sha256=C1WSqoNdHJCy2DBGFAQWJnOYMB6cHG63xrteSatgovU,35437 +scipy/ndimage/_measurements.py,sha256=2gQhZgxNW_6gcU7NByEmE-KEbmvBCufIOE2rxRxy20Y,55935 +scipy/ndimage/_morphology.py,sha256=Vi9jTT-rIpw8Kvi3M1HNnNHqtHHyb9SOlKlK3eT72Y0,87519 +scipy/ndimage/_nd_image.cpython-310-darwin.so,sha256=VR4XaT5AbEnHvg9mSEeOMDLAD_88zg3J6TaAlwsX160,139548 +scipy/ndimage/_ni_docstrings.py,sha256=9DSB07qpihY6Gv_czcMN3BNzKNJ0rq9zISTtFIe3LPk,8516 +scipy/ndimage/_ni_label.cpython-310-darwin.so,sha256=q8lE0JtnEIXYQSpq0niCUK460FgCGp1gYtiH8JadqgQ,342956 +scipy/ndimage/_ni_support.py,sha256=Zcl8cNKmR-InX_Vp_0GWg11RvY3CenJCEfJxkANnlHM,3827 +scipy/ndimage/filters.py,sha256=aflHOtOL7ZL3EtpYctkPPW-iqJhH2pAhN4JENdgv4kI,1217 +scipy/ndimage/fourier.py,sha256=ftajFZrIBb9HBkLjDUT8PgdnrGuUCVskcca_FeswrFc,840 +scipy/ndimage/interpolation.py,sha256=56huW77Dwa3DizXKT87dd4Jpf7Qt0ygq5dYyeOFbzuM,933 +scipy/ndimage/measurements.py,sha256=9gi9FD20M8lPdLPkI8iA8VVdUZPbYKmjhlQfLxqfoPM,1015 +scipy/ndimage/morphology.py,sha256=tufpeFNW3Amik0BGaITG9NToqtZtR6ejFtx1s75MNQM,1188 +scipy/ndimage/tests/__init__.py,sha256=P1A2R3ZwnUUvIQA8ao8JuRbIc1xwUL8z2H7goUd5hFM,427 +scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc,, +scipy/ndimage/tests/data/label_inputs.txt,sha256=JPbEnncwUyhlAAv6grN8ysQW9w9M7ZSIn_NPopqU7z4,294 +scipy/ndimage/tests/data/label_results.txt,sha256=Cf2_l7FCWNjIkyi-XU1MaGzmLnf2J7NK2SZ_10O-8d0,4309 +scipy/ndimage/tests/data/label_strels.txt,sha256=AU2FUAg0WghfvnPDW6lhMB1kpNdfv3coCR8blcRNBJ8,252 +scipy/ndimage/tests/dots.png,sha256=sgtW-tx0ccBpTT6BSNniioPXlnusFr-IUglK_qOVBBQ,2114 +scipy/ndimage/tests/test_c_api.py,sha256=EPNsGMHzZHqd9jETd9Pw3gOQvo43On-jH5_4CJzf0S0,3476 +scipy/ndimage/tests/test_datatypes.py,sha256=UCYf_2mKXeZHxUsBRCAbadB1ojEnKimbuV499h0Jb7E,2742 +scipy/ndimage/tests/test_filters.py,sha256=tu9arp33w8c7WelWbcZerDmF6fYfQj5H8RnRDz7ofOc,84085 +scipy/ndimage/tests/test_fourier.py,sha256=5ykl99Q0o44pVqB4WUDdzmoMrEhjw0Fs9U9LemcqHAo,6668 +scipy/ndimage/tests/test_interpolation.py,sha256=6Y6hgQm6PPCKFLswWE1RCzK0HK8wgr9UvcbmQcGO2Jk,54798 +scipy/ndimage/tests/test_measurements.py,sha256=EQHm61KX66vOjQsm4TkAalYOi4PFRWdUCcNOKUhHM0I,47805 +scipy/ndimage/tests/test_morphology.py,sha256=r7PHBbFLn08tCqPvcViVsfv77TCuy7nStV-8Pg_BCM0,105591 +scipy/ndimage/tests/test_splines.py,sha256=KXQaTR1Odj45IQB4pfn8zWpWq26G2vPuFQxgc9qDYRk,2207 +scipy/odr/__init__.py,sha256=CErxMJ0yBfu_cvCoKJMu9WjqUaohLIqqf228Gm9XWJI,4325 +scipy/odr/__odrpack.cpython-310-darwin.so,sha256=GaqGXE68T4RwOSSAHuCaeRp192Ts7iNNyngJ-FJtOR0,223648 +scipy/odr/__pycache__/__init__.cpython-310.pyc,, +scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/odr/__pycache__/_models.cpython-310.pyc,, +scipy/odr/__pycache__/_odrpack.cpython-310.pyc,, 
+scipy/odr/__pycache__/models.cpython-310.pyc,, +scipy/odr/__pycache__/odrpack.cpython-310.pyc,, +scipy/odr/_add_newdocs.py,sha256=zX9DJ9c4fJX-6RU9xYZEJVxlO72wmNxV6_aTKSQjoGk,1090 +scipy/odr/_models.py,sha256=tfOLgqnV4LR3VKi7NAg1g1Jp_Zw8lG_PA5BHwU_pTH0,7800 +scipy/odr/_odrpack.py,sha256=B4cL2RIuMzPDpLOQ5ZqlvHF3qEUyQWBKifNY77pR_Wg,42071 +scipy/odr/models.py,sha256=EuQE3U_-9jUSMATZySrKiUXiB-WxgIBjI8kTrVHOSKw,793 +scipy/odr/odrpack.py,sha256=nWDtxoCtRhx35KJPu2-UgH7YYuI_RxlwG4VZJqS8Ngo,837 +scipy/odr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/odr/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc,, +scipy/odr/tests/test_odr.py,sha256=bupweXZsix6BVprTAQmSMXoH-sVteK6fQjsPZGu0A2o,19779 +scipy/optimize.pxd,sha256=kFYBK9tveJXql1KXuOkKGvj4Fu67GmuyRP5kMVkMbyk,39 +scipy/optimize/README,sha256=q7vAotiT7affj-8xYhiy0g9r0fQBE2caLUnvjqjgSv4,3416 +scipy/optimize/__init__.py,sha256=pEIFKGBrADiF4VTbWCPjwz9pgK9ORKlTSr8XE4E2z7M,12794 +scipy/optimize/__nnls.cpython-310-darwin.so,sha256=am_FZVcFIjn-77kLwCJkIoNo1BlpGgLETKtu5E9drOU,90624 +scipy/optimize/__nnls.pyi,sha256=fyGifwzaKwkeNdCWuW7au4oghq-H3pvS3Utng4UpBrE,448 +scipy/optimize/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc,, +scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_constraints.cpython-310.pyc,, +scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc,, +scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc,, +scipy/optimize/__pycache__/_direct_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc,, +scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc,, +scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_linesearch.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc,, +scipy/optimize/__pycache__/_milp.cpython-310.pyc,, +scipy/optimize/__pycache__/_minimize.cpython-310.pyc,, +scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_nnls.cpython-310.pyc,, +scipy/optimize/__pycache__/_nonlin.cpython-310.pyc,, +scipy/optimize/__pycache__/_numdiff.cpython-310.pyc,, +scipy/optimize/__pycache__/_optimize.cpython-310.pyc,, +scipy/optimize/__pycache__/_qap.cpython-310.pyc,, +scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc,, +scipy/optimize/__pycache__/_root.cpython-310.pyc,, +scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc,, +scipy/optimize/__pycache__/_shgo.cpython-310.pyc,, +scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_spectral.cpython-310.pyc,, +scipy/optimize/__pycache__/_tnc.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc,, +scipy/optimize/__pycache__/_tstutils.cpython-310.pyc,, +scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc,, 
+scipy/optimize/__pycache__/cobyla.cpython-310.pyc,, +scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc,, +scipy/optimize/__pycache__/linesearch.cpython-310.pyc,, +scipy/optimize/__pycache__/minpack.cpython-310.pyc,, +scipy/optimize/__pycache__/minpack2.cpython-310.pyc,, +scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc,, +scipy/optimize/__pycache__/nonlin.cpython-310.pyc,, +scipy/optimize/__pycache__/optimize.cpython-310.pyc,, +scipy/optimize/__pycache__/slsqp.cpython-310.pyc,, +scipy/optimize/__pycache__/tnc.cpython-310.pyc,, +scipy/optimize/__pycache__/zeros.cpython-310.pyc,, +scipy/optimize/_basinhopping.py,sha256=E3RK0RqvqOxak8cq1Jne2BaUrmvIM0nj4pu7UUR_rtA,29911 +scipy/optimize/_bglu_dense.cpython-310-darwin.so,sha256=YpKi-IdWfCxlHHG5ImreopJJUB1D4SBFtOhmAKlXW5A,297486 +scipy/optimize/_cobyla.cpython-310-darwin.so,sha256=o2d1_jYMVQrypymaYqM-VFSnJizNizityFPlsK6N4Ek,108784 +scipy/optimize/_cobyla_py.py,sha256=dKXCUT4O7WYhk3CQ_fgB7Sr7vu4Pn_OH9xi2wKHalTw,10184 +scipy/optimize/_constraints.py,sha256=BUAwllKXWzTkLYVZChS5fY2nkoDivquONZWHgsHukOA,22017 +scipy/optimize/_differentiable_functions.py,sha256=vpi8XCbBFAYgfA2DjSO7CfGWFIQvBFN-v-9g25vfbhk,22719 +scipy/optimize/_differentialevolution.py,sha256=AsGDj8nKndqhi0Yawzx_5CHnvnK9BN3ZWI90hD7NjHw,73494 +scipy/optimize/_direct.cpython-310-darwin.so,sha256=LxuO3BoNU6vsqfRRZcWc_pGTJ0_VF-yoYTc6iaLaSOw,69082 +scipy/optimize/_direct_py.py,sha256=UewdYnj8R9_6hkS1CeGSGEGNL5UL9KO3UX-xLOELLFw,11860 +scipy/optimize/_dual_annealing.py,sha256=5NEzcHEPi7VEeiu_PljsYP_qeOj6SZWIE84exskKmmk,30149 +scipy/optimize/_group_columns.cpython-310-darwin.so,sha256=zhr4IMGE6mk5tY5wpblVMy3iKZ1COm-rtM8Pw7-I6mY,94689 +scipy/optimize/_hessian_update_strategy.py,sha256=PJcNU7ERwtm_WTMzXZzZsBowDtWia4kHAEuvzbkD8fg,15830 +scipy/optimize/_highs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_highs/_highs_constants.cpython-310-darwin.so,sha256=05EdwqZFFwM-86-_qDZNIGpI61Kg414kPawp-dEENH0,59699 +scipy/optimize/_highs/_highs_wrapper.cpython-310-darwin.so,sha256=Hct6NCbb7FSuhOx4LhP3GvGGjUDu9QF7F7deuj6SZac,2972033 +scipy/optimize/_highs/src/cython/HConst.pxd,sha256=3a2mEDSx55TBAUYOuptSQYQzYTAHgP9c3anJGBlB7DE,5537 +scipy/optimize/_highs/src/cython/Highs.pxd,sha256=YKQXbDfI6gSg0X8j0iQrkBjX3HTb6J2ZIoFguGLkao8,2123 +scipy/optimize/_highs/src/cython/HighsIO.pxd,sha256=K7KXJoGm4s_cWtW4tQNByhujVF7SMIjDi86jWILzNoA,731 +scipy/optimize/_highs/src/cython/HighsInfo.pxd,sha256=RUePUSonorGWf2DYHjvpUc-ZhVQI6LDiro5eZftZlOg,761 +scipy/optimize/_highs/src/cython/HighsLp.pxd,sha256=_bcpjF7o-rK8gguxtXOF30XfVUrLjhLal90mAHumwAs,1132 +scipy/optimize/_highs/src/cython/HighsLpUtils.pxd,sha256=be2KJVDOjTNYH8AfXqDdp7HPB0EiUWL8I1RGVqCeKz4,315 +scipy/optimize/_highs/src/cython/HighsModelUtils.pxd,sha256=apvVhKle97Az82zThiHvCSdV9Bk2AsoVgx3rQdAsU2o,361 +scipy/optimize/_highs/src/cython/HighsOptions.pxd,sha256=xwBux1AmfTUtvLa3PJSdxCrdxeD851Cf4_ktkw7BnjA,3186 +scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd,sha256=GyX_sgvBmUhzpDJBsex6LWeKesV1L3fbGdH166j6K68,287 +scipy/optimize/_highs/src/cython/HighsStatus.pxd,sha256=s4nC9ViGKPTRD4VvqbZ5dZohVCtDqQPze_5svik6GjQ,365 +scipy/optimize/_highs/src/cython/SimplexConst.pxd,sha256=I6RjfzaBahKI9Eerg52c5tYU1gS1ZA2DWfYfWefgWVE,5044 +scipy/optimize/_highs/src/cython/highs_c_api.pxd,sha256=ut0M5I_pG2D6K3fUHzU9WD67Y1VMZuQOKhLLjv0umqo,358 +scipy/optimize/_lbfgsb.cpython-310-darwin.so,sha256=kxXARF0Fd_CC74VbMU82pysuDEk5mnbY8Xhg0V-Nx-A,126304 
+scipy/optimize/_lbfgsb_py.py,sha256=uCrUiUdc_o0eW6n2np9mjnRW5L4J30AAx-Dh7XAYeXo,18643 +scipy/optimize/_linesearch.py,sha256=-DVtW9UYL3Oumt7_DP8i8wVMmz9jcpUvxxiy-SB5wU8,26582 +scipy/optimize/_linprog.py,sha256=9RAOhcMbgZtRvrRexfrYF3uKt6K0-POxAk1rvpU4UYY,29443 +scipy/optimize/_linprog_doc.py,sha256=kErL9JPKJr2hNOFomI0LA7UhxuoB6jEEzgfiiC1UScc,61967 +scipy/optimize/_linprog_highs.py,sha256=loTGHnHF_jZmEvGxCKLh1cmes025_X-OllqEJN3Jnho,17464 +scipy/optimize/_linprog_ip.py,sha256=R761IX5jLYttiMvyc9rMw7XPIVzOGye_rzby0CV7gdo,45913 +scipy/optimize/_linprog_rs.py,sha256=JruGeJ2uPVQUxN_A5A_4WuRTQfx4Mz4xoH27HNgGcFI,23149 +scipy/optimize/_linprog_simplex.py,sha256=WIlD3TQ6iBkbIeG4cBAwJW1pknMqt_mKnjBTKoTdNsg,24727 +scipy/optimize/_linprog_util.py,sha256=4UxdfkqjnsQ4bQZa59bCaVrkIXC6vox5tj_zmbcfgg4,62528 +scipy/optimize/_lsap.cpython-310-darwin.so,sha256=85oF_Ohuy3JZ_dJ10p0pl2UqdPs1ZyBxpiNETV02WKU,53384 +scipy/optimize/_lsq/__init__.py,sha256=Yk4FSVEqe1h-qPqVX7XSkQNBYDtZO2veTmMAebCxhIQ,172 +scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc,, +scipy/optimize/_lsq/bvls.py,sha256=7u5B8LfUbv3ZRZ8DAZKuDTSNRfDEBmTsn25VZtMMsKk,5195 +scipy/optimize/_lsq/common.py,sha256=mzYK9KDZeFtIUeBHrjrp1Gbx3NeaALwQ6Wj9-33QGRg,20606 +scipy/optimize/_lsq/dogbox.py,sha256=97htRlr-Yt-u4Ob3ks7avAMdnjJsO83uHUMjMYrhyjc,11682 +scipy/optimize/_lsq/givens_elimination.cpython-310-darwin.so,sha256=2BgprQGV5Bwm1gpTP2g-q95YjXYH9Hs6nxrWeZsfCjw,166645 +scipy/optimize/_lsq/least_squares.py,sha256=pSNhQP063G8wwQUQEVqiivlzKLrIBDWEJ_5VoneKCzM,39531 +scipy/optimize/_lsq/lsq_linear.py,sha256=j0FZ0JXhN6S4KzozB4f7PBp0WxnmWV2QvoEWSPkuM2Y,14847 +scipy/optimize/_lsq/trf.py,sha256=W0gc6j9vLIqZvDMOhM5BxubrWsd5PQpdncUJB88ol44,19479 +scipy/optimize/_lsq/trf_linear.py,sha256=jIs7WviOu_8Kpb7sTln8W7YLgkcndv0eGIP15g_mC4g,7642 +scipy/optimize/_milp.py,sha256=moAQzWyIq_y_Sbobzjayc66boGvWXWGtUwpg66yuNXc,14923 +scipy/optimize/_minimize.py,sha256=o7qAlosm9jJmPEcciDN-KLfgBODUxHMjS-BRg9dD9nY,45960 +scipy/optimize/_minpack.cpython-310-darwin.so,sha256=AOHosNK5tk1NbX8kBvfvVescSQTjMCOs-MDe5cHQofU,87515 +scipy/optimize/_minpack2.cpython-310-darwin.so,sha256=2fifxNQfTNIRPQ7qZFtWCN-Pxd9tIgJ0qx0PmDsbEuc,72812 +scipy/optimize/_minpack_py.py,sha256=oVLlB8oNkSgCvm0fD7l76DaSKuymKms267qQ5wmru6M,38340 +scipy/optimize/_moduleTNC.cpython-310-darwin.so,sha256=seGBK8K_TBOM6Ys0tDKwyh0ghooJebzcOjLmWndIYIc,135933 +scipy/optimize/_nnls.py,sha256=LSyKaugiuKDUf0Lrsep8OUaPeuUPReD5y5BbUaLAx9s,2300 +scipy/optimize/_nonlin.py,sha256=eW9U5cb0gXNxSTieb5G4c5otm-K87pRH8DaIsMyYdu4,49030 +scipy/optimize/_numdiff.py,sha256=o29kYYYEMXuqfigeXNcKNohnJaIsqfk8sA3IWWgr58g,28281 +scipy/optimize/_optimize.py,sha256=FlRWwdPH4xfOBcj0QBooS-s52sNwwvlIiyAfpHztL64,139726 +scipy/optimize/_qap.py,sha256=UkIA7YMjoaw00Lj_tdZ4u9VjSPNOmMDINPMK9GTv3MM,27658 +scipy/optimize/_remove_redundancy.py,sha256=Z-bdCyBuanuMzEhKGR-6Rs03b4L9uK7dKfWIdW1LA0E,18767 +scipy/optimize/_root.py,sha256=_V6gD8vJNPKnT3qrEyC0YYSg08TSOA9qcPIvfNFh-iU,28276 +scipy/optimize/_root_scalar.py,sha256=SiFpEx-bmQN8z8ih3chfmbt-7red0-DtwKT7_VVaEq8,18465 +scipy/optimize/_shgo.py,sha256=ZK0VYBt7IOfi2fjM273HhypMHeti5KbiYvhfAzTjiRE,60887 
+scipy/optimize/_shgo_lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_shgo_lib/__pycache__/triangulation.cpython-310.pyc,, +scipy/optimize/_shgo_lib/triangulation.py,sha256=TpXFU0HDdYDxE52KakU27EkiK308kOxr7rYGabyIPuo,21439 +scipy/optimize/_slsqp.cpython-310-darwin.so,sha256=Av3ZRCQxwlFtn58DXR7J21O6pUGRfIdYA3O43REnhQY,89961 +scipy/optimize/_slsqp_py.py,sha256=VPQzFxE-L2lD-ghFTvVcm6wkPR54LiH4fb-8NUNWgvI,18767 +scipy/optimize/_spectral.py,sha256=felsd958JeNcy-mmGAw91MSt2AsuKVdMLf2h2_fmgvU,7923 +scipy/optimize/_tnc.py,sha256=Dp5GTXv81TsqnPXXGvYvSCgqLS9-52P16VJo6Dxh9nY,17304 +scipy/optimize/_trlib/__init__.py,sha256=cNGWE1VffijqhPtSaqwagtBJvjJK-XrJ6K80RURLd48,524 +scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trlib/_trlib.cpython-310-darwin.so,sha256=ck5d4NNiEEqevg9aFkW-HwcysB1jfTlQuxHFtGSjTL8,303536 +scipy/optimize/_trustregion.py,sha256=35raAmEKyYGWxw6QciVxLKnIXhMqho8a7fW65beLPKQ,10651 +scipy/optimize/_trustregion_constr/__init__.py,sha256=c8J2wYGQZr9WpLIT4zE4MUgEj4YNbHEWYYYsFmxAeXI,180 +scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/canonical_constraint.py,sha256=690VxTb7JJ9RzGwa-LN2hASKlqQPmulyEDZA7I-XyLY,12538 +scipy/optimize/_trustregion_constr/equality_constrained_sqp.py,sha256=5NiEruWnhYL2zhhgZsuLMn-yb5NOFs_bX3sm5giG7I8,8592 +scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py,sha256=iqoRHz6J31UbAbLOZ_r39sA6bzA7KXMKN_yTCfsncLU,24890 +scipy/optimize/_trustregion_constr/projections.py,sha256=2V9GysEHMzuYcE93CpnK2Q5iwQQBIc1rbtOJJBIZUZQ,13105 +scipy/optimize/_trustregion_constr/qp_subproblem.py,sha256=EtAhRcEtSnGsEeEZ2HGEzm-7r0pnXMCgl9NemKWvdzg,22592 +scipy/optimize/_trustregion_constr/report.py,sha256=8Iyb1jm3xwUbAAny3KIDfo-YDaijPEvNqU-7GmB3_mQ,1858 +scipy/optimize/_trustregion_constr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py,sha256=zVPxZDa0WkG_tw9Fm_eo_JzsQ8rQrUJyQicq4J12Nd4,9869 +scipy/optimize/_trustregion_constr/tests/test_projections.py,sha256=P4GZxs_6RJnlb6OXJX-wnvFqzFeQAgs9qHvnHxjvD4o,8820 +scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py,sha256=vrP52LzzAA3D8T5fhVzQv9Eo-G9t3F8qfrNuq7XrzQM,27719 +scipy/optimize/_trustregion_constr/tests/test_report.py,sha256=M3e3flV1TB0g2_cUViF0Fd_VWb81kuApfT0aC9bmrvU,1088 
+scipy/optimize/_trustregion_constr/tr_interior_point.py,sha256=fXuyoZ5WmIwce2EA-Gdld7S2YrM7usImXWBNk3DnURw,13802 +scipy/optimize/_trustregion_dogleg.py,sha256=HS783IZYHE-EEuF82c4rkFp9u3MNKUdCeynZ6ap8y8s,4389 +scipy/optimize/_trustregion_exact.py,sha256=VOk6Se5UrBGN3nqiRnSynQU_kftWItJLmw9uzJtw6Xw,15407 +scipy/optimize/_trustregion_krylov.py,sha256=KGdudJsoXXROXAc82aZ8ACojD3rimvyx5PYitbo4UzQ,3030 +scipy/optimize/_trustregion_ncg.py,sha256=y7b7QjFBfnB1wDtbwnvKD9DYpz7y7NqVrJ9RhNPcipw,4580 +scipy/optimize/_tstutils.py,sha256=Fk1jlD75jHH-5r9HeNCLRMm-aoYya2wkmwmdmKoAUGY,29475 +scipy/optimize/_zeros.cpython-310-darwin.so,sha256=6zkUxEGrZfbEcDkmS0e39uNtSpV1lMwOAtTRBZPhiMc,51481 +scipy/optimize/_zeros_py.py,sha256=cdMQEi1kF31zU8e5PEMfT5xuFFUVTqWQkMqtU88T_EA,51228 +scipy/optimize/cobyla.py,sha256=sJD7CvPLBZgAU1y0JsdB6BtPAJE1hBViTdAEtNyj0no,840 +scipy/optimize/cython_optimize.pxd,sha256=UQVKui1CYlTt1HS9ydLPLdgKNWH_-phR8fy4Rq2eEno,428 +scipy/optimize/cython_optimize/__init__.py,sha256=WZI65aSGqyqC3HJaxgmQuK6lljqdqTA4jw3WstIQ9HQ,4831 +scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/cython_optimize/_zeros.cpython-310-darwin.so,sha256=5fAwXtBdVWib2DKBSsTmQSfE_Z9qaGvBaYHsRrS17FA,102185 +scipy/optimize/cython_optimize/_zeros.pxd,sha256=wTtD2hT2XHUhSHem5glipOQNY67vExQxzxncdQPtbJ4,1194 +scipy/optimize/cython_optimize/c_zeros.pxd,sha256=9PVpBHg4R8ItYnwRX_lD_H7SHS_hSJzvtPY0E2edOHE,1109 +scipy/optimize/lbfgsb.py,sha256=9bkq6iN1Gx6yPu-VE0K7bIMm1vsDhoccQf9waNmc7vQ,929 +scipy/optimize/linesearch.py,sha256=oYmcsZxSYrEH5XDI_kIbeVywN-yVHZbsJuDaOmCndUQ,1007 +scipy/optimize/minpack.py,sha256=tjMKdQWY6z4mQQ5G7nwy8i4eXWJjPSqfqMvfIuQntqU,1277 +scipy/optimize/minpack2.py,sha256=oFSeWNLqI8ca-Aa0Kk5F0DMdNUjHdryvPLhtPo_k83o,769 +scipy/optimize/moduleTNC.py,sha256=E43jvlDbe0G4glHXWRC8GsrTdVLIaPxVMP90Ir6U6gU,746 +scipy/optimize/nonlin.py,sha256=9z4Q0LQ6mbuQBozfw98N9FgTvoOKeIPdDhf7nU7lOYY,1418 +scipy/optimize/optimize.py,sha256=tJkFXkADd5aHUyaIMIIMJMKHX5UzCjhXFH-SGsj8wtA,1524 +scipy/optimize/slsqp.py,sha256=b6vja9q2sl50Kmv-VxsMkj3bNcariO8IBL3Q1KRMhrc,1044 +scipy/optimize/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc,, 
+scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc,, +scipy/optimize/tests/test__basinhopping.py,sha256=iERgf2vX2bhuw-7bj-5vOxA21ZZnj6pSEL9o_Y9-WUE,16708 +scipy/optimize/tests/test__differential_evolution.py,sha256=spTUBZRSW5WSidUCNY7XA57CmNorNqqCp5_Wwh-5YpI,61413 +scipy/optimize/tests/test__dual_annealing.py,sha256=LoyGUSDfpawnDHuqH7Lhqu1lLncU2DVR3BEYm_eChj0,14315 +scipy/optimize/tests/test__linprog_clean_inputs.py,sha256=MDCIrO33d3jVd6swt3Wi156x1wxWC-cxdjueqFRMJH8,11106 +scipy/optimize/tests/test__numdiff.py,sha256=d_ktsoY1KHb_xGAZB3Dgng-68nIeUjCFxJp7GYjZsDg,31338 +scipy/optimize/tests/test__remove_redundancy.py,sha256=y_JpKtO_0N1O2-Q9FE7fbewzn_cFdzBxPrrCvxtTBSM,7533 +scipy/optimize/tests/test__root.py,sha256=2whCM-nKElXilngI5EE42GuF9-z8oEzsNqqcqMtFa74,2613 +scipy/optimize/tests/test__shgo.py,sha256=PcdLagMromdRfr-BGrJBsC5hxORYwg1DSaBvid88XkU,29034 +scipy/optimize/tests/test__spectral.py,sha256=JR6rTBUdhOYdgOmRBCA6sJ4ks0JTBYZ7aI6u6aiVUag,6519 +scipy/optimize/tests/test_cobyla.py,sha256=bw-zvlr620bR4dXDUNmcrTQwyNFtUTNuVikSArb_qhk,4179 +scipy/optimize/tests/test_constraint_conversion.py,sha256=rKTA_E0KQEjGlsrDa5RltxtNhjCBQ72d0pInF8M2EDk,11746 +scipy/optimize/tests/test_constraints.py,sha256=dgSeg8h_0Y-hmlitXmVdznF6At-V9mdwLVzoLcUQD2E,8390 +scipy/optimize/tests/test_cython_optimize.py,sha256=n-HccBWoUmmBWq_OsNrAVnt4QrdssIYm4PWG29Ocias,2638 +scipy/optimize/tests/test_differentiable_functions.py,sha256=KoU2GotR94yJgb0Pf4pDgKrwNNDP0X_NSd7HbmiHLFw,26154 +scipy/optimize/tests/test_direct.py,sha256=dUfsmTx9phFmlwv93UYgjYBoHh-iuWUrdc_KBn7jGlY,13152 +scipy/optimize/tests/test_hessian_update_strategy.py,sha256=zwfXj6jxvihE0vXU5pBeOyERCpimSZu6gXSLcm-FRfo,10112 +scipy/optimize/tests/test_lbfgsb_hessinv.py,sha256=rpJbiCUfgJrjp-xVe4JiXjVNe6-l8-s8uPqzKROgmJQ,1137 +scipy/optimize/tests/test_lbfgsb_setulb.py,sha256=w1a-RPnLnZRyaKUK4tWgFks7eO1pEgEmcWH038oepcc,3172 +scipy/optimize/tests/test_least_squares.py,sha256=wSVHy0AVN_mey0ur9xVJtKVJacODvJMn1ojILkO_nno,31773 +scipy/optimize/tests/test_linear_assignment.py,sha256=84d4YHCf9RzjYDKUujQe2GbudkP8dtlSpZtMBwCf_Oc,4085 +scipy/optimize/tests/test_linesearch.py,sha256=vtWDWIdHFzvKQyZ5vSntIPOGfM7JlcMqzXFuLbJBs3k,10791 
+scipy/optimize/tests/test_linprog.py,sha256=5jLH6V0FZxye7w2YWyPHEkNjw0eAcrPDQ02luYwtWH4,95566 +scipy/optimize/tests/test_lsq_common.py,sha256=alCLPPQB4mrxLIAo_rn7eg9xrCEH7DerNBozSimOQRA,9500 +scipy/optimize/tests/test_lsq_linear.py,sha256=jk1sneQrUahqIqTyq76ZIUtrdVfVX0y0Do09KYqWJR0,10342 +scipy/optimize/tests/test_milp.py,sha256=AwnZEpbRXLdjretaY-P0BRwK4YeUWDEZslnZOtDGXpk,13866 +scipy/optimize/tests/test_minimize_constrained.py,sha256=ph9e1hOPr3p-whDdEtIJC7fL-bXnJ_3rOuTa42-dRjU,25620 +scipy/optimize/tests/test_minpack.py,sha256=GW4DV_PKy9zbOUAQ_O_dbbarkB7SfSJHUCqsSNyt4-M,36498 +scipy/optimize/tests/test_nnls.py,sha256=VHlSu-AYWWgONgkRI7oGPXNzd5XuoLu4wRhp4dyAL9M,914 +scipy/optimize/tests/test_nonlin.py,sha256=6JUMvvGR9Pe4EX-b3nw28wF4t1FLGPLNltzOMjQI398,16949 +scipy/optimize/tests/test_optimize.py,sha256=kfsl-5X0Wc6pVtLLzaQXtnagmXZ22Rpw8IiGvxrrjm8,111180 +scipy/optimize/tests/test_quadratic_assignment.py,sha256=iZ6wJDGx4T0lDM2N5mjQhXwwTdIGGWrgCCGtLiMOC14,16309 +scipy/optimize/tests/test_regression.py,sha256=CSg8X-hq6-6jW8vki6aVfEFYRUGTWOg58silM1XNXbU,1077 +scipy/optimize/tests/test_slsqp.py,sha256=IRXdyEM9Yo1c6FYJhGDAptxExNq-BkTqJUJUVTX4J4E,23194 +scipy/optimize/tests/test_tnc.py,sha256=zMau_V2C9YuCVHdC2D3HahrPavjM2rhzUo1xyXWwbiI,13168 +scipy/optimize/tests/test_trustregion.py,sha256=HJtCc8Gdjznkzyn7Ei3XByBM_10pqv7VXgXBR9kCc8k,4701 +scipy/optimize/tests/test_trustregion_exact.py,sha256=lJ0RXXFvgqbDfWrjzm-6H1PNKHDLXPVEbDltbRHezNQ,12954 +scipy/optimize/tests/test_trustregion_krylov.py,sha256=K90fBdvxYKgsdl_lvopRf28nfcBN1CgrR-N2zjVXvhQ,6587 +scipy/optimize/tests/test_zeros.py,sha256=1w_yoE2wWU5hIuF1B03Tvfo4I1kNVC_zTdJbYyC5Adw,28439 +scipy/optimize/tnc.py,sha256=7HKQvI0end6nabnkAAtVcX8jMrvSCWi8CD-tBShfHkk,1148 +scipy/optimize/zeros.py,sha256=ybE9F-jqrlzpGrXW9DLGluOkWjPqlNJGmAyJyv0qIBY,1008 +scipy/signal/__init__.py,sha256=MhQZYpEurvBpEkA-xRAm-OiJ28zE2gX0T7aGP3KxEkw,15510 +scipy/signal/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/__pycache__/_arraytools.cpython-310.pyc,, +scipy/signal/__pycache__/_bsplines.cpython-310.pyc,, +scipy/signal/__pycache__/_czt.cpython-310.pyc,, +scipy/signal/__pycache__/_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc,, +scipy/signal/__pycache__/_ltisys.cpython-310.pyc,, +scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc,, +scipy/signal/__pycache__/_peak_finding.cpython-310.pyc,, +scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc,, +scipy/signal/__pycache__/_signaltools.cpython-310.pyc,, +scipy/signal/__pycache__/_spectral.cpython-310.pyc,, +scipy/signal/__pycache__/_spectral_py.cpython-310.pyc,, +scipy/signal/__pycache__/_upfirdn.cpython-310.pyc,, +scipy/signal/__pycache__/_waveforms.cpython-310.pyc,, +scipy/signal/__pycache__/_wavelets.cpython-310.pyc,, +scipy/signal/__pycache__/bsplines.cpython-310.pyc,, +scipy/signal/__pycache__/filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/lti_conversion.cpython-310.pyc,, +scipy/signal/__pycache__/ltisys.cpython-310.pyc,, +scipy/signal/__pycache__/signaltools.cpython-310.pyc,, +scipy/signal/__pycache__/spectral.cpython-310.pyc,, +scipy/signal/__pycache__/spline.cpython-310.pyc,, +scipy/signal/__pycache__/waveforms.cpython-310.pyc,, +scipy/signal/__pycache__/wavelets.cpython-310.pyc,, +scipy/signal/_arraytools.py,sha256=qHqX1pgjguFawwag8J81ZEQMAa2J64FBUG7ihSGGBWQ,7489 
+scipy/signal/_bsplines.py,sha256=LDC-JQ7swR5kD62AQLDaY-xf90pXJPj9XoSQqy1AIqc,19753 +scipy/signal/_czt.py,sha256=t5P1kRCM3iw3eCaL9hTgctMfQKezkqnjbghLjCkffQE,19445 +scipy/signal/_filter_design.py,sha256=qwdhF1P1758BPQmcKyB4JATJm_evMjNJrq-VYyvx4ls,185184 +scipy/signal/_fir_filter_design.py,sha256=xN4R3bVjEsqulO66Tw3Pi54fEjx8WMR_PYTYWehkB_w,49077 +scipy/signal/_lti_conversion.py,sha256=P3v4T4O01N2E5oQEQVwF9rae17YJNjWpAHxZFx3ivdA,16130 +scipy/signal/_ltisys.py,sha256=EX0uSOt2QwzqXCFJHip-mJxioTEqtIidQnL_ncuekLo,129028 +scipy/signal/_max_len_seq.py,sha256=FSOVHmSTQqBpUV3ThijyNdHYHNN7mwaTUjoDDn9m3eQ,5062 +scipy/signal/_max_len_seq_inner.cpython-310-darwin.so,sha256=wViRBV4unjvaS1THEWz41AiKOEphTBli0CyC9mcv7II,77285 +scipy/signal/_peak_finding.py,sha256=W2v4ZNjtXdRj-J6VS3KXRrr42ama-PhokFoKunAfDew,48807 +scipy/signal/_peak_finding_utils.cpython-310-darwin.so,sha256=J4sAJMfkBh-lvTSXHixfCKsm1uIsw_FVL4BI-iuiFFY,232070 +scipy/signal/_savitzky_golay.py,sha256=mnltOfknWRlNiZmNLLy-zKTCrw6nZSdJPEvpGi0kv8E,13417 +scipy/signal/_signaltools.py,sha256=oYLYGR2htJPrez1KBvB1GOyaWTMxrX8EuIGchRd_sbg,155686 +scipy/signal/_sigtools.cpython-310-darwin.so,sha256=cQwKULnu4oNOPKZMFvwcqYa_lj7XbotF3ksxti6iJb8,105084 +scipy/signal/_sosfilt.cpython-310-darwin.so,sha256=EI0hZz_LI876mNWXI20AHxuhve-VKjP0xI1THajL3FE,228379 +scipy/signal/_spectral.cpython-310-darwin.so,sha256=UkdZieNaX-EZ2tHNVFbP5OPTrgRyRy1Buwmx8Sre-8c,77532 +scipy/signal/_spectral.py,sha256=tWz_fFeYGjfkpQLNmTlKR7RVkOqUsG_jkjzzisLN_9M,1940 +scipy/signal/_spectral_py.py,sha256=tNcs0bR-6lO66mVp3KFxRRI_5QP4AnbvZ9Kk2h54VhM,76570 +scipy/signal/_spline.cpython-310-darwin.so,sha256=FfbKmlx6u89cVsO-eJ0MFVNuOGmyNLw37ZTe7qB1EHw,69562 +scipy/signal/_upfirdn.py,sha256=WsElY_Gj9RBlR8pMBqJmAU0Za-BR_Jy1SrTzKDJI5LE,7884 +scipy/signal/_upfirdn_apply.cpython-310-darwin.so,sha256=YK3TJiaxryO7rEyHoDHlswhZn1QEAXXvYZilXdBCjSw,288801 +scipy/signal/_waveforms.py,sha256=Bm5WOBhk1nXwK0A6yFVTY7tCCv6trdrUjje_xmM878Y,20523 +scipy/signal/_wavelets.py,sha256=98q-YzA3eGjhL-EF6s5EiYYd-lfB9TvyHW1t9ZjMtK0,14047 +scipy/signal/bsplines.py,sha256=RFFNZHHyfJ1WEbdgboPvqV_rif6ZpP2XcQY6yAZFMvs,1085 +scipy/signal/filter_design.py,sha256=eyU6-xkaC6gpGec_KU899jWj_J7NyHavnmH6ayjSNPE,1719 +scipy/signal/fir_filter_design.py,sha256=4YYa4xY42pwC_ROuj_GyuWRcV-qJk9-3mWGQJxNWha8,1003 +scipy/signal/lti_conversion.py,sha256=NLMKn39KEc0te0VpuQ8pi0ABrwq6T20JR9JQX_8K7NU,936 +scipy/signal/ltisys.py,sha256=6VEgR9mC1lrVrCiMUgSOnM39TxdpkKTb5Ouw9Xe0m0o,1470 +scipy/signal/signaltools.py,sha256=Ul0U0FNf6G3ifaxVu-nx66hW1JWy6bW_F2SxdNg-ME4,1401 +scipy/signal/spectral.py,sha256=AGqvyefESNmSpYkZFBKr2gu5dMvNergOOxxZjvunrL0,944 +scipy/signal/spline.py,sha256=iisoUmgbyuuEukQjBz99HM3SYao7j1ZsXXmtE-wo5cU,810 +scipy/signal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/signal/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc,, 
+scipy/signal/tests/__pycache__/test_peak_finding.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_upfirdn.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_windows.cpython-310.pyc,, +scipy/signal/tests/mpsig.py,sha256=DHB3eHB0KYA-E0SBebKG36YLk-T5egbwwryne3RwIHM,3308 +scipy/signal/tests/test_array_tools.py,sha256=J9Mr5DtqmhiTReWvsk3YclL6Cnv32bDuklBnw2zprJY,3632 +scipy/signal/tests/test_bsplines.py,sha256=dyiJg6ggE-UXMJcq5DlnQ_DZ8RZzlbhZTxOGIaHnlOg,13220 +scipy/signal/tests/test_cont2discrete.py,sha256=yhR7o0D0CMeCsIIpuaGdZh_rfOP25mHYqRRM4UZDDGk,14821 +scipy/signal/tests/test_czt.py,sha256=3HxxWwOWIrIc0GC-K5h6f0NRjkLrWRA5OhoB5y0zbw0,6993 +scipy/signal/tests/test_dltisys.py,sha256=f4wDe0rF_FATRWHkHddbPDOsFGV-Kv2Unz8QeOUUs-k,21558 +scipy/signal/tests/test_filter_design.py,sha256=0HnfV_hptqQw5VRvUxx69mD8g_q0ATd1H9KCePnGN04,185321 +scipy/signal/tests/test_fir_filter_design.py,sha256=mG_6Bo1NHN9Gj2LAzGHwWKqlcVwYSMic6FojcSLiIC0,28932 +scipy/signal/tests/test_ltisys.py,sha256=1FzNFb7i-1XFBADecWAZpZe7bj5M36UGpOf3AVkVCqU,47325 +scipy/signal/tests/test_max_len_seq.py,sha256=X9oyCvW0Ny8hOAVX22HmKaMgi2oioe1cZWO3PTgPOgw,3106 +scipy/signal/tests/test_peak_finding.py,sha256=ckPd0IqoaRcdCg8yJ2TzXdU1kWZPIEHw0cLdEC_VIlI,33667 +scipy/signal/tests/test_result_type.py,sha256=25ha15iRfFZxy3nDODyOuvaWequyBpA42YNiiU43iAc,1627 +scipy/signal/tests/test_savitzky_golay.py,sha256=hMD2YqRw3WypwzVQlHwAwa3s6yJHiujXd_Ccspk1yNs,12424 +scipy/signal/tests/test_signaltools.py,sha256=GgATAUyIae-wbOPiu9kiPv9rZk_Tlk-Z4BaVwleYBBM,137105 +scipy/signal/tests/test_spectral.py,sha256=cfpub2uP_6wpdbv3Mpu4O4co04oaQh-XXU21kICKRw4,59276 +scipy/signal/tests/test_upfirdn.py,sha256=i3EjQKnwS6FRRRPPzwl1B_zWsQ20Dfa_6WUUYH8I3xM,11240 +scipy/signal/tests/test_waveforms.py,sha256=sTT0DeOER5U9h8Xp54VGvGlbtcxhp_wjGNQXw1yOaGM,11975 +scipy/signal/tests/test_wavelets.py,sha256=PWe19weLoxo_iyrCQ-49oxBVZRXvYh055147ykS7vU8,5947 +scipy/signal/tests/test_windows.py,sha256=xgbGYyBhjSvLoTZDveBSJ8z1oxclr_jL2fpOff4hI8U,41019 +scipy/signal/waveforms.py,sha256=hHOTVCfrIOMD95n5v_jET4nJVTpB68SyMhnSraPTPhQ,890 +scipy/signal/wavelets.py,sha256=Xkoj6JZqZKRb0CSB_BDQRclk-gMEJFhwqPY8PgRRk4U,828 +scipy/signal/windows/__init__.py,sha256=BUSXzc_D5Agp59RacDdG6EE9QjkXXtlcfQrTop_IJwo,2119 +scipy/signal/windows/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/windows/__pycache__/_windows.cpython-310.pyc,, +scipy/signal/windows/__pycache__/windows.cpython-310.pyc,, +scipy/signal/windows/_windows.py,sha256=x5gqdgq7htP9vjd5F7kg-SjWnHkSnDu_ej4NAXFAQYA,83617 +scipy/signal/windows/windows.py,sha256=a08un2az27LnmEwYno88Wwo4-yQaCUK8DogsOAcZwlE,1117 +scipy/sparse/__init__.py,sha256=PEu7Ji674DDoqIM4ONi_zbRpGe0kj6-R6r3YGfPnIw0,8636 +scipy/sparse/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/__pycache__/_arrays.cpython-310.pyc,, +scipy/sparse/__pycache__/_base.cpython-310.pyc,, +scipy/sparse/__pycache__/_bsr.cpython-310.pyc,, +scipy/sparse/__pycache__/_compressed.cpython-310.pyc,, +scipy/sparse/__pycache__/_construct.cpython-310.pyc,, +scipy/sparse/__pycache__/_coo.cpython-310.pyc,, +scipy/sparse/__pycache__/_csc.cpython-310.pyc,, 
+scipy/sparse/__pycache__/_csr.cpython-310.pyc,, +scipy/sparse/__pycache__/_data.cpython-310.pyc,, +scipy/sparse/__pycache__/_dia.cpython-310.pyc,, +scipy/sparse/__pycache__/_dok.cpython-310.pyc,, +scipy/sparse/__pycache__/_extract.cpython-310.pyc,, +scipy/sparse/__pycache__/_index.cpython-310.pyc,, +scipy/sparse/__pycache__/_lil.cpython-310.pyc,, +scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc,, +scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc,, +scipy/sparse/__pycache__/_sputils.cpython-310.pyc,, +scipy/sparse/__pycache__/base.cpython-310.pyc,, +scipy/sparse/__pycache__/bsr.cpython-310.pyc,, +scipy/sparse/__pycache__/compressed.cpython-310.pyc,, +scipy/sparse/__pycache__/construct.cpython-310.pyc,, +scipy/sparse/__pycache__/coo.cpython-310.pyc,, +scipy/sparse/__pycache__/csc.cpython-310.pyc,, +scipy/sparse/__pycache__/csr.cpython-310.pyc,, +scipy/sparse/__pycache__/data.cpython-310.pyc,, +scipy/sparse/__pycache__/dia.cpython-310.pyc,, +scipy/sparse/__pycache__/dok.cpython-310.pyc,, +scipy/sparse/__pycache__/extract.cpython-310.pyc,, +scipy/sparse/__pycache__/lil.cpython-310.pyc,, +scipy/sparse/__pycache__/sparsetools.cpython-310.pyc,, +scipy/sparse/__pycache__/spfuncs.cpython-310.pyc,, +scipy/sparse/__pycache__/sputils.cpython-310.pyc,, +scipy/sparse/_arrays.py,sha256=eZvXoGNqBBA5UMzeXCqUepqi8lxAXKlrOzBEs8Y_K-U,2196 +scipy/sparse/_base.py,sha256=XQ3MgGcplmQHbfMspycFwEe6HC3fiIEHaZFBAVcLlcY,44784 +scipy/sparse/_bsr.py,sha256=_1bnxR-6fBW16De1D3zoo7fzUeUm-pyf-gHskcfPT3w,25249 +scipy/sparse/_compressed.py,sha256=pCgN3XaoPeBcv7pE68xrNPzNceaV14Lwnj-EE2g5Uuo,51120 +scipy/sparse/_construct.py,sha256=t62-ISypioEFe-5H5gnHcmOfuiPw3NhSr_mFDxHOQPU,30308 +scipy/sparse/_coo.py,sha256=F4wm0cX3dCyX40cltyfb0tFC7Yv0pcU36WkBrSmaRPs,22174 +scipy/sparse/_csc.py,sha256=LzCgbMhiVvSOkyRrpf9xjWFZwpJ8elyF4EsdzFBM42I,7925 +scipy/sparse/_csparsetools.cpython-310-darwin.so,sha256=hmUCQuo18-BtmPgd0IG8cj6xVMW2YoVfntd9eIt3SPs,530208 +scipy/sparse/_csr.py,sha256=e2HdbFHQUjhgRM5nEuyTQBoFhi58UGeJN5PuvNx628o,11683 +scipy/sparse/_data.py,sha256=gbK511_hPDo0cpSSCZ1pPMvqFQnc6QsbhTnsWq6Rz_g,12891 +scipy/sparse/_dia.py,sha256=ptNC4BNfdCnD5D7H15A0b1C6g3prCxac9mZGjVNbWhY,16047 +scipy/sparse/_dok.py,sha256=0Tx47NAnzrK1BTWZpKgGvRnaZSJaW152N1SRTM4CXXY,15903 +scipy/sparse/_extract.py,sha256=Pz2B8VAcBZod80FM7ssBhjs6QNMNlZOkjNg9pjgv36I,4648 +scipy/sparse/_index.py,sha256=t9wzlSuxpj64cpoe2VI-028DmIv0kHE2RziRy9PqhyE,12928 +scipy/sparse/_lil.py,sha256=qEqH5OaAbaUN_SrifgSXYrjrvieejhgxbwExxZ4KfoQ,18296 +scipy/sparse/_matrix_io.py,sha256=KMTc-Y1jlYK3HctQ6EwvfBBFBDqziqSIKWh1LQFGbgQ,5379 +scipy/sparse/_sparsetools.cpython-310-darwin.so,sha256=MYmmkRdHamE-7zUijOlLH-3RRPyAbObQnj28NehmDJg,3671071 +scipy/sparse/_spfuncs.py,sha256=G4a1nPdxRJIxG1f1rCiJiFYZK8IFmks_EvCXcv-Opd0,1981 +scipy/sparse/_sputils.py,sha256=mYCdgcYrRMAe_oN1-5_bKubQBVWktBEz4eI7Kc0_Kgw,13136 +scipy/sparse/base.py,sha256=qxhdhbOyBKTVIhO4s5kFAZEcm7QhKBzDHBzRYUCNO9I,1016 +scipy/sparse/bsr.py,sha256=Ci4qlM-aGqtmHJD3RZgAwO0iuA7ziSnrdyfCUhjP3RE,1058 +scipy/sparse/compressed.py,sha256=aWYREJ4E9KXgqmk5-yivQx88HMNcFNRABmtomE2vUOk,1286 +scipy/sparse/construct.py,sha256=RdZSkoRKiwLOMiJxAzNOMCTTK5ZyW_m7cFkBFOa6MLk,1158 +scipy/sparse/coo.py,sha256=lZhO4RChFbyJEt9AYjQ49JMiILNIwDIku2TuW2NtdJQ,1091 +scipy/sparse/csc.py,sha256=CltVpoEbj8MM4iyNqsyLv7y2K_PUM9gFLhMiXvH1-KU,838 +scipy/sparse/csgraph/__init__.py,sha256=UzgDvD2sNSRqbF7YNIeg9UgIuJdl3huUVfCogfiy3c0,7739 +scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc,, 
+scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc,, +scipy/sparse/csgraph/__pycache__/setup.cpython-310.pyc,, +scipy/sparse/csgraph/_flow.cpython-310-darwin.so,sha256=9gOMLmpMMFXuwQTyDyrFnRD2Ntoxh6Bc8TxGHKy6eK8,271928 +scipy/sparse/csgraph/_laplacian.py,sha256=6_f6BBt-WSj4aI5wYx4rQBKgx0qObiPyvmwZCm281_Y,17833 +scipy/sparse/csgraph/_matching.cpython-310-darwin.so,sha256=OssONQhZ2bWV4onASD0Fkwo9ubBMZVNO_gMYHTn6O94,269228 +scipy/sparse/csgraph/_min_spanning_tree.cpython-310-darwin.so,sha256=pujQMf0dfA_1M-33R0-wFCdG0roxIICANxCyhVI3J9o,189237 +scipy/sparse/csgraph/_reordering.cpython-310-darwin.so,sha256=hDRf7fD0MRcbVAolZdH_puBDBI8DKFTJIly0DBWT1e8,255582 +scipy/sparse/csgraph/_shortest_path.cpython-310-darwin.so,sha256=IfNgTBxgR39Ks0C05ats7STuL5YC-cfxXzWp6nmJBh8,423889 +scipy/sparse/csgraph/_tools.cpython-310-darwin.so,sha256=GwE0kXbd1lsDtaVJ7j6O2pt-KO5n4T5YZZc5KhCo01g,187961 +scipy/sparse/csgraph/_traversal.cpython-310-darwin.so,sha256=unNMBTkIaLE3MspXWtxCgI9N6OBu6him5pjfg9libmk,161725 +scipy/sparse/csgraph/_validation.py,sha256=QNT8OusAIavxH4C989ljtgnduh61H6RBzqk3xRIO8Ho,2327 +scipy/sparse/csgraph/setup.py,sha256=hAWJsFo4-YNix-AKUqEkUROyUfe7l4c7I9D-V5XOPQc,1099 +scipy/sparse/csgraph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc,, +scipy/sparse/csgraph/tests/test_connected_components.py,sha256=DKsvhuW2BgDvMAa-MJ4GlYvyIDIOVe58QjxUhQ5yfgQ,3199 +scipy/sparse/csgraph/tests/test_conversions.py,sha256=Y48qwFRsE4tTxFYS_Bn8ndCkAwe8n0rovbaVYppCy34,1855 +scipy/sparse/csgraph/tests/test_flow.py,sha256=AWq0Gah3IuyWrZ3Lr0NReS-NIoPWiD4_avmYCBHsDAQ,7620 +scipy/sparse/csgraph/tests/test_graph_laplacian.py,sha256=9U_z12K0Ctoh0RaPCsLccz-_LpFFhcN2HN-iB7NzlUk,10623 +scipy/sparse/csgraph/tests/test_matching.py,sha256=Fj82svwQgmWKC9Fis1Lb5F5bH30RuUG72dMj7Km5v20,8532 +scipy/sparse/csgraph/tests/test_reordering.py,sha256=by-44sshHL-yaYE23lDp1EqnG-72MRbExi_HYSMJEz8,2613 +scipy/sparse/csgraph/tests/test_shortest_path.py,sha256=RmRAk_RxMo3C9do0f01DsHSPyDUVEUZXuq4h6aALrDo,14441 +scipy/sparse/csgraph/tests/test_spanning_tree.py,sha256=uyOB_TB8E1O2JFDuB16_r3kw7fizTpKy5ce81AT9XP8,2115 +scipy/sparse/csgraph/tests/test_traversal.py,sha256=bdZc-7WE4SPhyL2SLUdsKC-B_DNmscl4Z5bO9zNrh6k,2325 +scipy/sparse/csr.py,sha256=cmPYY83pa6OwO19bquQiRi4BpVkUa-uHT5yFoCWROS4,887 +scipy/sparse/data.py,sha256=dOqfmIpX9TfoosFAbq18WfFWfz10ai1a9-yhDrgvocQ,811 +scipy/sparse/dia.py,sha256=UjBrPBeMEoIShw-qEEEK5pCLRHxJk2wu8Eztw5ohxXE,936 +scipy/sparse/dok.py,sha256=5zAGkQHTx7ZOKaPcunLitFoROb4q4gyH48bva-Bg13A,980 +scipy/sparse/extract.py,sha256=O-kmKGLk118RQfbUnvo_jFUd18bxgM32oww_5DSMTfI,781 +scipy/sparse/lil.py,sha256=mWSsX2-CEsJL1DkRIghTf9GnEE8jaJ-gXVQ8-MMNlK4,981 +scipy/sparse/linalg/__init__.py,sha256=UTXDqJ3GiSh9tU5dSO9GhUmY7xwh4R4zBzdKkTq0cQ8,3717 
+scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__init__.py,sha256=mB_3u89ASCCQA48XGBS3bwRj2agYvgTuIJ0tnLnJly0,1991 +scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/_add_newdocs.py,sha256=Sjol-MfXrIch0chc7T5TeCVxaJowfFqJnsBlGnX8DZ8,3795 +scipy/sparse/linalg/_dsolve/_superlu.cpython-310-darwin.so,sha256=pQZZ76xKU18yT8sr8PyMeqZucv9pZSiXfit1NB2tA7o,341008 +scipy/sparse/linalg/_dsolve/linsolve.py,sha256=7Tvfh8DDqPdL3EgPy-nIonoOUssal1_w5UXHZ0XPlO4,25223 +scipy/sparse/linalg/_dsolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/tests/test_linsolve.py,sha256=5h-YDYQNsmQSN8Kz2IpwqymiXsB7my04nvykRZZF5k8,27279 +scipy/sparse/linalg/_eigen/__init__.py,sha256=SwNho3iWZu_lJvcdSomA5cQdcDU8gocKbmRnm6Bf9-0,460 +scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/_svds.py,sha256=yjiKIxB6fcTAjAAqjXIUlXUg3WSaVK5xPzYxenWXFeY,20730 +scipy/sparse/linalg/_eigen/_svds_doc.py,sha256=gogZuEa_0k6W1MN6xB74Uvo0nrWO0jiV0FhJ6GE3nNU,15525 +scipy/sparse/linalg/_eigen/arpack/COPYING,sha256=CSZWb59AYXjRIU-Mx5bhZrEhPdfAXgxbRhqLisnlC74,1892 +scipy/sparse/linalg/_eigen/arpack/__init__.py,sha256=zDxf9LokyPitn3_0d-PUXoBCh6tWK0eUSvsAj6nkXI0,562 +scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-darwin.so,sha256=wQdzTtO19dxKCRV52_3xtxl13BEkA7kiVlmSDp_K_Ag,441232 +scipy/sparse/linalg/_eigen/arpack/arpack.py,sha256=qdhRRo9QUq0Zi66m1JmhP8h5Onjxh5SOpUnnwefPuRY,67330 +scipy/sparse/linalg/_eigen/arpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py,sha256=NlBJrPyeqJ0eMfGUhAYIxxNRRzg8ECxDJR87b-vdXGI,23895 +scipy/sparse/linalg/_eigen/lobpcg/__init__.py,sha256=E5JEPRoVz-TaLrj_rPm5LP3jCwei4XD-RxbcxYwf5lM,420 +scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py,sha256=gri-uEW1qMWM7I3tNoDE2DQCnsw2O6xGxYLE8_jEo5A,37341 
+scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py,sha256=in4XV7JPmi2jVeCYJKzp-nNLHsQ7MN-rmUNLDj96nPg,18876 +scipy/sparse/linalg/_eigen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/tests/test_svds.py,sha256=qsfQiXGsiC3zNSycsv6bQfT_xV8K_ABpN3gG_pYEA6Q,37285 +scipy/sparse/linalg/_expm_multiply.py,sha256=UktXq-SBk_CKuakScGsvMAAjw8x5X2Wf_Gr_jTSPIWg,26145 +scipy/sparse/linalg/_interface.py,sha256=lYf3tJ3IEiVhaEtMxz0ZbSrJKI1GCKWCcgIBS2evOO8,25289 +scipy/sparse/linalg/_isolve/__init__.py,sha256=Z_eQUYbe6RWMSNi09T9TfPEWm8RsVxcIKYAlihM-U-c,479 +scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/_gcrotmk.py,sha256=CDI2Qwt-FY4-aCMM8wFP56dN9l0GcJKPW1s519ij1k0,15957 +scipy/sparse/linalg/_isolve/_iterative.cpython-310-darwin.so,sha256=XS0WhTNrOD-_zRnusHzrPi5GenXvOQ5UHAUGkSiGnBw,259872 +scipy/sparse/linalg/_isolve/iterative.py,sha256=dPp6f9WwZJedXqyl-AAPMpPu1PcpZTCqxGjUB_-1fQ4,30410 +scipy/sparse/linalg/_isolve/lgmres.py,sha256=HXHikhzZRBGJnp775MlwLbteR05C4A2KypTB4O0-kZQ,8932 +scipy/sparse/linalg/_isolve/lsmr.py,sha256=P-RPAaeSflcy_oSrOb49U3R1RCXHNaI76uqYubEBsPo,15681 +scipy/sparse/linalg/_isolve/lsqr.py,sha256=jeNMnBtkXWzUJykxUVi2YOTEnFF5IFBDBVFv7Upce9g,21242 +scipy/sparse/linalg/_isolve/minres.py,sha256=WJSdHAyuqtekVd3HWYDF55ApUvhMN8eD1e1I2lWbLLo,11425 +scipy/sparse/linalg/_isolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py,sha256=7vJGIsxohY1QHMzNkQQ8T6PGNmleQUqUCntj-hdaNng,5408 +scipy/sparse/linalg/_isolve/tests/test_iterative.py,sha256=qCVU0sFS0b-Rz1-nxGIQMlCqdNvo_aTsLvnLHcUtldQ,27115 +scipy/sparse/linalg/_isolve/tests/test_lgmres.py,sha256=4I7jokIZTWBq_Zbd49JaK2QnfLUF2lYGCecWGoqHtLw,7060 +scipy/sparse/linalg/_isolve/tests/test_lsmr.py,sha256=XNkeOk-sK94sddu3dYVxJsXtQA0Lh80EgQYZ2pnSB48,7324 +scipy/sparse/linalg/_isolve/tests/test_lsqr.py,sha256=Ma1rAw3jw7nXFoP5-ZYLlTgjn2tG03tu6evCSsHaTUg,4810 
+scipy/sparse/linalg/_isolve/tests/test_minres.py,sha256=17a2ezMO2OXKtruk_Rp9-e7QtGaXZ5h2sUUJz67JwHg,2446 +scipy/sparse/linalg/_isolve/tests/test_utils.py,sha256=whURuUHl3jyNnsS-QgHSfDe342LBTwf3C_JbK7q_Ft4,247 +scipy/sparse/linalg/_isolve/tfqmr.py,sha256=blnP76yRYJuYTkAYntQoZUFEqydaXVxjpzXDC_PBHf0,6241 +scipy/sparse/linalg/_isolve/utils.py,sha256=I-Fjco_b83YKUtZPVdobTjPyY41-2SHruVvKZVOIXaU,3598 +scipy/sparse/linalg/_matfuncs.py,sha256=3eWF5bLBu6SUVxrPdBZOP9WR_nrPyT68fyJxtalKhpo,27228 +scipy/sparse/linalg/_norm.py,sha256=YRUM-eEnJ2D_8pvaz4LEeiV4MFaWHrO2jx546dw_KQ8,6062 +scipy/sparse/linalg/_onenormest.py,sha256=8Yxe16ox-G9UV41iMN4yfGb_1JQxENeq2YhN8Icwg5M,15486 +scipy/sparse/linalg/_propack/_cpropack.cpython-310-darwin.so,sha256=I7cgY1sSA89P-OSpb-W7QoCn90UVVdgzWIWtvS8sAvc,145888 +scipy/sparse/linalg/_propack/_dpropack.cpython-310-darwin.so,sha256=EirZXSghebkCd94bchV0PCrsNqguRcpGcZE_UO6Wc3U,128208 +scipy/sparse/linalg/_propack/_spropack.cpython-310-darwin.so,sha256=IyyzxJVMNU7TrwnL1q6csh018b906Y9R2KVSaLGhMik,128208 +scipy/sparse/linalg/_propack/_zpropack.cpython-310-darwin.so,sha256=ATgscBaa_U4sOIjrqzD8OBYjRPqenetvPsZg3Ty43Rg,145888 +scipy/sparse/linalg/_svdp.py,sha256=EDQ1BVBxRsCygrGKoLCQHnUKEqUGO9LS5vjq2TEvAls,11581 +scipy/sparse/linalg/dsolve.py,sha256=s0PkMvkadWLa87Zi84K4fO3S82RyuOqA6xy1ZPaQEcs,1203 +scipy/sparse/linalg/eigen.py,sha256=onUc3vZGnS8jtajnkSvYxbXA9kq4KG1Djv1EqOkuvmw,1151 +scipy/sparse/linalg/interface.py,sha256=JyH79SJ72jqeE05MgNhNxXI8S-fucoR6B4d1yc448DU,935 +scipy/sparse/linalg/isolve.py,sha256=QAcHU8MkRlKU0ZJbHEc_H99HCSY6XKop3bF7bydcg54,904 +scipy/sparse/linalg/matfuncs.py,sha256=WgjTo4WEUMVlMoZK6iQ3C-R1bDQ6cNZRbTR1LHV1VdY,948 +scipy/sparse/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_expm_multiply.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_interface.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_norm.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_onenormest.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_propack.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,, +scipy/sparse/linalg/tests/propack_test_data.npz,sha256=v-NNmpI1Pgj0APODcTblU6jpHUQRhpE9ObWb-KYnu6M,600350 +scipy/sparse/linalg/tests/test_expm_multiply.py,sha256=pDCHrcxhGvknCrsInqi_Y37Bl4FrDVsqWwNyRJEQzG0,13919 +scipy/sparse/linalg/tests/test_interface.py,sha256=XFkDuO2dmyHxPLT1VKmojr8dps0oe50ZaRQjp9AYOkA,16519 +scipy/sparse/linalg/tests/test_matfuncs.py,sha256=67tWWI3Y3vGIa-w4k-DNr9NHw8QozESxqeBhKPneWT0,21280 +scipy/sparse/linalg/tests/test_norm.py,sha256=8waDQ-csiw4jTIQPz8qlseqgosvjY9OHfAU7lJ8yLxo,6163 +scipy/sparse/linalg/tests/test_onenormest.py,sha256=PSXSoTvGkBI2AlJy81kQaGh0qicMg89hoak919dpQ7U,9229 +scipy/sparse/linalg/tests/test_propack.py,sha256=6CL7xhQqPdAd1DGduqx0fmeo6NNn6anT5te3rl_yMkw,6284 +scipy/sparse/linalg/tests/test_pydata_sparse.py,sha256=MNBaBg4m-fnRrv4BHIPiyxsHGdRuU6iV_UphO7a2IbM,6124 +scipy/sparse/sparsetools.py,sha256=pe8yKLT3FTs7C2d3ZB6V8sZRkMbp0KKEH_teY_mks3E,2390 +scipy/sparse/spfuncs.py,sha256=-L313g_Rr1j-Gy8dqgKetxQFDGKYJu6P53l6CrYIWqg,842 +scipy/sparse/sputils.py,sha256=rMARLPcXcI1v00eEt5bOCOI9uEh-kk7pxEpbQ_ijcNM,1187 +scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc,, 
+scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc,, +scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQcZhNAfo,846 +scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851 +scipy/sparse/tests/test_array_api.py,sha256=7d4y5GS8e1sdGfBcP2ZV9rKk6DQaiwl1nqqEy1N5eps,7480 +scipy/sparse/tests/test_base.py,sha256=xccr0CEEG-cxigJ6yuLDeZ3szYTv1eyzOx2r0qqd2zg,181600 +scipy/sparse/tests/test_construct.py,sha256=bZLWowr_WkJx8eddudGXZk6dQGYEKLW_Xfh8nnxED7o,24891 +scipy/sparse/tests/test_csc.py,sha256=5JW9c3EiAPI_lgPwKXwHtx3yYPtAn9fskbSYAcoNVEw,2902 +scipy/sparse/tests/test_csr.py,sha256=vgQ2nH5-73Qd_ujYGIaScGv9_ErAjXHxgkJHN_eN1vQ,5651 +scipy/sparse/tests/test_extract.py,sha256=NhizzkOSFkX_qSQi3coKIaDJKcDOvrJYew98VJlTyeU,1313 +scipy/sparse/tests/test_matrix_io.py,sha256=vU0N5HkcjleHZhQlDt83bk5NsUU_NePl7rUr2zmAFA0,2542 +scipy/sparse/tests/test_sparsetools.py,sha256=zA_dsqsQLDpNeRb5BZilLr2zP5H9FQ9yC4X61CAveAM,10441 +scipy/sparse/tests/test_spfuncs.py,sha256=ECs34sgYYhTBWe4hIkx357obH2lLsnJWkh7TfacjThw,3258 +scipy/sparse/tests/test_sputils.py,sha256=3mJaPEf8-EICEljDNJr8z5WqjVYpghDryO9Axmu0l8U,6835 +scipy/spatial/__init__.py,sha256=B5EiDg59_GTnvqag3eFt-CAWo-4wEGzRMcYGtBu_EF4,3636 +scipy/spatial/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc,, +scipy/spatial/__pycache__/_kdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/_plotutils.cpython-310.pyc,, +scipy/spatial/__pycache__/_procrustes.cpython-310.pyc,, +scipy/spatial/__pycache__/_spherical_voronoi.cpython-310.pyc,, +scipy/spatial/__pycache__/ckdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/distance.cpython-310.pyc,, +scipy/spatial/__pycache__/kdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/qhull.cpython-310.pyc,, +scipy/spatial/_ckdtree.cpython-310-darwin.so,sha256=7Qh85nAi0Ypnj3UDAGysGdWf7uz1VJ81mWNjb-b8wp0,674043 +scipy/spatial/_ckdtree.pyi,sha256=xLU8bXLL7QLB54h2y7H2m805k8-k6k8fO2gUoDf_YfE,6002 +scipy/spatial/_distance_pybind.cpython-310-darwin.so,sha256=vSk_johZYiyWQs2_s1tN50Ky54RbqKDICAT8DI-TF10,315523 +scipy/spatial/_distance_wrap.cpython-310-darwin.so,sha256=5GbBRR7vNWQBbaCEoXakQe3hUGM4TnnqO9VQJnrbUGg,120769 +scipy/spatial/_geometric_slerp.py,sha256=Ix-OSGGMTibHipoTLzApaVTmjtoOvA5y-A75b6uaTfs,7945 +scipy/spatial/_hausdorff.cpython-310-darwin.so,sha256=WexJ55nkYbIKITWa2knJATq0Ez-mA-bdM83EUKjVQxs,186973 +scipy/spatial/_kdtree.py,sha256=jFcpz1pozP1KGz0hRpHiHtUAkHyEEae9oOzzoYa7pzI,33444 +scipy/spatial/_plotutils.py,sha256=3IO7u0bDFNa6t1uPM5hkmj9uJFzgI76wAdUDM5ZB5AM,7168 +scipy/spatial/_procrustes.py,sha256=So7XHpYPIZ5hhrGQkmokNTgkiZHqlvmczIgWHi8eiEc,4427 +scipy/spatial/_qhull.cpython-310-darwin.so,sha256=9qiyMIcfnJoyRuSN4MECj3qcf0qZQbidf--i0AM5dVc,1015728 +scipy/spatial/_qhull.pyi,sha256=d7r0hRuSn0EE_K3B0Dd1f2EzoeYvEFZYykGhpUI09Yc,6008 +scipy/spatial/_spherical_voronoi.py,sha256=wfA6t_JOfFAUlsbhfsjB1YnGrSYF4aazfHhkjZoQg3s,13573 
+scipy/spatial/_voronoi.cpython-310-darwin.so,sha256=Ufgaej--C9Dg-AB9O0ZgKLBLAYLo2JVHJ3kWtbo7KJg,186075 +scipy/spatial/_voronoi.pyi,sha256=O0O1J1x1rfhJwJmiBYohBD55WhL2124tVdFNNAxj-0M,136 +scipy/spatial/ckdtree.py,sha256=60uL2ynovFUeQT9NmAqPmYK_rsiT1YKwaMAW-FMeBr8,862 +scipy/spatial/distance.py,sha256=Vo39903djlfTGBgqc-G4B6sRHaYO4lVl_WYzY5SbTLw,90214 +scipy/spatial/distance.pyi,sha256=SJeY7R6v41Ej6RBOGQPNnC7hi4r8RwdZuJHCIaCnEVc,5497 +scipy/spatial/kdtree.py,sha256=L4l9CG0wUNP8ARMUagNXzNC8vA8k134tXpAjHsI3hpg,870 +scipy/spatial/qhull.py,sha256=4jL-ImgXrnmMo4zxfaNgPxE6uTAbGID_CJ22NrSWbR4,889 +scipy/spatial/qhull_src/COPYING.txt,sha256=NNsMDE-TGGHXIFVcnNei4ijRKQuimvDy7oDEG7IDivs,1635 +scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test__plotutils.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test__procrustes.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_distance.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_hausdorff.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_kdtree.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_qhull.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_slerp.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_spherical_voronoi.cpython-310.pyc,, +scipy/spatial/tests/data/cdist-X1.txt,sha256=ULnYAgX2_AwOVF-VE7XfnW5S0pzhx7UAoocxSnXMaWs,5750 +scipy/spatial/tests/data/cdist-X2.txt,sha256=_IJVjXsp3pvd8NNPNTLmVbHOrzl_RiEXz7cb86NfvZ4,11500 +scipy/spatial/tests/data/degenerate_pointset.npz,sha256=BIq8Hd2SS_LU0fIWAVVS7ZQx-emVRvvzgnaO2lh4gXU,22548 +scipy/spatial/tests/data/iris.txt,sha256=k19QSfkqhMmByqNMzwWDmM6wf5dt6whdGyfAyUO3AW0,15000 +scipy/spatial/tests/data/pdist-boolean-inp.txt,sha256=5Z9SMsXrtmzeUwJlVmGkrPDC_Km7nVpZIbBl7p3Hdc0,50000 +scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt,sha256=Yerj1wqIzcdyULlha-q02WBNGyS2Q5o2wAr0XVEkzis,178801 +scipy/spatial/tests/data/pdist-chebyshev-ml.txt,sha256=NEd2b-DONqUMV9f8gJ2yod17C_5fXGHHZ38PeFsXkyw,3041 +scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt,sha256=UCWZJeMkMajbpjeG0FW60b0q-4r1geAyguNY6Chx5bM,178801 +scipy/spatial/tests/data/pdist-cityblock-ml.txt,sha256=8Iq7cF8oMJjpqd6qsDt_mKPQK0T8Ldot2P8C5rgbGIU,3041 +scipy/spatial/tests/data/pdist-correlation-ml-iris.txt,sha256=l2kEAu0Pm3OsFJsQtHf9Qdy5jnnoOu1v3MooBISnjP0,178801 +scipy/spatial/tests/data/pdist-correlation-ml.txt,sha256=S4GY3z-rf_BGuHmsnColMvR8KwYDyE9lqEbYT_a3Qag,3041 +scipy/spatial/tests/data/pdist-cosine-ml-iris.txt,sha256=hQzzoZrmw9OXAbqkxC8eTFXtJZrbFzMgcWMLbJlOv7U,178801 +scipy/spatial/tests/data/pdist-cosine-ml.txt,sha256=P92Tm6Ie8xg4jGSP7k7bmFRAP5MfxtVR_KacS73a6PI,3041 +scipy/spatial/tests/data/pdist-double-inp.txt,sha256=0Sx5yL8D8pyYDXTIBZAoTiSsRpG_eJz8uD2ttVrklhU,50000 +scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt,sha256=3-UwBM7WZa4aCgmW_ZAdRSq8KYMq2gnkIUqU73Z0OLI,178801 +scipy/spatial/tests/data/pdist-euclidean-ml.txt,sha256=rkQA2-_d7uByKmw003lFXbXNDjHrUGBplZ8nB_TU5pk,3041 +scipy/spatial/tests/data/pdist-hamming-ml.txt,sha256=IAYroplsdz6n7PZ-vIMIJ4FjG9jC1OSxc3-oVJdSFDM,3041 +scipy/spatial/tests/data/pdist-jaccard-ml.txt,sha256=Zb42SoVEnlTj_N_ndnym3_d4RNZWeHm290hTtpp_zO8,3041 +scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt,sha256=L7STTmlRX-z-YvksmiAxEe1UoTmDnQ_lnAjZH53Szp0,172738 +scipy/spatial/tests/data/pdist-jensenshannon-ml.txt,sha256=-sZUikGMWskONojs6fJIMX8VEWpviYYg4u1vipY6Bak,2818 
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt,sha256=N5L5CxRT5yf_vq6pFjorJ09Sr-RcnrAlH-_F3kEsyUU,178801 +scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt,sha256=DRgzqxRtvQVzFnpFAjNC9TDNgRtk2ZRkWPyAaeOx3q4,3041 +scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt,sha256=jz7SGKU8GuJWASH2u428QL9c-G_-8nZvOFSOUlMdCyA,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt,sha256=37H01o6GibccR_hKIwwbWxGX0Tuxnb-4Qc6rmDxwwUI,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml.txt,sha256=YmcI7LZ6i-Wg1wjAkLVX7fmxzCj621Pc5itO3PvCm_k,3041 +scipy/spatial/tests/data/pdist-spearman-ml.txt,sha256=IrtJmDQliv4lDZ_UUjkZNso3EZyu7pMACxMB-rvHUj0,3041 +scipy/spatial/tests/data/random-bool-data.txt,sha256=MHAQdE4hPVzgu-csVVbm1DNJ80dP7XthJ1kb2In8ImM,6000 +scipy/spatial/tests/data/random-double-data.txt,sha256=GA8hYrHsTBeS864GJf0X6JRTvGlbpM8P8sJairmfnBU,75000 +scipy/spatial/tests/data/random-int-data.txt,sha256=xTUbCgoT4X8nll3kXu7S9lv-eJzZtwewwm5lFepxkdQ,10266 +scipy/spatial/tests/data/random-uint-data.txt,sha256=8IPpXhwglxzinL5PcK-PEqleZRlNKdx3zCVMoDklyrY,8711 +scipy/spatial/tests/data/selfdual-4d-polytope.txt,sha256=rkVhIL1mupGuqDrw1a5QFaODzZkdoaLMbGI_DbLLTzM,480 +scipy/spatial/tests/test__plotutils.py,sha256=vmDDeXOe4N2XPMeyw8Zx1T8b8bl3Nw5ZwT9uXx21JkU,1943 +scipy/spatial/tests/test__procrustes.py,sha256=wmmnUHRdw_oID0YLi404IEWPH6vEGhvHXSeGPY_idHo,4974 +scipy/spatial/tests/test_distance.py,sha256=OV3o042VNfGcoCsnD09hbTZwL_lT5I3OoxDxZi2pypw,83940 +scipy/spatial/tests/test_hausdorff.py,sha256=n-Qm2gVF0zc11tDSCnXBznt5Mp0E1ekTtzfWXjqG54M,7114 +scipy/spatial/tests/test_kdtree.py,sha256=cxhOBCD5tBaPcnWi3dIynKihO1ooUCExAk0Lu40wXcs,47337 +scipy/spatial/tests/test_qhull.py,sha256=C_7pd_EDYfD-9kZKV-0rggx25TQ2D0vgtfD4WB_r5Os,44147 +scipy/spatial/tests/test_slerp.py,sha256=hYH-2ROq0iswTsli4c-yBLZfACvQL0QVCKrPWTeBNls,16396 +scipy/spatial/tests/test_spherical_voronoi.py,sha256=UJU6By1eOzOhxgVYTEF5RVEkryXN70PHsXMRDG9-awQ,14361 +scipy/spatial/transform/__init__.py,sha256=vkvtowJUcu-FrMMXjEiyfnG94Cqwl000z5Nwx2F8OX0,700 +scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc,, +scipy/spatial/transform/_rotation.cpython-310-darwin.so,sha256=BvclmwoAhFgfDvnQebKYe00Mg1HaiQXxyeDRT9r8NEg,608284 +scipy/spatial/transform/_rotation.pyi,sha256=aCmi2IBxlGWobw_qo0LG4wMWhYjnUhcC_8miNQIzMEk,2643 +scipy/spatial/transform/_rotation_groups.py,sha256=XS-9K6xYnnwWywMMYMVznBYc1-0DPhADHQp_FIT3_f8,4422 +scipy/spatial/transform/_rotation_spline.py,sha256=M2i8qbPQwQ49D3mNtqll31gsCMqfqBJe8vOxMPRlD5M,14083 +scipy/spatial/transform/rotation.py,sha256=1c1MrrZJrKsQXLpqM0MWV-0d8XNYW9xytpcGQAVbtfk,872 +scipy/spatial/transform/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/transform/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation_groups.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc,, +scipy/spatial/transform/tests/test_rotation.py,sha256=DhAKh5Yj4QjFmMtZ1Y16ICxoJInZexf7nOxBN-ceXrk,41370 +scipy/spatial/transform/tests/test_rotation_groups.py,sha256=V6DiLWvJsrdklhS-GlzcA9qEy0cTQpwaNR-7vkhBt1M,5560 +scipy/spatial/transform/tests/test_rotation_spline.py,sha256=DRNIQM5Da8xFtVnRQcI5VRJIo7DgtCeiHYn52zD0qMk,5035 
+scipy/special.pxd,sha256=h8GS4dlnM_hFchSEzjL74WPstvZWYXNMJRNAJMyFzM8,37 +scipy/special/__init__.py,sha256=KEN5FQt2nN_Ka3-RkXnGgC35W-JTvU2gpNhK-3u2W_8,29005 +scipy/special/__pycache__/__init__.cpython-310.pyc,, +scipy/special/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/special/__pycache__/_basic.cpython-310.pyc,, +scipy/special/__pycache__/_ellip_harm.cpython-310.pyc,, +scipy/special/__pycache__/_lambertw.cpython-310.pyc,, +scipy/special/__pycache__/_logsumexp.cpython-310.pyc,, +scipy/special/__pycache__/_mptestutils.cpython-310.pyc,, +scipy/special/__pycache__/_orthogonal.cpython-310.pyc,, +scipy/special/__pycache__/_sf_error.cpython-310.pyc,, +scipy/special/__pycache__/_spfun_stats.cpython-310.pyc,, +scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc,, +scipy/special/__pycache__/_testutils.cpython-310.pyc,, +scipy/special/__pycache__/add_newdocs.cpython-310.pyc,, +scipy/special/__pycache__/basic.cpython-310.pyc,, +scipy/special/__pycache__/orthogonal.cpython-310.pyc,, +scipy/special/__pycache__/sf_error.cpython-310.pyc,, +scipy/special/__pycache__/specfun.cpython-310.pyc,, +scipy/special/__pycache__/spfun_stats.cpython-310.pyc,, +scipy/special/_add_newdocs.py,sha256=_sXy4AxEFRgLVduyyqQszjyg5EotjcHRN18eyNTJkR0,367144 +scipy/special/_basic.py,sha256=8PC7IIwTNWHJx0LxLKb-Wsev3eTcoH22zM1STgrmN3I,90027 +scipy/special/_comb.cpython-310-darwin.so,sha256=xxUk8eFBh1XARzSI0J4oydvUghYX_HAC669lrsIVBww,76056 +scipy/special/_ellip_harm.py,sha256=VLIdzP4XHbSHGbfbtSXdLenSZnh3c6MsYUFmED5kqhM,5272 +scipy/special/_ellip_harm_2.cpython-310-darwin.so,sha256=rkA2PfQ187sYMNZcJamYJ76RRQAcUeM41KGj71wPfTI,120224 +scipy/special/_lambertw.py,sha256=SHKKdhTcrB5dOUke16wftnSuwLpNJn1nlq6vAUohIYw,2994 +scipy/special/_logsumexp.py,sha256=YBUutkjQ35HNbJDPNvNLyhlQL2A3HqL7BJviY3DwjAY,8523 +scipy/special/_mptestutils.py,sha256=pgvlSc2cW_ZqOWb1vtR9-5NcKeTfm93XM1SYred_12I,14547 +scipy/special/_orthogonal.py,sha256=E4Cz3Ox0y0MCRP9-hxrRdJyeYcLIlIqTLpD2L0WI514,73921 +scipy/special/_orthogonal.pyi,sha256=Z92f9ZKkfz14rL1KXJdtuzuStoYK2lgMvBdi_jgICKU,8336 +scipy/special/_precompute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/utils.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc,, +scipy/special/_precompute/cosine_cdf.py,sha256=OizpVXf6odTKmDw6vWuI87rNn88JcWr_GX8Rawh6ezk,355 +scipy/special/_precompute/expn_asy.py,sha256=o22tuPbjkwSLoJ2IJZ2I3xiV5TuXQS-eScXVaUcENbU,1333 +scipy/special/_precompute/gammainc_asy.py,sha256=P5OFRcPkkpjGQeYCaMZ8SFSUmZG_CjrEHv8OLwgcGFc,2502 +scipy/special/_precompute/gammainc_data.py,sha256=P9INHXlJgOB2cELqfoIiDggw_wf0PlXD0WC5sTGPX0c,4093 +scipy/special/_precompute/lambertw.py,sha256=YcjE3Wi1r67oTyo0hIR4TcodY6TpwTCFFL_zpqtVL6Q,1977 
+scipy/special/_precompute/loggamma.py,sha256=iq7ZBrUmk8pXYZwO_wINI4u8ENsLbL9VUShGjGO0Pt0,1094 +scipy/special/_precompute/struve_convergence.py,sha256=rYyGbATscSQvamp2TjR4UscGwJnkTXpNCDdfNRPjiM0,3432 +scipy/special/_precompute/utils.py,sha256=JXJuI07Jlm4bDHJFVtj0jHq05p-V1ofeXZB16Y05kzI,887 +scipy/special/_precompute/wright_bessel.py,sha256=2DwcOwBAs8DgdHb1I-U6RxHNYNoVG1TqoyyNl70w3NU,12882 +scipy/special/_precompute/wright_bessel_data.py,sha256=F8N4cdbzh3_2sN5rX1kyRrMjjVn4FLoKnzQiLOILI6k,5655 +scipy/special/_precompute/wrightomega.py,sha256=YpmLwtGJ4qazMDY0RXjhnQiuRAISI-Pr9MwKc7pZlhc,955 +scipy/special/_precompute/zetac.py,sha256=LmhJP7JFg7XktHvfm-DgzuiWZFtVdpvYzzLOB1ePG1Q,591 +scipy/special/_sf_error.py,sha256=q_Rbfkws1ttgTQKYLt6zFTdY6DFX2HajJe_lXiNWC0c,375 +scipy/special/_specfun.cpython-310-darwin.so,sha256=L0peldyc-eaZVQ4UQpbZoMLUgcTL8_VlyGyg4YbtLVw,333984 +scipy/special/_spfun_stats.py,sha256=Xnh6seX993udMM_6ftVaUHHwKpRuD9IopER6lPixxS0,3806 +scipy/special/_spherical_bessel.py,sha256=2gQUI5_JeJ-OZ5XCrYlYgOHk8s0E6O_Qx62PusdUWAA,10217 +scipy/special/_test_internal.cpython-310-darwin.so,sha256=BSxBY03JAQmQ-4qXAkTDu0YkNfDFSDNfio6t69vnZAo,206721 +scipy/special/_test_internal.pyi,sha256=nwSk_u-Jhkkkhtz0ePxpD4e0PaC26pQTHljEBRgZiBQ,363 +scipy/special/_testutils.py,sha256=2sGwBxdXpnwijIZrOtPM-jde8DXJ2z6Dc0XoVz-MPno,11974 +scipy/special/_ufuncs.cpython-310-darwin.so,sha256=ifmmVApoDn8ibOK3bzaM7qXLBdOHs7XhvJKfBnlpkG0,1346112 +scipy/special/_ufuncs.pyi,sha256=kfqgOV4gytlYdvYkcYUT2u8Ysd4Z1kYsf1vMapYiE8A,8809 +scipy/special/_ufuncs.pyx,sha256=3oHEVhdFctsJ8GBBkf4s_RAHrkUvPak7t3Tjoit4GcM,868083 +scipy/special/_ufuncs_cxx.cpython-310-darwin.so,sha256=EmLlNl1WgqS7AmYBHcZ7BmRkU_tRO1U6DPtCh9gPN08,370734 +scipy/special/_ufuncs_cxx.pxd,sha256=6_gtnxTzfG9FT9n1mtjp7_QPcBu9JdAr7YK37pQh5Fo,1351 +scipy/special/_ufuncs_cxx.pyx,sha256=WZV7nN09oJjJwr1tQxId6lPz7xOU6gDvCECLpDClv4o,11474 +scipy/special/_ufuncs_cxx_defs.h,sha256=ILA7eh7wHjkImCubKcRkgsGCBAfxq8qVOpelwXSyvzI,2005 +scipy/special/_ufuncs_defs.h,sha256=TPRFhAmCC1d4yHGSOpiz9XIayKPtq_g_a-2geb_1Wu0,11058 +scipy/special/add_newdocs.py,sha256=lapv7DVKDeAr2vYaZr_6lMUK9hAP6IXy-wvzx8Qifi8,644 +scipy/special/basic.py,sha256=KUTMYKk038KKpJ8fsoQoZSEJV6djRQL_Y_RgdxbXe4k,1896 +scipy/special/cython_special.cpython-310-darwin.so,sha256=HR1u1yqSYuiVFlsuAx9-od9L9P4rv_czTJkSFrCdPUM,2051200 +scipy/special/cython_special.pxd,sha256=ivthlz5-cp0SCxYhC1cM6voxvKNOYLsIETolipP8muQ,14020 +scipy/special/cython_special.pyi,sha256=BQVUCzV8lCylnmLCtnN0Yz_ttlqyzcLc-BZx2KPXPzM,58 +scipy/special/cython_special.pyx,sha256=ceO_rFKhvaRO1dIhfM7XZ81OerEaJ45upvTA5DLFAdo,157472 +scipy/special/orthogonal.py,sha256=nL0enQ_z9S6eHvkjlszRdlV1GFU8q75LEaQWnhGERKs,2053 +scipy/special/sf_error.py,sha256=He7080Os7bMgBh9v42PDsv0pLDJ8u94GWjzU-5QemTc,792 +scipy/special/specfun.py,sha256=107XC40GRWPXmx3X2Hge0K5AtNMJPIdCOCJQo2c7f9I,1059 +scipy/special/spfun_stats.py,sha256=-oa8b53MxKJrtmgb5jbwLeEyZDp9l9yvsCuv6337p7U,770 +scipy/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc,, 
+scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_data.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_dd.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_logit.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_round.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sici.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spence.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_trig.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc,, +scipy/special/tests/data/boost.npz,sha256=DEqbmLTura2MI3Eqrp4w43cS_EaeFSStRcWUveTRe7c,1270643 +scipy/special/tests/data/gsl.npz,sha256=ec84WYD-4TUetsmSSLVxLW918p8zeGJ-2rwUsiRV85s,51433 +scipy/special/tests/data/local.npz,sha256=GY1SrPnE6R-j7t7_akoe8nIlX13YXUWmAFlp_6-W3nA,203438 +scipy/special/tests/test_basic.py,sha256=w_zC-nn2VTjPRh79ukTlteBiV2s6hMe2mQ99DPUqk20,144186 +scipy/special/tests/test_bdtr.py,sha256=QwGyt0tnutuou25mS0u2LjRgDTYI6ohM2cbZ-He6Os4,3231 +scipy/special/tests/test_boxcox.py,sha256=gUrGF7Ql1adxiPl_YxpsGunDfg-B_WpqI9Zghzool7o,2672 +scipy/special/tests/test_cdflib.py,sha256=a1t0yM7RowZPl8RhjXhMnIzh7ZvBxItK2aMu0ud41Vw,13165 +scipy/special/tests/test_cdft_asymptotic.py,sha256=UMwy8bSxUzzcj9MkG4FHzojJRFeshe05ZqFk_32iHKA,1429 
+scipy/special/tests/test_cosine_distr.py,sha256=0NpWGY9XJ2MZw5gkDMP99IxXmhgPt3RysG_hZ5HQpO4,2691 +scipy/special/tests/test_cython_special.py,sha256=du2yZxAOZPE5mF3ANUbCVW4kTzKIY9o0Gy9sOXNcwVg,18694 +scipy/special/tests/test_data.py,sha256=bSnsxztKyPlUZWbA6mERLlOTMCTjJpLBGyBBJA4KLm0,28534 +scipy/special/tests/test_dd.py,sha256=GROHQEkzIAW6KXkj8J3nPcRDAONcf1nCoArcfx30_5s,1974 +scipy/special/tests/test_digamma.py,sha256=NlaFqc08di2L5FXArct9r0GaP_ciAxTIx-bfk-kdaks,1394 +scipy/special/tests/test_ellip_harm.py,sha256=51KiCpQjqmf2uLZEsty-Vmr0FhoABtvMUz4218WR_S0,9640 +scipy/special/tests/test_erfinv.py,sha256=fzdEHd6MxfSyzQDO93qndXukG2jWj-XNY2X4BJRIdBI,3059 +scipy/special/tests/test_exponential_integrals.py,sha256=6_iQmb3Y6G96dpgkB6z_saitaMPdJH1gLYjYwDngITQ,1868 +scipy/special/tests/test_faddeeva.py,sha256=YLY3Ylp4u_8zxTGxOb5kxNfXXEW0ld_GP2ceOR2ev_Y,2568 +scipy/special/tests/test_gamma.py,sha256=hb-ZlA2ZNz6gUGvVtMBgXFl_w30HPmthuUEAmNcz0sw,258 +scipy/special/tests/test_gammainc.py,sha256=Avv52EDQ7M8kUpiVU1BVsW_Gj5HDCzAOojLtoFojKbw,3815 +scipy/special/tests/test_hyp2f1.py,sha256=siIotfifZrhk-922XfChi9rPq_--2tsM_JZNyF2NeBk,78503 +scipy/special/tests/test_hypergeometric.py,sha256=jralqwVanO0mR0t8CI5zF0L8PqCnc_6oAtreg845Akc,5598 +scipy/special/tests/test_kolmogorov.py,sha256=nRZHg4P3GEcax-vgB-LlG9KrdlDRd36ZvGTF3oCv9po,18407 +scipy/special/tests/test_lambertw.py,sha256=A6SAKE2KBWY2YlqdOSnVRzxw1RKJ7f2ZYcBTg1-q7Bk,4556 +scipy/special/tests/test_log_softmax.py,sha256=JdiC5C1Fm16rNdQHVWRu-FGMVOv24DPWRnguDDd1zEY,3415 +scipy/special/tests/test_loggamma.py,sha256=x6kuJf-bEnn5ECdkDSgvk3An_A-9UxVsZpqa49IwAq8,1992 +scipy/special/tests/test_logit.py,sha256=PvIgcK33vQjcvHE3_3fVarKTjZ0t35-ksZnhvoqKQrA,5540 +scipy/special/tests/test_logsumexp.py,sha256=vcHdTDJQKvUfkO0I8VDRUQF4MhnF0dQi2pjDzRsggB0,6180 +scipy/special/tests/test_mpmath.py,sha256=yXaU8yhq3xT9gdiIO2uJWT5DNgbbme80-0KYT5Chbk4,75189 +scipy/special/tests/test_nan_inputs.py,sha256=1F3CRXp_DGmfUcJMr_61eW2yItjQQD9xzaIiDgbsXvI,1845 +scipy/special/tests/test_ndtr.py,sha256=-UMxTIi4CaaLoJ5-SGW9THChPIM3e1_fTY0L877ioNA,2680 +scipy/special/tests/test_ndtri_exp.py,sha256=13eabgdbfcL37RReiUH7g9amT9XMsTLOfwxFJXR_2Ww,3708 +scipy/special/tests/test_orthogonal.py,sha256=N-DdAMqe1c4o7-jHnn3aSu_gmI7ojfV-HogDLpJ251c,31295 +scipy/special/tests/test_orthogonal_eval.py,sha256=xj3-5r1s70kev3d-qiTk8m7tZ09b6JjJY9OTVxVVNx0,9319 +scipy/special/tests/test_owens_t.py,sha256=zRbiKje7KrYJ25f1ZuIBfiFSyNtK_bnkIW7dRETIqME,1792 +scipy/special/tests/test_pcf.py,sha256=RNjEWZGFS99DOGZkkPJ8HNqLULko8UkX0nEWFYX26NE,664 +scipy/special/tests/test_pdtr.py,sha256=VmupC2ezUR3p5tgZx0rqXEHAtzsikBW2YgaIxuGwO5A,1284 +scipy/special/tests/test_powm1.py,sha256=9hZeiQVKqV63J5oguYXv_vqolpnJX2XRO1JN0ouLWAM,2276 +scipy/special/tests/test_precompute_expn_asy.py,sha256=bCQikPkWbxVUeimvo79ToVPgwaudzxGC7Av-hPBgIU4,583 +scipy/special/tests/test_precompute_gammainc.py,sha256=H0UtrkFRZhqoIbyUZTcztwg81Q4j8Xkc6nOQCyuO8-8,4527 +scipy/special/tests/test_precompute_utils.py,sha256=MOvdbLbzjN5Z1JQQgtIyjwjuIMPX4s2bTc_kxaX67wc,1165 +scipy/special/tests/test_round.py,sha256=oZdjvm0Fxhv6o09IFOi8UUuLb3msbq00UdD8P_2Jwaw,421 +scipy/special/tests/test_sf_error.py,sha256=leNORk4GIa8gYQH69OK0TK_SPXOMGPvtNC-x5aJ_nT8,3521 +scipy/special/tests/test_sici.py,sha256=w4anBf8fiq2fmkwMSz3MX0uy35NLXVqfuW3Fwt2Nqek,1227 +scipy/special/tests/test_spence.py,sha256=fChPw7xncNCTPMUGb0C8BC-lDKHWoEXSz8Rb4Wv8vNo,1099 +scipy/special/tests/test_spfun_stats.py,sha256=A5SOVsQOyC12-BeIIHsi--hrog88mFmH190MOKdP4qU,1998 
+scipy/special/tests/test_sph_harm.py,sha256=PQehyslic3K2uwj8lV2g0Gh6JNVjpSYLCuVnihUlByQ,1116 +scipy/special/tests/test_spherical_bessel.py,sha256=5f2tsw0DUbs_Q4A4-BNrrDA7NzFuKEGnSJ3nwnDNWqI,14284 +scipy/special/tests/test_trig.py,sha256=WiZ-ryT7F8-kaACJKcXaA7PXSbuU4gIz_MK9Pv1gsTc,2097 +scipy/special/tests/test_wright_bessel.py,sha256=v1yLL6Ki01VuKPj5nfL-9_FaACvwdIlDsarKsm-z9EQ,4155 +scipy/special/tests/test_wrightomega.py,sha256=8afmPCC6IYN-SqbeBgqTyRgz0JfQdCs2vtxFcR_Bj9I,3550 +scipy/special/tests/test_zeta.py,sha256=IoBUdssBRj7noPjW-xs9xGFFihZ7wvQpPJidgMOFCOs,1367 +scipy/stats/__init__.py,sha256=Pyu_QRExXWdwzWfTbGTiS-UKWveKjZTM4ZlDk6PG7Gs,13702 +scipy/stats/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc,, +scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc,, +scipy/stats/__pycache__/_binomtest.cpython-310.pyc,, +scipy/stats/__pycache__/_common.cpython-310.pyc,, +scipy/stats/__pycache__/_constants.cpython-310.pyc,, +scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc,, +scipy/stats/__pycache__/_covariance.cpython-310.pyc,, +scipy/stats/__pycache__/_crosstab.cpython-310.pyc,, +scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc,, +scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc,, +scipy/stats/__pycache__/_distr_params.cpython-310.pyc,, +scipy/stats/__pycache__/_entropy.cpython-310.pyc,, +scipy/stats/__pycache__/_fit.cpython-310.pyc,, +scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc,, +scipy/stats/__pycache__/_hypotests.cpython-310.pyc,, +scipy/stats/__pycache__/_kde.cpython-310.pyc,, +scipy/stats/__pycache__/_ksstats.cpython-310.pyc,, +scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc,, +scipy/stats/__pycache__/_morestats.cpython-310.pyc,, +scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc,, +scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc,, +scipy/stats/__pycache__/_multivariate.cpython-310.pyc,, +scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc,, +scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc,, +scipy/stats/__pycache__/_qmc.cpython-310.pyc,, +scipy/stats/__pycache__/_relative_risk.cpython-310.pyc,, +scipy/stats/__pycache__/_resampling.cpython-310.pyc,, +scipy/stats/__pycache__/_result_classes.cpython-310.pyc,, +scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc,, +scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc,, +scipy/stats/__pycache__/_stats_py.cpython-310.pyc,, +scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc,, +scipy/stats/__pycache__/_variation.cpython-310.pyc,, +scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc,, +scipy/stats/__pycache__/biasedurn.cpython-310.pyc,, +scipy/stats/__pycache__/contingency.cpython-310.pyc,, +scipy/stats/__pycache__/distributions.cpython-310.pyc,, +scipy/stats/__pycache__/kde.cpython-310.pyc,, +scipy/stats/__pycache__/morestats.cpython-310.pyc,, +scipy/stats/__pycache__/mstats.cpython-310.pyc,, +scipy/stats/__pycache__/mstats_basic.cpython-310.pyc,, +scipy/stats/__pycache__/mstats_extras.cpython-310.pyc,, +scipy/stats/__pycache__/mvn.cpython-310.pyc,, +scipy/stats/__pycache__/qmc.cpython-310.pyc,, +scipy/stats/__pycache__/sampling.cpython-310.pyc,, +scipy/stats/__pycache__/statlib.cpython-310.pyc,, +scipy/stats/__pycache__/stats.cpython-310.pyc,, +scipy/stats/_axis_nan_policy.py,sha256=75c6IO_lVMWARPqyumohgNDKuysE3rtF3UxGDc6jlmE,26865 +scipy/stats/_biasedurn.cpython-310-darwin.so,sha256=fhfAGDPc9ScXTu7KeTU0KTP-hcb4ZOJlL59nQsGNfMU,251357 
+scipy/stats/_biasedurn.pxd,sha256=bQC6xG4RH1E5h2jCKXRMADfgGctiO5TgNlJegKrR7DY,1046 +scipy/stats/_binned_statistic.py,sha256=RXieYDA8LuPe_qycLZAUhFbfV4KVlqqTYbX0wQzP1yY,32710 +scipy/stats/_binomtest.py,sha256=cbmBHbwpXRap9zZElMvdYhy7ccTvH1kgi_7_iNctD1A,13043 +scipy/stats/_boost/__init__.py,sha256=e1_a5N-BBpz7qb0VeLQ7FOEURW9OfQ3tV42_fMDVkOU,1759 +scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_boost/beta_ufunc.cpython-310-darwin.so,sha256=WxoZFBGUj2TvLC6opL8eD4d8xB6bpChHOjeQZx2qv-g,314653 +scipy/stats/_boost/binom_ufunc.cpython-310-darwin.so,sha256=jkldO_L3Ihf5HJB5MGcO4RiY4KTpuYtR5exS25BFqI4,273710 +scipy/stats/_boost/hypergeom_ufunc.cpython-310-darwin.so,sha256=PVx8B10prncz_uQt7NcFT9iHJhq59WUB8b7jnI7vFVs,201314 +scipy/stats/_boost/invgauss_ufunc.cpython-310-darwin.so,sha256=aNYy363TzaRtjzXidcbdZi11XZWEwi_5nOCqtU03SsE,257313 +scipy/stats/_boost/nbinom_ufunc.cpython-310-darwin.so,sha256=eRY2zh_3rlm5JNNVgI5svjchTGohPHz91SppG8bEhyU,294095 +scipy/stats/_boost/ncf_ufunc.cpython-310-darwin.so,sha256=mU7v-ZdOGAzOlk1KdtBewV_uQPbP1E6UeLycvng20wk,271612 +scipy/stats/_boost/nct_ufunc.cpython-310-darwin.so,sha256=iw9Kt_7mJOcbD8Qj2n6sOP44Of15IyiTbiFNSEwLD_o,345436 +scipy/stats/_boost/ncx2_ufunc.cpython-310-darwin.so,sha256=aNhOLY4cscIFHiWW02UtCaYfKIzYMRcwgupArVqt6bc,281581 +scipy/stats/_boost/skewnorm_ufunc.cpython-310-darwin.so,sha256=F3AV8mtU97jJ5KO3rI5_6tyqy0MOr1qcuoAUg3aZw0Q,186625 +scipy/stats/_common.py,sha256=orK_lFT7s7t4Vpvqcv5NkxduZuXTdZocoPCej0iGHQ0,173 +scipy/stats/_constants.py,sha256=IsVndgfRnFz5ULWeygOfPRypaWYkUn2lAuJDypEVMNA,793 +scipy/stats/_continuous_distns.py,sha256=Dh_iHMaiXaGwlr1wqD_u3k0TVsCsRXrKhRJRrCk2dWU,322887 +scipy/stats/_covariance.py,sha256=8lQg_auysz81C9728O2ZiPsbiDhm7b0pRWqIEFr9HZA,22475 +scipy/stats/_crosstab.py,sha256=zdAePa0po_x1FwZ_j8F6ok2XNRCDBO2x17XrBr_axbY,7354 +scipy/stats/_discrete_distns.py,sha256=AYDMqBDk68gVftqNRUZrLhpXI-KGMUoY7rQwto6tz2U,54699 +scipy/stats/_distn_infrastructure.py,sha256=_pL08snbzhpBNZUiYASyPgx3RRkQRK3SQdl2xBFUcN0,149072 +scipy/stats/_distr_params.py,sha256=rblTZ_5Y1LJgitA6J3LosKwWBwyauMzAKOb8T1IL_ms,8495 +scipy/stats/_entropy.py,sha256=rvKQ2GqTfOMqHhvt9Z2FJEyNuny-xpTkslKzBQbwV0w,14362 +scipy/stats/_fit.py,sha256=GWIhw-0YtCPXXM4Ad_pNxO2BHEtCjeAmjoxk8kSJ2Ys,56992 +scipy/stats/_generate_pyx.py,sha256=U02HJYA9AUARKiSwa8S2aUpvZUOuGTA4dQvLkNpUSxk,2684 +scipy/stats/_hypotests.py,sha256=Vn0S65eR6t63PdAOktVhSuPZ4HuPpzq_XoaYfdXTtVw,78541 +scipy/stats/_kde.py,sha256=g6aGUrnpzb6sP5tfX-Al_FMRM4tDTwVg75uMH0FWGjc,24984 +scipy/stats/_ksstats.py,sha256=679ltXKXKtbr1gJagXwlxa_cyaZdMMZZKqlrrJErOuw,20086 +scipy/stats/_levy_stable/__init__.py,sha256=rDM-vHqixRhEu8eDhr8FJ0ZdUu4Vg7dlBjz5lNexQNw,43857 +scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_levy_stable/levyst.cpython-310-darwin.so,sha256=ibzMZkH1APalK9_ZY2LaiyZzjR49ehiPpUchmTTc5SM,60153 +scipy/stats/_mannwhitneyu.py,sha256=EuSeGllPIXOLGSZXn7Bb3TNSVQmLznfQT6zccgupOvg,18955 +scipy/stats/_morestats.py,sha256=gM1Qo4ni_CFHRk_SA4r-gGbP9Czmv49QpVUlb2xaPx0,149863 +scipy/stats/_mstats_basic.py,sha256=0qHcJERJmjQlzbTpXq2RZS8NxvS5T8FYfmXIOJ4vpfs,117907 +scipy/stats/_mstats_extras.py,sha256=Eyg8-p6J5G20nZS0laPOFrLpfqcyqdK4t9Sn6z1laLs,15610 +scipy/stats/_multivariate.py,sha256=AXRSODqmzDf_8CtRqj7SrqxrdATBjqDQ9kBV6bAm7zI,189331 +scipy/stats/_mvn.cpython-310-darwin.so,sha256=_Ab1UqG-4cXrbxUHUu4nL2rPCQvEPNGDFUMTHcAPwtg,90711 +scipy/stats/_odds_ratio.py,sha256=7K-BHxttu8uS5IbvS2yWw-_LQlkedNKtDkjZ14h2Pck,16895 
+scipy/stats/_page_trend_test.py,sha256=OUkFeY8ck7DqDTUfXsrDcy5RhNpCTRXwcvGVY_CDmVY,19042 +scipy/stats/_qmc.py,sha256=wDnTo7hsUiO9vREdDCSYTvn5bYarGyJV-VL4lVmRLHU,91136 +scipy/stats/_qmc_cy.cpython-310-darwin.so,sha256=36UKDdZCqb2rbXbtcMAVxgpQtplAa5P1tI9__MGCC1E,220010 +scipy/stats/_qmc_cy.pyi,sha256=xOpTSlaG_1YDZhkJjQQtukbcgOTAR9FpcRMkU5g9mXc,1134 +scipy/stats/_rcont/__init__.py,sha256=y84KLdKD_7JU_mfLrlTDtpdQG5LgqRFb_bBUbQVxZOY,108 +scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_rcont/rcont.cpython-310-darwin.so,sha256=8QVCEgpih-Xm3kPggRrDuCsdDfpC6n04EK5BRB42db8,272840 +scipy/stats/_relative_risk.py,sha256=Vj_pgUV9UkQ6li_4uPl7EXBk9Ou7r4kDUzc9G8rAA0Q,9572 +scipy/stats/_resampling.py,sha256=_h_ivvbTIVw33mPREo3gdlJ0tPzlRJ6N_3C594X_YwI,69836 +scipy/stats/_result_classes.py,sha256=z04e8P0QG0Rm7CsZjOo3mWtIp_A06adcknXZSoZQUyI,843 +scipy/stats/_rvs_sampling.py,sha256=EhgTP7HlSKWr7Jg7RwsT9k4ICzP2btnxSOYxkOvAaF0,7228 +scipy/stats/_sobol.cpython-310-darwin.so,sha256=FOVUvcAMabjfktvojdgiDuvxeoGRTKJovsnsAhp-JzU,292345 +scipy/stats/_sobol.pyi,sha256=TAywylI75AF9th9QZY8TYfHvIQ1cyM5QZi7eBOAkrbg,971 +scipy/stats/_sobol_direction_numbers.npz,sha256=SFmTEUfULORluGBcsnf5V9mLg50DGU_fBleTV5BtGTs,589334 +scipy/stats/_statlib.cpython-310-darwin.so,sha256=td7CrOEVe0yUnDE2x-0elQT-kNlx2_lM9qlHJIgSdpA,73547 +scipy/stats/_stats.cpython-310-darwin.so,sha256=rpxTrZnUYjDLXeJ38-Of_e2S5kuTDJTYC41goahLLcI,582825 +scipy/stats/_stats.pxd,sha256=7WeZIqov-BqAINcZdV2YajNlXERyeafQRPqNjMC_fhA,663 +scipy/stats/_stats_mstats_common.py,sha256=ESU0KLV8iXwxxrmHV-nwtfmBr5B1_EG1daw4N8XPhCM,18652 +scipy/stats/_stats_py.py,sha256=M-RUYxGOtIafRCe4NuFXN1sNrigmp7PSlX0qlAen5Bk,355576 +scipy/stats/_stats_pythran.cpython-310-darwin.so,sha256=wBQbAgmA8vO30tA0u5f-J6KsaQjXZJ32yAGxfocqG8Q,132865 +scipy/stats/_tukeylambda_stats.py,sha256=ZL8ifXFFS5O-5IAGVmx8nsErRwLWJK68585cvLC5khg,6869 +scipy/stats/_unuran/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/stats/_unuran/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_unuran/unuran.pxd,sha256=IIB-izI1fWQKXZ6xt7_y827d-Z6RzKiZkBJ-sJHEsEE,43357 +scipy/stats/_unuran/unuran_wrapper.cpython-310-darwin.so,sha256=AO1Rh88tfJjlgKyLWVxPTu04mckI5vEbZP1RHco4WHQ,1230177 +scipy/stats/_unuran/unuran_wrapper.pyi,sha256=-AoK2lySYcifl6GtQSs26ZkcCBw_wYpvuY4mhxRYk2U,5601 +scipy/stats/_variation.py,sha256=XqYI_QMi9Oj9CPte7k5gxr7ifFvrByAmJCz3i9D0NFU,8329 +scipy/stats/_warnings_errors.py,sha256=Dp4wSeo1NC2JmqcUMbyVfEcLeWdTEpVDWqOFTg4134g,1195 +scipy/stats/biasedurn.py,sha256=EPX7Ft1Uo28nEk1Bo_XIFNASdYfknXRbx-lXnrw4jLU,690 +scipy/stats/contingency.py,sha256=tG8X-wpLLp18_7MUSZ3a6wwfimH3UUqDDNGHmw0T9G0,14208 +scipy/stats/distributions.py,sha256=0VRiF_gbq3hZMdB9AONbXNE8bDLfO0dlQMjZIbldSTg,803 +scipy/stats/kde.py,sha256=CteJ2T0-4kFFT0wqpwwa3nhewyE_LnAUC0qlLnfoWNo,923 +scipy/stats/morestats.py,sha256=Lzo2TJSmmnO2VuujfDTx6R-1h2mc-rdutjbKXQx6bJ8,1620 +scipy/stats/mstats.py,sha256=Uqwz-P46lDBWfL7uumXpD_qhV-M-OTJfSTCBJVUnJZk,2262 +scipy/stats/mstats_basic.py,sha256=fcqdbCirE88xnXXOu2fEgFOISLDwobB9_oBKb7Ma9YI,2123 +scipy/stats/mstats_extras.py,sha256=zpvhK6MODW78ymWOpnj-QHc7bxpdPdG22Yr2Rypndw8,1001 +scipy/stats/mvn.py,sha256=lBrOC0EQSv585vPnhUCdNCSvqq4Ns5X1i7zKJDy3rXU,784 +scipy/stats/qmc.py,sha256=RTwVBoIQCDo-oCbfa_O3RN5ZhPow6-aGgulYQhu9Te8,11648 +scipy/stats/sampling.py,sha256=Ca2PMnize44Q_bUlwysc31IrKKCs5VDkGghLcx8iRNE,1196 +scipy/stats/statlib.py,sha256=KU5sYHMhlfnpCbLt5FdMASKQ57GLIDA4AZ4gZWDCK4w,776 +scipy/stats/stats.py,sha256=MiI6nrmK_b2MoC14DIAzUCe9jGyuuhEtgSpUSr61ONQ,2702 
+scipy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/stats/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc,, +scipy/stats/tests/common_tests.py,sha256=dF0Hy7dxuTS2M51fSPOMYBQ8_4Nmu1uCYDJH3h1Uz0I,15380 +scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc,, +scipy/stats/tests/data/fisher_exact_results_from_r.py,sha256=BKxPAi4h3IOebcZYGxCbutYuAX0tlb40P0DEkfEi918,27349 +scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy,sha256=zxjB8tZaIyvyxxISgt8xvyqL6Cevr8TtgQ7TdFfuiYo,183728 +scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy,sha256=_umVErq0zMZWm0e5JOSwNOHNurViT6_H4SBki9X3oSg,183688 +scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy,sha256=88cZ7dVDH7nnuey20Z48p6kJUpi9GfImaFsPykDwwHM,9328 +scipy/stats/tests/data/nist_anova/AtmWtAg.dat,sha256=Qdd0i7H4cNhAABfFOZPuplhi_9SCquFpO-hNkyRcMD8,3063 +scipy/stats/tests/data/nist_anova/SiRstv.dat,sha256=x9wJ2g1qnzf4DK_w9F_WiOiDMDEg4td2z6uU77G07xM,1947 +scipy/stats/tests/data/nist_anova/SmLs01.dat,sha256=KdnJedRthF7XLA-w7XkIPIMTgzu89yBAMmZA2H4uQOQ,6055 +scipy/stats/tests/data/nist_anova/SmLs02.dat,sha256=nCPyxRk1dAoSPWiC7kG4dLaXs2GL3-KRXRt2NwgXoIA,46561 +scipy/stats/tests/data/nist_anova/SmLs03.dat,sha256=6yPHiQSk0KI4oURQOk99t-uEm-IZN-8eIPHb_y0mQ1U,451566 +scipy/stats/tests/data/nist_anova/SmLs04.dat,sha256=fI-HpgJF9cdGdBinclhVzOcWCCc5ZJZuXalUwirV-lc,6815 +scipy/stats/tests/data/nist_anova/SmLs05.dat,sha256=iJTaAWUFn7DPLTd9bQh_EMKEK1DPG0fnN8xk7BQlPRE,53799 +scipy/stats/tests/data/nist_anova/SmLs06.dat,sha256=riOkYT-LRgmJhPpCK32x7xYnD38gwnh_Eo1X8OK3eN8,523605 +scipy/stats/tests/data/nist_anova/SmLs07.dat,sha256=QtSS11d-vkVvqaIEeJ6oNwyET1CKoyQqjlfBl2sTOJA,7381 
+scipy/stats/tests/data/nist_anova/SmLs08.dat,sha256=qrxQQ0I6gnhrefygKwT48x-bz-8laD8Vpn7c81nITRg,59228
+scipy/stats/tests/data/nist_anova/SmLs09.dat,sha256=qmELOQyNlH7CWOMt8PQ0Z_yxgg9Hxc4lqZOuHZxxWuc,577633
+scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDDbhCzKSfwFGr3hwZ1nq30,2591
+scipy/stats/tests/data/studentized_range_mpmath_ref.json,sha256=icZGNBodwmJNzOyEki9MreI2lS6nQJNWfnVJiHRNRNM,29239
+scipy/stats/tests/test_axis_nan_policy.py,sha256=KXk4_PqHAh1SkQPJSxfX03d8Ly65Eg1-S0-GolyDDV4,44465
+scipy/stats/tests/test_binned_statistic.py,sha256=CCsd8CaAe5Obajj6q8hyrPEbFZJcc2OFVHiE5aRLxTk,18818
+scipy/stats/tests/test_boost_ufuncs.py,sha256=5IWVCvQ7-pD9EiXUBUMtbPePE5dbjsm5tkdZFY7zLHg,1612
+scipy/stats/tests/test_contingency.py,sha256=fMeGnTldQjLa5CSaaQ6qH90JXzrUivthVD-9DafgQm0,7706
+scipy/stats/tests/test_continuous_basic.py,sha256=K0eoTRnKzalMiQ5ZfXIsk3ew9IeT3FzIqEwuGx9WULs,41523
+scipy/stats/tests/test_crosstab.py,sha256=tvCoZGfVasNIhYxLQIe3dcdMm34s2ykxxPmCRTIOFc0,3882
+scipy/stats/tests/test_discrete_basic.py,sha256=Pw969IIdB0yf-HF9sZ5-WyUchbqlfgZTu6LnM4hV2BY,20097
+scipy/stats/tests/test_discrete_distns.py,sha256=hDZ5_6IX8Ec2tOTSMrdhxee3WMcjQi6hMpOZwgAGCuA,20066
+scipy/stats/tests/test_distributions.py,sha256=i3AzoT2RVTlm7q6_Vw08dcnQYqR9h67mOmhxXcKvNaw,299382
+scipy/stats/tests/test_entropy.py,sha256=yHMmAmQgvm7QyrQKuh5gnEXJra-NbcVDwOaI-FgCP4M,11278
+scipy/stats/tests/test_fit.py,sha256=18Bionw5I29f6EaKAgzbSO9tbSz-tj3XzqlWaGxe4R8,37232
+scipy/stats/tests/test_hypotests.py,sha256=YUJlkTWbcv1UaXYK9UUsNCirnHSb59ndyP_qVxltb00,73067
+scipy/stats/tests/test_kdeoth.py,sha256=7KOD-TJb8aDMQJ3DWnVWk9pm5LEf6LUuaqE7oy3GZHM,20331
+scipy/stats/tests/test_morestats.py,sha256=xQglIKZOdelT6vPZwPvqB-1nNDS4upctkj_KmQI9z1A,113187
+scipy/stats/tests/test_mstats_basic.py,sha256=1lUwHbHd5GkbPd1DTrheoK3md4P7kAUt3aKQj8KTQew,83139
+scipy/stats/tests/test_mstats_extras.py,sha256=miYVK6uwePW4c42pieoRv7GewL5C2WEpX99ZNPDzMIk,6066
+scipy/stats/tests/test_multivariate.py,sha256=ouctxcObCIpH04gjcbyrP8-YaWyvQZPcODiMdTILCAQ,112519
+scipy/stats/tests/test_odds_ratio.py,sha256=RIsmgnmUUH3DvynDRZUaS6llCbXm2oWIfPa48IJJ-gI,6705
+scipy/stats/tests/test_qmc.py,sha256=NcZJt5QliBs3jGJ_ApbkVrmXOwLHL6Zk_0-BPmK3R3Q,51173
+scipy/stats/tests/test_rank.py,sha256=SNomJb8wNZAZGMblTOQeonH7qt2Tv9DUTBIbRYo5gm0,11273
+scipy/stats/tests/test_relative_risk.py,sha256=oKdAXLoWNI4Wnk2N1NFH_I3kTj4jP_43kksW94WFL-4,3647
+scipy/stats/tests/test_resampling.py,sha256=hYKH9OscFqp9sSyaFJ3f2rXcXviamyFqTcDhnyAiD1Y,67172
+scipy/stats/tests/test_sampling.py,sha256=NMzyeq6R3XgH-OWWTAopz526szwNE-xB_LY4TpBPRjw,50757
+scipy/stats/tests/test_stats.py,sha256=g90ZFdlUNO-gCqC-GhO-vWeYlz2HuaxLANqPyW__FsY,337777
+scipy/stats/tests/test_tukeylambda_stats.py,sha256=eF6_VaRX71mlU3QdQnJF60uo0LcSzog-BHWFfunt_uI,3232
+scipy/stats/tests/test_variation.py,sha256=WUZAV07LhaScxztzN7Vv2OAWjV-b42FxgsGZtGf0WYI,6245
+scipy/version.py,sha256=xTEo67E68esPZSDVaIfIO7zyfB9m0OTr4fdGzYO6SyM,261
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL
new file mode 100644
index 00000000..5f08ebc3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: meson
+Root-Is-Purelib: false
+Tag: cp310-cp310-macosx_12_0_arm64
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgcc_s.1.1.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgcc_s.1.1.dylib
new file mode 100644
index 0000000000000000000000000000000000000000..99a742667a35d2236900921f6e4ebdb70104a2c5
GIT binary patch
literal 156896
[base85-encoded binary payload (156,896 bytes) omitted]
z+sHpro*J_01;5@ZG+PJFgia>kc^dwB)V;dHqTBEIE_9oann1VmPls&2c`<=*Tj0;{ z!bkAsE>d*6sG~gqO;&n5HNr#t$G}%3+%@GB=7xl?HmLONLQkQg(65wxMV9ty8oJtD zi8OT79ix?RSg_5a;U!L*1x5v@#+_#cy@@oe&opW1`&6)HbYQ_kXn1nzGoh1i70gKY zMcv95EzDB47Cij12Mbfwt?9_ScS7N#_aNgXg{lIcF3xtTBhR^2u~Dyr2lG6^19OdF zdpW!!e1lvT(U*GqlZk)fcKi{Mf67)bUR)Rr6gAKZo?adRazYjvBH( zJCNP6nWK%P0D1n^$xgB^fn6t;{_z=++4dWK(buKEulS;`QD^f=UsQho`pKbnWhy*J z$|_gGYS0B0$`g39Kn>Np;7&EX2DJZ3nDW}tNe0i5J{xm=x(cqXRvq7Y!c|wK+nd&o z3>O#D&ODc^Wcpr6u|F_v2syXb7$vx;ZbQzPGEHKGH7lq zSkZ|mm%2hH=Zd~FLaQ^>Vf0;o)q;6V>xQTqUUXStfv2f5Uscp+sN!d#Yvnk;4^hQm z_wap^stBa=nX4)a`CaN+!I7gT1dG%}tzS)@J2|)^Rjt71u?K8)Yxd>H4MoM%ES=rA?Y*EDYo=VMew1R0t-hV!YMPvCdXy%T&*zDrcoN~LB9 zkI&`)t(~3IPcHrMke~4rxjc&fJlffLcuCQ9!Djd9hFWMKp8a?fKE793-vfMKZhf!k*`?NZ>B)1fYq#c7D_E z9Q0bBu69&$-3y%xBiEmlb6?x_Cv(EaNodrh($&dq`X_x8`s#b1NQs>borEqfm)K!= z51L6mu3s5Zc}Bih!MpbP$)%gbzSb(=l%UXNKjXLg)Y=6G`n|dH2M>yl|0?C_dg{%3 zV$X!lXa2%7wtQm$j>zw7%-ScVyjReNjLXK8OE+tsqw5NMEciv9k@m~I^32o;zPIIm zMSG29o9omvbconop09U2@1^bUKrfTuE(rcT<&3*d#!oe@U<~wibwMy!sixbd4y!$O z`8l*p+V(@9>DDfPcJMPiQ(!$K_ez_h$@-wY!RxrURogmO1If=ZWp_8vE(xd#bXsu) zo9XhXo&L_w7woYz`}<$oHXOapg|FsnXvROQRAbs(d8WlZrlE?uB77dbtuSG2}h!>2lD$QYg|pBt*r@HGh?PcD5)k7tW}0{4w?D6Ckqpf*jd zmiv?njFNlge*3<}_DkDOOjgm#usZS!Y!^NmewS)E0UsI4)sQ{0<#ZaVJstbnMmgd; zKW&v`EPJU=WTl9*3eyev)JTEV|O9nUp7GooP9%&~;0S za)Le8^D~e3k!!`y)}wpN{W-y$`kbKh6$H0r2NoQ-))UipiocUff1>$Icxm$pHT|8- zRP-JCsqk||#!py0hpgwn1??s<&MFQ5@dUos<(`TB zw$@M;2at)W@ZAgRS510o{pv~UQdL%g8nc)IFJ5Jg9oj16hYT)R?iyXXd`{h?lvTR8 zw8NL?i~bk>H~O!_Yw@D=`3rfzrs(2%p?5A5eK7%_UifXB=F3f~>T0j(U+NaV5ZQj= zi@9}AWfX=`2)e8*u-U?|@~q9Tc6oL=rP#Q2`1OALNxOYd@_fDOn7KL?`$XH9KBa=2 z#CM=9Dd%C@C2jhPbW_B5kb@sBzLc~@o_UyhrENdvnvB^Ce46t3{vGe%6Q>T=cyG1J z8t8aG&+~&l-v42>>0FJs$ah@4f&JCCFHJ?YpApFkmKwe$e2=Cli3{XAdVGPz1UVk` znxUGGVwWW@uzi5em4erpSkBSg-1q52J54ecM$*98$cV`lKB#)USNG!RG%_p@(V5ZuCYV!}oSGvRp51qAmE=O=8DI zujNE?g9`sgcqKA^?WB`Si@%&~fX6-&6N$+P^wCH<;MAPtZ1JU!U*jd_l(I7d(S5 zrthk$*&4SK=;$?+bAobC5DN(H@*Q*w(1z*wCnwMY5~oSI^4+GRlq)d~zC#>+CR%Ms zy!RRI72P362{dmf9_)0d#>KJX3)U0gsWHAe_$m3`H1IaA>G4u|!4+=e;-Thu>Y9 zYL3VQzFrU?Am5(pYkKgTFgZxHE(`M<5lvRCi`E1X_c`uf8f2HAWuRq6t! 
z@2;Ix#B&k{3a@#&W`7qfKw=P$1JGXdL-ZG)@T~ShUxnVy9U54W{SvYB!;?l$%`AGf z89Io6COiafn(P=}>-4ay2oDJ?0GD{Q5W2{H@;kbr;ZJwSP-V7$4sIVUhRXn>ixhGD{Mo?W6vU2J~p5O!3{_Qa*M_ z>L{F|M&Y~PS};{r+(W-hHi8kVxo<1JJL$6odc1_p`NbAO7n>%wJb)`S<~%&FiM~vi z@7`tew7v2!^XPFqx%8)cteUTJS#qK9JBfqvx9{)lTqC?A&)p8+%vw0F$xGWJp9^T; zt$u{65r1wx_>$@8zO2XNTik!_bH1i=i{~}%HpVP|4nEpyjJYwq{ION=_^mFl+F6E- zw2pLd&r$9gzfv_tV3_L+S53X4YU-fPYVQ3#zIeScre@q?Pt)`FPJdDA(egl@&iaJD z;4Y1t=dAHxbwg2f^gJUf<6Cf*tHM7_72iZV3ffe~6X0b9@YPLVmhkeZX2aAyHth-; zvH!$>U&jY1z=Om%uw`EE%POK5t;bsv8W82Tb0~ zZB(}+8>5OC&k``XpJUUPz-#FJ{KR_;ZgN#9FyDf&V()2dtgia0!6IzW+t39}Z^n{* z74#>mt6(;E9bY~6ocF@u7TS1#=lm|7o1}u?WX~N?75C#)6r@8Z+9x!YHrw=&_8RcA z@Khc=*+&0M$AxDXr5i)br>N0h`c~#KhAMu`hF@PpPYBOcTeR@ziqE#?ncRTMbFubC zrUuv3ua~5K?{>~uGE)TuN!piSlNyCT|CRFpg=6<}Pt)2vJ@B79^kc^SF?cq6n5QB< z!o;StJ)R05$on7%-~@6kZtiQmBCMmNZK*>xY9WA>*7JuE(z{C1FQomWNv zI)2stH{*56*5g?B6Su@p4V2Lop^P%Y$B1>!vAQfM746zdY`_GM;JCGHm9yyW$B0EY%3D(~h`Sz6-A}(LT(Wh9xWVz%;-`UQG!__)UD!W_Wx(eBUzNfenHt1 z7q@QB2ul9r^W5Xo?Hk+h5N%oAaz(K9Vij$p9Y?ns!4tDo)J2^#ehL{g>eoJbd}vze zSV?+liKD;M>F+V!C%eC6^1sKquJ=Y2mG4q-Idzm1lQ++Gi9fV}+(3TYH-?1X`aJQw zzJFZ4=v(2b&Xq&r`X_yx%5MuPqgq`V++zfG9RtH@T{(olXdZ14J<+<#2<^#M(N`(Y z@9LQ4rrr7(>Xms0Lf0~juE$1tLT%$yLOZ!v=q%-T@pBLKC5AC@J>r?$bLE3CF1ADK$q(n1YZ+fHKdI=3d$gxGnjD;Qy% z7jU1nLE2IsxiUDPcC>Cypbh$R?<#j_4`W=@QyZX<-G*Z$Q)okK=%2Y)>s0g?ZP2o4 z$14u~(eN|cD6%GP#kbs%m#GV(K2&by) zH>1B?R^49WlN{^&X38<^{J3d@((!4<$3M{5|#rSe7zsNi!KKgVyC?DkHp3&m;OP=#G(`b@o8Ul z8unXsu;eF4@I8{QW=I~c93R{7QKQ5UQ23?gDVNMt^kWnLT0Jp|f1JkLn&iHj>C~Bi z)vSdNT`+1>hPs-(RY&a_#_1)oF?X4h;e=U&v(Frt+p1zYZ=f+`uG+)LSdIgPk&AK zdz@vL#^e`Do%Cg{VfLe1Dc$b8x5m%%yftl>_~ilUiodcz=riu7aUoYYWzN`#T{>nE zdo&S!D7p5nCuYqoT~Qd4oYj<$&NWi5O`Aeu80NLz`BjJ~E) z=;~Ff=^x(i+$hg&JvmF$^?zgjikS;vDmr?KdQkixd#;6#w!Hv-MJE+7r=bj4F9Hj) zv9H^Z`9Jcz=pU_@BA*E6itV=ak~LG4U1ro!VUHugp#NH9Z9$lM|TV zRD?h5Qmzd2&b?m6D>S+xgaN z$}{mhqoIuQbCr?7+|qk<7~fpt!1dBE>`?c9xkJCkMt9>UhyC004eTHIb(YmH7wwT8 zqF^G@E*W#8>%EZ6M(qFlG{iC|McCA>|JvC`R=7=MfIcZVAK*p@fAPsX#5vhc%d z(KC%j65IGD>A8}ho$eW<_>1}8#6GK_-4ek8^Wh_R`Yd9dQ3Y!Gn?7XF$Xqe0Kt0Cy z3qG#&_hoMy>SG`GWd^vu^i7wV@^LTUbLL(9yMj#Lo5EK)cTXObGjHdRX*dk z*vECnH9Kw*`e+^Kzc%$SW!_xjW8HUy z>o%|IKGGk_sl~1(oC`h8aj4X^H^)K7MEY)yi9tKeaft7`l<)H>J&ta8!SBwTSx(>U zjR|~S@bQSt%jd^F?sZ+lC*wz5hRV$-S4Ms|__&N!p4DFIr}S6qe~a-En#|+b6`Ttu zEW9y~GUfbreZHLYY<=Fwxk3GT9J~20W9mXjN#9&xps~KwUKcvdCA33cg@*E*+@Isg zfEWE068r3@UBcT{*roz?i_S@^hc38H?k!LcRQUN#DU+sNzB|iT;I%SW@H65Qkl)IA zpdje|g8xOS1MKGp<;l=GF-NHzq#g3CvkW$axHX%(x%QaJ7%O%6F6Ez~>z8t}(N*4u z(?;c}^p7Lslk9S2Jd(+qKVLQZkvHL0;nPCsmJ1z)FAJe*j{d&v&0OeR2)_&ec^M0l zgP!=#<#jQJEP{?Hyt;OWu_AC;b&23eHpOf5y{Pc#9Er($D^_{F!-8MYj}1 z-Hapr8M`M=*Dz<)gGDAVk7LVUflYd&zq ztz+IDin-X>eC!^6Iri!g@TjeS1gqTxz98{{*Z{GK<$+HHB`5j{^^4yv_OVpjJeM3m zwkx!Eyb)>{?GF7rWxj-+wex>w|Nk?Fzi54Hj0?&b1SDUmZQ?Jre0kY)f{`>MR0zKix6jmN_$zJn z+f%F=!Dik`eDf9V-G)6#z5BwBuW;nLCd_nm$i4PGcga1>C70jjzFhQ$o5OjJE+_1~ zC!KrJ2f&D*xZE@vL48ZSjLOR-(eL;4I3ET4U~v z$4ac{;IjoaJSV=hHD^g%4!TM$2eVXcBe2K^@7; z7ya^ieiuwe@~!_!`Nuxz4#mp%n&mHoj>2!w@}+Dy&mOYcf4cr($Gs)a{`FrhHG_F~z zo}S+n8Kq`O9a1mYbH)7{*N6@Fm!}08%Uv=zR8db3^{iE@-A7vu+OSm_H5a># z8iV6PXxmA><$;op`FvJFtNWJeOLe>_s z9w18*&ufXz!kDebL`_X4XwVm05S`5ViA- zTh~5$0J^o)pVw(SytSay#kvpTXBi9praAZtH90D)W|PX)b2rP?*qU55riOe>#dG9B z+FD(D4VSFvkY{8)N9z-+ZcDYQD`5Refc%WCJ1J$Y$yYc&%@LsQm#~&YXta&b0QO@a zpM@&5rd*}fWHY|?V_ecx)XwK0RC82Hdd=pC@9wB4SFFbPqQt3DnR{N(+>rqKK*mqKyY)InoLGN> zc%r{{-E_>HtFf3iOM4HJv$6TBl{SBswya}*uJl{4?UDAuSLI$= zbC=%kJa!E@w^ilI%G;JA@B%Cfoc;Je6rB+ojpH-a6UOUy%i zcjfp}f`5;GP>#Qz^nt{wPJJ-j(g(L&`e1aLLm#vig~pt*J{a34ec-7~t`GbUeK1w) 
z15Ztx^3(|bltWLoOv*R)K_EdNgpnV{dt>;)z#QStoblintRoVA5EyOh1F6^62b<54 zK3Io7sJHaNrycrW6Z!yMQG5h_@C|a^htUHEmyfMGhF&=2n%MrS<(YLuXzynF^)IKg z>e|o?$@IagvBv~oGF2zfRmxhgZ>hSAnHRoR4QcmE?v}AALMMoRc$~v8vS{fB(Gh-h!v;Q^ zJx0wom7;Y+0No&E+wGEEe*{?(84=wpV`}S!=kOQZ=!$aYyujlNB!4W=k&lYg5i)+Z zjtHNDj(F9fBf^%B*zM2}^~4-nN2svY5n)S5gnQ5th4g9e(atqtLq+ejbi~b;j*vFn zI%0D#IwH{CKOJ#28m}V`aqiR+x1lqX^>1Ovs?ZPDzIkr-LpzwX(}&o{xl=#fh`u54v6b@E>+r{==^K+#n|_dWz1JK$xB8*=M7)03&AC%Q%tc?E z*AMaf!8@`mzF23*7ds@r07=kz!}~41aE%^>_+m8r;f-al=As|SPiR>jCOKdH_uOGB z5u3L`}ytfQM?huB!=}- zUx342eAv_%WmdfLYvzjzCehb;L-jr0XjEZ$@DFdr^hKirlX#VL^hG1_n#Lq%p+8Qi zFODbR5g#>t5A~sf23r@%=Xw00NBlwSgO36}DY#xjeDR3W zcW{vxaQY7C3ZJyw)yMc^I&qT5C*`^KD!z#M3uF4`F9<$4`HgcHUuYe%ot%kjw*;wB^u&KfEvSNxNMiNc}LOZ~8&-$yLLqEJP@JYK}{m>5@pOoj`L;qoi&TmfX zn|=^{a_E6`rXOl0#`r1u5=%eSivRFq&e0Ei2X7$8n2f$SuOH&|gL2@LRVF^E^+V)+ zI{%@0Fy=p4_@s^JNsRG+!zZ0>yV=4g6Y&QJHaXqKBW13n{p_#aJ8aU_72ZDT3b0AA zMLidS`5fkaseWRU-o9g#T30lZJ8|lYXE}H3icEBett;fSr+DL|fKQgV*oz*VzMlCI zKF9pY&CH(!pVa!oYw3%=&Y#R?{@vM_Kk1w=CG#h9&=;qhKe_KT^Cyv~7(OX`x92i{ za+@`OveTMBY2pxF^Cw%FCndQ8nLjCW4@;~$QeR>IWF>Q?Wd5Yznm;M?qvmn=m?LG+ zpJZKA)Mw9;Vm_Na2SVmg%0BXTo4$^2Xk(6))Fty{9|w>0F+Wi1tY-csa|SDTndWGN zwFbLg?)G6|CZZ$Wqj{93UYKg?g}&gFZ@*^Zlw_@*AmErk>2mlF{(kxogQ*XCn?G6F5C1{*LT~dY zZ5fpK!qN*de6sUBnm_5(4H942x*@4v_+;;TLGps6^bG*yOSOroV8P zr5En+$K1(m=9Jg>Q!hMCzQm~)>N$7n1rK^c^n(4@lfUp$(+}b=*z>C`f1y_J2isrh z>->jvr60s!=w|>(l9oQu?K+JT=0WD#vHx7KN1o&;Ms6JKGia>v$a zo^JgWF@)(WOz)#^0CQ~JW9o*pz5dGf6;_cCaq5O|aPHI%M>!YWAb%&9ekZoKlk8b{ z4eyI%9paJan4dcYoaimqYO^nRzTl6cOxeSdeHQSm4c0L~$6i4%vsN+klqcB2K6S6R zs^CS8ovbt6z?#UdjM;A9SL0)i^IqOX^2}Af&{nXhr&+UoBl{euam;nO^?UZJ)v)ag zWsNp#C--yU8y-0T2DR?^@>hjN;Dca+OEq1}JKboX-hW*7UNZNT`m^5i;OEHsbKKwR z9wPgr%UbsZIXnj+H3{!j8ZQ0ZXHIo)Ja7kV8Qrd6`x;-AcSG$wJTfp!aIGWR?7{C& zZ^&WY;~4h;G3ZM(ZT~C$xZ9n%_-o8BZe|a;ZE9#u9%Ziuvy}NkL08uHJ?@bW``km> zt8rKZ^ZIsOk~_3+1kYE|=W6avQ?8mqj;EljtXD4QS`MFkRqFPq!0pmqsWp|nf3O)j z*aQtOT9#I~8yRzfi6-{d*{1tgqwH)`ff}aU^cP~*eb6G0w#*;yrtHz}Qs)NNEaMks z?SU@)pi2(zKkU9BU*6f)pZi<6U*sVNI&YFSinGJl`Jv@H)-Z~XEV!HO!y)UJbCH49 z;GuffAwI}+Mfk2o{^7y@ZRuPSV%*qUd%Wh~$gp557`=>z9GxrE6ZRi;=eK-JZ4_RW zy%c1hV_7?#?N$|6WUFF%&y>7lN!DbyOzPs-mMLn+v+Ttxb+Lc-t+KCJ^*_&hlyNS8 zi)%yi+mFD{hqiixho4Ffu3-)5*K<|yjaDOg3O@0q1a{fHE&B_Eq2q4ePZ$A{k-a@O z@LtU@iWIpK0vh&3Ub>t@a z{c6_3+5Ennean7tWYts>A5}3%_TGQO^RnMJw9$LL1#)ChnJEv3!Fr+L4iz3A{3bN~ z8vT;;n%M_T$`pDk>Mvm2Wlg?a?hPlAhZ6Yl$YZY_tmD(Gb{_kKFWQmk3XW%Nk0TrV z@?`I+v~&`}2=3g*zE|7eqX5^|!mkndu??DdUEw+GJ2L&}$WFFv z>|)tFe-CZ((v~gk^&s_2dy3#k^n2EN+O>gp2|r4i$KC1q&T>nk&sSV&+h@_H*Do1f z*T#FT_PdAkKB8gB-5A}+EZVc*qWcy;73Kap%%e8D|khRst zInaRqOfSQh?Sm%h<>>8PYwLv$E1*NQMTb|9bM3-5M@UtNZ7R(&~PQyzQqAQ!T#OHqt{pKD1r* z)HcS0cbsOWqHoh(=6D=;kI#RNXAV&QdOoYjcN?tHXDkY!`VM{T*DumW#1b1;$u8z&rbBj zjw0S0n~$EzM<4rZpNr_*IknH|456Q< zn@dGcYoEE_VV`%2Ok$s-=k0Sh`y9kRNBd!)yWMZuXU&(|K7X>yK13!@HB)=OXlk(>}jR-c0nvpxS5rYi(zse-3+LIpe^7*=I`+ihUMZIPG%> zb~u@R{t^97w9ifSu_ycd>oKZ$1pbCIe*R4seu-tDbHSaS#pl>c{QMg6^XtUV2Z^7> zACvgGoERg3?5(xpXYr@+^!h@w-%T0u^Y!?)7jm#~l-`4=i1*UB;I~R_@g0s5D}Jsz zxU5TGJ*GmxIZgch1ot<)M>OEW>NTjM>tq~-?y{yfhxfDFbWbOaZgIrX68jyo;^=+E z(T9nnTY1ioqg(OQF5vw`yWLqjj&3E6&NPPBlyXlA7>o*ERO_jt0(k6+c_dpXFn{~rIHS(TBk;5GP zWbt3t2|vDDxoVw9^y9D^!8|tOa~~n5e#P;u(6PVIi$7@lmfD8<95x(m$!Wts3-7)G zpVVWQBrj$AmTq|W`-U&d`oWlQnPJIiGW%_0Cy%dh_QuE8#*{98HZJH}eEkOT^&v-m zeYltSy3&=JG`>zJrk416GrZ4xQu-HPvtC?yTKIYQauph={r(C0KW)D$ubcf|CUVk0 z`@L7k*QpKThwx6JVQTuzv~QmkU+*F25Yn!kae(*Hex8IJh_dDWiiJ@}nUwEXe zrVV)${#5X(^w$TUD)`g~pBCx%c{HC&`4Ve8>u?eC3cvmnF|x$lzw9O67Q92o!C+ka z8*dj{@wV9W`QTk=5^ww1>)#n~3oc{F+XJD?(W9L+!q83P?zULmZJ-;&bMOJCuStlz 
zWgeWw-3HItakovshD%e4yHo3afDG?+Q93*!M<>bACdf9rj)KZ?o=S|L}3_`;K!Of1f3M{1CYKVZq0-?}CpXh*rLO zr1Kl2#I6fI-pn}JcHP3FAF=JaiA4|7_;{s-kKYAux5u4{T_0-M^R zzwvS1rY?NkY*PVojcwPD^n#CH2>yDtVdCRr%LO0r&;1gA+kB~Q_q;CKU4{HRZTH_{ zqZ8t9&6i`~O9wu#`EoLRDY`=rc|Vu){X<8=*4AIZyM)2T1s@k|T(EM%#}6jppwP{m0adMhhQT>64xN ziNGJ75;=@WlNbvDg zu<>~uYw>6MjgMRJ9aFh+_&9rp3=c-pp}!y}VdLXs%N6aH>q1wphZ1n|dxxlv9Z&h9 z%fYY?+~te@H4mJ8w=WtkcLg)wseDyq<;aLUzxOi1%TpVE3|`KABc{IuJ#2g4#ydv# zsmvbna_spdiFkP#?|4gy$K&yG$~0|wSGj_h+xF*|mt=yMPXsTY&lc`Cul1uqxeJcgGei^=fvY;{3WygZ73W4{N^Ku>pJ=9zJrc{l+x*EXEKftf#I zW9CBNWc0u$$8hsi7H%G<553^#kHm0u>L_AN5^-~zei`{)_S=b@-!-fYH{XY!MQ%MS zEg5c3`Ss|7ZZ;d8VB2ib2~L}RJN8y=_9UK*v)Sq;v;Us!89X1e+3@#J z_+Drz$DrEp0pjNuC)#gIPbS09GoeW``~AzmbhqC-$vgBEpZApCKIiuPEaB&aWxs7( z`2n=wLF{+5ANIT3{oU;MXS(cnB7S}^b~>5;o(f+k+V9KYOI!Xva`wAF`1xt<_n*jF zTy5d!J=pI!9Q}P}zvJ=r_t<{hc>1}t-wFBuI9&Z)+HV_Ix9oQi``sN^@3P-J;$m)e zf^EMOarH&m-qYCcT07>R^yhB&yH;ZE`?zk|?^+#mW4|ZE_qP5Rp#5%48h;NEU$^Y{ zjvPDww)A8&eBBF866|-a#NU7ZQ=I+QeQhQ$(Ubiae0`*f$JhVI#Mc`o{uX?FJ^1=d z3HZ9;E)t{29G-f7!%Y^xE_m8kE;rvPvJ!m#Gx)+6aR|OHb9Tzr(Cso$M`Dv#IKZj& z{Io*BEGSdrg--DGL#M&li@3j;xL2^`6nTzA#!{Z2%A8DRUPJKpNJ1PwtfMOqKa~Vu z|D8Mh$R?u;U;nFnn8w#DT_(Ps+>WcbIQ+EudO{p7_}pbKBu zy!&x@SK5=zzHb3vPZpmK&xyy^x0v|4=I1To>wSsO4-%ih;fT)<^%9>~y3&%y=dZ)_ z5}&W*Q)0t@#pnIO*OS}#T01^(CZBP&g|8>I?+G~j>jQ|-XMwjrpNO}k>m@#~*ZLtT z-X4q3ZCu$utRHy0!`9n%bjRC!j@O@IOkUw=;plI?o`|=fslAu?^mfDC>7T^wHf=-> z`a_@RPmjCD#qHHr+@1_~x8wHbkaH*QUN&q1xI6u-qa0%Rcpm_r5%U2OaQB7SZ}9=L zcrG!9|Ew9q$9#Z^e@yNJEap0RrNr3xsCBa7!2fA=*FKK|}&dJpjT#<=($onYJUMEw2B*xlaZciuaH*pA=x ze;;qVb7g%C@%u8aOZ@KT+>YNbgYRwqF+lrWn>2nuGyJ_V7Qb71G8z7U6*O_$Z{1Jg z_t$^dWxwsd9s#Q#fc>sLr}^`9g})Ebeh(`Ces1k|H$459$U!gp^KSQB_`Bvy9h(<- z+3!UB{cG51r~S71avpqX+i#mMZ-g&x`AZg`_tSoBe*M7M?=!^TH4VdN{ya*~W6<#T zb8o-jTl_u2exH5(y}SKBbNqd9>~}x$_jvn_PO$BFBL4n3wzseL`xC!SZoi-4dZPWl z7QVOj$DrEpGsEAz?6;*Sli}|^Xp+o+zx`@=`|YBiJ=yPG@OP~bKeYHeK9(JaC&%A? 
zO=lf{Pq5>s$KTC1y^rztE*t*^hmB9b--Fn4C;o2R_#5F}8-KTL`~vb!BBROddq44a z&Ceed{M~8aUC?mQ@b@0<`@rz`uK3)>nEQh-?|CoqcgxDENDO*ZwePVgCE@e&g@DO@oBLH=HJZ&q|mt-(bb>nlE+y{`XzBJrRF@ z5*wZnziYl^Q%Gn0uKDr~a!9uSoy@-XCw{m2^#c>X_h)~YB>8o%!>ssy2y`4Y{Qc-- z7XE%%&u2M9{5=Z(-tOElCfR)N_0IibOq<@|oL7HV=f5M1=NNyFo(23p3jW^ygi94i zpx;@V|IStitXZRcYyNw@?M5fqwmT7j{}=4;Y2x>o?Y?10a@+lFu3PbY%y!>MuFuvV zgKEFe41e#k-~|q~?6}w)onYJVMErdNHt96>yU~ul|L!;4?02KY z-p_K~vfqt%?0pBhK3jhb(0(Uhe|`q|yAQg=;%`e&Cd1$T&?LcrH%k0HiGDilx9)2W z{p`tp_k_O_&+Pb6&wpo(>^Qs!{5=Z(uH$go%Oy6~+%<^v-xKV3AMkhGrn7**H<+5C4aUO$KUd(@_l$U%ST(+B)L&hDQB z{JlZ%J3|aF^WVGq0O*XE50HSrKaE{G4gMbU0shaglKTMPzGW^{SJ(Ajg`q<6>)5o6d|M0P@I5T1WTjnUW(bkt`tbhBywf^mO_LqB& z{pDKNU#_p~-!5To$)`EkGpg71Z<`(Xdp7%}WE#Ei&;A?kZ)P7D#k#jV)(a}u*vR{e za@2^}zH+iZ`w`Z?2~B1H8QFjPOYA{&IctH2|JK~6V=w#AG_(F~E6>~e(6q8I+stBL zXcO4sbL=}J_Fwj^TgW~3`nNmT%VLlF0``l}itR=7bMLUYecG>Q4PQ=g`^#l@$KgAB z*rz?#CS!Q}eeBzXEX22qHE_-B758W4s#K}sY&EX^GjDgYXG*5tCwso$_hYo)hckZP z_7QrY>TzcQfJpre$XCHS>Kf&o2dqUaJP}aZMSiQ)OzHXIYkho7xhT0fqpBfoY z*{4SKtC4+bWWSoR$j{sCS98^&Wg4%aV68v3@%n$X*2DF-UrjxH?A+h|1p88MGSb_B z1s`i3c8_nroA35s??Q*Z;P)Nw@eS-@6Ndq8em8L+I$0MtiG6G)4$eL{$g<%0u7rKx zdt4W{cwPv8*8A@FwvUashjnpZxZM}(503vTYvF``Vfd(py{ejH`_OqBglvkcjEmW9P&l7{POi>*40Jj*NVKt&IuX{>yIuK&@Z` z-{tx#?nOpwH70#?eQI zKcM^hG5RZYT8zc69Rx+6L$kSZmKe z1pa`v@B4uLfuzq{{($CT?GHTE{F%LGTAOPk&(i z0SohY_yfJ~8=u@CFxSnY>n1}(?GIRc-z4z|;`hDj@&|Tw^9PWh_lZByEgulGHQ2HM z_HQjd_PO>4PMa4v3;sZFd4c!BA4tp(;15Lm@{xn?4-7Cr(9IvPbZN5uz!#uLGJjwceN6NRCep{A{DFhu1Bv_N95V3%tvidx zbnTC0;{&q)e1GwQ_rv};;(v-?Ap6x`eazgib{JUB1^BZvU$2#zq7D6F;{>uV_CJ6V z6fpNiaDp~)f^u+zD((?p2Wu+s%RV{%#R+=L6WDE%+{LrRSMtt0XS>!W-~_wfCQh)I zeRE{Y)(uaoIb!W2ZSPCJS^OyWk>1Qc(%X1nLV;oKOMePq{|K1D7JTsC#s%%aAjUeA za(+)aw!f==g=G$3At8rQk56Fd5d1s`cA$NQzk9{ZA>6?=+pjMrSC-6IXz2kvxFR=x z|C|;RJJ9^z(gSvo`2M&4U8zkc9$4|SD{aC#} zy42T!tRLq+b|eg3bp_SG3| z9N}4H*2WR+J$4qdzs_+>A62t2ZZ*2K4SiINK0+5;``Vl1X8U<uRdI%j-(T@E?lUU+*c#yla0wcf<9(|Me++sCn$Km(IIiFXG*= zBOChvhgV&xZxGq+kWJ@FgGZ)ir3Y(cLzQ^p?j%JkU#V2y@#%J*MadA%1j z;6KQI_ZxWkLoWB(?}Pmc``zbK)_(8sx;^9?1dq4h3!C^Z8&_}dgtgZCRXN@X+f%MV z-kD>!>k0A<&UV@Fgq1vlytk9K>3P}sbh%7AWq?b{SfulgOc@txkCcA^+P%pA<@)`Yt_EqdylW%JYG3(q7x?Vx zc6qj(_LZ~0_Db4U?P#Cq3m^J|XUzA|2D^E}FmXAVmb^U!m&RdT6&ld6iS#lRzUJUl{+rj?&zo3r9{q>J?&3^x^{eGO-{`##( zkUbQ8++RP!`)6f;{cUGqfBj7s?kn+V6=Nm3)qW503CH_qo%`!++Tnk^Nsg#1XEC-x zd=DG{NX%Er`)5zoS^Mgjm~tuapOt)tyo2^Iayic8r+w@r|F5ZP`m4yL`1Hcd2K#y1 z`G^2~wAqr)3~Z>pM|PX-g9v^A)-AF*lk#5Id*aJGWXBIqF2P#k*G&FyEqcs;hip&2 zfV}@S>@NAem-oSjX?MNhYx1S3>4mn;WH7e$aUXrS!Da4~--+A^mcI)=-GdMBwdi`f z{EEo&fAI|b(&K*kg%;iPdI0K7wjRKyT}Hn79@$39+)q2ETJ5;gu^)c2_sB{+WIaHe z)%IlVUZLgN%r{7zyZHjhx9tmve788>HTzZUocID4I_eOX`wvla7d_$+?-&2Uk&nQCK#oqIk8t=8`u(){56FV#BhLE|eefTG_z&pZ zKKl>d?mx@^LlFOg_jh)+ubcl+-Q_UXma!T5N&^eB7{zF`T<|nh z2N@WM|KRRve}Mt!FD(C|R^qF*sVb^0{g^C&aS8NG@E>-_e1?|IasGqu-)rPLdh#D` zp4dHq@wstoqhakYuzq}3{-WLS{@h;k7gOW+_rH_*ja~VRA@AA#{(?iiX5}yBU3~mdkLq>wq=iF|l3ZN7#G*i+|pqefvcR$b82$d0yr>2u9Hz zpJ3kxt{12w8lQL#n*NA;%I+HGACdhH}Qn&bw6MUr64i3Y@c=yo+F9 z5_d|wzd>1G6N%lMPagA6*%aY6&5;`XH zH_FJD$U6Y#eSq@b0eL5&ybtg>=2|-OiZb2@Xvq6>IJWmd`U9A z!hYX?ybnwC+TYN*k_3MvAy3jDe`BwOS6qYq zzAUl4g;&_`?-jhF*^0aK!7I#n{dT>3K=?*-7lK!qxr?6hitj?Z77pcA!78xZ)4szm z0k7zfzY!6x^fV64uc_UC;7(w=>ezKGoQr#~<7ESN^TPl6tZ`6LPX zjoa`G#3z~JsIx!y2@gt}OL#sJ+whv$M$9+q*p%Eixgp%S=3O&CqV>(&5hZeI`5ULnkC;4nuJa?-8q7h+kC^Qr)clCQ zFZmHucgPxxvz8z6_d7q5LvA8&t_1RJ`w}AG&isfEJKjfpBg>hGG4Tz5Z~2i%8{gRX zU)_8Ny_drrbcls-Gzz})8o7o6`VYzXZ#aYeh!+~-mzwy7r5}^!M-JmBB=`@Ff^Rg? 
zXNUiw`}afLzy8gyKp*Znr}Hb$Vt(Yj|KR>zlKBz|`Hs{258duR%l<<`e#G=2Z28cb zMQxY=keDC21o=qtAGCb5{4CCYFy)_I${_d;T3$Xd{==E)NA&&>PeQwa=0`p({zHH8 zim*Aq!p@HjhX0U|A2~DsVes-Jr}ZD=@)nXG>E=Hm-?smdm>;h zX>$MJQgn#ZfA|x*hC%fo&LBTx>qm`0So$$pek6jQkj#I0l0GN;59@UQlKT(6yy@ zb!BY+=Gn}T_;r57mL=_LtakVs3Hg!X$WlUnMEe@A{3I?vqJ53ukV_e){D_vXk5GO@ zuRC21{RWyJ>CNBhV}3;Q;5pBaB;XbO@i&s>M@%_7EBO(BU-Bcm{&SNb@t<*i1fOI_ z9HxODi1{Q5`H`Xc1!tZg@%NS=skO0<#-?t*iQbdKK<8Nbky^nveng((4Dut%_oFz2 z{0KUAM-1PvbZWBv$cy+P3I0i~;2RIpZ-;-P`}hcbOpM`(K~l9bRj4+Q44 ztW`~whC1@=DXQ2YFWGXVik=`BeT=yX>nCaqxKS_ zIn8c&Fp`!LicAh%7m<1J*UNe~)$nQFPaj#!+;HZ-N3I9=bx&!SOdSR^d57PXJA7U9 zv*7L9A`HQHl&QRNzEg4_8XXFi` z2=kc9>*)C)K4>Yl6FSy&lrml$JhYo@iqEH&XM1V2>iG6(59=~i`)204%bFd}vXr_K zH~Z+2=RF<$qz&)ThY406q>p|_A8w%!*-BMNKb~d&2m=>gN^WT3%+_DGy zdo8->jZ`(+@I|vle|xRSQ;fp~*4MZg1EK3r2l(7>o|_Eagbydsj&IYRYdM$p$XeZi z)&9M8s!7ISiZu><>wHatQ=PMx5`T8cc+!_vN4s;ab_>rGvAzY`XO+?B7Wz@n+!4Fo z0qQ@-v#a@xljU!j{!ucl@?~^VfV^=8dCTWH=8tGuu6$9h!(UvV%5}wc(OHj^`{~@8 zvGe3o6*}ow!PZTxsq8|v@#t-?kjS;H!*t5_Cy?zD=8;@+k(t*y$~cOA8_YKW$J8?2 zlBZ_;hIKE0p}ug6x;5LQDqi~Md5<=;XP2LA$J}YbGGxBJH9go^q=G-lRl%cqDM3$4 zU>AJ4D{=?zpNm{vqoP9lYIHytTJ2Vo+5`CXd#Q8ZHIxnCzI^Y(j{D$QH=k8}9=vB^ z$KA|_z!ukRST>^WyIhxfI^}BkcF_qJbEL55Y^AI_LpNN`db1P68Mg_JZ1I(hODF4K zP8g$W=xfq-FfskGHACx%a1Q-^c*=u{eY`YnSr2n1wEZ#t^-*>+bwp;FI-_NlsV|Tv z^aX3Ns6+I{iFeT#t=v}*&wSSxy`B9#CyK6+`9A267jo6kx3UYNNr1c$vR@93?t)hE zp6H=>7yF{A%sGi{g8$GTE8*EataC|&hOLWbz1qYYKYhz~yW732A!;>caE`aLb9z1H zH6wd_Mh*!@@&ebj-=?B>pFpqV$+gdh+Ha#>Cp%}g(GRahr)I_=n=#2{zdE6r&C4r< zm+7O-gK9=MNc*I2X@gyV4(oHLxTkCvy|kueV%?#UH-%c<6B^!NZH1!zEZY1H#`BAe zWf`9`zeV)Yp%hau{f5syu9WRX=%<%lrhZ!L_SBphm7@3dTF2U4(NXi4jjVf#^|{$< zYt)fV{d2zP-c`P)@Q{v~ z--h1r(#I3V#G0keeC5Gpf0~f`_`c*?VG1% zgoat=w+{VmNc3X!v>QU=$2C)aIkYTN<7&KmEM4vE=$B2)StV2I{_@2sb+SJ9pfy%) z);K+Gjg!5n;0D%K?qQssbEU+_X+O`kTF><~zFJrDoaibU-xqmCY+O;Ay4A~gRx-!U zi;fcC&Htd9Bl)N@*6a8xIbX5jli$X#@S~$nFt^06OZd01^$DK~PkowtWjxE4`Cdhb z1^>*Pkn!lX6MD_g_=ch%i{Cb$xsPM*H9OjGTg~-9n)6z;4xC4REE%R`xYfoYcwmd6 zW{6KC{><0WKceqi47HItx7uf$>rpdg9j5T@3iMhL@-F>RjE^nj;uBOMbL*a?YaK? 
zZ_m;P@jnOPzXj2W(bM>Ejj>p)zRQ0TtUZ8kjJQWP1kjDQYd?+i8%_VMC5^obh{ZvkEE!qsK|CVO@Z!P$5-?qwc zO*8$smLB}K0ySZv{#y|LEjj@I&D2%h{kO(A{|y~w`){J7oc`P8_!s@~-@4V;oBwtK zy=MDwyEexAZ(0`yh6ksk<4nE#v+4-}L>p83bEM+jy~aU>^~jz z-zrt8-~JoV+y0vyU2pqvZ5IAR92&||o@Dr2V@|k#{@dPu;BWtNba4DPp<|e%ulU>3 z%haIZZx7H1i8ICj83wlH#NXDkUvw35p5SkSV}XBaJL?^3)U*-z2zIEzvBWR;BQJ_| zP=3}yMKqqJY6QDoCpZ9DoWzQD83TD<){ymt$pw;Na151-YKBJiY2M?W5+9fHk$oloMmE_r{(;oFh3aQb$CMQ;W0OT@R!<2hU2zx8~V zZ}%~-+cN(j z2|m~6z~`zhe6A9opgsYg3s@LYn**NY=SN+B8vaRw5irIaSI`_~IdtV(Hzk|HrM6th4wPXD1 zE!_l45+c%s={Z4FeE4UcG;SSq3yzxife8U~Gr{BMG{TH+W9lAr}(?C7S2n=_Cyac76q1W=&N72%Ea|*V;Dd$cA^+s+J0fP zj3s>$3}6I(6TeX2-)8r3EdA@8JB0j=D|B+M3eE$YJbs6pe5@z*xJMnC4Q_e#HX}3^ zY&N<$CG^a4-zwLw^WVOVwf1j;LDzdyY9udv6dckEK0IcBQE1{6Uvv>P@Yb_-b!AF$ z?^ABx*Av*4!CLxKd}D*W>vr&&W{z@bq3u68nj+Ru7u~Y<$$W74GV&{Hm0Pa?&32`< z-#ktgS9?4)8DPLSrl_4#ZtDn7jg);)bx~*o<&;rxJ>|2PjJ6L_Zn-YkrOO>cxhps- zDX$cNA`C6O9!*`;|4b|8azIQ;b`LcOl6>h5NU*Ufx_CYHmCpH(_&Lh`NZg}nxo%?x* zJk<^KDU7{rbEh_lZz?{Lhx)20$DqujWv;raYM;(U(T1Jb9wTHt;iCKu{Vt$%p8uVN zm8+EWnk^6C-4Pz4Y9^2IMR$XzFRe7{BIFN#jjw=@7`=miTi-#xgq}jfO@^v?g7=vm z{zqT*O6aona8-}Qzo1xjbQ(1L;_}qHy+9&s3i;iI|4p2rr zbUHQkg6)UcPca=Ff47VM7_md!pxGrlr^^0ZYGloBm8thj-aroGB6UFxbG9mmqrdmH zx`I5nE1iB9;hWN@U?+Lw$5*Smi_(qlC9d@L@Nx2EtUnMNQN{7JraSZ@k0Ww*DWCWT z(P}<7ryDgJ@aMDPs{lHgyccDg?YbYE)Bs?Ji;te_oN)zM{9DKowxU+(s2ao9QN}mJU~yq^xN3fDD_0>cXTA|Z z8RJioTNR#D$cE-QmEK-RuG;3g`lFrGWh|QE0ofz06}c7JEk|~X!kNJc}sVeRazXZ~rP+6~lS2=EC=7`0z!y%nhwhRl(JU zv3;%NIEJa>KO*C=@~kmh-CFYS#~w5&!#APu(P#KAhv#yTn=7@wBA7j`US?0?1QG?h~G0H0TqdrILDS!eIL_9q^j zQ1`&%g&m?t7Q;W!B7-~7C6WVfk({k-c>88}Ase~LAhvxvWq8dM$iNSfAE~>Vc1iu! z^|Effs||KMFKtNSp6z^I-a{J}JKFF)>X*7#P`9)rTaDE92i)*!mf9FtLOvYb<6@qR z%?JL8YR1Z^&^>uZaNpK6=7pHL$JS>hYPg=uBJ(b$Ld#T3_bh_myQMyKMhc&6d(l1b zEcrBZQ-*5&lRZ~e_>U4_iOmZS57v?ws-n&<*b8E!sL)4fu^XPd3AwqKwpVe;o(4x& zvj1h}gu1qOe9_CfFS^MWeF8nvF{?28spX!!{mk3AcKO7*Nu61BhoSEwX!{0b9i(5P zE2_zL33lD8_c$=;?C5@~u{BB!tyyar?Z{Yh3bMz37Sr$K^Lk|S#85R|emjT^$o&R7 zL*!ZR!=IVHh|f(bb@9H&6_d6#K0c}3DsMjb%5xG=ihj8my>x?XV$Bj`0=fxZ;mT^4 zb-_!FNi`Ruo6wyV!{6#GZk@}!@X(*4LxxaS%44cd^janQDVOg@f2Ha!Oi$Uq)n<2c%<{9=#*~hV&!_WB0{t8-8@##lTeG)yD>oT_Q<8xhldd&?kgSMohv!=91 z$Z?B4l$bXS9ras2?Xh4F`kevfL;Q2umqqNW*o*ayy_!q%w6oiB)Pv-MgaTSWsaSA6n* z+F;Z0K6K>|INw7IZ$R($(7lY$5~2H!$0u#uu_8wI^|WJ?8l&mHo@c}!@}9eVv4z5i zyYX)h-qocq4@n*a9>Z2?84-DHcIZlfd*^iF$p~|T67^*3Sie$TXw550YcEGvx#6ST*n=GG$|g0wyBC=!|v7V@k8A1z=C@GpXTzEV0rq?I;q#7&AGHQTe;iU(a!cI^n>=j{sUih zU%J{UvBYb%^S;M2>h|;84f38xkr6{REip3S>7hD@y%w3eLe>%ETeZ4}A+x63Z{Xgw zl)n}J+oaOk75>#ayzRqpLo$k37ux1LwVn&x6`@UB+IQ`#jo zlXkNo>dt!lBz)?Wm31*$an-G}WQDS|ZzZzQ2~9;-BFwwnh;QZcFh}rVe@FcYmzEd& zmZ(C`@B`yyr5+!k9v|T3(jt7mx!Bfu@QN=AUuk_S*CWg+kn17?ay=XVm+?A^KPuyu z4G*}8Cv2Ng|7IupPF38GZn69J)D{)=(qHjQh0ekco%kw&Bb~*Ih-K&DkBco3dgUl| zlXA6(r9WwwoJF#NZ9`Rb3_RaXYYsV8}w=Y8OHz+fn z$aW1No)~6q&vs?>FP<0{A5VyFw%cXL6K%v37ja$mkjy>Y));uQy74QM>^R~I=DuE! 
zjTCuBzbqcz**Rm|4({3Ul}RU%DaD#{9ZziRF`gh7Y{nB8kw*)oGgS9@Vu!@AN#cn{ zY+<7vPfYs$Y2%4TWyKRisjDyXMBV`6iOWAE@x;CO@b6_j5pcv4rIWSo(0mY{(0e?w z$z`0ac%o6_iDl#|MUQ+RpDbBCaSwjpw>dsbJaL$K;sz_8xQuwBgLq;w@x&?Oi7euY zeZ&)!mQSerQ)gz~A>xTQh$p}}g9nHwj*QIOF7cbh6Y{-a;doC_}X9sJo(^BsR`aZcmPCpR?y6LKtZiInBm`}5&j z1gnUVE_F@7Z@-}C+sq;DBo1xXF~r20$K6>vhPXc#Lrh8OVi94LSlz9x3N8kPl+9VhM##UaYQ!uUSbGhhh1W?g&u`M&cCx#Gx2HK&q5j%!>K>HHz_A8)?_-9G!y#sph=uYpac{VY2XzPj{v~G+@ zo-BFnpyfdwh>IQKbfUx#zkN0?c4!n_SYn59&vmZ3mHKz%2OY3thfSQrQ;mw)VGH$2 ze?H{#gQj6m@k8L?AjS`}&j|8G`~Y8{#rR>wdmleI#dwb5jboqN43UdyK)jUvC9$S6`hmFv}1^IIwv8v z+1a+6@f*Bc6D(8mYl2toC#D#RAA{pjyx*OgFW15D8_g&-ik7y%3;}CJtyWoWwYv<)TSa@L+tzwXxY&!qZDzG) zOS(${X-(8Meb;tNFG~b#Tcd5XecNukTP6uN84!38$)J$;`}?2spPWo)at9U6=W{-j zxt#Mq=YM&g=l49n|MQePL*iEwr*LhS+(i!)r`W?Y9Y9AVam9~2)t4Q9;`EL>8T@he}fqLf(05{~)!NoR6k$3DFi z`*R2Or+kPX#uqzhsvE1A&Y0;rHW4GZ93A!m@rd{0%l-`ShX?X1 zukzv*_++EE5F>bsLotI*yeIobJ@s{2H&`6ab4dsp{h;}!Nk)V_45 z?qL|e(LJ1v-Ku*S5g1mvi~rNz!zEtZidX!WqX=71@sbFAQZX7IDDjF}#4DbeY-Cft zaAxr&XGYn&%<+oyK;Q6%;-s?i{IODhyrR9-`CQnIS5ygK+{_%3ocT6(8!;qP7t0^7 z_%Qdb`~NWb;^;E3ZWi&1Gt0+UD^4MNp}55#%ZX74U$jrIex{r~WhwaL(CG1v?^CYl z4>}3;Z?lP83^Ds)g)_=nizQA@hb5uXGXYzur<>3wU;I`e)=cmEn z*%-`@@4=`2DBrISW;YV6eEb`H|DhS*Sf%TKkl+0U3~~>z<)Pd~rS;2K&HKSOR^|Sn zdLn-PAE>=){8nwQ#^<|)wLU8_DL$K5onOFQ51!@JJ_jF1@eOR$$|7pF&c+9!_=e+* zH`wGMu*O#8<~Cv+@@Xi(QHBpzewK~+W9Qx%s6PKiVgdyrut{#*q3;9C>s{cHWgNfe zSj(}7HCTqMlfQ8u@r^aeylK47Wq(~MpL0Ig!?e#gKY2h_%ty-FEKtZ^*lb`;rg26%h-En=k_LSxZQ5dxJo`A$)zWW!D;TI zBhA`VEZd8Nn6+24(_$I+2Ie?b_;;$Tf2Wmsx~3K^!`LcL(j1zoSV)!dOe>#X$UO+l ztYG}r-%s-%EK?;c^G({-9MW-u;~7_mWr&Adv1<_V40edYGjI=&DV{myZGmT+?3(Hi zo@swGV?4y2_k)jTsw|#qBJSbg8M6*w;Jx(A(YjvYnJW2AH*!uUJfn5(=U2(58RPM* zzK-o8e%0gP8O0sNuX^3buNr@h_*E~a;a6=QAit^x-*mB!XJIoebn%QCzX*6{4e>?G zuPR3N+c;JUwvRXw#MR~pv0XN^M=BeCRtvAEp<^t+YA&(PzVNFQ0~`pyN-;pi#Vnqw z@%UAW16qF7KQKpXz%zeI;a9DeoP}R?6g+b-ct*Ucy}~o#8ri>7?w@Sq?%|r}(7hd8s{*)IlYLyPv6YIw zKIy|X()G(du9Y}f;#;lH%4)QHtFVV{#Iv%Ts~BTo^f_$_(+Jy$cV+li#Jr`WB0j#A z?oGJH@U7av*#qAyRJnz*f6?Vz-JQy}0>?C&_{h&(9Ajlm1-i?_F=ovQd;FEyl;)s= z?sQ|SNxqdU^E>ga#&*%4FMq#%Wfj-ewKRT~Z*`yaq>E$BTG#iRY0Ki6TlhYeZ>2T< zdMImp=rosas&fsZ1-)hEF(>G)PV zSRbuZ`p&o7M~;l%@U7Xdhqw2Ch+#^S9f>jUAfn@oS`+?k!)r6I5#*I{h~|@GC*oU)cNIZyC3#obz2{x^2a}}ZUHLJ|KX{m=VXuox z98bPkJhBLxV*4-y@9OT(yeqfg*YeEicvr&K;$8ih7dvUtesC~Hlo(xSypiNxRT-?G z-3$Co&1u|=kW}h(|+ESX&1b)n08MRyy5m^2E1|J zjzPp5ClUV&yP)2#8EX^tg?}|ZbNs}N*I@In%)IyFCr=Y65#F$Cu%7qQ2QAniJ;hJV zIpB?Toa1G@bL&KU0UvF?^_kb#d?Z}3J# zdc0xRtT4bPVp$pRhAZ<^@J6cs{L!P`;|;sk)zF$NRng9opNa z{T&u>-09CaL>76f+UtxY3K5VB2=u-*KrgX43op9pkI7?q+|- z#z3AIFBulE^Y~c9v6nOL@38mKH@wkt51H^rg?zR?KGqtSk7ee8)5HFb!RBKrUZVXS zX8sT_DP-*cT>+a6HEPJg~UY0OMA-t?n#CRwB zcv<5O#t;X|#TY&FvQ(3@#={t=f-O?_c8HHNI}ZKb+tCv* z>subiD0X>SaPwT5W_ekk#y%IXEdwvh?e8Qnt8;(*d07@){8iY3c!rgcUm-qc)nygI*a=(I^Z8XCw(#?^OuJx7{(lLPlwZsJsm69)A5T8 zdph3Y{T!}t$$%#&Z5>2BadPpolIx>Cc%qyA9A>6UqruxBUOd*B@yv0K zUo;Fbe(`(lVd{a8bqRB%f%rvr3Li`UZ^bY6d;7!1!_xk6?GqOt%ejA42OsM`>;iCw z;uq`b!xxEP_&HezbHEXNcXTv+JAB-%E!gd6cj0D@L~oDsxLIS(esRSx9_mdD!{cWa zdoc|0vMfg{mL`TFp4KzsXBkW}&crbM{4CwyIqCRWV2r$s{H#!tpS2NQ_c<;<3tifU zpXKf&f7aE>4c!!vvzQkvWYnZmYeIF0emcW*&hz-XyLex$|D^v@m!>{o}_Q+nLL{fAO;pfj7=1uF(wMc!A$3z9GDE z)WkOe)%(5pMhmfy+tc8Uw$X)+7m{P;uYA{%k*Aewcv{*Ao{6V*IXtXSISqGb!P8pp z<7qwM@w6V~`%lhzFqNlucX?>2;b|@U!Go)&C3#xx>*lw9p4I{%PwRc+X`L;e)?^b8 zamb4?D>&ZA({ke>SNeEbL-D~_o)$bO!_&GMp4PPp#WMa{aVM7L!4N`RsDDwC*f7%dWeIBsXiK@JuHeEN<2ZYW&pysJ))RQ=eO#4fA#s(43oLqdY{=;K#!L(!tCPIcUiOlIu`v^aFOqxN z%lJG4AIt1zA4$6>10O5dj~U`8FZ|CS;*FDtkL6!8{lOcV_*izljsf0C?q|PxeG+e& 
zHFyQ@r4R1M{^$kXFgD06&UqJa`1XHzd@MVD?^L|e6Cdj!a&;hhqZ1#iC%kd2_*lXl z{pDl%@J2d5Ru*`pXFgVVLnb~}5^ucFGv4@nVp%m_oTNW^F=E+1=n z<=cgiWo6M@$j35x<7wg|k}KQ4JIH)2@J2>HmWMa`#>Wb8$jrwIZ+J)YvA`K`2R;^Z zt;z9ZTJQK+UVrzGk5wg~%sA#ErzYK*uwC!%)WBv z$-08iGw`v@-txA!Cv^P6^j)U-#gG3yhL>1z!S;+#w~&ujrTE4_6W37u{Q2(;G9N1q-bnJX z_-^2Qtnh}u@v*`iPAEPWa`5fK$1?VqCmZ|D$MVKy@c39p7PqX<$j374E`^WPNd}9L z^@neF;bR$0Q@AoCAIspGXKD8Y=3`Zv*nEcg$nrl9BAz*k_*kr;ddFKo{lPQc@UhHz z4K|*sQ4G}MW92g6g=frX_z|*rFMV<$c1%xr#+(DU{1CB}f#8`Fn2%+~@0}VS>4}f^ zB(iiM@sUn^EcTVACq7mJK33dc zk5F>O;FjN~kB`_jD=aaNHJBkj;>!FKKGr&JZY_u1M>6bN-N#4lT31_d5Erw!XNsJk7ec$c;!y!j^xU}VxJ8vAM3z} z%A9vdeyrhdD?V1eG)WtUG{uDPP-=wAIt5>40z-A-wz_*IJx*($u-j-ywMFG%heHs zoR8Pcdk=5yCQjnV8#{R~eJ~OGqqlhDFP!r(-bl;GGUNA7#Tz~Gv7(O*3~zMeWA%hL zjujtEc%#33EFa!T&&S&LavDC?s#3?FAB#O3Zhow|uU_FQ)hmp_2{3r$PZ{w>limMu zm{?XiyisFwDs;sgP3XwvzV%n5J>rcfyZ>VgZS@9kyvxUW`|z>sJb0`4Sm>N4#Ye7Y z?ntgI#U>+~E*&4st_wdO3vAIzHVRvOugYSJiNY35 z7F+D(^K^VH(=OQJ5!yXb;}^E?GV!tIzch$=;^g9ECD%uP@I)p)mL0ES5Wh(7C;t<1 z3J*^h+vNAWm%JbHPrbqu#>V(1=L`f-oTz*(JANl>eysbjK?jYG1)fOD$MSH*G38?k zQ}maQWiUlw_*na1_VKYyUBiz0h3WERh0&$Caew{7B+mHn^n5IlkLv!z7>g+crNIa`5fK$7&#!;CQmJ zAABr#TsrtzJ&sFv`LWEr2G872ye^z3KbBp0em)j5xE{=s%=IBY*5@DU%*WFA!Zc6w zdAj^qrd{w%gmzD0JY!^5COlL8n?b}gClMdZzkd3IXEO1z>|8$vcqX}D{ZZl|9-gU} zueFBv(kCxt$MhmTQZJwB!<;h^JQMfj`#t2#_dE6&)hirccpU1llkYbB(dzth1BJy9Z$qiO=E`G=H$(+CT-c(2APgBCR+Shmy_p3U^!^!nqM2+Fy z)MH=BQ54FKf1TGhYPRpBF8lRxwMy}?I>b;^%RxDRvkStti>b%H8eH=NHPG~I8=b=V zulfJ~xw#$PJX%{j!|fXp3~?P)EsT{ zYNV&;)7nMO-AA+Ex_C;AdIs9ZyMUa)&D2`7vMYS5Gh>oR$I5E!K+)s{5AVZt}t;xH_xwspFHcU zUjBoMS8NUTNp;BDDsW1b<;W^76MnE$T&Bv#WtQ^!#k8+Hzb|-set*P!(=IsXy|ioO z?VOyKj@%QhJ+jP+9=tKDR&}AvyxJwyG@UV*e9i~1CFlBkoWuiJ&VJ=u|2=uuSCT)w z=IZMYKSAx1Z5e8p==yJE{Sw=Unefc+=LZqboJ4#t|5_SkJY(kkF~Bp){qmnB4&&h& zv;OYnz2X02xAX$fn7y(KIOknFw-h|{a*%t@?*Y#oLf={}bESu6WE=H> zXWaesyEEb$v$lmnzF=Z%ZeHMq&nw5JTc_RXhBSDlla9Up>pkKbBNNotCupm;cqY^j zJd^(p$20!=iPPU=JX7JrGbJvbaqX>%-Q$^KQ9n`jOpILyp7|AV8p)x<-#89<#@0_v z!86&cS!?SGd!X;uU4jb9ScQ)hG;KMKaz3}9}aIUb+WndTK7W-VU z^LZ+6c`gOFOlF>D;FFnk6yDW6ZaL*^>2b?^J_j=!+;TS0q*^}0lNPr)w0i=_UrgU+ z!Yd8G97Mcw67k89U-gc+Zu)~)a+v%7jxYKE_4?dgpX1{JYRO?kR0*RGHeRVvyw%&E zGWVMW~#s zA8jPH+}*m1{uqpO{*Kl_RC`HOe{A`^PPNV}<+Xje(`I5Y9p@VXbh zCfQ{6xV+XG%Y26K6q~W@Uif0RiKnH-GDZ$LDY2Pl$dRQS)?YZ|f%cWr&XKr4f;d%8QX}{ zUgXtfJcz$gwHsBJv4UFg&efM4zT&Ko*i3L_u(C9z4*VWsGvB5*qhdXqPH{fA;LHE( z^P9NN!*D)S3*H}}2~wL;^%-yQYQZZ$v$JeSbjQ?T(dK1Y(IMPxXsQ#<%C301Z}l0+ zRgd)Q!S__3@ebnN zLLJi>uead$-9kKSdBuXZY+hG8&VJ3`#G*@TQ)@Ju`i!CcsOmFrDRt{Jnz;m?d4#x( z_yW&;)!kpO{?VAoFMS2;ZV30T`&WI&wue=paa?tC-t5FKewVm(c48j&8DIY`^*PG} z)%&gwCqCF-SRLXwA0a;TDt)|%{@u;>;yl}%ql1kXfo1+c9JqCklTbf56Q|Ld4%z2+ zY<>6T)Uk$&>k2UKL^|-4q7Q~PUAPWMx){evjUUj3whlh zx~%r~(8Rg|>NJMtNNJk8ED9FJ1BZo+PNkOpBvVs=3q2koyM&D z1JwtTbsC4oE4(_5P1I&A<9Lc=DTnGb77XdA)0h>^YTQfR#x;(U=%~{;(#TEALo1*U zpFkFNLzYy=J+fQt4JN?+eZ|s<>mScpqxDDAL`8t&& zi@CU#ITqr6%I*zTuU#H)qtBwu&zFjFgSDX*0p?!UT8u>k^p;ofp*AFrJVISXj zs`~>y&s<9qk(VeqzUijGoMduWo;K5b@Q?#c!*(>#aZd z>Mw7;b@*yGK7GZ5%sK#H?c}}m)kJKi-r}pjaLz#RRVDaJaaZB1*M0o9V^z=o$u#(C z^8oAF*MN_UllT}PyANN5!B=aJb3OZ-0oJpBntJwrep`*lZ`I1d=T-!)?-JU0l!V$wugGaRqMSPkVS!Eaa+&+ zQS88*IF=ZDFzng{w6z>g8}?yh8n4g5ZR6ZdImhh1DHaq1AIXPm>|H1P^Mc|jlXqb+ z$o5psXMr;!emVDgCeQO5bkU`7WajeSmOwC`kIt2U^$uS3+ok67tat@B##a2$0j_sm zaq;BQ?d>zJ;F`)8P#i9vyd#ulco*0KZtj3K7gt%i^f`X#;i@K^J0K2MLi5+5=Doj` zONEy^AlwO8jg{VQLa#PiT(xBhF=VcrimN`&XCAI-at$-|ZmJp!ksN zfdulv^4vZEp1Kk{>AD;jPd$%~Tx0Omyu)YvVnb6h;3?s<-tgU~fTf;s0+k;JPvOf? 
zr1IU)#D<&d;i>m{WS|*&x;RT$$EJTyYZFQY=ftEt{dv&sZdtb-iMiA z%n#i|HaMn{*RwNl?iRthv-j}0@C_JfDV#g;?pRatXO6VbuK;@n@xPk&V`yD z;zNDqyB#W<+X+v}cJaEnf-+$kT>cjBeCLbMHedxZdYWa6^$)mi7e%?(V$GPUN z%{kHid7~PC1Gi;4HC@!t7EfifW*zQhs^UVzJ*>x#j`O+oV=5+wSjPp$C6lKU{}Ij= z{xR4m=){*`8{dFTJRKg}bB=gy1(h>HBPth!M#f9%`^CY@m8XYBRc=9UopY#te(RiI zv?VmYPW~GCgl;K!stY;49DmSK{=fFOPW9>V*oxTuQ$xHUz}};uf^Tl%_zuTHjtFb! z-W)_+z>Io!@%Bo2_N$Ra&ts?CXQdC|?J=Kt-e)BT()@n6WTDqL`h5|cGX4I?{635< z+ZJ-BxB32mh5zk%KgpV?=m(bSi8J?Y4@<>dELElX>3A~K#+>92N{tU~QhaEUv7?M! zm8|)eiD7o-w%N5*?28el%2aXNzENRgL=%K#nk=`ikh2w3Jz zw5z$QBVDZMMB%q3`!Pd4vkRUXL_BkH@!P8GoVRwvk$XyO4_rH^_TY_k;X;NJ%q!#X z(&w?z*gAbKo~J&~v|q=L*D;6}HQ9L4kBJL;c*d;5r+F`Zu^k(w7kI|lDc|QD$NOua zGc@V%b-AFyTCHtyLEn!(BA@RV>=D^2U&kH^eC(3Li;gp%dFWW<8SH`*+OXqS{0_x4 zetugzJo9(Vk(!{Bs6JkJ#>+(@jNr#JQ_#x;#xq_{vp(S&xNWI;#@?I7GY-05_(K>* zc;rJLBp^8(*VF9BA(-(3QLo7JR1% zegAv=cU&yf|;aew>DrCc`^%P1a|ie>(Vwk(#pp6`vm zU6%LKc5EFRBa(a&p4ohw^R(oK#WHVzYa~B}XU3($GrOmD#D{LmfM4L#d^dX^>POj|y~Q*39#Zj4 zkux%hXBx)1ct$n811_Fv5_cie1D@$C-z{T&Xjz`omp(jW@)P7Uha{K&H#VK{jO9~V zJaao(rkdj;#E15SXQqK)_JL>ez%@s~Gr~2(GZ)=oP`v{@^QZPv)idwUs$K-1nFpTP z%Q(NvIPYPccY|jR!FeknH;TnG+Q-up%B|DBl>mp!a|_g!(Eq>iaLpECU(XdRStU#} z3`|q%3?m*hv~qPYVDQWietRqTo@@5yOs>2qR8ZOOjPmeIVdX9C%jt|~CYbhwXD$K5 z$cJO`%z+o2>RU!?UrwIEGtYo$-vGm`;`k-U8V*06xxvFTJHRm(&nyn+ROV89@~8P7 zcm}_Whi5_t&-`C-jOLl;i#^Z<$Jl3dd3)Pgug|f@)BWDXFCFg| z^6hQu@XI;O5#g7=j7XnvU?1@qI2qBs#A9;h7bPC^BypLe_Z66U%o<_>?iXN?A_S<`~#N8s8{!-Yj(MSdNXvWW;rofAX~XPVIVlZY9o$ z%A!z#;l8gY*T5OXX%v&O`37{J<+}Y44v6KtnV5{>x{WpY25PWbZYCBpg=2Pxn9PNW z$-s4UF^ro-O@34V{fX<=LR`k-bLsyuSVVjk*_+Au40R*Q9mn9A)A*h4u@ODBmEVnp z^HL7iV-Ind2H8E%kjCd6=kXjqfBk~5t;)fsnTH>g>%n`;uiU|B*iUhBC}z`^joFA( zm--vUt+oVnj1TlF+7lj9n+M_A?dHAYn&HYh&s^*{75`*?GZEdJ%at=SL)hhuCN`Hg zrsL|Iz5YA~CZ^L#Z{KlG`^wL9-BcX(cYNmIAR}w^{f)F`anL1vpDCu(dRE^sP`-~N zmktBH=;bfSgn=I8`x3ZZCD@Ru7$}mG!@yz>apV-!>1{m6Vvio6@zaXD=_m3Ik zIrbjX;h%H7c+R;gITRGnvG-u{&st(R`#6e-FDBzT6I|PE0lu8sB^|PmIn-_bg4KaR z;>Y>$PXzvru`l7ry+9mCa_#87F8)#f_~SXZVRy>*ycGO%6#R1*_-DU|e-3&0XCL_I znetK9Tfskxqm!#Ql@}QNb6Fbv^FFw71<16v(SgP#N6Q~>$;qizf46{xT3Kf$jJ>eX zsT{*NLagUz_-WhW6Ww6Aa)G*uUsy&G3@8IAA9J*V+gR&C8<&ynIM3j&i2E|ZUCVxd#JgShzO z;CL__vRC;MkX_1`;5dZ_m)!+NZY#2W8?h$ckK}RLy#a&EHWFu=3-|C^k5eaH7TX7p zmo;@5EVhi}d5*OlYw$e?j|n@LA&cjRvKr@g;nWSQ+~KRWDcQURnW1~GAkLKfocYMo zOwW0M^G=5+cNlDT2K}af+)WJZFx;^1jE%T=!e9Dc?_W)MU(b6x-&XW9p0pZ2S2KMM zKlY_?_OQy5FW=GT&%t1ESZt@kVNC{ydGg!hunV}4R2=qC9XPDY;;?{;d8Wl-_3~vD ziyH^d7x#^LQWaRM%8$bi-rWg@RaqQ%(O246PT?NX;4o8zvvui76HjvcF+)75<|l)Q z$4(-C9Ck~SlVY><1&{qU13wOX-i+5Vz++VwkDbQc_wblmhsC^?zPbkcs26ywN_cEE z=L`goov8dcJANlBKknDaRpGJz@Z-dX+m23l;K~_Z9QdaTFV3sQDSJlPO*J{4qwVA! zxb0u@clL=RXYp93y+`Z4x}4(3+5B(4=E(W#}OJ#M&Qa*m1S?|I(Q%zhjEtWF! zCk;QYlkOe=Ro?soGI69Otfx9` zy}t0{#ETO@PCU4D{J30rWf{3~U;)XU0CJ}re%z7H{J4eh9DW>e@d5JVj$Dl_>H|N{ z{JwjBT*SwZbN@fy{5Z)_!;kZ1sKqlq@Z*eJRV@6)@{DmM*ADL-SL!5F6<2!Zix$rq zeq0jIguaxKA7}8)Q?x70uj2%dFI5>_m;uk+`5%LbXHFu19P_^3NwFLHf@iwn$C>dO zY&;WDJj~0du#Xsl@QnG4`0j4rOJ7XJM(GL9m~+50TR3MRc;@lr%#VBVIPl{Zy8JjZ zevuQDANMWfs_@KT`oWLu6P^L<7(4?%&f*ziguZa(dWB~^j$EJcj5u;Bc*fqF#WTi# z51#4Z$=#HeC#O9>mM5q9lAk9R1INIVJ0c&ha7<^OT*SwdbN?4lPIAd$n%vHqMsXa) zhwPdc&Uh8>MGctI>AL5~)fp)~xiVzPO&r#r*zyH*H`mp*R>MQrex>ci2k+uD#fPk{ z(D%QiEsJUXmhUsght~CxBlkiN9J#l9eCTb?k8@>N4|wKS@Z%I8GV=#KGncs|{=*ls z_4>n)6AvyOKW-j)CL=e_hi7`hk2})Ak6ZZ8;m1)6v>*I9ACBqDcT2;M^Wm7Tzjyg@ z5g$L!{r@fC$GJHZJh^FcOn3Y^D@zr7t~RmDv~d}C4f$|PsvH%L`Q~DaV@|D;jJ0u@ zm-sv#KhCrbKkh!-J(=;#hn^lp{Bm;fnneWFEzcdhc@#B}D@m~7j zRqT=8;+Lm5XF&Mn1m(v?y#3xMDnITP=7_;B{ou!m59jB{?FOTWAJ>H&=jE%m+&GJ0 z#B=B?NA5~5Z$UEOqC@tHLIIPLTC^W%oI&!->! 
zxMa+xCw`o`8^z&BDnHKMCzRhA|5$z;db7#qJoq;go9l{y>>4Oe;m4u3o6yb4oCj+^ z*F8VZw1uxk_~&8TviPSvew=d1^<|HcKZo2qogeq$Tg;CO`}lE7;K$i{5POUGac2I2 zf3lc6;u~CuEhzkB`AT+wPgVq$)(D5 z;j#C8wi6y}vUuz+J}>4TQh9QwZSdFx+C7o+nC-tz`<4DwKZtnjysQ)0A_evX~r^iY}jex+J*S}g6yGgB-c|jM;X5MQVxflNTm+7w69*hYC1WRZh}j{+6kF_ zrA6d5-UI&Aex*Ce_h9xbaqV@=AIJ4SwY;=;J=eI2wrjZlv+O^Jap<~b=DGoM-Jx7} zDMvZiTL^!+LcVzN$5nVWTGr5p9lr`{3@%arxMd-7`VNg2^9?bS+Y|+=86I4 zj*ICB3h{BafJkDEX)E%yE-W|jx5W1$Ik0p>^r@udG6rCeW^)T%#YPlY$AAB)JP zbSE*8=BL7mKd>*TB_2-vA~-%CJ9BnoTTXuMT=?m&i^&&vt&<4xe}H_r&2yYY^XRO` z#lxJJ{)4f5opI5AdxzXj-=%+Fqwjtyo*dV2{tWq~$(5^TZoZN8@9)d)R*k{n|_}+$H2|YQB-&Z`Xzs_pqP7d4U#%yG5X^Z zoYNkh7=MCT&ri{VzvTPanGF|5oVJS+e6MqKZC$t6{B}aT7z~@x{wl5;dpx|Mxro;@ zD=sdkUh7XdU*oK6TuHy}_4K=cZfah*`IX%{%G|$3_oBLkx@Ti26u5Y=VzQH{Si1B> zAK|;LSD6@8z2(kp9=wnErCDd%$Ly`Mh3qe2oi&Lk;p}Ell;#Hcl4GGUb=U^=V7}yD z=F>mjzEXJ}G@hC_x_9AJouhnAUfj#OuoA@bBx^inG5jN-D+05VIr*3ZhDn8R&$p3JTGodpMJO_%!!$MW=fhVea9*-aV*&wcWz&Khi$&WV1giOn+cwJ{=~1c)9jRvG2^z ziHg&m8FSV-(OpxsqOWWo8lB?FUMtVV=R*!B^M`+!y@|yfnevC*xh)wi*_)i-%Z8i$ z5F1OK&w24LlOJL^aWC;Re)O64m0|E-Y+ZPRn>%2Lk&$P!pYcrYe-6i)?2&5T98N60 ze{6N)gR>JY%%NDjyVrB;mu4qQ?wwG*{cGXG_N$!4hmkoB^6wG$Lmj5C-=Lpk*K>XL zj%0%woAUyVuh8djvJQ6g+|t1sZuVCMl?nm-|c_6EC5$_w2_a(gN{K~z7 zyviF!EB9z19;0^D8=;YA4BEN>^~f3jbMFfk(!b-Tx1n?O{0YWFeRF_*SWRC&Ne;Q# z_rn|1hZ>VZp@Qk|xTxRA?d6S2Ea>L`(_COYX2#f$<}fZsrcbT7cn)I`o7!-3MONE| zg`u(JH5^@8p=S?{F*eR?jJ57d_wXjyRr~+Sz1r(JjJN-Kf5Y{Re&CubkhA`4t`3fi z&t{C%UGp1Ulle4V`bc}k?Ein^f4g>+cO#5U+rb(Uww7I}TV_3-n^5DFKJ}BH~-mhoT$M@>kGcUC&T_PJNJ(PzD?%i zc$Aothi}b#tl_=%@ymDh3f~%A@L|qzyg#s3(*Cpg1bc&TA1^w7_;&cQ#<#M&CUwXz zTru`8(d5) zn@5<}+Kq*Ep+_pfw(Y?QroR0~^xbHE29^~bJ&cYtcs6;>3GoK7tn8DST*r@P!(iDQ z&Xuk55Ph_dYo+>Fl6Jo5Yxw~Ezn0%28{IwOg`F|%Rea}R*eXAUtxy~YABNKv!`2JK zlD9Y&!&YGfB=>=T;g0r|f6ukkVc5&`xy#+Gl3&hZ*fQF*81_7VlNQ51MGdU;dVygd z@4&EEoSTASC(WtdfzA7-hhfj@1%~z4!Aixj#LK$HutskW0*>7h$Tv7rHrX9-0gknK z4vl^2;aJ_v5*NpsdC|}Vjunq{uyJhI<#C#QX)k}?<>_Q$ZyFqnFUZ>BS~tS6XJOyY zD!b{D1NdmJlCp2JdcL>?C$PHKJDa#}i-{;n>%}uP#;{ zT6g&Rq2%`zwiE_FweAh_OpN2V!bhr+CEsP(#j#C}CyO=)o&7&N<=d;QEIRlYat91s z2!0i2Y6sg0zwR>E({*+Iv9xtr?JE|~g5Or6TdYsgU|63|@`oOFeMGO|R^h~EI2rOu zt`~O2N4Z?DjDs-jLtxm}qK_I3yYt+uYV&ypi(lot5q{kaeqH}9r+PWO`PG4(cnthn zfe&Z|m~{#Vb&ySNrUvlqZ@{Y-zitkM8lU8SgNI)^zjD1Z9Q-PurHfxR76-}8)Xta* zziy|W{m-xGwtZm6j^{5Al7~sy6@T0ze0HyaT{SMuuL}+uj5#zpCK``Jj72O3vwoYt z7iNuQcl7^8{LD+d{#U!2GivhyV*rk>dyR2W&XPB&@pTZqt+wpg+jdg1Y&q>{o~teS zPwiN%t%W_b^;Oz>UDypgE1tjo|M&cF*O0}teXlJx+~hAYcy?X)c-C?8Y?HyWp1$(q z*&}K2?A#7KYxJ1Kv%kUjp8?MrdF6EA*{T#gTO~ZZ>Gm#o*6jbw`c(VMSA=IZue;(| zcmE%4pCowJ^kpVIduQz+;@OjkKP#Kf@#cSj@N9WzJZr}57~t7tK9hZL7(6^{*5hv8 z2f#6tu?u?I7}UYLVj=V ziC9XJ6O4z6r(y$L9NX;0de5!6xPki5v$&SAf4JTgX z;%RRkoX@yx{Fm`t(9mR@r!HVoI*8ql*n(MmmAet(WW^WkW<^-r|r`p-us<)s_zC2qFS z*)Zn<4?80}ZS82)20%`HY#6cMT|ad;)OfYdR-EtRX2k`l6;T;feSrCMY8Ag(8w@tK zuyz)(2DJb2x;z(CZ6f|F+soFGR*itK`|@K3iGj^diT}Q`E;qWfEI-=3c}O&eSV?HA z6U|QM$GoNOR&@Rqi*9K<&%|DDX*-wL>yTvpcSL-}edDS}@V$66%8&U1ao1OgyRIh& z8(b>x%Ww}j4~sv4jx&Ef`7w7R&rE(y+Fs8(Qv2%_|MlI2`cZ4R<@Q@l{P%L&IG204 zi}&Ysx`*NMMcl)1?;aXyUv0N=55sBS=Ep1sa~8Arw3xVxxHN0H7i4*r;RKxH%$Q32 z_m$Qx_&39%sqtUs#|(S%-_se3Gr>UE{*|{fcI|49c+wDFH%cZUV`MvPE?e%6k>ztn z7+J2`(G_6kuwtHYe>HEim^YHgTR-R|W?=IcrshMRRXAez07gehl_*(Tb4(%9LdkFlM0CT+wu2wEi*%q%M zcf#N~#Strnt&xv4!G??JhyASmdIx#<-O&%Px@Ytwt0b=^4=1e;JrX{p?ZU_q=LbI| z_vH=LBv^XkBdf}(F}+Z^IaewciXPy9dyO{Q-9`JabDwW=pAOe)rY*_NLp$cx>e}t? 
z!)o7{TYpBF#XecSly zfk#~|+AyN+!bi+~QVW~k-w>Katp)B=I_hEJUdPo}IrC~4G9TmvSeV0WS%+>r=lWj_fC^8LD!Y(mANEc_at+9+Xt_zjfbcYa^}^>53uW^kJb8p z0p$-kZQGbf&sGu3CP%Y)95Ljkd;vMk{k0rtaLne21c-kTv(|c*4&3HsH$KN}B;due z(SgfZyN6E6PWl4GiP8LOXTE+BS(;Jyd+SEyF_iuuMt>gwlatr9@#Ew~9pemr+~_l} zzgd?T`1^Ym{jIelOum9xD!P-oF|JZNO?_TY+7Rjof9kYk?i`~u-b1)Coo@L`|Yq=#i#ZEf!1NQt^BXGtaYp$q{YZt z)tZp4VPn#pd@<=w#H0;p%wHcoke$py+R|D&)8-*9_F~erm>W~Pm~_qmn3#0E!SJ4r zEG}{q#l)lk?8EO?F6D|}O^t3NlgJlh@w?Xk{}A6OM&96WF|t#&Ri1HU(nfa1{!C1o z=hAO)M5f(SHk{m~6UeiBY902&KH+%Dwcp+>EYG^a#!$^}^BuA2QMa_OR9sy0?pa}a z%~8f{1%7y4S9Jw-?N@j;_eZQ+@Bf4QI!W-o>E}%N{tL*wwSyI}K8g6)M*e$iqd)lm zgPHNY8Lz>{_h#Vr4 z#;*UH;bMz}EgSYCu>Nb^(mp+mmx?`Pk)-%bPBOkF#ftGrue=RJjW|>hiHwKOzJlJ7nt+sYryss!S)-$_BR-8Ke6t+$lRJB@ztTk&b+o)!@FAnt}g&DzQSkP2U|eg zbThBY7r5>GaANp1t5@xP9J_Q5^`>@&6LIc&7kv{OS#e4c@s8ct6~&G-Q)61@)rAmd z-$-qQX8ODJq3r5S4`f%XHpD~V)2-_Q25+mLg3b{?x^;z9y=l2qJqvvoK?ewDFQjk3 z#v$Hi1^p{Nv}_I87!?k&8(wE)Uz9mH*cZ9+8tl7l`oP}TU8WvH$Z)_k4=*Lpy5`G= zo$$<3hggi`ENZB!nCMJ$MvMEE54T5odyI@Y%f*;hMhK@8Lvq(%WOQ^a>qzs+Va}U* zMEfN!zI3t0LB(oA9=;q~H%B^=qbxQyD&07=tbOHbuBrZi0Xss!RU5*VX06tYiN3jr zJx4wtob=zD*iTc50}3C?j&^aTQ#X@!syTR}uijh*>)5%P`q|WH3$&I0PIxliUeY~a zMXmRh;K_&4`Tm-7>FUib!S4K9=GOZ;D$>+ixYfgw;u5Rhh2<)-AD4mE-s|L5Hu3%( zIF5N7TC1Po^Rj!#SC={kamAywX3M~FPkDH96R)cs8;2bhpXcTLI-8hbcl8!@A8KE7 z#@3r#24DC+)LUqRPkfHA-oj`2+}^`F*01hC?Q4ICc*R@9Yhiwc=^t#br(K@ju=s4` zVc6SSUo*z#6YrcFjPAhKRqV+QwO__HR&$LQHk9^qoaO|<)9zjlKcDy#Bd1(EDGsq; zek6GYo?pb)n`6EZe;gkDEb~P&U@bNmc3VRI1mOtapd&pT|0CBG&dfUO&rPnib4il(lyG z#O16b^Q_KTYQyA%YrxyA|3{Gx8t3Pb4U2=rD`U%uQ&Kx-0rH`QKK=I>gVn$IR^xaaksV7$6+6Dz0JslCI{K0wR#YrzUE>B z^2g4_X5^2Zi^cowKFEoZJx$1-CM$bdXE}*i7Gjsr3Ma$~7pBqo+6(!fo7z{Z_U0Dk z(i8Nr=Ff|~H*?Gx7yTpcYE9`FC|`8NSnz1TdHUr?oXEfluT^5RdBtN%O*Wb)o z{Sxk__HH5L<7KSL5V~TEWG%9KqccAK;7Hg1FFxjWa8nECPgVW{&c8X!-8Z4N7tYVF zlnyU(vf?FNC%`(}f}J7#^zY1<^K)G}-p-s|cuGe$ z(3!b7IE*<7zk}zKzNDtp{H>b8*P`>m zs~W2V#AaNbJ3bzO3ufm$Yw7~&(}(bZ*ymce$>}18i|Df&I1#sGg=^0^9-`TwLY{TQhtb?ZR zk58dSfb8%c*x{-du!O#Q@o@R?9_3Xv17wq1d;DNduQ@1Uj zH6rd^vE$smoH{`z)C_8;R!{&Lvm9N0x-+J-99i=ucK3E{^BkVT?qSq<+UH{SxMYPI zPtOWAwzGdW%p8v~=F8Bxr5v)=8<0ExSXhJO?qOV$g&z~pg|&moSfd4|qU?guRK z=||@^zkNQYi;gUbw69#rH8nmjyz1KLYQwY7n;h2j7}`i@pNEQ_r-h|upUY0SYgv2$ zgr{YvA7njOu&(PEW7+7td$7@;#76(NXP+zHfTNdGRLl99o&XJcO!@Dq3y>;9$r4q)Pvs1 z9C;nPTzRTwum5lAL2t)?|Krh7)$hN5Wc7RBDF5AV`Z!Mi?&5l{V6z|1nQUah-bDHD z+`Rocb*;z)hxSjyZfEcM1(&ein2UFBbV7@k)hwG>#yXRnEyXr3Vf~@YjQ#!`pAkp6 zdxx>#$C(-q;OEVz9m$Z>u-T<+%CXx@`Tuv=?>FaU zu@5>o?szyKvSj5+@JsyS0{>pV;hor`qhp zdttN3(8;~B*@tAd+0o6%++L61v$AvC`oel^uT$4PlfB-g_`Z_F!-9_3eIpof=+g?RD~jypQi*qqdjod^t}2HP&92&MkJF z4c1=Y(jR+07ki!k=zX@=*@M13x5HkSZngI&d%YEVUH&rsmK%g2%di!Soa{wT%>xs+ zz?r!MT+xK?F|y;l&UX6O`P{S9&AOaJ?5ZNYo$k)Bu6BAozN&ib%ev_A+E;#uYieBb zJwLkI@a%N6j;GUxaBwrw;+ZeUA!#AEpYwlX3UGR)5XggEI)d0?DU9hr`OAm{;$NzBpaHr zwTjc&>9=$5ALKy4nYzWo>8e{i0()Kg7=_oBpYadmW!#3H-bPNw{~|ZzRpiY`t+Vxd z-Z&!zwEkT?Js00w{WY@Fr(&ls!%inQyMY{4i1MA&+}xTgYiB4{3qDumvVfSLpVR; zv(FWSOKYF!=2Yem?Xb^<2b6cfZ=Z_~GZ_ALKNn_(XP-Ox*ZuOx@?h@c9{l!sUWa{d zY&)?#E{e4=Uxro*Qixhz;7I9 z-^(fXd165Jc|P_z`)&JdpR>nyd48&W?%rF9eU3jqX`jcj&sU|`=S|K98SV2b;pQ8O zZ)LF0>xJRNUG4KK?DHyXpI>!DxAu9JaCRweylbC3QNwfb`O)8o_PH7Jx6nSXl70Ro z;%Jf$J8v9h`#en@+}B59pR@M0{yXgRec<6v_IW}%WUhkb2B$n?DI~tNcQ=AKGDTKH+#$P;`0pld6n$*3AB5H z+vleLGTG;U`pzKQ=Wqe@(WRb!PCTbG=Veg+=WrD=abAv-|2#Rz2hlz^V|5Jt=j7Sp>B2YfC*P$8|7OH;mc%0+XJXQ33}bLc z*YFIMZ(fXEIaYiZQ>O-A(7Zw5oBw;tUh*2oWsJx5dgOlP03TE5Fjs2upMN3GdD^yZ z{pYX2cR3Hf%Tf3)1?2SFzk_{V^!cXpG1Z@ei?|d2dGY;Ys@KqW@~1t^>mK$jj)03U z&dc59#gm6mjY~c_QT0mS3Ra)b6>Ov&X>Pi_m;~T>;BHcm#+JZdH!^9 
z`y%9zkjV(;<%9q@3s!2v&nFKG)rwp#pu_A}*=!ROWFe5f;b zT`}oa@{WeZy~C!zl^B@jq{I60bJyWXxH=RbT?%)-lODa{`u3G8xTePP&OI(SMs0XL zceD1dq>WU6yY~J~o#JxWU&p_$HE;JrTTYC8@#4f(SnhgB2Y21ne!D57U2e=20KH*y`rUs)VHm6R#tjh}uvvS=&Z>}_yq6nnG#b9Mekcr;n$>N)_AW-9V%3w)~z z9?O|f_i$TrD>_cEU}-^;;3EJO?-1BqeP1|6E0@_W75tgy;BE)c5e={WK4qOW- z@I6lA0c6~T*gL<63%C-QS9A6Cho2y}{)-Hp_}=j0pC2SX{9+#;evQkAH+>h*z=v=8 zcJJ}Qp2cPj*YAGHUBlvP$KgwB|H-bh>}W20<$t>`zdCjixzfG8skfiu)GiL@Gf#&n zx$zx1VT6firQ^n1o=wr{cL`({0hbV<}j}jlUJp496)5T`wxJW782(EYyG9{4)GeXq~=0zcS$NW~A` z?|bbXuRewyBVPRi`7m7kV8*_<@c8rU#n&61xA3xlCeBrOp;;@#5BZMtfpG9?A{54u4LFeb0;}7{g;))*k z+`jevpej4(`x<|klmS;1d-)CqJO04jZ?f@+i<$o(u4odjpUHd4_(j+e7FXE)wrOxh zlXwMZYYqAS`in2}(7VSAUz`Ic_gL?@T?4*Ij^R5PU-ZNeiXmewz!x`kgDIdNrIqiF24hCcYl*Riw z9FcG&6=xjj4rlCPKT5Z;2p?}qzQ10_B2sumx~~j4<48gAO&M@TBn4+ggfotuR~+dE zXHah(9TrjiVQy!fA>V+9Gt4@lMa(E2&PdMJ{r=d5!5K~H)F$h{JO5+tD<9yR!Wk<( zoS`;6oMGg`8ML7>au^%o4Cz**Uzc^{ySBM@TKj=FUN}MU#xUwQ^%So-mUyFhbO+uD zAcO53h`e=p!|dbzHe3R1p8p0WpABt5t$c|}nk-A_GxJcq5?S(%f z*`hfd;mqy5J6z-{<_R z+gHw~?KHTfNj#@5w5xd)U|tD#q{AF1LOjFvW2Sh@8f=5TtO=cgOall0ti;yoqjd|z?LxE^rFF^Fd*aYqsJ-;X;^<-KJ6 zmDm*)ci4DFYMvj1U%(wBw1)U=Up6e658me2RDAMg;(q57_fu|=yNUgY2ccYS%~#Jo z{5X3H55kS`=hqzH)H%QAcJkxyY|VSrwHIyS#%Hol?;atInuxw&rV{F>!(mlxvSslgVGczh?t zlxF*N_XIb;=Jrr1svPtsJVU1Zn#$*w;7tcV?r03}1JLQ@@GgD|Y zi@5$0@;(K*ZYeS6CFHeI44}lzYZWA~RXMSB`Kz>EHh6reSXNGD@b25lZPhiu=CjDw z8t}#k*w<*=o)vQU3&;LK{X+Oo&Esb$f}t_-FK};%;2j(&qt+R@+*C$CoZFj~m@ubLLcs%1tUhp=MZ4P&A~a+WJ6b&+z7A{&~og#!aOCbrtk34Xoy z=2cv*`eE3^ZoccB%WEU|JJpNt4^-a@U+UV?qZ`Q~{W(+PaVowM`b*=IO&{4DKxQtn zPjEHdgIlS~aV@-wjn0@j`M!)^Uq>B`DU4BoqeA@&$3SwfjMoasNu0^+S5C3@FmmD% z{FwF`M$?Y}IaCv)jeK;Iym|3m`mB(A?puPB;^!!?p=Wo-Mjh%<1bF^`d5FB~)WNWy z>stEli{x37i4n-$5iJwsXYWoEmhi14JG3t`xSF8u))LV>m_{#Q`w{u<1t=&7@=j*rE^FCJO zwSx>2e%VcYt$kU3QUxTxKF;U3jeXC1TZcqnT{nzp&W=taj&c@pl#mxknewSJlRtFsqB81HFY@8k33_bP^woy;G4GkMH@z)^;rQay^*idiu4+F8?gP;Zy{S@}3?@Uw6s)pi#5 zrS_Mb{6ro1;OWcg6lzJf@|!SxtFxUD_i!`!5UQm9osoy{<8#NmhX`_3`9szIQV!)< zD+=bA_8s-dSm$ZSi=!+--rI3+IMtaET1OsU*5SU*xlzd}$wn)0%LBnqbsW!Py)lO> zZ)4nw)DH42hqj3&Bb9!m0dDYjY;G6aNrY?Zin&zbqT+%rNI510Bgg z*0;u~8Ce)(eYf65{vhU;PoAL|<&u4tV(PjFaMVMzoen##^Wrs;Uc_r&fD5)Q0~bv5{jKDK z**?sKpRQo6*1{1Cg(|M8fxpsRmK`n2y|7yM9VDmg6l{qiClp@;7eVow+021m@WD!0 z)51^UgcY;46|)hhO2%zGK3F7Jad9e!>fnQA#!xYAh+gGgdW-pBS`$u+ebHM!SWAW) z1$NAi0e-5tahu-~pYiaMS&NOlmrgl?&60|r(!_1*6}S0M&T;rVa?kf_kAxiRk_6CS z!CGM^?AZNJj&|k`MgC~t2=#)d3oq4@^KT0FQz`aSkhS&XIO@|e?_%q+qAi<)(Os?N z7e;;=jv)GYGkReQb=Jbj1F%TlW9NlyH>2MlB}eMz==U6sIp}ubpXE+|qwI-tbn8w} zzwXACSfkiK*Tj~H&I&jirh&a=XDfH_for96RS)XVj|q$X3VZKKe&0$Bn*isWhHe9E zL~ka?kMie~Q=drZedK8S)4S0>dqaU}d}TQCX>#iB8(DFU*3JGR>O5f+mSE$L$#yOX zVjDDb?|+4dyd_ky=pp8IGjj7eY^14Ndv$=CM_e-+7}dBfG!kBO9&#+-$nvwMZ%*|V;IrtS{(}76L0?FxTrxbwwMWO*rfO<|H%4uvUv|(hCH!_@X!7*u`}lk1 zHr5&fUtU9Q!h|#bnTsRz5?)BHQ_x?2YB$nEMsRdH&OU7Q#31pUmf1H)lont{W14 z3Ax%d#fk1|4MxcoZ1_fJ@=SJa$LPm)WGk=mx1GA}cihsZ7}06GGWja=czuS~og6%K zMI1?@};pjkmtnvc4VGjn?kdx!E1;@+gs}lGB7Cs-(`OM)e2RSg7Ieg@v|E$#>2&<#S5op2IEQRa8 zmicG()m_MCt@%xSH z{)gCBKlE??an--zE{Azz`VCywqyFXhKK=W$)*t#e@vi>uqJN|4-$WnuZ}5yNG!ynWSS7^shT#-zoZ+n6RDi=-)&? z^lz2&XKW{yVe~K8OQU}i$6NnOKLmWb^zEpBhftelAo{nQSc~i{Pya^IzvKh4`m<;K zy8wI6r+=g9U)HBR|Cs6DDEgNiM{Yaa>R;Bk)xTQbe*Jqty0f?X_xz8f*S`<&xnKXz zU=9ze{>8rZ^e^*&0Q%RHbJD-+2fzOP5dDx&|Gs`{SN(g0SW+ha`y%z~Mqp3)@$YX& z89ZU+8GE2-Jb_K2J-x4he|Ld@_ke$83koC1Ctkr^T`p#89_Vs+( zd)9U|I{K479gWY%ucIGA7r)9p(K`MV=ULnFQs(Zz1;dG(`OMnJA7$=l($7;Sr^m;4 z5tH%b<0+^5=JWsR9X{RzK7Q4QkN0+ikITuO5KDuPcQNOMkJs=j+{W|v9v`#E+^&b! 
z_dB)y)z3$XzZm_@^}6Wid$msbhmU`B4Ds>O6nre5UgFc~-QnZM=;vQ@v~ctnA4@-9 zf={Y1_;?3+Nw|s_&H7L3aMr(Hhd+e=)!LuT zd8rusIxy%Md~Fs(mwvdj4yTQM!qAWK`3TN;A&t;yc)8Rql!NSns*-S$M|-l+=yCeOugB-n7gmqQcpmrp3Zq#( zpK!Efx_!nx#ZtY$O!~YVe|FVz#nZ>gpS>49_CEQu(dY7K@1}mn{`PNPBRwvEb~E$U z>T%C^e7Du(uJ1VC__NDBfA(zw>G1-5G2CS6`|^7aPn-E^bakXdS1&;h_;vMKbg~~$+xhv)OFHnh zou6eN_2KEwUH#dcGWfHHr|9cVu0Pw%*G=H*y`Ddtc$@KOZ}s`JC*zk}hkd$-nCYu< zFRVX%H@t^~b29m}C&wLRK&n6cL3BoXeLb>Ae|DLCZuFhxC3qTtcBpbo$n`sS@vjo= zR~#qi^Jkkr1WzZBr=#dcT~GR2x?9)P=Sg{F{MsGYA4W}yL+iqcCNOv$zJl>1LkE%E5^|qxE;&=PcnF-}d_X82P&gOFvs(_zu+1QS@`75Bj;w z@4M*dn>+M#s=xau=;3tw`3rNp>gRii*=X(QK>l@!H=oe@Sw6WA{fxi6Lx1*FKTo3{ z%eY=Q`We4p$MxUU&z<#i(*Hd`{k$cV&JS+&YoGPA=Lfgv51xLO9q7}~o*z7|ey&2V zbi|=q_f|ir`oVvO-swR_`){OlTRv5lTTT-=1Oo$c1lUWPyXBiOZPaL6BC#_!7D$AsX; z$RGYo4mbC(<8>kUt#0X(t?= z4=3N^=%Z=;;r}<(aQDVwhd=zUp?u>HFAuo>@br4x>4c+u^oOV5X!*l4@xtT}xA$cI z;mLdQ;b?Ky)F(arvx7c=_*l;$?w+fD?CbfSKite+gRyV->F^YP_^;8`dwu?JWPq8w z%VxK)dk?KnY#}X_sOKcTQc~=&nfQg4{yOAZs&6g{_x)5>)qh%JwAN>YB%_L zODHETzJ8uLFMPd(SK&P5m*)@f1-|yJ2iG6opcHQr)xbKCw2P?s8QgZA3>UnUFdfItDsfRg|BJn#F=yfc}}Iza*XeC9J` zX5ROC-e)=I{LcA3=V*Ul^!|O(-|r3}zTOmvuRHt4ljG}08TTLZ+QiHF$9s#fBmVJT z>TlyCm-XAdpIpY@@{Cv*CL33lrOb2lXNliQfPcYbnd z*YcBpjWz{?SMd%8kJa(Wg@}%i^OK8y79F3*buoT&F!=Qr29N0Y`@D&D{A+xk$@NBl z$ojvZ_*-A_cT)W>dlNm=TR(Zeg}k-+POn>(Vf1CAs8u`Z~`ui%zF=hU7@-B*hJdZsh^Kwf1$Ju{<_m8s|`{f_! z%;O37k9+I;=pT>O&Fp)lo8$fCe?jl`6o32wA+c_5<@0#_{Vn2cvJU&GpPLfm?>_Ru z1%DUV=U%FX4{poBME>#bG9GdIxlZu+jXa}6Ki7$$;h%WMWcWK-{2F=JbVB3TC(1wG zNBx{q|M-d3&z=0;-Ndgu{a*azW?gD*eNR+B$NR_sh8~X7&w5=xdufb**6Z>o#CTGu zpY?h@Y4mgd{o{28UoT`FQ|2E(4fS*H{Nu^>bI<(aJ=V`D^pD5t=brk<3(-X}{&Dub z(a-Vz@k8jHZuE1V@s9^C>7<{L5xM?jd~WM!Egu9s*Aj0_xqj}4f85AHt)Fct&tK#j-BsZ`Nt#r`(DN{W&ZIb z`umjfkH_Kb-ucI)`n&)BaZ8tfF#d6GeJcFpopd~MA)@2s{NtjZyYY`lbbQ^X66<(3 zGQ{?eM|AuP#N$$mzx&}IM@BY9;@`H6OynQ`8DkNr-zx=we}(6E==Vy&-}me1>$QKp z?u6p+6U83~=MPu=ztx3*JdA%l6wQY+0Q~XXUi-&&pHk!>uk8kZ50B%U*V_17uS>1F z?~Cg0c>nkrtjjq3t=Hx2vtsbKUYBnX<4Ga@*6a18!QcJok5?M}y_9iGnSY!d;g)}V z|F>jrPD%eb`>!AV@i6}JP#^f?wHAMze2cEUP#^f?wSDK0d&7P4kE5HLV(>Tn-st9d z|M*DsYB%`1(%|o3f3lNqMn>fN$0H+b{H^7K;P2OnxAg;m*A0MwJgT2l-h>`b zq@N$-+2i%IpJz;lzq{~{pIH5TqWt51)XypPkDplm9PjV$PCs}0eJB0=U{pWH`^TrV zE)(hJwu?IJ=MXWT(^fzC-#;GF&);JlQ|2E(4fS&`{o_gXb2t9+-sPZ}fA#e|!eIwWs>I`NG8d`FvzVynb#W-j;Ix+&}+#R6pBtFp+G8 z`BeJMAFs9Z=hcbtM`P@AhYpYPkI!Q5I{o7xa1B-j5bWQh33jrOmF5_}u<;{p1}<@`gwHM7@i<$jctIbEZZ5#Tl_?av1%IwY`w~JzL3Z4z4q6IDCG`$~w-8E#fn?h81f#k&d5~pCo5m+!?V+6QXN+(!|7P z#OByJ#rv9rrC@+^u*f{f`tEHWmKyS7NA&#Sw7~PkE+qeo@ukb&3G=zRzm;EE4&d0)D5`X$Q_LAJk_|q@U?LOalU-PlVpBEeOd@StK zUis4(&ke9vbxxemk~=b=b6+dildC!BwZJnv^abAA(VcRRGhtp9kE+k}Sm&vavM)L3 zrS<)VaxZ-AqVKIeJ}&eswQ+irr^Vc_j=j~@xv`QY=f-Yc=nWWOyUe%bCGRRh@1*OI4Mx|;``bT>o)%p{n(N}vid_qj+o|jK{(WLyKN~qRi0h3Ek+uE~ethY7 zO7(po`RL;`o6N0zMs%XeQ%$=J_Yo>ork>7`aa?Hw!W{7==;j3zK{3K7qf=r^nH`y z`u9H(qwkvp*Qc?D`>XF;GVL4(CyTzfeey;hqwjl_*TvTNO*(FltosSE5UuaIugR4f zr|)C(x=>$R=UqeJS4!Ts7=6$FIbHRAxBmHF>ibT9dXLoIOPRip@!JoCzVFI^AE)mt zW3WB4!RY&V|NSNC>TdLXlfm`}KHf>+BS&)m#mEs`-)k8nYyEG;1^TM*Tah0LK=Xd4DH+`Dz9u;~PoG&%loc*fqg!3nAoWGsCFfwPWbJKkX zsDHDLXE1rvH;SD^zT=HL&$8P$+im4ZKMuAy0OsF>jl30H@@wj7_UArtb05(i5nJH9 z4qG73$M0o*8y|lO*Ma@@`aXJ|<>SA_vfK6gcCo$_*#a#|u>ZM*art4u{${~U-?(XhsbS6$fw zZx0~upOk6kZ#8*Xdy^+d^7BX@^@3>vo7}?ZQnN&-$F+1Gu*c;UjTZ!bA2)} zz4!u5450fw>(Z{3KjuE#6Q97hc^8=-VHo`Eji}wk5EuTQdM%ekW z#5TgGkS~hs_5J{7h>fs|xI$n20ZHwHKKlQMTK<5jePGMag#Lf*fJF8|EzccqAH1NS zFR}lB?-(^dOU34g+dVQWFN;Q7{{KyOez+k1|B%BzC?Y@HT69e`Kb)ub3hITf#YUP- zez;8XMxI5j&{9Xe&@{VVsN{#Mw)4X|{r?N7zc;U&{BYv`m->Y+WTg223+Y2K{{JHF 
z4~W(a-H-qO0QP{?4He&iF6Vb#`=7s2)_Z}PI#KL`X7<3Q!D?nR_t=JwFf&p=)Lpw; zd4Kuwf`W+we7llweI0c-C-FN|KlBmutxw`!I}38Fwo%tm{N<*OXngGyS64Me?Xz%8 z9W_Lg#R8;8i|JPdd(+u3Q$tkZ0NWWK`lj>d-I^=;n@8yU&2}A!;lE_S#BKAEmupUjK&NAACev6I>mrPMc+n%UClHu_g+ z_pdb9g^zVaNUklVPD3g64VTlu3P=CG%k|36-=H}JPetvErQD~+QMdGL_Q!5Jws1Yy zVPDjVuc77Kj+M3ez^~>rWI&zx8g>zfNK`LvQ(XSJ3#eDR!L9DiwPOoX(@^RzOd`KK z_C=HM2grGo)V}yW?XIPE^OKU_J+2%`ePUGrg<_XU$SsGj8%yN=?%HOZ*EM4Ss>zbvzr@5(F z>b7f^QtOnuniY|nEJGu8PGy~%Yv!FTIp?HiX^L{X+h^+L#MdlMl5@_~h%KP64eXf) zkGH;QH zx;FbctT=@A9I<)ht#)KRN#E|`ni$)GwLJ^H+m0;ATP}4@^J@P%Lu`kHMANK8U%VRupU++jb2u^-l-0QFA2#~?g!Th_+t?4Xx1IJwA$q(A_Jf_*zW4USucs%rAI2j)oc2Q_F^>VU zANm-d=wv_G@-b0-q6s@8k^S%(&+f1vDg|HpdCp|^!?j~O$0x2Et?^Wy;HfoZqVb84 zqh71nmPzx|Pl?S>e>;A^XnbPu2b!OLmtE&A2hNE2A%=r@oPLNGvee90Y~=$EKZN9l zFLX!!5W+|NgxktjKig%UH<*ea;uG9!Cmckn|0cD-nyI;##yC0s5O-h~by>qT&JWR5 ztisu^Hk*$a-!HKvOwM{)>%GZ4FZR0d5&Lt!@Ds#G(b+#i-UB|Ltp;oV#4g5ExX`iw ziJ`UpM&q8= zEF)h&{t9b<<@elQvByzA_MI;L70K$yntb_EKUVU#*YqM^{*&+wBlbp_3iQF=(B~Du z#Q14@gZp)|H*_3iVC{`bU;{T9ZxU%ooIqKosJZjC0TF3R)1-at@w)Zb^IUh5%24mX!$xSjj!YTXLqtykUhEn{d|6u-x|47 zL+mAykHg(fJz8QUI!8fq3hUACeLpH&wV%V5t%>3!KVUqKog(YN$kq~`PwW)Y59S%~ z;2D+uFPXj4MZ84I(-Y?JaGaMI(0B>B%yoRC$EO*)l*CKK|H8hs;w5){|CEcD*k>$L5HHdDtOxND z{X0jzME@Na@OVi{FXJU86K#J-tWCk*Ha3OqZKqAK4PDm*f5-4;I^OE^ca(G$FNxR? zkDry;hSC*wvr+pYK3?)Vc7oG>F#F@{XGZM@vp*guwvqz-LGPE7#(ofeja*jXRrnUYj+f}M zgKzNuzvz9uWT5Sbe#T3>wIAZ*B@z3fF2*Op-Zu6_e7xj; z(Q!Sn9}>h%lG_h|Kz|w^#6weJ?FUPC1Vv8WTGQpqWc;3PYh(dq{PHaqP9kz!`6t4m%M;Y;f$9UTVwI$XuQPO z8ebt!nWA`!wZBf2c!{w$CNX{~iI?=z-Z*LECCTiKzQs%Q-s?fUME}kaFVTO0aPg9o ze#T2mOuVEr#x_9~L~N6|c*&2^pQ(wLluS$(FTqx6GPcT%lRDWdO%gA8n$IO(!oFz| z&cl3Sx_yb4Y_`vw4nVx5GUDg3Wox2%$<2(Xu~Q^of^6ma^86!tJ`*o7&nSEr6EBI! zbBAwn2ac3_0&>3Q*pl?X`S+C1YN=NBWo~ugjR|VL@a9{tR^cPW@FXt1W~^Gj$v&^| z9fPh6+@}rGvOnv&Z8PW@H6|7~ix9)2kME+#wfn-R{Ov`f!Zr$g`hAp0mX99EVFhPs%gY zxZ5vc;=&K`4Dc%a`JR!X)!hHLtkG?Ze}O&jlZRS)uA1%fH)l!KYEHY$C%irx*I)GU zx~*I{o-vbkm`6XJqCXe&xAaHO>XzGm-(IEaWga%se=`r;tGxAh(dP$x<}f3(rvsx5V_joc0fKot&Lx9HM)Ay=5;Y@?GABT_iFx$bOs7b!JcJ z7s}_XFFv2b=Vo7@NzAlkbJmvkA5ek!J<8wuysDoxMXf)0lPe(m`d#*{b8pY$T+D2b zx_9ps>)fs3nYcNhYxeY-N$P^VS*%&wt4UWk<&udXKNmh@qInRPDaJx_*z zduyiuw?)eTn@P%lY;w9kRP6DmrI)`sl{pUHq{4>x6=bYBR947q=iS=&#ar8htl8Et z+}i#H_WtXN9+8=pNeNKk^%e_TT z$o<(n$cX6u$BK+-WnOMk;iEtFhOfj|J6g^ZGT#cBF``gyIh;QyoWZ(N$c$hq@`5!X z>-xxIZ}<+*%>4Mk?WMFVOCAHbzKqHnQIE!SLT}gZyjCr&WNiy z8#+$sl#^#2PP_lXT)e?tEaKhpP=1qc$&=@J-|X@<79m&GxzZb#B3JS}>AqPbJUaK5 zi31fsIHYRbL++}4WPc*wYy4bDe+xJ_6Vzkwtyi4$E3o(IZsfJ>(H%Zj9~wV6@cRXp ztl7<;lRX<6e^p?I&s+a^7VDa^Ji=Jb@{IQ7c}Dp<+~Y!ptd*D9H|5NM$lbQ=%z)f) zcgEmAXnaoKW&3_@*;fX3XI#Z+*96KBbzHcYz7*K=e^;)^t1Nx~X2sG4?JLsN@Mk&y z_?*j%!^oVjqz&dvx6zVew{hO!!1hw=H`;ATe_JwW=e(=<+#Ak0$eBAwJGVL7G5P-< zrkxdzc6_wcW?!?&@p~hWma$GNY+mp4{8n_>qzuQI9b}NP(@YGifPF1zcIIL~hz#n$ zMrw8RD^WWl_e$+{WIf9mTtj=JOBOxoeGA#-Z-WE!7PiTfWpZX`MD3n@vCBq8&g@t+ z4u5F@_Kd?WdxtoV?7Kv^SquE=BIT`L!+sHaMQoM3ku74YY~nj(n@x`Avh*?j^b?E6 z(jysu2w83HvS!A@$nYQY+{=(Fl3%&Owmo0u8Bbb!j2wd^FBGx}do0{*dyMOCd+g05 z_L$Cjl+Yei69&v4Yxw7=9dI(+V}70^1@@R9dyF#zy|%|XUDsQC?4KR;&`W#Fk6a4( z*dF^u)E*OmdbxczS!AJ0+g<3MID72p6BFBG8;Px*_V$<~e#YJn_ro5mll*9RF{auc z@kt2eXz&E*kkLH*<(xo zPe1K3+g55h$Fj#3+4pP1PHH))$M%>Xdn}yP9y`af$C_<>>~gyeZI7LEWuUp|_Lv`g zEZhrwEGFJ2_89j5{up}<8D#7+6Vo&HSP;9RhxVA3dp)$r>Wn@1=-+j+$LhpS_9s5K z?Xf!XlRZXUtRMDR;`7sBK(QadRqSJTBp>*}o8|=Ge_+#(8axZ$7C%Re$1`0%YlaUl zpNl;vdphDL>q3TCGJbLP7|)mIpU3k#>@od}H}H(TvB&oQU1$H;CUQ)GrJIB!L@w)y zf9x>+v3GmvAA1E~&bprZ$M!|fT~u@KqSy8q*Bg7xgN!vcSsNHj=4eum3KXiePW~}j z%Z25=v&Xjg!aw%;&!^5Fld){*g@5en2UW`aW9RZ5Vvi|LtbeSU8ji+4CO#(olUlzO 
z40Ze3$R8tqJ#%iNgnc6CCQ3NJ5M*9B1LYIns$4JFOX@nBd-UacIRlo=XV#FwXLk6O z=-ec-(OP&O_Ne_lE}uS!=6D{j{XAa#d5UnBMdAiBZPeY|GB--iLPW)L0){3un5_zIUIQ*oU?; zfe-DN!-saT8z0&NS4OBLkq?bEZG32V;eQDx%S8={H>GvW054YR? zw58qn(@MJZr;ThBUwa~dT0u^%KTT{t))@XY>zc0oX?HU(iPinj6Qg6 zTxa`l3-OnZ;=#mIT!HtCm47b&#Dh0Ud^9caaGE;sNqmjyyue6&so}-xfk(-6`R~$3OwDs+MVxmoy1shVbmAV1hiY;-&XQH$-Tc&Q50CmeHPJf4fp5Fq%X)z?pvzw>(cif%zc;fD&u}~-v(rLL7H0M z@Rjme`N-r#+GyZRpu&zV7^NP9OqP8kb66amft9G8^yDIn>>bdiStye%*Agoj-nrn)Cg=L6~doB z|7HH3im!d08t#*vs#|TY$~xjYXQ_!k&Q{)g_hIUbw7UG6+%J={D#CVU9Q+-`XC8i1 zRh^aTZd~BX3>6&en6HRaEn<$A@_Jg&6=O;~N7|jnJ98JV;C*AJ+qVvzKcBT&j=V?r zzWE#Wkm+kS&*JRs8^o2(ck}4?PEXqO4DtgW;TrS*U-SPU{%;v?_4>w3dH!!T;B#xh z-}Y6$Yb#ZO2024tljoPUu!nertWCup(rZ&?hURe2*sRTxzje%%IcsLENNuFeT+1G> zECu5gH4O6y$9AlIjQh&HWL|UVv;1Cc%iv_Yt>9SQ))v|_YhVH2%e-|g&)%|~d;&%$ zh%7k5bG1$u8_~@g7Vbnke1k!O_mBa{*#EPT6W6j%FGNn%q^kq3At%)J^N#MBq~^0q z-9}k-`{;ja;|#w=6v;4zANOq zLiX4qxDOkV7oTB0e~MQ*XM1l}=VV7JCkJCeP7w=dJCxG4HSQ zex6GA-NXCGT_-#;UPKayTm=tcP4TPS$l8pyB+gei>XD2ycPK|n6}a%QdJ_; z%8*Ty`2WGzRn?i9>5bK{K_TL<^EWa#gQ+v~BXXeZ;I)xg3G*Yl5VVZqy#yI`2{LMu z%iXwx_tlx1zIiS;eaS#xO$Y_akuI`Oaw>}K{T=USE~pi^rJOw?c?r_^jp$?1FKd{4 z5ABpb=&o9Weu1wi`gluzraRz4zihy!Yv$QSKY7!$e1-RYvArBVh++>`XQ=RZmZ(_@ z-K6$+yeKlx$hY!*6)s=C?2^0bgBioS$O$dJz-?fBH{-pAabLvy0vY$3hsV8Kvn(?1 zYv{)YW$|d%aE<6s>dZ|S-6`wvE$pGacSPl8wZtu0W9T-$M`XV?J7s35W2UUhAbg&9 zdD%KXBk)8XHsSay@VAYx6Fhe#zf@?H7#qFQwg%;tcF3 zci{cy%76Hlw7_9xVkvbDn7{ghGt>o^P2~w3IY))Xrn(SW`8)DSu0%%GT>rVF&r%0r zn@g>K6#Hfpd;hEkHz(aEHVkVwRLFf??E9_cqL4ZM2|DNeOtroS zd#FQv=4V7@Ou-O!0rL7@**`7BC0wkp;98g8`%j)H*I>7JJmojmU@tY7ru$1XFRYSw z-Sl}9{mfU`=kznQgy*1tyPx-lcVwz9!Hc}%UG(#=hq9`6a@{=18z6f`b6T=k)8q{0 zI??s#!V_!57Hjq7uxG7(zmDHlbN|h(zYUCw!j4$YK2q$RNxYV_t_Lx8^1NQ&H!Dx$ zQuff2Ea7dakm8xq#c#GUpVBWGGwJsZ#;=BFl6C6bE2|@W#Z|T1-YeW&+hVd;Iv7(K zNA|&%_1I#rG;GlOO4=nKi0qdQgH%|t&uRv#n7vYiEl`6k@csiu*oejG*SV|}7&^rn zTHea%L24n$=duUn^AW6nnXiM`uQFePkzM#nj83S*Mv(cs8`)x>ZS5xIFW`B_j+Jqi z_0YjMlppAre;astF7~_V0vWGDg=|u;P=h>AhP}^-C$(j(@UP%4wA~Wjdky@(l>7b- zyW=2hzs9Y?|H_z3``55%WL$sF-|&DeI|tk2CtRcKe|^Cp&zxZV@y0;o5BBmx2mUBI zN$`hYZ&~vOe@HB+EBx^V)(A2>{O5k+k2co6;E$d0_+u#e1Dj9wvFvTZA8&~Li~nOA z_(Rseo7aork5|w=Vhg+s?&whIi@igJg!W+Lo<*&qVEfk7Zo-J$#)G4EyNZS zPMX0W*LHzHo~{Ig-|1 z4jj_e_cNHI*JTeH9MTkvL+<0dcpUN?aij*uStY?C1>N9~|2w*4<)hp;9*2l*h{qw% z)0V*@!gWuEK_>SBgUsj#gFM~^2KiAJ7^I{d43cX6@kMO@4-$WrJMhQC@mkkZim#+0 zuWS6V!R1al{wM)|phx!$U!s7uBXZ|PY_&xA;|th@Pw~16{IMVWG0(;y=YT)j!5@pk zAIHHT!@(arz#rqjmRI%1j$u{%z#s2`KlWnt?E!xr7&^RB@Sfn07V;Om*q5S9g`cWa z7TCiTLRV_+Vd6e#f%g7!xMTOSu}{h#gFp??BC1zKlaz+!iMFm)-`;6T=ZTZ zogWNa#J?0wHO-ZWoj=j{lqU^YH7wMuF~?Zn!=B+9bKD)l9ODvVjzwh_=6HB$dSe-w zBWPoeW7tWL@_L2WYF;JSOa^m2kYQmC!5WJ^?#4pi1#A2QyL1}3BOm)fFb7!UP0{Z% z9;v__)!@~hV2%-L%KX8XNdqw+q&| zop$AUPI0`U$1oY*nDW^a;tlKsv4g=Itm^^98$&)6cq6h;`hqvcB*z{ExRx~{cw>A{|Kc46Z|sW48zaCQdmnsDaE5TF1z+IzuiXX?7QMI~%prK=+nkFV zL%ozt7g)nRXmL@)BdcC+SUGN8L-{IX&@k?W%d=er&q4ey@+^NuC&fHV4bM`; zv(&^sOBp_i*LfDfE}|mc&*~UeLfoO(YVbg8{3Bk+){N*_ z`E~9ak2{t~KZt)AnIPZ$Y0Kb_Tlik<*X7xfc&HWQ@YJ@Isruz`WF+<>=NTm?A~BGo z_){fDdJT5dXyO*-#6EtFFG*d0)zN3qjK)5SB=#}CIPfB|j~9u3Y#{cr7i_UI+lsfg zoMFX2TJQ@#fo%~V|0p8&o_p_`l%ZB4P{c9voA3ogM$c$Lqh1n8WkDBnGjGci{wB@sGiw z*Yc4`={de^>}JlMY?1cG9+p0s_=o9-v@g0<`jEwMqz^N&k);nqTv@)|{6A42F0P z;vae)fj<;$NaWDpW50n3!!kb3_{Zh+UHX4H_~ZC;um1i3@sFuxW2z+XA^1b$Aipmo z<{|i_V`A0Dvay};$B_|Z8b2Z2qKj2n#Vx!@bC1&1!teOWEtQT@ylcc$e#kvDV#ZtH8_3 zikB$-DEaP*p*i>}BwnJ_7>!*XM~<~2SGN%}5nqSIOG@$SivMK;{@u9`xvI{8gBZgw z57;F$q>yJW*7a`i%5q-6=Cy{`YW83$7)AV-^N5$M#&@tk@e$2iXxt7K2E&gxjk9gpg682SZVS z8*h&#i<=}CEwSFKi0R4NMThFWCq5~OtJH8FUa)`7PJ?yE*CK}{?_*O0>#$BQJp-)6 
z+)CV~nKPI2z2KcPKEIGY2-aD_{L6k>&)@8+CI!|xhjwKR$?H_cJ7ye{;hmi~rV#I7 zr-)3pxQ+vech2}w;GIgdr}~0-j=YyRzGCKU0Ps$eoY#@~%3biZY`mlQ;hp>~`ehY% zO%L!+lLGJD!Zp3YJ0om<*PYld!tWBiBXLaOcgdOhLpHx_%mDGb65*ZA{p5Gm;JYp` z@iA-$!8?)p1MjRRUTOGU1;~CA7c0c}S-|g+Mf2U*E}NAb&WPLB!uu3-jNy09C3afY z1!p3j-z71{{_wkg636eVvH4vRS2X;t;jEFG1b7D@hvs)3hu?J`{4U{j?StR7AD)-k zzmvW;QS-aDBdZnvU(MLPbF}QOmw0a_&f?^Fy_?}~`~=?}&Q^`XsErBExv|5^^XmBK zjZat|>vS*r>iYsGRczL#06v3O@xCp~-X zuqfVHy1?dp)oRQ$ z@92!lQOwgar0;yM7Pv6N_xe0oClTLk2Yab?Qs4Mq`^mM@6Ta8H9`LGmDc{TV zA)fEWK8oNSxFg!;5?M5d*pFEcHSoQ7?r;n57>*H`R`b1f zvz8ZPGwPan;2rt?ICw{RUt#dhbKspe@J>5&W+!;(QypWfjuCrVajfjE17OPi;K_a1 zgYOV?Y0u7U{405kD&P<%=6&TP<9*$K1-!3!yXSq~AItl)?sploqBHL+4g4?`-q%EU zUz|yZ@V@eV$l!b5Ku-0P_q7#V>$PJr(^%`Tf?qOtZ|1!Oc_p|-E4}BuufAZIM7%F2hWS?;!!+!(FpRS0oWU=Pk%49mlkmRokL7(?+n_i6uk0l87}kB0#^ovIf3fcC3_h7VCW24u#82Sm zZ_x)|!4ByGKB*IYaz586`(ID^U-9@P8~(wG#3$z_h{vQ9pTPgxXyFq*e+?%IK5_ED z1fMwhUy1O^F664KCyUU>9uX(6Ty4p^ZT8}1G~Qn zy(9Gx;BaalSX)y?Cmxt}7G)T^f1>7rIWbCZPcVwj1vBxO9$^%?VBnhwMiKw0@VMZ5 z#r7F4SX-UdXTd04eJ@|sD})wQH-S6K7*oqa0X=sXHZ^Bat38H^ZY@b zKQUvMEIv|mO$zbHX~YL}?hoUe><#`%cK*c7*8s#vBKTtp>)noz=zUkf-At4_(OPLavr|R_{a$G2Yd~|9@j>=Vc-we zjpl}dLo^0~8z%M{oT~dXU0@J*lpj{n1wU*G{4j$_W`jvK!e?+|5;^;2_+d%oCUC^` z`IO$oP2h+*F^O=*Su;6aR-d29JypeOn10sWf(d@p#!oQ;p?hne&7)|q)LW^$DImNTEi5rd4bIbs2uBeuf95la>? z`7RtW;e>T{K3?J_!Vh!e5hp)vyCYuGJAPPR)jm63(j7mn*5-#b@@&cYVWuAjkMxWm z7VSekKWu{?FYyW z2iHa9QZ4qK7k-$Gj}wn=1- zOn^t)N8~kLNRFAm@?A?(o>(R^3^-fv|vWH_88xP{({Hi zd1CjMc?N5q*y4?kt(q3$iE&Py-#U3>3miPLPY6$JhVaBD>iCM{eBEsK7za?WHhHXGhz19^}NvFWne8*7QH2w!XyFX4$TCaxkpu~KZ? zTj7TpzL<^^Yo1t^?`8Zhh9^dCuLw^poBs<>%*>>JgUpTO8xW3I{M z{Z#m277t&`;Z4N<^u9v|%f54P!}Pvuh;YNkb&|iWtj(WhI=Nvw{-V#_xAXZ$w6F0F zb1r-3zxiA5C-BbGw0jEU9X)o*@XoDQr4a9&Mtm^m{^<+e>4FcY=W77)PNj*r>?E#Y z;~l;4+W1@a$!Kht9^f5q$NZja`h#~)VLq5Oe#eOG)ikv}@t(iR(Qaqz(+ z*k+H8ky+R#jt{2SY63o347Ldl>Xr`{!8TiH%U~Pl`InFQV4d@SeF*trl{(h{0rA1g zg~I^8@v??={2#kb&c6uX%ZYFL%m;(_m6Q*bG!IxZKA5+@XM8Yk{Yl9O8~owngCXnc zlr86a&Ij|>_lFOr*CJe$|0L#DBEEpgd6ftsOz#yZA8a}6cNs6^6OtH=Ki$M&#?;Dp zCI<5spC{sj>2|>+Wwd*m@WHHcOp*`mlFL(wPfjmBSY*%i1)p@m2eWiUiuquA-P`9? 
z8i>0HJ~4an7yK>y;2rFc?(m7e20!cbT+<(Xatiao^!%Nwe6ZVCBPrv9b%#%q@WIl+ zC$W4mZ~cJt!33l9l@I2?C{aFGT%SAf!IFRP;Dgm^jIuKpqZmGz+20Zq*-RX*#*V+l zVH9h<#>8Jn)yC`E|Mqmv2QzX(+WHY~8I1A~A1pRM@rRrbrq{v;#Rt=C2z-*q`VoG> zJZw104`z5s{pN##Pm=P%Y<$u?KA5*YIUmehe^TCMOp8lG|XBae_tkxp_I8Qpe|s_+YwS zu*m0W_f(CKn6XP1ADK{`Li}-h@xdz1{^$$-NX7><^OfrOh+gY9{-_}iV&f0J@1Ejs z&Hu&5=mGv9t~%ELO|Iz={y0_nU}pYKRX*5dtdTzR!N40m;)9v^NH=^i+-7IQ^1;0I zCzKB+n53_KFb5_{&IfZmPtW*Zl^T!iNz4Z`dR8z(HF2(B0zOz|{l?&tF@i^8^z9R> zYd)B6%jSbs(3Zg?AMwGG=L`F=@xjbm_@MY;dJTa`4iP7j`2AVfaVh14CBP#QJ{aHi zpAY7(?;Rh^TYpON!H{tuB0gB9vgKm$`C#_Eq=*k@HP7rc{<55`(=8i@1v zto6wGms>L;c&ASM3?=+6`s7>KFg?IKb%J+p;+p>8ol}($7MZ_Oln*wTH6rnr@qIY| zBIjMUJI~*9?lmDFY$$v%zDv#r+yAXNK3Et&SZKiVgXQ#t4<}T!NNV{gN5OPh3vYJt4h^Lk{>Mh!@~y?S*L6{*9$&a7(Q6Y zt_v9%_ccAt4`$@&Sk_`9KA71naeS~CxhZ_G-1opFx-O)~BXx!kb{(H5;e+Wj`Nz^u z4?b9A9Fy?Des)O;@yTh#2XpS3zTlH&d@wU#0}zji;FC*O`*u7=J$6Yeuf7j1SfwK1s?4+yAXNJ{b9fBl*EXQYVu9V5`dF>O`8e z_TveoE^HuF&Z(DUEKV<_(R(#dHk*Uzt|W(z#rPq$mW{<;Ez+752okuRON$hM3(lM4+h@o5g*LNN4n*M z?f+IBA1rM1!9sTZ$W;{snIB9rNniP34ouPoA1wMj79VU?MHl(OL?-E&NqbU0SOkxZ zC(ad2%m=g9ZwwyMe6SdOJM_r$F7tzFoo>fW6m1zi@(~~Gq~e2B3O4<>GODP{L0Un9)!T2r(e6VmY`CwuAV8|}=P+9rGR#|lwFy7H)mkjTG{=yXEozsgC7TG_2!8={>!SsBk8t>?} zZl8Y%5?2wtWA@!&_*?YJ5Nw$4@Q%I)JLb1s(;vL^47CVMKIFrWdXb|CC?E10W2r?= zenzpot|tHDj^*k8wf(FYDfXf>AM!%lz-OuFuVl>0nGadwZYn*~x9Pq++k*-6AwT@C zg?Ij(tJaHcZR}X#39V@Fn16tJk&~$xxu1HGrD|MgAN3*!Gv@BE4X=7bYEd(0!H0%d zZG#u~f6+ld;O}Yh#J+TW^{PG0$zM0;`$KCz{@tY+{&z>Xbv|U_h3zAjn?a38sh{~A zbr%0H*&C3vjZ^tdFpt!Y95c)t5T2FfPL^1OQn|jZE_djf4)PrH_f)0oUsl7&WjfTi zK@Hb+BVQ(W=$Xo@8@ace{Kqd3OKUtc)7?0mn$qQV{^JVjMi%i}$SX*m&?54KD%vu2 zBQN6b)tPP|*W$+RH+5OeZ_ecz+C5`xg%hTE zC|ti#sreo5ywHyrcjtE-`7YlxCKRAXrStP$7LTk>V(a%81k4O)X(_ zuVQ{D*K8#B@5}uE5s#{$=8DE?qz3M4U0ZUj?_rNs$7OG%wq!STT(Z*}rH)GpvTq}6 zyiavpGBbVChDYNxk_WM#JuLG2cf5<7D)VH99wp!VPS5z-Z9MM|k9+#7d}nei^tHxI zqPmmic24AKaw3}=FN(U8HG+9;xo2WHB8$xUO5MpH@pr4o&73yOS|jt3=?!ckZhsf^ z736y}CySVeGBq-^nmG|{lkXZInud%P{AoR-n`iV)Kg4&E7rE7wHr>>LS#Q^YVf{8q zJp4f&SG01FInVu<*ysMscXiB^{CGjn1d*{#;F_ig?jg=|&)=eP9{JA1dH##fFQt9S zTk*P`7x}0Bt=k3n{4?!Je-mM!Qo9xmHDxcDi(Wf;lP7TG92I^zO&yTj^uHr7{YvtU z*IfU(qt8+cW?Pb4FmnIR=)e!E9*h~oWcX*o`6`3(Kn+fobuAgG?kJuD~ zfzAa3CBQ%7e&Zj_IUC$N{6kI3WcWw=D;Q`7`c?3c>9cUo1pAopo%jdaq4r($n881< zI`EHtw~6oK@sE6N&YCf1jZ-%+9{I)LxW&CxUsFk%uiF^SG#AwZt98JlFQJc4b36 z#5_N;G0zT9dadBRU-O&(VxFGXwJfnPPo=VDoxwcf>yfd&7K|wNxtWJ`_B?ceKSdrl zIPi}AUhwCCifjezTm{w$Hq5x;$OIxzePWM2YaLk_@++q&F8qLKltWqboGgh@7zs&&4G^ZxM+j@ zrsq#M>8E17%Usro;F|}A_CLP!ed6z(@Xh7~_y%6vK4Qm$ZRAYk;id7wINGRKm_gqvUHR7W29vATt zXS}DodY3xhqxo!uL#j9N9_n4j;~janYp?~z*v}&MF6BA$JsF`D=vvXkjC-f&+3Ly& zJ;!zCw|mjSM?GU}+dUb2Y$P6Zkb4Ohn(Y}Edh|%=@1+*n0?*jcG@d{H+BUu`XPz`B z8WxL*3vcDPA5!MhflK3_$x2ldwk_$?ko3l#)STn&+M3x zmOt|Sg|uZbk(cjfjO5kf$?05A^P8%^bedX!_!dv#7;8r2L+!+sFS6@s9>!lOaU-dt zS)Q&AsOzsfy5j68E^-fb`--kF4zR}jPrBWW)oyiQFR`ERP)n2Ie}PSBs2djC_jg~} z#C@J1N2@n)&R1V`#D?6&hNPb6%_F_xS-h~r*Y7MH?<*|U0@(LN-r zrzvNq3r4B+C8nNcp+i>Ys|!jcKD5^D-%*_DpTr&zydb!sv;;poG1{O<&G*vRDa6_a z=~$}r-G<+JEAg<3@&)bbyjQb_?qdCi7he{Luc@i)X?k+}QcrU$@gc7jAF304^fYlH z;Wxbfj+I|P#z*Ev{NmRTHyK3VrT7 zO;4y{u$n&w93(Lj!9r8ONpmmp2HIw;aN9S%_4iUY^VT6&P0d`!$XU1XsAqKTH~4&jF8Ic(3%SpCu;IP`=nZUj57lvS;mZ}#-ZZ{9^)$tfm-@d;dEf52D)6pnT&?tT z7;E1%S7hdByRPOKUq10=7rvM!?(shC3*Ux8>fT&%Q(Lh&ylu1FFI?UDx|(TUb5$LT z)YZ%imD_bSo2aE(%IkSv%Xmp$&0&M0bv4u6X^s1+v$SN&-RA%IsgACzMy@K3-OlP1H|TnjN=A=W9D3HZxuuT18Z z##&s%8uQSf(g)pDYbw0$jGLeJxg|f-9q_DhvGzLG-Yl`>Ol8EG{?)F%*|4YMMUkhn zMwKm3OQ^qDLjBE~=!ewbe1q}pZXVyZ=W#o-tiv@rvu;Y`rfwkLa1D+{ev5iym}UEk)0W&My=_ljaV%(KEfR*SQ*5TH}U4H6$@! 
[GIT binary patch payload: base85-encoded data, line structure lost in extraction and not reproducible]
zeVCY*NfvAjtUWKFPpCbF9|7zF7s0SJ4epUAI#!vdyP;?CiJps2vN)VG4Cnwm!^?fq zH~1#?$!_J|&>x;Sm5+@^&WLtR8k$|ry+PbFr?%hoOB$5Di}kFSxE&Sb4X0c`Yp?qTh4t3CEZG##-#Xs z-ioZdaVY+S!P^SPGnO;(m)JiD{!`@TD&*z;=$-`kd1YLn3peJ8F0gxNA4{hQ{KF)J zEro9hKY9nAAw1|LF&|Fg!{+Z`504Rd>pVQ=BC>Edc+u_fqSqPsSCD%%S;Mv?`}#2U z!dGV+{%THQ{N{3UjeE;h)6 zr0)yH!|XD{ZAGAc@ocfQQuQsec(?KJns#EI>7TUNgeQ~2jD#$NQ044-$y2W zhP0mbHXIrIV18e7w}QUryAmA4A#z0Cn&gQ5V5uYWMbe!Gz0DKzN1202zn>gso{~@% z`3h-BQdQ*srB#u;NWY!D-Fyx?b{pw;(pM+PnCIsAHz$*RFYNDXuka_qj8`#_9N0C} zk=uSlhD<&)dc~j4+~$(wM5avP|JnQ>$(q`Wb+egxo10EYnk5cq95O;Gddw?eWh2mI z{)B$yD(QOKGUwO*b+X#z9-+E1U7XH@0NY>Y0}lSB=}c|B@;gVI`8tK-hnRJ&Kv$0 zVv+A%irnxlI;B?Lp#&O>4)fmqU4|GAcDd8w$-P5eq78rT(%+EGUM!jZp6UAL(PoK3 zC^Er29vdRhV}}yVm*BmwjC;U-HJLdKPpp79T!SA-{t&QyWdlw4GxxWY+r&?;zr}*B z{Ti{tDz>qA@6yRqHk5NP#0nGt%umxVwb1Sn_G2_$Hg!0HQNt>7t&VoD4kMa9&?*G3i91AUaxZ-e$J!~5HEtK2M^+o2* zBxwIFl7+eRMScW&Db+fI7+fEdQkg6FlJ`ETN5Lrbg#1CSx`b~!pzDF?F)pC{=sjqF zL1FCL7|Wxnr49}i18eZQUl-q6WU6MDMDI^fURwNl0I>*H^Ek+HkPSZ(n1*J9}p ziNoaqHu)R&%N@_Z!t;APkmFdhBv#sy$qMpUr}j8p%quzneT_XGaW*Z2+fd+*f~z@z z{9guNuVrnN^HRNu!6kB%;AH=staM}tm<((Z_1GHjoME)zI@V|(ILu@pbDPPY4mP|Y z8Xe{&^qj}g(<$!(XE!Due^S@$$2N|jVtam zIVXbyxiQ`3{ERblW!RSvP;Mo1bE}3u;chVM_o0KGF7_3@HIcP8BpF6th8l^Y}Ddk2SPS=%dF;|=VuyF-I#!Bl<&Ry&t9+M2#7`(h>yaBP0! zD`O9NtzS*Hk3zOYjo7(cOjb=bS^*0o~| zhw$uu)G0Eo)HySAvR%*n<|AiDD?SdDHE`#S82dBGqUFe;A|DF|4Owrs$jNcgd^K|M zSo-Z6GI2HX?|g~h<;EIrRlDRJR)ZYMKU!zifESeZyg_?1_46*D4+Cn#|YcO^9<@o1^9wnBe=wFFJxtbOFYc_zm5A_NlWn&_x>K$Y6^TdtCZMa z;_J;B6lfgLFpJ;4#`VZCFRT8TYRyz8xctHWu^%GF>$XduZYQy4 z`EDb=;WixgVox){QdVyE$;-bZW z$12|6U!B#JIS~-rj(s$ORrDP=+3r7-a{@dQgI%=rV7UD}V^!wHDwcDNc4HN>-KYIb zTf_96kqtnn0X@9q#wyBMrm;u=8vj`ltY?kzQke@1c*Vo$iBGZ@Z9^9_4Q#~)%6@^} zJcK0snYvS)=^)lk0XBNUD~j$!6I(WT#cBSXErKobC%JSf>`ASZ4J%hhltSa1uwTc4 z$q>2Ng>wc{6VggFM6}r7Cfl! zaNnPO)BPV8%!ViFT{xFFxxa(*^SDmonb#9in~&(g$t0|4-pl`o*3&k05w5 z>B}USZLbTyKfGPBg@h;nksrHwF>@nVAA6>RKJDMZR^pN`&^Z@pxD|=c8ZCul8I#t6vKf_L53tcQ@ zY$bk9>h+A57wU{JOYEeNxp$c)cqhRwmhs#}V8A{3&3jzfC!vjLf8o8f+sO4I@yho> zPoCw<%4gA=_9iB&$R=YL^Zn3^@LQ2dL`Nbr$!^x04NI#dMJAcVerVCj`R2*U1f3Gw z<&DiJ7n|?q|IY6JD^4cjUpCZ*Tb#lDuIz;Z$k-T?ma>*kr-^g$Jw`t>{kLFaV`8$=84dk(3a5H zKk17-X#)8&ClBBUUt;(5pnoJbk@WLB_{zo&3t?(LB)zO!VW^Z`S6nBUQoC@0u0 z>{!m@)K$xOPGJAi26k=_w(%m`Be8v2$rs*mkv95p{hIqO9>IP%)ZZ7pCi`GgG~Xg| z%DVE5#3|bW?r_jexI<0ofN#>E1NkO!hs|f;D-}9p%Or3Zcj=8S1E}YjuqoQzdZi_o z`_isDu2L=$UJ%%ChTslc1b2AKoqxrh5ALvK&D2?fJ9OoD;117{&pyM_#B~wRfjexO z3T9OLr5$%TM{tKNKQCR~{E%m$rI~Wz4x1la`KaIyoBPsd^Ac7!S8@L^_m5=UqwV2- zOycV1a_;rz-W1=-T7T|!;0|Sh*A50 z@fjEWoNJF`_8a{#vEcOdmC%rkMGdmwD(0aA9oDn1`w>?t6*`i+DEvCwjZ2Jn;}Sb1 zt!gd+p9U_m#mfFLf^x;mUD~(sZ-qV|$$iNmMV_L&S1WSgzwM^`M%@GOR${F)rf10? 
zOxf||f6a5Fm#%N#3NGz*;@ajx%!wZQ+29h_H}6PT+blkVd-6Nb(_lG7{$Ic+u3J0{ zT;fCE66d<=6n-tSr)2zP-Wj0>iBBeY-&o`cBRYRCV}9F@I{_~Y6U zf!_sa+{Spm0gaENY$o$~7yT^#vCECQkyyj5>u#(fG1z%VW&ic^INkP1;3prZO&xK{ z+Hrn*aDJh#GS5+_j6IIfl6<$sH)L&a;{pGtT*e>DiLV}MOEcwniBA&vMlp7uj(U@C zDxXBX|5HX{k+r{jDP<%USsZ5%B!4S)bmYa8*Y#hX>-x{<9wkrCN{a8Tw|Q3DQUWg# z8AirS)=QZO{lF)d6QfL_@9XIE!|=HZ#@S2vO4eX-iqg+-MR+mRogH_5_gq#kv^{~k zo2gs)ajfu~0`@pO`xSD;wIric;wQ@7I_uHNGF9I_FP}IOH=v76#D#j6-(`%Ef%Chc zvBxjRzEp!;R&&hT8pm@z;JcoT>0rH*XX>jI$++zaQ8=&D#ackw?s9(v{y@PFE>ODU z^#wOm3wE&%`;-smZy>+@NWId&IB<(#a%bn;!o#1wz$2wcf(AQpEqB@6t=wZLKv8#0vdg?(F75hTwGx`S0vlA410=guL ze{`Ate!gC{@8iD6!Tq`KU`)!v6kVK){JqZ4b}6iftvrb~Vv897O-MX5@!fL+UlY~f zEiZsuoWOVASgATIlkrRPq3WDZec&XVa^AHH%w6t0{CqR-e(a*!ThFS_@wy&)p%Izi zyXJ)6qdwr&t66n#V?$y;Q@RTbCwm*2E5kn{j=}z!_Mwbd9eB$68(^X0Ig<_+D#T12 zvPu5d6>1ktuuiq#)nYo8NF4AZ#2$D^?O`d7H92EhvtIR6EyMKBWsO0HUZqp=w)=On zoW%xtjec5A+_D-OW3gB6=bN$fKBM-?yKuN+p?_WkgSbs~-T;#* zvHn8ngF3KU#ptkSqSMd7XIC)vvhw>^vgBNUZ6R$U78yLqaY=lVPJfuQ#=Z7Qe5oPy zx%hE;JA!v*n4Eo(@dY1tAGEL%JmOLAFF~KKVcR>2Jh3ljRr505k;hdb3vWGFU?*AoaesduymGlTnVvwzf zR+gvgJzCPdJX+GdjKuMp-~0>dNm8u4OnS1i{7K4WxXZlH{i7t}lQmCqmr3wZmg5(o zWs$p#wBr)IFoJiCVcoeM4B|(WEg})Ww)r7>j_>?yN?bFz$mT@SEYjYTHOe zOnN)z@#Y^%^GQb1s=JltYk1ESyywq)@0MgQ@0LWpZ_DFezAa1i0WFKY0$SGUgIgAQ z1-HcO!&*|j!dm9*d$q)Q^=je#GV$7c9LwR0(dZh6&?n2#Cq6_{)rg{S;*UL_x}-TT zZAtTxlo8EI=_8su;*Y({Toik=*p6aIvDiRVd>B>kooR1_S2~t4#)aSy(BA}OE5F2i zzk+^Z7Z}C+iDz>o&*&`a>uZ&>FJdDqg?2o_&=xa~|Dx#fYSCeVLoK@cDz+5xwzd)8 zmYO#7Wz=~9x)A=+pelJ+?lU=0{F-v=@;H1x)GE#z=7OBttV;)j$iAn-hK>x~;b8nk z)$diDLr0qI#RuVg=;KO0#2<(kG&~e~9)VvW+EiML+@k0l!$zU6`cT^Dx39^o9dlTZ zo-mw!X~_-N_s_c9BmBZG7uq-rF1Tks@fDa)#jMlC(Df1KSKXmdd&LRGd8^U5d^s}4 zx4n!99D|GpM$a%UUww~hc{DzMmkos%?&Y0+*jnxdUx@q?fQ`k^!)Wb_jxdn9A-F-o z77BJxu!Mpy>_ieQq2LOANgY_j4qTyN48utV@P(edI}kai4|d>ql9q5i(!}*yQg?8G z2IcFhxhEHye=b;HUd#1T;R5pnuATB1nZM?@yi?x2=HwIR@uZf*c(Z5z6Xv5_<-O9j z=EC{r;=%-Mxbw}&xSmHZyPG}uh+FLDD}`N`xAod<{{4jC^12MArDlZpf$IA*qb}Q& z7DGaPWPfmj!xQTxy_VKTnz$yDtfbSVzmi6Q8w^OOh#Z<&5$U(IB62F%MAG}DL;35? z*H12|z3a_u3)Y(lkalu^Q^9ic{iJ+;k3YG@yry7@xj$(~!F;fO$=Iz|n;+%(WBfMq zJ3W7u`RDv3^TS-{+!->=5GpDnBO7qZ~05jZ{;VN-{zgZ{NIcJZ{dH1 z|K)oHH+ZFR+VbB|n3va#xQB1djkBaH*tNJbevRqdZl^l z4-3qLe|W;2^h2uozz^x>?L*?g@TD03G-MfgzJ-Q>A&U$JgBKez@Z0ag@$DBMFg^0V zflsdjkJXhmzz=h7`Sfs_p}^}Fc!sn#~Tv7R~jBN#u?5|iZj$PC+4@Mx4cWaH&RwNubw?2)eiiSVb1IJ>-2BC{V=_f zRLyTqM{G;5eBD`xSF=XQw}jK)Go;sfht)savctc-Wnv#=(Z_P1tF+n5_l)BAV%l@k z-R5^`^A75m$p2q^ryBbEEHfOT?O!&hx74#Ht>GI!B0n?biRRUJKdfz_t*`NpcWArx z(}Rr94EXz>?t54ZFy5~P-9J|gp799nUTF3kf0s5i;K%8M1CCAK+wIu&ecgU^wS5_O zI%#(}?Z^M93(MEh{+-kYoo^N`K@`01* z(5I8NmyA=i7wFG8+WzNz=4hXCeSF4bZ3Ex2Gi5>Z{%%L6KSlfP-F}hgI0znp{}U)dFTf;4)Dy1X~{G88|@ zks@sTpFL!+J2V@S7kzd3RAjCB zlzaG{0k0PT<}rZygaK|mUl@44Fw1##eKp_%>sb%P_i813Z0U0q|H zOjd~lyr1=tec2DhCX{&5#&~6x;OgJJ39na@j*m>yHMisSUJ|_C^BR)Bvv1IJ&CnfjGVEi5+9dp=Jx}yw!E5!jU;!yVt?9=*NcIcI`Dd5qMH}I z-cfK_M(UGTe=fWpdcftgvAeOyUy4pqbcqk*&$5dB_E~Ux61Q(5d+;Ql9fr(SgTBj) z?-6~o$RJx6wCe#c&SxJOBR;Lc=&|;`hyAJu-C;3jpvJ;Wj-k5{y+wvw&z#}bGe3*Y zcQfZGH=t+Gc^rvUp|@Cch;O6+eHXos_^tNf{vv2|7i;Nz+_$^$dvX6;?zg#giukdl z{*TD7MrPyeYRev;{TTg3AFh7_qc@#vF#JD)GNB&eD6q8#^88Hxk42|AhO&=%=0;VZ z?^r{*Yl*4NZLDc?xZguLANeEqVhh)pqYG2eJ)%?NJZdXzOdWC@`*fGyq!YF~Kd{^4 zXCpj)Me`eJi<@_*iw-2vbzb@mwzuaw^eUTisEg#4DE*F zOR6Fr8=JmUUdi09_OaZTdG;OST{@JxHUimYbUQw8Z)#lgagrzBvXJE3r{Om&8oA*j zIK5k-lW#aDkN)KQ_rT^IW4$`8<(Lz|=4EsJkhGhpFye z*S(|!(yOHD>?I;e8mXm#JqYLD??^0(e2A1zdX3cENKB8yRP!$dspbr>w~!`L?l$s_ zq}8M?9pwt*&Am>>o1?kDnIF&IM0Lc8jgsL~~?eqB)lIE5BE=PwYj?IGJF6EMNAFszb0ZRnN?{%O0tXBzvS<+3O5? 
z((vw}4DfjCz~iknL@VAdEM66R7(H_CV#cuo%w-8|En;0$!L3v&o|eVn=QjI$ zTdI&F-t7f8foI?2-bT_ZDGAN*7DpGoMV&8k|AmzJ=4T8s+Iu}?G|%oa+Sh?GS~PVE z7H<%BF4C!%GHCAtGQ+d}ddp7sJQcK4&pPowc^gPusV9;8EY!V$HlEP#)!yrIueK=U zUaeoiz1juVr@g5W%@qf+2Y}Z)o`Mb-ncFE=?Iv-Yhdy&6VLZynH$Uz z!Qwe4K5YLETu>0aQSh>P_!_#xx)P@I$&+)A7qPR2;BR2Z@Zdb5Uzv-!(C=aBzgTp5 zdB|>|Ca3WEaL%$0Lry)pbYipMBL)?%IW;Xcv03JQ{?b9sa*k{O_x_U7ey?EZfM!kC z*%9#~_8{^ke&?<9^Gco(Jl;Ucrlu}vo=&}KsVkaK@qc=1d~*u_7cL#xEO@-1mX2ut zdFklpb4$m&>JnSZBJg;nebFf;qbtGJ&jE1o7xWL?>sX&J^KDftP0kQ(EM>bSPC_Tk zL3{}*-ISs${+@Zh^u6HZUi0f@xd=VI;D>LuB;qfid*F;hQ4r;??lL-UBLXbqCvTar zucaviofrMkrUW}az^97DCKehj$v}4)fz8pEIF&=Y+WT_WvjlmtYBVu2;Xi`6TNrFS zAUs84a0^d)ZaDS}c*;NEDI*!jVID^7J*+{uBmeo)KRWnKi{Uf#arC=4xH?I~U$C#a z_{(`WZoDV_MF)Qw%k?$zbt2#CxjtO@1aeB6IsIgsIkYg%tl@8C2&vc7(#RJROCw+A z8UtT&7A`g03Ny?t`5ETf1sUc*$_yg~k)KJq?e20D3YMAESzm5Bx!U|H*YkRvi@$_W zb_(^3`r+W{-TN;N9u$e_Z&PSi|ZE!tH8vqF+X;44S3x(X0L?m z$g!j_>~mWat0OmZop3VAe2(=sDu2FtHZrCLt}c?)1x($GiB*x$a&;n`h8Hd|=j1Oz zj$LAQ7On$Z*Zw`BT&11DUmB6OD!^3e@1B0ryy{>nyu_Y%OK)RQ0V#%k z?G@-Nggx#S_Ou@`XI@5c^8oWZm;YyWS!S5gWsSkyWwl{empH>I_7N}9mUmAl(~ec< zU23QcPxlf$?h+U$9qo7*8DkcH55!-y8D0_teQx&;X20Fu4r#-7+7LkMXv5BwWzC&v z!+pH>DDTbU_qXh?&-f%6jvy^X?_)!|*M^(|tf6RPYVkp+z~X zgMT$kof3@xCz$W1AK;DK{X6mP0hWL8&Cima=G`;+_SfKVdfwNS_x-}Vr}Mju4^Do3 zmK#ES))~BgRvM~#ugHDxo?gm(Gt3Kvl>@R5mzd+d*{f@My<`3b{2>mVX0IA;W*)Kc zX763W+&-*?IjYe!)D1)rME_ivX>zJy>0CM`_}FG&m9wG77MX9p*ru?r)GPR%{BctI z`YmgD0CBj&=+^-8<1gzq{7%+ujk&oSei!aux5dBBldQYlSVvcrWKDhy`uF8`vitux zSsz{NE%gL*ZzN^ba2?P01SHf&t{{!fWe-)b#w_ctA)zUfa5Is&6s$0-E4$E8jud(ms)q2D2uVZ*p~H+F#g zRBU2;$I?59i-Zoc9rxDjzvJG(T9`-bf-6`@ud|-YzE$?Cqma4e_hhbKH(}5u4yZ5l z{Xb#Qe*1E=UF0QXBWoQPv@fB*7=1eiEpFTYV9?Szlk!Il+Ivkct{9{LKNz&H-54~H z_XK~o3v7na`ah8M#8-;MW3NW0y9CCnBgSY41}zT!R|f`d9qsue2JI=@GgRsj3>x)_ zZ1^+vET-&#!=SC&*9W`MQ&ZE$AE0oFm-L7Tw* zxo~Ll+!z0N8w0H6lLD-lgA`}k=zwv4QMUB%4S$lpIK;DYJa;n| z?a!1~_-or3{BL+PiR0TIv$O+~b`u`$53x$;TifwydqhV7oi0c3-g~?9$-+)bo#4?n zcU8yy1-f|=n)!%y9@}*B%gU^ep_%%*#JKRAY_CCYz5)Mv{dj(>qR(5zS-@(3-v^Cl zs9hbI@T&~$U%P+8cOICb)ztR{bw#5yi1?E-YvFB^?TI`;hBRsej zELxk$P%{;0sT-H}IC)+c}*>rsa?>Wyio5D=jOT8U!S;+5?sXq(d12#_A`{Z0| zJ@$?AWX0)qQ(bw~RZM%Nj=AnSLa<-9a=+s{j=IZ~CSg-aX6#r?=aa79ROSnJ858v= zq{}zu{gvO+57OQ}?BPl*_zm93`G{@UU(gp-uzw$h{Ze8&(uYx%jLjS9ATKdC5+71* zQ&o&zHG78|bZSQE4Sl?`6FP}nXypQ9U(DEV2Pe~}a-NBpdV&@Bk9ZjIlf5z6g}ZCu zeS*7tNA0t734Q{_?}p&+;{HP{({JIo8<^w$Gw-kpEi~%Gtkr=p^u%|a^{@1W(C>+E zn|cbrtG>~WyQ`+HZRPE15Z605AQ(lVP;)K4zYfcs%9x+Yj zcP==*T;@bcB{44D^B^J>S%Z1F6&r=bEISDOi(PyNv|kO)-Uh$n)pX#~tVb-b64aPbstF(5YS11?BZ% z9yAFK2tA2wtwV+>W)8^wcI~y;V?{6*RIekExtteuj?0W%)iLhla-(kNx7f07&QG{0 zf7pM>kGm(Lc(2&gfKpvaueZk28|WpqWx=V8%_)2lKm_va0^q z$Xwm8k@wz|*VUa@0Zo-ai@VXy^(Gl_D&z6b=g;%}`N#=3<+TiJ|2Aox2^(xB^qK0l zA(B`TmJB_d2y9b4U_b_ICVHEOFOan+`66 z`%+Iax_~VAGnstDJbq{L4W-Ou$rpb7vOC{M{zLpWy1!Ar+2YPq_&uH93VCI;IYY(~ zeq2ZY)(cPGk1t*Nq?|EQ;U|?g<_6n#|8M7+ zT>9}IcV85b$c^&lcd&bX-p)5j|44axHjJ@|x~Z)Cf0VuLrn1+Dw%2=}V>WTfs~zjJ z)Gzg}5%rK3)K#*|QJF?kjAeF@_wbxM5` zS>r#aj&3)VJ>xDb@np&w*J9{n1F7>(W%4N_<9La>Wc`n$Z#IyWjxy|DzP{-@N@VR} z%mqi-${2sb{CGs)%kn9h*z@`xj!b+6%2-KUQyCj6C*`F~NBNGj!c)Sa1>sX35yatU z-}qGK-sE*jo`nf?%-EwsItz1>XO zCwzxBy?!gt7ISV_p8w;sBi+wdA`f>wFZ(^>5?N%OmG$+1dp_cSd0u#M1mAQGEQ_oU zH<%x?u1ieU!?dd%m-#>Kl66hmAv{UiaG5q}g1cvZD5j2%_Uq-Hl&huOG3c=EpLGew zv;7;dLkof(m9=P*)Fb`xsz>}twwI}=jQEjE_EW5TDI@Le&Dv0oOkRVIK=xsm8LLuv zdrKHk_B+=FyQv)Hd>(7adHDEH@S5d}hgE$c>#}=HUX2Lk426&5`uCPBBX+@m@Gg-J ziZ%S_F;=oJ$XE$a+4^Vv#8KCs(8{~I9(e#r=PZT|yRhdUQu$tA!R2OY*U98M1q1@F%Ï%K5D4!0TKbN*&lRif@3)qR;-e zSh0t$>u#%k75&l1Kx?JV{lE!I4$ z%*q3kc?@}_x)9wye0~MKbmJ&<1b;h;)V&!T<{9q$VgoIJ7UEgg#5Yc>zMFL%`IY-k 
z&IIi6Ka5lCH5~1+#`f#3I3ne?h#^>%?D8#dcW|uCkuOOZKA6Isx|btm@p1UA^HP zv~ZqpZs1;<>b&lwb2j_yoL4!gQyi{yUejWn{(OH`DEKtaI9_TKUgA+-_}ztUJxhxU9Ih3_s+dPP$=8(MBBlI2lv7DN*|Y5O%8Yu$$B3`m%&0w-f0ugY{Ew9PaF>sv ztS4zBk;clMWNL`({;$zVI0DJ!|FqJ`E4+R*l4|-$Y4L4f5 zaea?CsSo+`9z|yjR?cAy(OI9N{->q>zDDZ|<(J4%(#O2#pu8u-Xnk1uH8PB}hI@bJ z-XpXxoODK)8Fji-W|XTRXy**-nN2;ZTvNDSC;tZdzwlfx&q;e_k0$4AC2s0t>iNh! zNl&VoQR_W3qo#sAN!lR3xWX)z^g{^txbu2%M+AOos#xbzbGQZo3aCe4G}2gb%30 zZ+X~vyD%jo`-*>SkDL1PrIuHBfGOGS#*}2?0}7n9wMN$-H#Lj;zT)|>zz~@B9*f%J z#*{?M8l5S60(8jm+iUCvBwlKV9NdN9B1=@W#_W~YH`Z6xHZI(B5BD8S{nh+W)S;IL| z#+5i3&OYxQi+YPOO^0r4uo*PO$FP>+H!l{9)Drj_eo$*z(+-10J~>30b&xYy?*}MF z;?GOQ?#d(|s{w!P#qhqSB%QNqm&v*LF2!E2k8zf-)H%yp+lGL7ZuW|C4puxYQrA%G zxXQZrBL0Pk!3QrN!puWjDUC5}e9f;on}4EfM&7!9S}v@wWss-%9MQEAb}h z7q=^{vt6u#tR*G*fNK_C&aAtc-2yGOtflqr6NV5MwI?yV2D3&vIWrJS?5+>+mn<<+ zB_8YL+3oKesLPESz}VEQxluE@j_H&ebsJ-O%`-P@7T1wokd@(W&d#|}bGY8&hb#e) zYwn6X$+a>%GfHUa!rhrszk~O!pPm_YZDwXv!|cqcD|0iW>SEzl@tIMLOW;>YnNb8D zjA~BKjA~k&8C9H4K5N$HaQ5B!od2L3YZT*gkr;%IzU)hpf2xOqABa-ykF&P)#)tHH zM96gq<>zwy#9rP&^cxd2o9=9}Ne^tTsrR$SjGb?}`#I?BZ6Mm@rB@mZ@!){-)9jLm#0M}KYUC>Ng_ zHACuR%u0AqG2gnmBX0?qCdp$gOL$)L)_3G3v2IHqW9)j4?_ArFw-TC>JjUMj9C@oc z@{-a2Ngg!e>O0<<*pat3H)=S)y=k*6k9Sr3z@!*UIVW^w{fPn2E1dRkhaB--2Uk zpkMpy-iYj{do%JX{r5Y4(_eWbvXAm+%S?8O?|mf+UX{#u>cSjJ$hS?<{{gTvmCOe{^FsRIx)PLkfw}V(d)7wy z;AyTE>DW!-JJ;bkpQ3Xrh3`zz2jyLX_nhJXbe{bqUR7|u65Dg@Ir=l+$9jczrGobd zO25vJwU@7qaW)@=C*{!|y}^+uzU@YsoTJzy_h$bWjh~*W`?2S(W0=K2rB z_xo{`(SCZl$^KJUO_A6z`3r{RI+Uuw{t*6O;aGBAb9c;2+Efkw~PmA~)`;;}i1b$u3`KqdoCir+K zODjBC0rTDjPc8r_*l063z4Vo$;K7$PPBC^yrWFH%H#vu}CSL7UB zG`Op&ul#r0fvhR6IGflH4h_-;SAK}S{1ANqGrke5&;`b=8NYF5dl^sg9d!p~+jrr14fsGa{7m#hv8<6N+1EB~lbD`{eXZ&E@!SYzl4put{3|o+xq)up z$$Xo}?_%n)Lnkr^htZd!|HVTafL!nn;5> zZHUx}ow$KB&^5Mpy`y74df|iYR}UHKKYTpqu`fMkbdF^2Q2aJ$if*v};6uKv#N%va ztQ`0<$DisY_6OG)%h&PKaJ9kY+|T;ofUnh(GR3K)V{Z!5?AL;P^P1@6ee5l6;6L*c z`hZ0CoyWWRSSF1o&MV_1I_~=a$Jd#FM^U5!yJvEcgCmdtAz*j7b2wZ9xf0Ve0YQ!i zBzS-;fpCO`8IFJ;AcR{P6b4WbTmvF%z$l34npv(DcQ*k=N`Ofcgevjv@oWOjr z5<5)ntJ4@Gk4Iy~82r?uX1J{CwF7~G5;u-aCVm*mxM#|Pbdd|3g$Ny&J zeG}f9cwR<3I+MroC=cWMg{XtJ9TT!Z_(Y|=Q^;~^s2|*YzLt6f?!HZ4VdHj-IdU3Y zeI8sbaQp@E?nUtKb^PrYFc(C>Dwz-W_d~NX_CjZ-j`{F2>#(p_|N5%AA6qs@cA+HY$UOUr!a>S0q|licu~9$xr=Qw zAZQUWe(ye3elq{XSawFvr2< zajKodcTH5CJG@of@2o{3_YyL$N_cxWnad^H;m1ueI{(V|bL=!a_cE7numeFJ9_&-ugi!rf3P2<1u;s11E#qK4Zw1}CT=yg2i8J<52k6=cN<1r)P zQ4A+<3iO1DXTwDq_ywm(yXaVdq@sLhc-*<1dBj;#@Y#i|N$5j;&3kv~JGj4t&=1^) zFU?8!8r#q2+}y}nD2?&|^fBF|x3Iwo)A`^}%Ew{knFaq>-*9#kaY9<72U}0QuVAMh zi{GBmHP{A>X!19O?$K7Ed$g&_wNuphxV}*V=UVfoVHql8n<-;G zXYYNqMeqXzFCdmS3BG{D_dEz6pciN8TkrsGaNb{pF3rLJh_Bhgk$`SbIlMG+?p{F~ zguc-n+8}g{ZVA38Wd*NbCS}p>+3LWD)FVGvQ^_avicWyP9sf~>m+eKKO+~gY^oPve z@Iz?VNB=M`Yr%i?|J=Gnjhru8zgwpR*1bAgT$;|75MXud62X(GS-|~Qfx#c@Y`?9+ zr&Zv4yGtjk&Z@W|+l|^l+uaL+ws7PEb&*m=Sy7;^irB!|me`6<@hlKLe>Bim(G#Cm z`vcuRt;!$j68*$^79FLn()1=>BGIo$u{LM&A3nm?UD(3tR3F>^<2uR?`NOA`rMrH| zBA%oFv~}$oZ`(f8zx9XqP$q%AGs)9M2g=*oy2wfp!Vz~WR-G#RJcg)TIJKm-a z8+IMMpObuFp)a&Z=nMVDtuM4y=nKufC=D<(?iR-UBybFUp|tOa*~6Gu!LPiCzEEAZ z&iOa=eb!U{EA)lRy8L&%&7ePYhSG?)S$Q751$7osA7is1Z2{-Ng~Z#0=UGWTZ!(7s zx=z_Qi*#-ofIHSEI9KQn#i9#w89gBpSMgO+{HZwepMVd;{YQSRYOfo`{H&RQ&O`$G zME8oIxtSNo@gaQ-8@I~-O?pFScwxol-H-2vQ0$4`fLAaZJ8o~_O|Bmxr`Q>&v+lxo z_<E&}i(z3FaO`9gqnc-CfoY*n*o3H{VL{P)E7Vifb=B}f((J5BNr_n|WErm#yRHErny=59m8J7miJ$3d{ekP? 
zISeo9~HS(R|MY^fu>1JND)Kr{eplcAn%6;D7Br)j2<9L6!|aGq#lZSzDP8 zi|9)f(63j366x1pUQC+u&md(~_yAc!o{Bzfys#$(|5`=8snoj}o@XU>&Zo{N-S22_ z6?MK#or9?BHDF1~+$<66>^Hx`@=*UlvMV`QxpHeD^@`XT(X=_hVFt%Ns(Z}Cx`qF; z>C`!&cEfzLevmRfD~WHZr_NQh{TZL9NX_<$6# zsEzo261uhjOQ-1%-;Sq(-Fi)TsbAKyLu{M! zhuEgFrYiOa+ZG(w>>~r-37JTL#X9@SQ?>UD*dDU&JI$_uiygH4uD|#9`K`R&e#Yto zy}x&~_x`@Tqc{G>yua@|A=c1utYp~yxh@`g%>tqO*Es>%0RKbL^%MTarWK-J@^Ibe zzsECVl{e9ScF+fS$A?S!ChuS)xP);KF;f2yZTVN$Ih;jni=P%(1BvTw?eK_jjqXF z=A1&_G`^KvuN<0UT1ri{A2OY}@L75#t=1yKR%=$@z1s1sPg+{t+iOW6j?Fxv02}h4 z&?mINU<Y9iuciFl(^?Urf8tYZJo$Iz_0bMc zX2!!Z8AIe6Ys2*WKT1 zIe$IEvIigTQ)$-*((>>v+C;ujIPIRT{GeqK2jC-MM_#P9fcQPXkZ+BWrL6_;&Efi; zynfmX-0LDA(YoS?;N!f3?mp&%i$ouF=p(H04thoSLfYls{FV2-JH0YCtiRkYaxiN- z#3ASvfAC~G&fC+NJICF4K?h!ttQ%7jvgV~s%d)VB^IvMF;4963!nddF@M@>zt!}4e zhDXYI10=)Q&d8GSPlJ5bG3^%TPUfP}2N(MczG?BF(HuL97UZW2o$co)!Ov|1)v@mxp>VnFWpU_n4bvz6zgi!p2q1VX+<+A=75w&15|u?4%kh zn4f3B37*ilq8>rtrn4pmeVZ=yUZ8LPnH-+=B+owalI)r2@NcHA2cM7(|NUR_Xzs|T z+)?s~=kw6>KS&JJ8m?!16YCbmVx_a)OCYrJ-IK+O@pp2XYHMa26(`CD8q*3 zx`($z$G%WcU*c%zCfcB|jzw%nN3-;h7m0c3h|+!U>o3_l^L*n9Y0qo$N7k?K*fW;B zwv_*shtqdo*t==>D#{(<`ViM8yeI7SmU~952;LLGy;9bH#D|-93mu`>{`x(htk24H z$tl)>~F1ikya!RoRS#_qc0l{~E*bs>8YzPoy&XCvn5apqNJS}9- zbZ@O1x?^uygHPukG zc%FJ@vkt6&;qtUmzRKcWt(9W>Qb``+=PzwTYvoeJ09k!mGJFRuHe{}9SWi73%~bnK z#18H>J46xm!RtOlWLd<=t?sCJ6PIf$@r2hLD zpPupr;vQ`~?xie>=qoQ|-PL(Y_5m-d2FIMltXZyLaw({ zR)F{FdB^+IFZwF*V`ZX7Uz_LzwSMsSNW?9i>e0$!EN_he7y4TQXSMi&__5?2 z!f8=qSpbx=j^BmW9?#nE4J-~M4jJq9ZD{S?(B?}hC+3*ggTB zdxN0~PvZB!=bV(R8KiF}EyxOB+-!W;b@*}b0$uD${2Q;e>e#;ciT9zLh+z}Ow;m2% z+;7f;EKky4>RCWOVWafbl`a-5Fadw^@5o#!Yd0ZfY1S@i!f4w0 zUdkNkb=4lSw1e`2|6o~J+EMwy(NVES#cdTI)ts^ThtehKZT|E z-F}aHl2goC8>z>KdU^nb(5|O|pMCt4lFbp+)knS-*KY)6`5^4rikUm|V#y%%;!d+> zd{__Z*!!v(jc%H(5*^Ve*eCxQ7b@4XfA(QqLqgFHu%Xrb%Nz1CJ!wq_Kx+i-=4AC)T*| zW&gx>sl`3cD1TlpjeVWx3Tx}c!*v|1$JYb&LKSDnr&oacn0s&ds)nB!j}+lAp0mXa z#$Z;;?5s_kNq!8Km7l^CW$O+_c@{jlwYy}9C4NEXb$nu4RcGIks^J{#IB=qBC}p0_ zW-oATLAQZ^rKHTt+UwU>o;J3%@~WccO7H~s z4nQXp98<~Mc!f1^fw`K_`C0g$u7;k#LBPQ!T|_+Zb_aJeW?P`ggV}G>nCpVybDTNa zh;D|UgNn(Y4*ie`EhOYp!dHn7e$L;!=y!;7!%NJU8{~nH^iYpc$k&_n7>|*s9G;5M zTNQD-%gEPAT}}Fq|IYXIf8~qy2yx`o7I9u#^*`#(c$iIUC6z4PBXmb-W^bS!U?Cxf+f{g3yEJ-Mma3z%CG|G) z&bNl>;>8*DApD3j%Fky!ra?!o3-q^2jEN0d{d#;{?v$kLXuXtO2u>Gg&tKuQ-^9;l zJ-#Sk3dLt(E5Gc6=%5K(tDC*`&K8WjpyTh2(BrSX17S$fuf+G|d@p}%Dq}wby~|sz z{IefGx6L}U=t3p_FZ(iYWus_0FQvdh{;% zpRRWn?AJT@ads?Nu6NcV8}e1r)6?PeANoD)!kfFxr|6v?=$915>7A?C%M5*V@zrrA z_CvpeH^`sF_!WPlceaKPP>lYF4%^jg_M&^}2MS%09q4To@m+e-wm~a8?#X7q92(@g zrwTq!1hj4i`;`-#u3;*Ae(YQI!snw$==p+i=&8)psV3)Y=157b-g$uXJ<+8RI-3#9&l2=*x`S_TuhnCp+R9qJ zQSY43+*o^U>9&G#CTA|~DBG@g{tbPFGW2^wDW6}4PAYUkV;+c6vt*0jIlPr@EqQ_Ov{Uc=C;h&$Q|G+Bhi`~1_d!uxTN(VR z@+!Uaa;z?+DzTmIAK0nheMy&br>LFnLG9R#^FwqQHA|2Oc91fv(@oBky`+qS{U+zl zTE_Deld~n?TJUOw-lZeO_=3)PfjHd?v~x}`Vn8CN zp2^&VuHUo1Ba2U+G$tvm5>}iZ+^@o21b5<;zV@19SBo$>;oNbX7|zchO%u z|3Wj>_IYRN{3!O@R?_NU&;vMM#dti!ep!2;73`=)IbEAux=o4p{xsBMG{BPqEZ1*6Bw%#+?9@sqCF8p|j zcj&N@uVBxM)rB~2fD>PuW6l~e$C7m}$xOUMOV;d}k|7~RXGo#lqc|U(=DfKUexL9~ zDRo7*iC*zBXayrWjmxeUU#Rq(iP!P6G@ ztg+}SCh*>s@VSLg*igQyh;uJ^;J5kK*9AX$WMjOFUn5cG>4*M}5|SI^XYjw8I^Yet zeQ4F>x%C~l3Vla+%*(Hv^c@S4V@*fjaTPwarn&VU0}pS(J%BJw|)Rkyi)kk`u0>z!b5#WK_`iGtDu$ULwk!o#V{3J6Y$S= z%G7|5Hp4f2{V%6tD%b~wzt(p->rMhEzry;p!aEbRc6=Z0!81FPZtw1?MojdJNpD;qZ8A7LHw8GcIn zK>K^-2U;nTa ze&KIw+wr&6|5}t~F}$gF;XlQn9iTlAtbtxX#ks-gF5g+Zj;=r`a^c7EZUO7*ZR*@i zdp@AvwcyMM-jl%fHT+&|#s~Rv^yyBcS2T6bCe2Fhh_T_Dw562!nSO4Zv$)|Yo_mtN z9G-eB?prDM8u>aVrD?yR1FVPt_bIVHo6*l%*CQ=)oj%k~B){wC1v<&QpJyxJoq2_& zX@8^t6KL1iqAo7~RptM$7&*m5c2T}|WpM>)}26z`6Q$9Ri)XQSxz$K2Zp 
zZ>^B)ZD*t0{JKq~r>Sc+@L66D%|!gSxF-&3yDmm*#92tk zGYc|Ku_ni{9;iL3tJyOSa#pBhJ&pu_7xy+f zP2hq4lhh19>_ndWlQH9lzusBSI{g?OlZq&l^DOyqP=AFLn}C5P?4xx~D|?RP6P>ei zgvoh`wOYkH3pw4__B7#Z8GVsRlk?f$Ue*$91=g)*Um9w1j$~gcnrLzsZb9Fv7F|?m z=;FINr-gl}A`m)=IbGgfkBv*fK}SdSeDqt2x|*C1?n0N`7qp4sH?=re*2q2#y(09G z9?-WchxiVy9G_u#QG$+B!5;i@pg&U(*TUwY%nfkNI(V)X;N+6K=!`XIe?8TLxLD2D ztJePV5i~?AbXaaSrfri$v6aV8CK~_QJ@Cg}jBb{APY7|so&=@1`= zrngr+^!EEzdV6E7-d=fEZ+~!KkLxEKIF7+pVd7#ej?5MNf>#DOmB6arsD4o4< zsLp;jPG`S$S#Q5{U2j(;=F$jsR(3+S!}F+s=g}P8JKm#3_H2Oj!Y}Dhe5O@KHP8IO zowmr_<4#*!&JbEj=Gce&HR%$@EuDDz!++BWkYkseKp5#K)* zI!)N#tR#U)lG}7g}9vT1-G*lva18m*qq1 zy+B%x{%Q50<1x88qTDB105X7Iu6DJI;d%8To_%pV<~g3t;QayMyz^JPSl;H{$Hnt6 zc)tC3%z~URv@NvD{jQzp8zFxj?x$66IUaMA_HCoh zbyjh&uU5SuU1`eLuJ*LlZ9(68&+(X6dA+sjbjni45n?}9?N5uT`UJhJJ!vt4)N|`% zFKx%w9+qvisWzQ5Ihoq6PiWIx@@y34`)Y5~mqmO75p%O@E#;4*KeaI}=5zM4RPvjz zhFYpu;X8r4zPu7X%T1kX=kr=v;5;q zXG={^H?3+pZOZvX3j->0Lbb2y^G0}FM**P^J3Hq9^>o#W6VboD)K{xXK}UwNUOaD} z_oQaXGicsCpP92yTbmoHRnEiyP<&cREQxK~Kb=zW)BEaINH zzH_m+rE2Jy%&ECuEJDvYpP274@Y@RfCu9bM^^uDL$7gotI=@J@kGwR*Qq3AQk+*uS z(Vj>=Vew2NAG%2Pl8(=1hFlD@ly#k$na`RQ`SNSQ3FOm7be5~{t9I+9XiG)pq)dHS z7kM2r@dDQU0p#MRb9-3Ikc$_x{(Y#!BciLkkT{?>$7z`>E=F1$QSq7K$jn8V$McK4 zQ}X&*YDSOEM6cJLPn`Nff1|xR&}c8XY_fZWb%&Q`vOh!Y#ZkmK&W|+OQ>d?SsL{Up z;*;kmznIF!Xo6s#DlB=r`?68DRA1? z#9yuiuT?P?H^6g!^ZL5uo{IRVI~c>UVFvkZST9+Lh?O6NJuSZ;79)2;{ysEpsJpJ4 z0v}S>EpVc!t8sKwT@NNU)pdW0&MxApy1?V2PJzosy%mW@dwz=1?nvdEEjQYSgb$H# zUNG6)hYggA9VYv$;ZMmos!a9?VSVM2T9f_D@P2aTU6cL2u)%WaeUtsg@ImseMw9*P zumSRx@ZNHLqRxJS_|bP$boSiv{_>qvoxOS^dc4bZ_A=;!-`Pu?>>>BqLvFK&RI!KD zvWL{Mhcrq74z&Y5==YOekAW{A+*d!+#?*MH%$ZCQsF{E|hp$KFCW*dnB#M>YX$%E2*1m81LId zP6Ya^t(5W2TPc%>*P7E@H7vZ?Te;1*OeBA=q`6tANo#z&$`{ZjI6thpLhN7S6{Tj4 zkW_<-X9oD`-Qb5FL3VMQu{a#jN50J%h;P=HIw$Qj713Wl?9)Zg<^5lcZ?1eznco@nD(1rk z-uKIiNTr&2EB5`bY2UevPb%IKJ>(k3@w{(m`Dy#M#EI{TOOW$U2k%IAEy?IZn_ZNYxZ z$F#-$PH@ZJi0<+M-d6+OdWZK5I%11&sJtk;nUd%eD(CU84M9H2ZScxFZ}n8x(T2G^ z&$NXp!}~N-OysEqFU@$fr!t!Ry`OBRbmm^%`ksm(*P+0*NctPanhtNKG{T#|9cZ*S zTisI$qi$mT+24-nCD%k6?P8p^u}0oU7I20&TMn-5!+73`f|d%@+3k_t;IuMuRC}GA)y^nqd99YS^e5!3Zd^xl-J9#qT*q-eiffhYXStru zbrRRBxL(P119Rg(-=m&y=3>qWTvWhdu*Q9xXko%Q^ zgA#%F(T_>RrdYw>xj7v^@?8_>2k(Oo_tBq6MqSoua^8wFIqRmFoY!?hjzh#CFNFst zY?;qVp|(2C8QJb=h1zbfGC8Z@BRb##&cNp627JIu_<*bo61)tDzsXs%rFBLLHUSOi+GLbk12Z;-s)m5!s-a5@)u4K*@WEO-a^N!w zy&vC*CVjn5(BPXnOK$*#o}M^Y?*e}97B(=SHJir&0@mniQgIF_XMGg1Rc5VSY+3|*Yl zxr4P8=Q6aAGXwE|ok9Hc0oozb&bhs`Al~;uu0gxO^D)qc!Iz@7;kiR77el!y@lW?kUe}N@(dfYGIV3`A?t6=;(ru8vl`MP?SG02SjqU#aGSiX!t!Bph@soWoeT<&S)YCVzF^SCR}#zI;Wt!vP63L*%fzRysgLyY)8QZMf9hEvD`r= zFLzgd?dXM$W^ej3nDL2ZoSt;|$;7w?L`2iKLGo1k_8EN0IQsTJV}hQytyGK=wgyK? 
z#rTN6i2ik9ToOrF&_AKKEBeuu{tTgy6Ol)VG3v|sH1$hwjYZ#c27S0pe+H2LPWlyX zP9=Swba={&tbdTcfb9DdU>;D)^=rWA*SjmDk9AkZKIf&(o$RG#_3%=DrC+YH?#eQE zzk1Ox!(;u5W-bn*UorG+h`b4Yx}e$6?X{_161p*=wo#plzu7`Egp&pgmkd6n>5}X) z{suoQeSAjEmI)aR=bi(XP0YBDe$h(s<7ce%PM*Y>1{dT4UjlCfhgi$IfRC^Cvm9WZ zZxpzL{~ulLWqB8{vZn8y&D7SjravLwa3)GyNV<-5{TU=m2cH?~*sk3?i+@`7usV2E1?Qlf(c`+w zepV!Op;ArG57GCnf(JGeS$`>eMGdmRN_bp1T6ts>AME^S$veBqij8**cwzHQPOSxc zF7Tu#_HJRlgPzu>$WiWq_iNGH!k^IjYV@V5(Z^T}FYfMYd|tNDVcSN$IP}Ay`JFe> z$*Q6qjk*?YJM#kc4fN2|@j0JLh(;( zMEA?n`a<*eRv&m>w|@%ADE*>MMsaOmMn!s1Mg;pzXmg{XvuZMQ85YMr6X<}s67S7e zH^Yw6m-sx_;s5-@*tF-g{eSE^yP_Z4bJ$}Z-*W{23ErPw@O5(GTQ6Y$*h>208qNWv zS6CYn%!6Y7uOY=Iz#2p_0KU9rhHl2Z?>AJ*n7ZqR;$*Z}b7$UG|qU_90*PC9$sz$nE0Zdp5E6 z%*}nc@8pSn=P91YX#3F38FsP17J6|YI_+)U`;OqLJ=%AQ*k?ritv~i1_90?~**{>v zX+XBNoW12N`_DM`p6`VoJbO^n`k zOYAw(;Ic>i&Qt6=F|vpUBVlv<4(G*>pcOu3&k5qY|IFB+)M~YXvj%Xz4jd+Q=!Lv{ z2e>SybLe@`E}`c?p8UeL3SNy7wUivH|8dK3Lz;OWB8@Swz3qp%0o% zpE}W>YEscJ8@T%Yhke`8m44B`fQS9+ODg*G4DF1jF9YbypV09iODvL?{3`uuOS%L2f%E5f_NYVt-Q^efKG*!Z%bS5u*oQLE)l2t}l9zJ*_Wd5pmZLGs zwt4N8_j(5@H&=Qo8;S-hf9}~9Hy}{FBk2NrEJ?Lu;8)$Nf|BihbiP8mFPv>1=rt1r>Ify=kSB>s){was1Ktn6EiWL8{FAq*1-riAucEh?1}!j^yh0`^Y&Ruzg#M}{W(z(>mZ7h&LvQ~w zvc+6vi<6L(d0|74aD9lS9kR2%XZvg4079RzWvCbYNqDiuS4pPq+=qVRM<;L7Mnr7W z-b5z%k|_I7=RfpJPpvl~bp8WFJGgn9$0)mmvWXAN=BK3N>mgm6kKFDivRUa$S4%MO zmCkn7f&rcKo#q|dg0el}g?4i6WiDk=M;BG-@)tyh@m-G)lOf_b>nG)XP1HS|m<__u z!GL6&mWDq6Ux=x&C-Qx*9C>FDb$@)VizS(O3xCBXpkIVd8_m0Oxc@vd#31N+3pN50 z>EEtJSXLtw{RiJ>?wNjC744fvx)I#iA+(iN&2<{(IztfE$fjlt|q+%&Th?r7yNP2-+kDA zeAF!Hl|p1vA2#ds%3qo_e`OmyLM;s3gN~x_2tyf2#1~gPeaVdUwa& zdTb_VDfymXr#76DLT!0zN`uCK3uz+PM*dHG_Bw)34FSA@KY6OIvCUQ2V)6)m z5&!jS!|Ozx@bgk#DIz~QBAE}Gxp17|66J*L!Cq`-?{!6gXMa0eS=aW&0LIth{1kdkb~VkMrGW9ZhU0N@jiIB9(whzh(Ci2*XZG6tByja2l-d+K$FuK z9Of8ma?T^IDKa`|L-Q4&kCulXd|hv$&n8(bqV&$Hfyh#a>e12HIU;f%ptmAFj9Jk9 z{}Zzy*w#5%AC&yi#_xlfcCtdjm;Ryz(m z+^)fHJKUkaH?|jct?&=ptJEtcG}+-k%l}6{{Y85ow>5rT$HU0?La)5l2~()-WX%g* zwxTz^6dl>!=uIz2Pga2*k=d_TGz(G6p{?e3m6d{8S;>RXnvUIlCi#kN0L~O`GSikpqD{1CD|+1Tpu_D=I|{*SWd z9y}tx^GAH=;e5}r06GOf3!CPBt9@+8bwmHKYkvIu4rr&}fsVytF;|z0y->tSSW8R= zkD=~Z36JNEAg{!)ac#(d*gHQS7vU~v#DCj5>mT|>{U0{Z_nT~<3m@7%r%L+l6E5ki zVfeliHqRw1bptY)`=6uh^#C{~Y?K?M_%E3ARq!0|BoE7|ML(heTjkZzxKUC_b{CH( z`{^0fC1|}g@LxK*R|(iVf3{v1-<=pDhW6COx=6bt#rKRHmQe=pGBQ$i&S75l*ZF0O z_y^a)p;g#9<72Ppb_!=v{-blcK-f7KcTkrN_Q216d-#sRZW24meZ1!`yi(d&Q;fYc z@vns6;|2Mg55>OtB)mzX18Ttjr<8of;J1IF>-sA?Uw7b({)pb!5B&FGezpJ}ee>&- z5&o5{Q=0VYTk_1mO$)1@m?*1wX6_-y*HBI+mgj0{Y!N?x8FZGOeEEXDqWzN46XdKa z_Jqy-RdX7CRwKdfomn@VQWi9HA@4iBDe^Z!+P{*O4=0UM1}3Ox6KkVGl4=(DB+3l+ zUn;*0#MCZPUYYj-bm;5Qn=9qm(5AeeLI2LX)+b5c2W(ikO8KPuRPxRu?`(OMUhhz$ z|N4zl&EeD=6sDT}sM|Mbc|%WZg`?S9CgJbL>X#~y@lBN{8J<&K;oe(cE>xZ(CS#26 z3VHmTz3?Z@}Yk>uqKwd6IS$L}Oh0sb6b3|lN0 z(&yPg`!KUS#P2mZLA=**x;%>*O#!=;l!ZM0& zd~&GGiY`C@_jVu#0&`wR3_SN7FKU|G z0*8sYyac^QAwRf>t-hG+In48N=5Gaa{xUvZ&FD79G2cV@HfKo%PWA&H&G}og&CMe{ znr9bKm#{_h=9$o+%w!#iIp3m|RM-ZS2I0RWV6Ak%6*%}VI=5{f48I`!Kp)~}x2DFi|#mr}|dTzcxrtt0SW8$yOv`o4(!?Gx6hW2$%k@o2R819ea{#@=K zEz0FBm;{j(Tf7Q#0vavIxs z-rEh(fG2@9SLR#RURhuX+pLZm1?bCYvGh4${1*_OXwB&OMwS(eVk&Gye*sC|<&ODoPP(R?pX(`H?q ztMw;#^~Ky7?)G(N-`YZ)wibe~HH0vSR`}1BzwJLl**kx-V!l3EsR~b!Tf|6aAERVDjb3{h>q0-#n6TO4yZBoArm z%(FY5lB*1z_|D+G+b>Gy|NehW(z$JND}>%W&j(-|bvr4v;UIJ*LEv4T!8HTsgf@)e z{x{x|>+9suhUd92$_cvoF6&`AdsHyzpmcahVhqmHzQgq60ndc3R6T1%;CHe1Px8z? 
z*RieZ7TC)AsITe5wuE)y&+{VaVf^b@-(Zf5HIh&N71sB0=DPyzpHDw`%}~v8(71nL z+zucg+?kTn(1W~xLhpA9xO`YM)fEjcmzTVtOuhe-vYr^AQ`<}CBy?#%30BRg`5vpA zt7cJlA$6SenI?yZj!@?Kj8J|~i&G{>%#r7hk<5vFmj#r;R@7Z)M|@M66_mN>w@7~P zndg)oxNl+b*-eX2(G77{u93OYM)hd z(emL+Gq2&wSbQ{yaS-;N8#a$rRQ8owl4QWTk-evpbG_IG`SeSHQrY@*yOq$zK>-@o%#`%IUA0c!s~M>(%POWsuSE@1z}Rt_5A5x#$t zdi|eM&3fK3fOTtS{kKS7+AtD7uDGyCv z*^o&7F-c1s)-fMExEDZ~kKbCZG;2LY*@pi20&tSwoTUwisjpKQ`##TCQa&bZnVdnY z^GlH5@=cKapL$N&yndmwh%!@sSIT{XHIcLAqmi@aQ9mwH2D~1xJRde!UJ{WaACE|u z3nCV~`yyz8NBY-dy%aE}$b-zIbnWmXrDIIJ9SO%9b+Rco;m?L`` z`*iBa7yI5r|H${rCv2xO*#q;*`^Y}6EQnYFTu)&>%mPQ>BH#5t?AW~juw%PQzEbu! zF_%x1uaJCG!M#m3asSSD;lJ{&dT56y_H?nA|9AeM9_BCRxv2Y`*eCwT*Xob=+sRkO z`>V<4V7(}Oho-vx|H$(dc^u@aAx{E%V#)Is{e9He&v?EZJ3MdRA=)ME@XE+5#- zymcYi;C;!M^1ZJ^XH+nr!*%cO4#NiT>CRsGQ}A(Iy70#CeAe7s)ODYCiSanIO^UB4 z@2_GUC_9#ZIQXWYGZv<@Wf!&t24{C=oeysog8Z^&_Tcs*R^#23*b}$Rj^Mg7s$KTa z*e201+gWVrs$S4L&;331LJ782CD@1v+oHJa)C&o^w%IoWL$beS-2aXp-rc}<*}p<- z{i}G{h2_oE^U;i9IsA--MJC62-E zjY8*xdH4orAMy!Ya&M!F7;W;wf;}c@2ll#rzC%GG@8i2X;A~Pbf_$r_J%d)Y-;*z7 zgv2LUOn(O|x;-`Ub?VcFJzv1V9`L(H0(YS0s>#=Yzx?_Xz5{l6PHfBW9inYt{HG1x zCwdv5kS6RxJ!?9IvY-C^-tJ-8+SOwNRtyF%OVv9SVKDB#`2N8t-0d!H~)mr=XFy{#{PE{eC9oJrWG^@8VEPCYf**unAM zTj$V&fd^99%)`0nb+0`gu-&VqFT>#ljR!CEM<(e7t{=j8S`q^Pgf-uu_pU}hdKVtW zV8`+c_3)+&dZS-6oALOB7#LzM9mD4E6z%v)@0=YNYz?Np_qKDrlRTICCf9imf3Nm} z$@z~KQpSch#ELcOG90IZZ3UO$ed(~%C^9+ClXMv#d>3IWS#jCqynPfKH_p4qbe=U| zoL{@UO5wf4Ew~OZ6C3FGV)8BV#cmIIt>By0qa(DQ{+IKvV)$uy6HU&q&@V4!%;L^1 zx={TCwt39Ud)FCj{B%5EJo9%-&ioY4GOdHGwR=p?3i|pvb|7W&juv^U=NBK3JzvE< zzK_p``|u5%ds^c+JLF*N)!51MoeF3}EOux)*!^ta-5*LVto!k6AjW238+d`P#E)SniC03Kbw+?s(71(1= z9HeGO5|=>ud7i&l&Fn?~=kVhl*TL~maNoDkOFvM)KDjFTe$rRb^+{hx`(USc0RJXs z^jG+@Qgp$N)x& zsbT%!>?-)D!H&+%qZYp zMc{|0v6(Mk&RoG3=mB;ypRi^g@D4rv@Hf!2O-4sR=nxb}G4HqYzAH;Flr2G*fO>B5 z{$gx>ir1R3*+Fk^l?j~L>fjtnXPXE9O^NcimgA$K7(3Q7Y;A6!qjqbk-l++jn^?W` z<~Y6c4C~%8k^fWR2T$c4-@-SK^l(=8255gNd}Tq)eKrZ*;k)<+!uFBW27F(q)ukn_3$P-e?a_LjFHU zr@CGO{D4J3TWFN7fC)$iTKdAT0iFZq1JAcmT|PixU@lO-Q*zbrkzAGAC0AX#{pCs4qY{_-woaFLv4KD@oZL7Mx zJE*Q^PpGcq6vb4+*FDmz*cO@f?zCm*eRtX_v%#GPWV+mG>&yr4v`uECI}OaN ziVXtV0_`%Zen^Y)hPV1RWCG*C>%vB2B76VaoU2E0&yO>95ogvU(h=xjEaLqA3^MSE zq{G>-`;m5o&)b?*;Ewn+^RRz$Xl+PKsUs+Fkyee3MGa^1lUxVnrD_+hSS)k-Up=u! zrm&Hy#*lALnPLYiaJX3$UA+ZqcsJwN!u0Gi+TJ zo0f5B%-ZL?ziz*98+1uQE|I%Dd*!Wa#5jHl*V{F8r-+pnv+y2LLrUd=R1WPTpJ>#gO z4dpk}hq1(`xAH#%2+Xr+mDuKJJdY&*5qKZ9*avONnWJsyf3GtInjimt_#Y0~h*@6= zKcqaTK=bAQOpGFNz`VOrv$qlPp%8G)$!)u49dKfXV&)TF3^m* zbG5;j=3%#!tleBooIdP!u3bvf3b4ruzGOizJO|%0DcH-*#(riNJfRs{tcTvALvC5X zSk}a~u;tU|d$WTxiDTiKNBnVgeqFVsMW>9eM#iN+wL>O$?XID}&R==9Q~BJ#!S{bKwSDG;pNy_w;N`prUabKVr80nYcGp=b9ZhN5C z96|Xq@Y0*4-aIb^H@yWv_gds!c`o;Efu~9~j?eTDPmyaF*P8-=ox?A?1KfBYJl0X? 
zI!d|Ht~%E|Qi0Pdx2vv_ovN#7kLtRa4&J+MGkKxz>kGm&@*hyTR98siV51(M7CH*BSU3<@>?WU5&0@5%c8=@Oa4~$u*a;EdrO{ zJPPgxmskEke~wZ2DcbxKeaKc_y~F3p`R7#EsIX*t8$9cyoVkO-7RgoM{Cf898>iR< z*u#GZ_t$2#SAhGcdU!ZOk!6%s8J$0S=&}QeCF!YK_lt*a!!KUCHNVUX48*3TMYizK zdJ>wg^fJChxGut{#9zdHWN{_X$gSDy`eNg9x{a6h$2J~T@%%pi&+x(K`kS~$m%WI6 z{1pGEK*tHY7wlck2UN*6D`i>3Jot;)!I%?$XUXH6k5eZ2%o27k=7m4bQzmiGk}|iU zfBZOQW=d*9t#6`y(I-(pKW2ZPblLm8 zdCED`qtT~N($#txa$*jGfedkcmNNio^U=-mgu>_4!s+jj%Xm6rVf_#_u9-*#U*40t_TxH8!vz58z4W>QM^%kGi^!AnDw)AO4`i3RR1+0r(%;(#CI+$;;wklXtjnE`#z`OVHopup^ zOEq)E2j7}yjJ>d9Ddk<6t=048^rNb^YO7^#Y$ip|$yH2W>uBfi`+3G%FQR?7XbXPS zT@J>rhBg$^eji<@2Jh9K8k*^zXb4#SL_>d_u z1ze|aJ(KH#Eqn*Q%MHFqPcQZ`htvL- z&3b4r5BN*?3H_5E8q5P;xy~kd?kC_EaelVdv^P1I`KcN6+Zi)z1CgUZM++PBRZ=s@ zp-B94K)>$5PNbqy_&jfIy)n?_j1<1#SljD}`6GO33Y&D{L$aJRf>^s>pr26zU3(h6 za$zq&m-`Xu%aucSS3z^%rJhn~>|o@OmC)IOr+EW9J^ zW~8BSBI2JEB=TK2n?5N?*-p;5^_-C^;Z=xOM}pTNeEtZ(?W10A;_o#;=L@})YRzz2_R^gBEKIf84dLNtcBQwhbpZJ_dJo#4jYfa9!e22Sn zdgmze8Tm$nKU2IM-$3SKVlJc_YXakj{k2x6l8|0TIqv?9&>+a>hegv za~yg&FXM9s8}$YIb~MS^SBFSn!B41Gk*$Yrlm-;FiOX0I4nB&na3N#A)l>f;apevp zXRqd-@a0s~QSX+)-=WV!=6Bo^9wzS)Wn|Jm$kmmHepdQk7?zQQ&j^3&epBB%TlbGs zF>^WdBcC~+`6Y4XKK9HIap6Rq*HNdM;>tDr?AlrNFs@uFGXGlqtkl6P*{%=F{ui>J zSE+9q{!spcAByAuJQXA2(p5i{3B85B(ygBGD3Ci=agMws_*a~N@8C!2UKi}pgxnF{ z%3S1*X6iV}c~Hb*7cxhGK=7~(%bVg=hc6}uALo|8t&z?bFrGi-N6`jvLHM*2v6n@x zjs5T#_9AOp1z$Z4zWOTo>S^#8um?4-hX1}hWl6)Hl%)+Pk?*~X&yd%F)z_z6_5%CP z6=`Yb616_a%46YKi##Pfe>!Oia;T2hpZWhOw5R8_=@xH%4S$4e(fe#7wm37e!-!9~Zx7{r?W&jR`uX|{OQ+mdwS4|Z zoUPFAk^fW9Un>3oGxZ+h`tX^V+Uav8$XwMiLvxe0L)W4EDOXPFiR?8I`g+@?`P!%F zDm2HrIojLUa|J@b&(2NKo`nW)d2XgwkeiAgxyAhrrlGs|h)*xpui!rhL$eJAR_U8N z_RW;yHI%P7xAwkiOn{l#*k1f^%^VTe-*GMY!3jQ%@o%DIGvVi9#399B3F~1s=UsRS z)qb5M=Sk+fK5b3(Nakl5IPMU<{8I2{_0BdK52k`=tgSPurkI?~kb4OKBcCFl5I#xH zkXC?qhr!>w0p1;l{M7;8t^Fx5qZ*nmADg+-ap3WZCg)oCN(DeG^k(-0;{2V{sfpho zE+gM&4okHAk^P&ng?j^d6R-lCfWaPNPl+?Cgg#`uE$Wy$zWPp>Yr zya{}Ab)jVg@X^)zmi2)3%o6R!*_+xMXBKN2q#MtqYO_e!omrrDCsm{lj(m7_5140F zTf8$1);7yr27V3X{N0+fw>Rf(J!fna@0HRd+mpOgJikJF#Pfw*i@Yk&Mc&EW7x|ZR zU*unPWufI2VE&c4mI=VTD=RD-@D$H;$vZVCRa?nAIC7>)dyeZdSC(1E<-D$a&id3`YI!2(HEnFpaxIv8hjJZ#u0VV0$|B2Pu7gNB<}A_L=0_`hcDUf(^mG<1lm71RO>p;%LWx4Z^wOh2bT(fqR^I}Tw>)NQi723$$MRzwAYtXYOm-%)F)UZzXK&sm3a~7Ha6cx-)Y@{z z*wXTs_b!in7x-$=<#F!;FW%3${8XQB*#LCm|F40U$NdI0Yjb&AJD>;9A2`|i^0BDSyU!>gWe4mz%O7QJp=|j}Rc26KhmTCQs_V=aDD**{z z5}`x)i@W`S)=RWowEtpqP4p|Y|3Y$p^tHzOF;h)#Eaj&5mIJ1?mKml%OZA>><66IW zZQSR(u8n&O@Tc89fz!@>%ZI?|HrK|D2g-mN;A7x(U^-w1{AqVj;Pmid%ZET>ZpweO zf34^0=+ly?EfIRAfd8$$R!8pz($N(;fzQx->x7KzozG>|{xmToADgiEz!&-8jqTtM z;oq=uKfKN@$e@rLAHxTh;al{Yq347gEFU_l^jt9U*t`zj?rL)OM^=983zPFZQU|=! 
z_8wApDZJ8(z@Y3?#B3~M536IYeAtF~oZys**1=ZcGcc^RWGy*na!%$OC$`2nG`!Vd zWD|w>5-T{`A!AxpTkBkC%!}|#9oX0uV6#<^EmkFVV4@9*6zq7iD{`QXFJx3;n{U4+ye4!c1YQ$3P1yE6!fOJj3EU>|+Lx^Pmf*EcZ>eK` ze^gO4XGWR+J)!ullB6?ts&ns+WZ~q)U#&o74rQV+!_sT zJq#bx2DGEj{=i?))M{JdMG8DugHF)fSC?7707ikALZCTffh}j2YnF32wPiU@O@aP+ z3H-JcSPou$9vB0TTLioaj&p)Hr-0|aA&m#u%_U6%zxiBzUF&yog?1@-zIGd4*jwPa zqu@F5o|KErc)pZ<_cg6FZ@KnK-mBV27Z-uw=DNR`z;AUwOfaLz$ajO!!g;5N^UfM@ z+fDfNA3el#Wu#sYaosDVuLGUolL$S7v*EMk9q^43(Z$H*+*cL868o3e(Vq=;grWoS zxlYdB1z+I=bmJIo21aA27l)nR1itIrz=zy7lTPD%2Y@%;4s22!1~a<2)6nZ}j;@4fGu5?g;$-DDcnpq#FDtpw zFu(iHmH!WW?*boXasB^4&u*?HfnY)i7fAw$g0@>eZnS&Ta{M* zH+em;eQq<)%$YOiGv~~iGlLil6@)(2=MAnO3B9+^f{=NAR^D)1+=_lT#0?0$G47AE zejRs3>TPir<4tjoy>eq*zVp{{%k4MDS#ocP%erI^z7goX)bXQ%_p@fUn`;?95;rMQ zC)SEM#FPK(|&jc%+$BOS1)tF)R#YG-enMR) z?3p^Y6Tg=+9to#ThxXhvb{h42;nOK&|B2sV?5ESTGK#-iIXcyTaOqO)%b^@QE}2f- z-lg?Fp_fAcdxSahAn3W5xv(H~tiRz?Xo>2cHwPcIPp93{d?EZcdwNE5Dg5?a(D=Al z@tG=08XvcMMGCf}j6MlHl$itZBdh{1ZwQqAOpThKloS2#W%{r9SA0Ytt{lZ$7%;tN zw57p}(Cy>&7uZ zo723|G$xS!eR+3QbDn8t^HNh5eM8dZ;uo|Q9V8Z=M8@_#QZxDl&SM!U^WsADg^Yh> z-|H*r^AhX%O8Po$5So7mt)+jjo}3of7hk+LLVguDe95okV)28^!QY5|MA9~)d%RCR zuP}}}Odb~G-bLsi!?4M|jjZU+u?m@J37gaBPhr`8Zf}?qmu{RGH~i9vu@mO>DW_eg z(>}k+qWy?_cmwnk`;Ww&R2O^FwjcGDTgrTlwzwF3fk*5J@V@MSC$@rtq@S&N_tVS4 zXF8W>FTF9+cKP;^wpV!el^R~s75ZOk=u%QBYt0_=qzz!b!U_B(dq-7W`+fDIT=s|9SC9C`%vW!Fc71f!34F(EQhQpD z)b+5s*~|Pg*7~^V_cdXP8z)s{RPW*wtnsNT>tQ`OEO21?_8#n=+|ydRwuiOs^&Z-K zpX+FADSr-quP^zVGuR87x?Ik=GZXtO=hs4~pUT4{Mixwy~8*vve{DQ3NOu0_kDt#+SEt$>clD0i)q|Hv=26%NOI$SwCm2WV+ zldw(f;kVMaZlxYfIl4k|@AvK3H=Aeg#hM?ouzvTz<*eU@fBJ_eAJ3KVk#;hBepy%p zF8$Ji4YrDRj$?QA5WnIB)~}@FJM|d)q^xUu1Ant@>}$=u6AN!2g-1LE%=J*0mDt*v zURT_8=m>*(ukJ;BfzT6bu{kGEuL!XV6qw!OyUm<}+p|`2&yDVr7Xu#G50kxTdFNu* z$*>iG9NL^AXHnhZyp;xB|T69A!5(@y-4` z?~Tn*OP^{tHIrwYo`(0zNTcO7jQLp39a{OLTwC!8J;A*e^4-6dJt2abJAcva4yT=6 ztVuN)ncIgRvd45bgbM7%j;%0>uK}E=iLc?^YOddg03Zd zyUM^|7+_=s`-ZOH7A&NEo(@`uX8=WlZkz1=XYnw#eEiY-nCPFpDX&Z z4jp+ontpa8$E>Z1*5%12YooE#pla5JRMY20rhJXP$zkj~D9XV8#!i1lt`XqSg$31anbN#I)1NX zSeteL{yR#0%RUH?8KX;vcI%P%Vc;c0|4O|Jh|#f>-#z^9FzicQPTf~C9(43xKeh_K zse}iQkhTJzI|@%e0dHtL1b+w*{=tuL@_pyE@K-$hsnzn%81BjbzVfaUKK+`!a=90! z^CEmz$2aEb`3$z3%=$TeXW~4XcO_1e=&PSn)(44W^#J`Qn|+EOWFKPL*LknuUx};x zY#J*uVd^PsBY8IP{bQ8*D1B!#`N*E%vM03cF`c1*gYVy=pHs#J&U;9CdB;gT+`RuQ z?)8-NN_y%b{qdK6X`=OW`dU}crSE)U_$qOp@vFptLDN3;>rB>9TL{0_OnF);+X-n8 z%5^pG`}gTz=y~y-Ryt6|reH&UoRmxG$ag%XznphNq+FEML0R{c?;t%r_YRRp@6+s6 z(Mm@reXf*z8fbF|efp@OFtL>Vu7@+W6@5fypQbAM?k42ZA^Lhg^XTFidMjh8XIP(< zM0~Ua<|8iv|3scOG8Pq?dJr3s?0+u&JRBaCc?nqq$oklw`{2ccj9V_IA9}F&?k86H zy)m8g_AmzR&N~^5PxgsT8a-tnG0=*TUtmh94q8A zfOqA+&(Wjg-P_pfqk$N8`xJ}G%K6^{*vH;zFh&qOAXp(dQ9yq!qb~|hoS@(B6Kshhu7u!79(@-&3NAEvi_9y9 zHtF=y2=<1QJ?D=y|K_A`_9U-zo{flB@}3J+hAx1&e@7iA^D92KS@2Ib>z5Yun@r61 za@szFnDk}vc?E4=3QxU6`#Nb;oxfku+aBJT!FeYAdK$mM!Y}e3{38APS$RgE%HlUj z=OuWrl5ec!p2k1C|0m8fpy?6v6M1tFblt*OzKA|)rJQpp-)w%Lq}=yW&RfISTY%pL z=2UbgEww^KGJ`3bgd zrF`kK){MR>`T*x(9v^1}{dy+nYF=iK`wB z-w9s|zh1_io2;=o0VXuUuYx(@89;#K|b0*4}(spady+Y+z)_`n%txP%b{^C_X7QBTS4A6 zdV1PogML2BZhDz>jpwQB3E?Z|bCMa;$oh<1Df`2;K|cGvOojflS#w(mPg~yh(ne?^ z_5TStEp?aoC7ryhr_iNdM-FoFu`ukg@W4Z~^^L}ECH2hN%KlRakqnn&6*i=EP-V`F8fq@%PqJj`&1RTdETk@c_q)QdHzgi#X5#>q%(F{joqgV`@M`cMNS`} ztRkx&v_*gRstRDejDvQnrO*A8^)XenQxa__^sH2@tL}`K{?^$Qw(Ae8GHAE0e8Yj? 
zv*;D|&@P?4US7f845a@6I+oH;ifB(a`TV8gq21R(c2W;FvgZnY;u<}@9{c3Ue-Oh z#iP=au+lxISi&tKHZS zn7a)#n-esl-k{TAY1%Odgfhuk7|3_K>TTJ!_Clc_??Pv!NY+C2PP25H7Bq@ z=4fPJMi+E$q7Fxy`+h4Uv|t7{wBz)FI@adwWj~0DJj!S=vX;7Q@h*PFX3>wjmA%g% z7jZnp$G;Z)h4|pe8l2KMsq@>cGiu_Xk3`Tv55CX78Sf{%gX8H#@1wWCGkMrxa*(_8 zb}e1Cgf%&pkK?S+yO>inU6d*zy(7Igd3_k>1AsDG|8tmF{$Z%N3}gq^Ea4VFEtSPLpK*+eIri_P=! z)D-u<;L?8TS#nIt;w&>8jpHhBVcaWp@a@BxSzeN93ppTI?8Ue%z`)71y-Z6OpXUreg8m<`H zS+SeMHXn|yvKw~$X@j0%XjB-^yIq_bG&A-nEoF)v!Rp4^m=EDX;WXbHJ|UVwZiAiM$(};{FAF{Q!MA ziLr7`7IOgUDXdwEE3oDQrs;kavQ0DC44(T_;FWe`U|S=9MY# z0m!H$1u5Vus#FpBka_yU%8@#eqM$at7oO>_B`H8TY&V-&9U z@Oy&mKLy)OF0OOXHrK)dNnj#XjU#yN6B?WVtREa-wx%JGK?yXo&7Z_&>0(#D%3 zEvC0)EvCQprhVcqrgtu}m{$LUeKW40&91bVim#&W(`oxG##vJtZ%wzDj?c1~mPNsL z9E-c!P49D9yR&|az3isR9Qi%$rcXIm#@S7CISMYs7Qk_zvYftPO`NSf zk@yeeio_-*Kk-xJ9~0}9CllwuGjrjWMbY$w{ua}^M8;pU8JEpr9YHoaLJs9Y793?> zexD)QHRBp3&Aq*|wKlc0^(gaOL54-23?Y72Px{GKQ}^X)gz9nn~{*XEBYEycn-)wgl3r>Ul8^y#yg5E;2|zr|e3k@T(!e zhI7$HEL}-MKC%Ak$J3b@*;hio>!`!z3ROA?kBwh|F7lxu#|=O96h}G?DbDSfqvtcex>hR`hF2I zeg)TMlAiZ6=(|FT_x1Er7xLZim;V2J=OQaCKe3w@axHQ~-pjbmoBmDz^yK@xU-~(| z^D5FfuE3VeHS@047ofvx=wU}b?3cO=PfWC!p5wU_`6D*1cx7dx(4^3pM=AHs%HBkI zN27HXYiu}|y=~-u$s?a@gRX~7WW5c2?s5Hn@*1z7QwHS{)^>5N@f7b`272owb##!< zA@m^M96b%`7yHgN-WYy`?)J;$=e~1|KZe^)Kjm8Z zBbojYKipz6(?^`h4y_FIGp&x&SNXp5(TRFq*VCLC=X%EsUD(yjDBYbHObKjxvO_X<} zJv1KcFLnzbkC7$=xhr`|9m9ntQkHRcQw=h4pf8==J4hPIL*p~XCI`6I_>6LAsN^x; zZn}{79i-FhH^Oe};g>#6KhL(CJ}0ds+xH&ny71(-Xlse0I$+L{I*ZAeJ!FR67 zY1S&g#I?w2DWjROlVbwq;GVbLxu(y3>6gA#KNnt3w40vcTH|H-#KOO%-^o3pnWU9` zrH-nv9hUpf57CZxb!Xx-uB)Z3DXUpso+#<EzgalZ3%(pZ#F5=V3GlJt}{Lw!O^ zPYg9Jy+#A_$o5O0>pPb*lSBD5aRk?z9S1&OKS8T}|9$0;iFb3qtI$EpK;E^K=Pti| zF7us>?2A|SBo5@-EAO66l-P$`-@&%SxWqofZ2MpqYfNbKBUA0BBfMWZowhd`toL=M zk22P-no2+foa+;_?WVg8cGIK**w5!+*X_huCWiLRrtUd*Q=nmIVz6O%VpqdI60@$h zn<5Rn5-%`(k{D(9G%?7qC-GV1Gl`EF|CG4E_-x`IjD?9ATxW7Ujq4d)FI1mNT&(^n zajE)jqE#(S+{yJPT<_+32iLXuekT#vH1h>~;21-fc1dwR8)UXEz98A=9>%yj2_1k~ z{dohKbNV9L{Z$k`3;2Owq6TU)OULkgq$=4xnAlw6PuZE@n!zdVX6`#uS?|u+Ui@JW zGQO{4e4oO6;UUKOix?X=GtN)odY>`eHFGtwp0{_k9>uo`0@6 z+4kgZ$+qMDQr)GDV=K{D&G=>1Vte=ipKgcP5U?90p;L6qVecWaZD7NwA)i`erHNgo zjth#o2HXYd73 zmndn0irGD-m$EU4^)r8C?*EYol_3K!Nwtl}E-*t4EvaEXU;F@MJ-@_jEo8nj0~_NM z=KU+UHvoORl6n8i%MF^ZfXrvcE5!L>9f71d#_wmOi^JaOU@qqKB_nOOv*sld`@zqY zo+Vq$wwt;U>*6i+`3LtHZ8w4*XEh}?>{)z>XIrqxJQl%z&pe-oUU{%PIxDubc3H*KcupHuEOeg}O2IC+cD!G7it>c_M8p6__E%3J8UMQq%K%yA{6e!rGb?`hl#iNr7L*-@OSRI*HS?nYn-2!|GSqKO1JJ zxEXWh9h@aL<8H-u$V?aV#fFSHP{MrUZpH2VLUC7=DZ~X*-0TDCmN~;Tjv*WkjQ8f! 
z#uGV)a%nye;4s9dih(7b44$2mhmogKr*(-=eiNvh)yhaiUrV5cthsX zMF%;KZZcn|FPOeiKObQ?UCp_JvTA%on%QzqK9bIvZa2-+-{sk0{hT(sLf)fpeHizL zFy@!^TZLEn{(jPD%5%P}S_5twmNSgf^YiBdp>@WGLM&QzweLJ-{o19ehyDX z%6rgs8+7>yTFCch-nNwQA0mCSJoo#)Nl(l7Q}uJk_9r}QzJqqr>o0Zm|NiCryL`Wn zceTDhjSY9M+OIWMzZlWK$=z}GsB|Zan zRNWO}*e0W}RrbQyt{<^_E@ch!VAcIFSaqTaf3^{-`*$(MyrcBlgOu|?6S17tP}fh` zcVa(vY@}ZwHWCk>@;mWEsK-9pge*S}w*NIoWj>=#11C?@e&P$afIeuB`}0^0>nN{`kz{;xobo!kAl<*g>q<$4;*o2H*r-kUg=@?HZ!E`uKzLdU5xcEhL80#8;@F0t3F zpxnYsHIz;2=wG(EdVZ8GM?a@*zu;VSeZ6erUwAk7BK^uHzKVD0X(-!e`Z;AA#JNsO z@qs(K!t53t`kK5Oh|N&PJNJPpv%!=&V=q?%JS={@#PV=ggDK(@AbQf_uw=JQ4c6iZ zYU8kpDQ*`S@CUd;s=6WrtLR` zUZ<_?X&_B0n0bh~!)nHTWytPB^qp$@&W+rYcwec^FU|lfvlP~Epa);$yFZHiQ@B6X zcRx;I+b4Kfv$ZdA2Jiin{O)G1W<2A^Y0T9~{6C4~cP;anBbmcYWB&42kKZBqb+u_&Y~8*RpM#H8M?X>@I!fc&q&)g z^iFh4Yft=Q<9XMJ-+K;o((%OFINrs(r%5mT5;KUOA$|$slkl7OTULgm3y&e+SIASw z?2_Ne{E@cj$>U=spu~d@$ZF_+^>Zn*rr?u7z0K}}=))oC*#0_n*J%9cDQ^bvc8*70 zp&K7WXC6vjYFUpU`e<*yukESg`~GmgKmF&aw!`S6k7;EHE7`|5JbvMcv|!#nltlbX z#b}kZdYOMhn!ZY^?H%+8(ZA&z7xE32_m5?<-a-j%^L{+<7mA;9N%BLFt@7@x@*Ha~ zF5`R->n(=+)>{OUz7L12wfLC*Q|w(IU$t4*Ti_ED!2KYO1x2d$1D@x0U9d`g+^c1+ z#dNc~j5r?ktiQNQ)?8ew*kp}`tgR>`_Nc70ko6VyQRpjcdsrK@0tY$_Nqa1mS=J~V zW32_Yf}P4F;>}a1uAC=uzMu2%ocH8lEy$5F)>u=rjvVt_~PUwUh8Cbzr^~4D=tuOihJn7`-TQ7H`#k8KNQjnn*z@s z;n|5u<)(ev7v5JMpxk7MO@8P)#(dMTxn#iOGkIQyO(G87Gh=tj!tS!}(G=VB*h5zH z{Quy?OYw&p2;V31PDW4Gi(Wm_wuyBV8{qeZhdUPpu&(II<0-Zkq_L7FgZriPVhRFT zgNaYmy$)hz6)J%)^%B++{$ivppR|wj&I#gG9+?+Zun%9R%kW`s$O^EHdMvcyhGP%! zzH)w8fvi`)oHnh+U#Jd0@*(p(6;v=cUJ1?y^DIex*}zp9k79$_44>6r#(Kqi=FT19 z@yZbM*GAq`@YC?XN651@55B$@`k1FPzQ4pB^<2Q3Mf!{<#=Fmfr>9rGZCC8WvlQjZ z7}Y+!zOypK&e~V(4tf2^FP3Ky@@%icKD;~6@bA#}lg@%x{?CR*och^>@KZmV6L#un zD?6R~*_)xt5GjLRoJ)TqcvCsb`fQ|aAabRcy|VsB%#?%!+f6xZ0&Fv=XD0SX**ohM{8a9K!^qmr zsN#{Ysrk9&Q~#Hil^G8!Hu05DA-{C&u}*xajmVHx`-25nQ$D+EyGaGF zGJST@e^HNx$buYXhJ}7MrDv*5WXX%*%^-aDPT~z~3HjcvZ6j^ZJ~7fJ>lB9~Pe@}` zMSd_2cd(B|J-#qPOOeTrlyK{#eB1i@NZb3+q>yho>pf|#lRFZOt*aCoiLFa8s(OSs zT`cL8?VYUiNH?`_s!hrf3JnLT!EMT7`P7rvg?A;LBiCEcS)>ts5)8VSZ>+UPSht!( ztq$|m*1oZpn_q8I^ZRq3b!9sR2UG3r{l#+&&kZ4#o2A^vymJZf^zC7}SxLTHqo348 z%2ftmsi~pX)x7gRJm>%q*#FQE4?d}7uduVg1I-8de|XSVe*O;+&I%9Q=Yj_xGk^47 z!-Lw>(qCkkQ%d zuzT^~^5Y%g0naTw*YRKg@3i8<)(-F>tF!e&-tjCvJN+QHuU9{?`{@VU&<{4C1H8f* zzbpDd68b@mPd|7B{oq-iJ<7B9qLiBoAH48Bd$4kof`0G<`oRqJgG}&ymZl$s7t7dR z*AJdYKUmLLe;v>FgVPZY2Ni^&A8g?rg?w$-di8@hz;gBB(1Q2C=oL?<*es-3P8x93 zx-BQF;C;p$GS10}75yOCmCQW;BLlqp!5>Ncx*@b=S58Plj;tT@=?Ca{?x!CMD_BO1 zjQi(D6v&wB5{^)g2j+(tTum&OqpWF>81|LM&YGT(EhKBCiwJ(9_R^k8Ot`JCtT&z z6B-!%%Glfg*)2Xjp`&LH`t*d3o;~H$6FPeKmQPRU=-Fp}dV)OH^#uPiy@j68!%t6; zF}u`7^ntzT3G%GFZ@ez|WxUSXYwf;1zSrNMfS%CN`%j@K$o=;3??q1#eE`3(Hu?Z# zB~9NL=DRQY$#A~=8$Ufkug^(6Asjs+f_0T%J>f9xb3Q^>h=jjBLSMK69YOrfa}1HK zdV>2>v%8J1u<)d=@IrsZCi$&n&70(zi>}~eoVhO*KKAMg%l&l);nz{fi#YU!1oVYd z^|{}dvo@Q2WUMG@q(9`MGjxTo2XlzdAal3n=nGDCU|B~YYhA-R26A858O$ejhMlcC zgZuOtG3*sEg1y5JyzRw^IzOEuleJ=^Gc5G#3|jn#JO1@eok7O%?RAFulRCo&U1yNG z6>Mw8iJkVQwm9)6bqVOho(Pl@Yf$C-R-0cL5~dOxQH>oachJ%VRdI~kvU1zA=Wgi z`N+i$!5rukJGY}lIE%mxp6i$)`8>sYgLp1_ggH4$*CRI2H#TT`#M;hUIsd?WVrFW| ze%IDw6x))@njHl{A+=cSNXM{;4M8uAH!2%*v4bhdq&P*@V%ltBZ&udvq-Acy{%f?{ zyZ{VX3?3{B7*Y@)fN#vApUvTzl+Fg%4lJCl?EmkN*9jZd~6i;_OH{RO)* zeNO`AW!!mFVUXgEZ{BWF=9yPm&=ZRuHm^{XJD#^hPPS{&%MGWAAKJ z{+aNhlwV@=ED{W(4uQTptfTyk8gq7wz33G6n43cE^(3)XbzF}lt@Xf0t=A)&J}7mO zdc^wb@rAD*-+B45g`KASo6oxZ*w*#(i@iWE|NcLF+u^*GAKmRV<;Mq4;&iBIrayB6 zdiiC1tC#<+b1Z*RJN|ju$3MbH9r6z`o=#VOr8#GXIRJkfUp=<6PPc>paFUN$*U?%J zWUyY3CBAw{{4KpN$ggK%1omR3wNKYEemOSLfUP*NI0jp3LzdAderE3J;0S%4^$n~+ 
z4YZXFQ`}dEnB9{VWurwgZ;rthoItxf@E7#p8{nLyx;MQ94j9e&1}Vj>FIF~oCti=k z5Mx#Pn>WY*L>V%oud*?Iwq>|u96kWz^Y#<(T3|c8A0B3QU)qngT;a)$wcA6j`^OOj zdu^EYaK8ZSA!CsBU|Fy=Akd3r-TAJ{A!9bm42CJuRm@)`AXAba>esj^gFee|)7*ZI zJ2=*J$e1Zf>`=7J9RbF?Ywn!fII2rfUa#qs8`tq{0KfC@n%r10zgNMv^MeZ3DZNWX zp2^%#4)$fq`+=zZRr@G+1GM_T$oy6E9mzA7cNzw>|I~QJ?PPAabSW``crO-O^yfW? zhxN8xU&DFDicU7EyX<4FL>I0a$M1tZl#R~uW_J*MvuqA)pAD+3l-NOq2Ii3YZH5lX zirOh-o>DN0{nMQvE1PqhjRmqtuQM;g<_N>jFw3&p3C$eSi3`d)(`Lr6jkL{0A<8Z{ z?NRx7n5`08mgXq#g+}w{wGoZO9D~j78OYK?V&M@0Fs&vBTYi>#a}M(@V~F49piIZf zV`ZjNAnzKK@Dld8*i6jFD^uo0vyWU)ZLi&$5sDUbzARhWToD&ys~oMkUj#oY;;ysR zj36%3X!6ZAZ*~l0Uo7&*u)pipwNI~#@1ks6W>7XB8y9RVC$BnuM2_)}(Dyj=i%syR z?5kbMoTd|hGU5;I`Xl)Uu#P$>NO?`hL&wIAvQ^+?Rz>yDR|^K&)PlpI;U($Fdo$x4GqMRkPiu*yLJV?(jDo7QHuS5-l0 zo5&?ux9DJuC^VP#^~0g}Z00w^pt*-QDuQd$j>o6P6iC}jyB-^NolV-cf_9d+ttuF6 z)7rPzvuRnKXCrM@o6K0Nfig+E)^!hNo{F{YwC`$k8);Vu<&tkU5Cfnu-_+Z;uKOsP z-nPF-mbGM?vB5Jwn3#e+H`V5zK9ae~Yi!6h%^u%4H5mETv)D~Nj>AXZ?*tcDrXq8w zpU93v^ufy*pUAaB94lF;FMi9yV=WD8zU%>TBlVPWNsI_7i^N)~Of|DdVvpj+slszT z;kjPLlPQw}9+G{M`{Fb0q#Z=Yp$EGAhUeP)ZC9+qH*wHM#zc2Sn{EBM&O&w&Ke1#D z^0a?%vuyyfb%zpM@?K;>K~l8ACVbR?nc7JEY5N#Ep^by@iA?RXxn-qdq@Vj_tNByU zm8Hg*K~BmheKLTtoRMRZqciJ-yI7~b*|KsqV{-5F*kXBRz^^ll=PC9`YZT89Qht%8 z9^{w@Irb6n3w;=C9vQ&>b&6`eSIfgtEZ3#POemvomP2D=8ys0@>2JNu_k2ma=aR>| zNaEb_+>;uyYd^eqCJZ`>Kg4>VOn%to_Q#(6;F1SFHRmkV-R(s+KN)!{__ZG%&FrsX z&~lwlmVwF;k+p(n8V-%};!p%J)`SNHhi>%8p@pH=1Eav9EO4mIXg#z&(0Xv~NgRr$ zo`OTdQ|E?5p>1#|I3g%-;=h4IjnIB?D-MNfIMfI|8tBi?x4|Lgc@q8qR%oK(P=h}X zg=#p|MfKv4;L$^o;1GQ{2su_Z>zg>#1RdJo&=xNaRX*OyMx56YXI{9i6nZ(b1cQ3e z2YVJ@3;rCZJu2wyBA=VUCE=@id{5*^A=sL?{+Z7W_%LE;aXkhOHFID3jQC#4^Oq%- zI^_~P%e)IL+G5_UbW^Te@UQ_4iY;~_Ph!#OrH@C@e_hB7>8p}97Mu|oIS;vH?yPK_ z4&F3yT~0coUmY^Aj_)*6J}K8pyy>ptjk~@r-qa!UDo9%?*eiI8pNxhz$W*}^{F?Fv zgNXH|;g4XAlR0I_RImms9v|X0 ze24-cqBMNCU-I(dgT!ec4n833HGFUj-@z9Tk?)!Y&!$QII%xB2`1N8#PYoN=JHUnr z>LUF2?eHNMxu>|7BF{zsffdWmM*5;Y2LH@+r32pv9V@_zmnZ}bbuK<1vBRPo_By5JJ(&I zT7SoL)Rxvi!H=`Sj^pr}jC)1a%UXp~>n}Zvb^KUtQ}Yv$OM)NjMQyR82<-R_>@bV| zLOsFrS}$(E|KP@@VP^LY=r7l!zZ?NKSkL4>jJ)*k&+YXWvCXx|jX0nFQi0C$8OQS+ zOHZM{OhtdW%7+`*@_QTl%Y*Y{3kI~)U*eF1A`4>t^p{<&xZ%@Zkb|}8W&XG!nD7nU zi1O+$f+?cEWb@mjE4YCy6n#Z-!>hZft{`+>(O=4Xh1qo6aK5i>79B?Pm(p4A3-Z)C zmG43?N0d_H)n9rR572N!@Ion0K_@O{3?Tg@HOg#Tz;&`>{5xyh{ys=CxQJWwxf5G_ zDRMoIYl%J17&}e!PFD@C5U%CCg6Ap39xhUp&kJQ-%>6RT-3+cgI72OPzHi>VI$FmS zA@R@@I9Q}e4=(8rP6lKvUN7W&yOV2d4WK@M+h;JU00o=BW1(O z!IG|l_{#*h!;&tvT}W|0@}VOf(Z=jPEGeg51nbIZBZc2;#+FVFp>cSfCoP$_-2Sy^ z)0cO6+ep?cI=}V|F)NB-&&KM#!RR)z#Rs5!Bkfs7Th-C$MV6QM()F5djEzIJwrD`F zG13m3Sf3E8;Z2mL*HrHvWoy1Krl5}YE$=nfwv@6DB7gDC7QD%1{4V;6w4;M>NxMqC z+xnww{>k?2Uc8KSl}TQHZI36Gc6j0>tv{Yfemb61CYjxH$xG_#X!LBF!nxo{62B>$ zuF<1d1;;JP{13L)ORp;fQ^&u5a|zbdvTsGJ0dn%1xSFm*+i1-)Ms$ z4X^v@8^|3Gaz}7O`gbdKn2D$F!-i8~$9&)O4zc5xs`Y-Jds4f!;>Agwg7JuE&(>{U z0mP*?+8EE@EO^l@xB>skUN99CDBpTDzkz#$k@5GVudOkdH{T+@kjOrCijCc%QIQX0 ziv}n|R)Q0k(k}7vpJ%q}cD$;xA2>QE_O2S}VaPGBc$~a`3SK-Gk=%GHI3jl79Ru zjNhjr7b?Ml-*e6wAaCt_V?i)^q+_qLbYrd;d)dD1{*4I_^=}N~*v{eLSVUe0)G>qd zjp~LkI5wx?=>r=j4%a$#);RpcWuJ5B7W9jSUj4$+#8@+O#j14rs}q^-+@iRXxpq$H zd?DkH3-CF`w&k47c@xJl_SBlHFm4w4CUzOK`)&EAqHL<;e%&yY@w3@|8}=awGGRLM zwVwWV02(ou>uRR|J6=t83%2B7lW;KhaB$v;-6CF5IyrKb%?@l`VrOK2)+%`IC<`VI z55L>-yJB4GSQ}<_py!IdEH*9~EA+!IwlLaj7n_1zYzw-#=p&*(lo^#lf!xbMkB=aZ zx0I`i*mbM#)NNydC5{~Ql5pk9Nt#~LtGHsK#F#eZ5r=j2fh^+k)nOBx$l9qa@_XBB zXM2G5?TL4*0K>F|6!`hrHUgm$XLq!2S@L=IZ0hXcNnzIGSa<Eg74YjhqSHA{JsMW$tT|E6uy_km_Y1nM}SAOCypNWF^}|3BnBa<6EB zd2>})C>#&zSC-ySM-(D8vvzK|lb1YCfk}BUa!avl-UygoEdBB?uf-mbb zym;Yc+|U8uY!F>#=Be>!!@7G^>y+;ZZ?c~M2Hv=JysNhg0CqOzc|U 
z3U53?Ho+Ld8o`u~u%7H5z_9pa4Oir|dkiQtUj zj^K>oj^IqHJa2x50Z-p;l zq~J>e{`G>Ff-mK>uCpCZz1~)Pw)lcRd#?Dh`a8lGe{69s46@yhtWcO2*0E&<*FyU* ze6m+L>KRh%vv~>TG(rC++NpkbXXLTqjH=naq=|Jc7h}SDcsm3-}1)|p)-03AW{eA8SNMK`xi^o5EP|T6B~YpRO_uJ9!ZPpQ5LzoC|(z2S4J#kD?eQ zEy1U!EI?No;L}r_etJq~tDd43EWN7(J>`A$lsuoFLVSW&J!KAk@_nD4!uZjdhm4K_ zKg4F0E%F*VBqh5Y=rgxLw_kAd;AquTxL=p#)l=p%Z{kSu>M4zHCKGE`(dO+n-2}bl z0JPHdlBMV+$cSq66!G)W^p-52o{|cF2#)k4J|a4&!`R7(C*X(Rh^C_mhKQY7DPGvl z4mP0G4i;2g3WkUstj5m{wvX#Oxvt2&)|P_Jr^sgmds%E?lwIa#GbgJBWlPPQx5wx@ z40iGrVK(uJtX>~(d*cZBvSPHYdi{->4{XDlRjc-ZFJFM41DIDSNA5MCyBtlIF>g$9 zz2+b5m3cDelyT-d@(`IPW6cB^>!u4mVzAwGFLus0yB*odUj1c`PoIGwwfU87pB{sr zBepTw_teRrJ++Tt$GWB1;uBl0w+U_@VGMkT_C83v@29=1XhYFuLeN*L&}D+qWirua zo{0*|`v_g8Qt{ViCN(~%257oWCc4b6#z1UZ0mQNL=`zft3;*W$beZSSVS+du=*a2V z6;G?nEIg^p=vb499#a||Y#WcvRPfz#z;~j@WO7~p__bPn6s~0~pxeVpe2BEV1gD z;{caTv`;=dLLhCl4|y-R)Uf+TTV>g3TltIQNN0BMy}et3g)%Pr+Nb9TF0Bv4KTz6S zcmTiXiR{Bdd!N*EVv0q_xRm^*?Iqu%>)|EdUqD`sRbl97#9)>@NGtp%;}qGu!C#MY zFi$Uf%y6UVF)_tm$uEbvcrsrv-;=!?bUjA+RP>m}s?j#l4b9lm;=v}NcgNV&v-p>< zp2Q||tDQ^cL1JZ($Tm8Sm3LqB!=`~?m08nidY*Tkrn7v@ADfW1fAz@~H58vU=6%*OkGqh0+^k@$(Pzh#HJX7OosmaR zf{Tv1{Tt)JviL4Bd0RPO!y$G#3%_EATgQ=w&25X))s=z2l5$s8W9IZM&3871-}*bV z8k4c*Z<}YpWXHaWYM|z?mc-cK1xDytC+on|kTo^%eY(X^t1wTHZVsr;#XfN^wEER; zN_2`jQln1-viDrlj=dc^n+>&M4?350*G*KS<^2wQex&{I4~}LWbO&`imwd0z^5%QC zeA4r`N;EQKCw7nxS@DsY-;D5-@Kfq5Ei3oHzjo{(@zpY(jm8e*wS6Nqc1pTMq!C}7 zMZ9|*WtI8w3mFG{?fAWluc4oe+^$$NkRM_PDT3z(&$jsNAWOo{wu`vV^7#y1x_xOSK*Tf>4W{mo_1LG4Oy|O;_+b3 zH>5w$WRCCG{5FCsrBh{W)6I2Uv%5xXV;u2WOg5^W%wS+;9($?4`_1sZbG@=zWJ?Nj zeaGnwrHlz0#FvlxnapOjppr3AGdk06nCEL28Lf?P#7-7dEdGpj*mRvT4q`l8%J?-U zv~k$4p^3x>oF?;N>&=@#gy$`tlq;Q#cVs@Mas>Mx&Wk7*O6&l^olNFZet$^KXTBT! z>{Vz+m9xjY z?k9K|_BbavR+ANKJwjiXH5H-s?=0x?GB)$V#Y&^f7@;CxDOeO@EZM@|MwSDfv?A7| zUPpdL^17Hgw!WNORD1%NTcdwk#b3jsdgt0yRV_*3KIvIE5{zwC#yGeqVqw=h2u zfjroHcXUA%b*P7yHQcX$QE09kz!rU$ITZ z8NGV4v|mHnjkd?(YdiIep?)8nWB(tTac=#8K)Mp`fCqldcO3L*#oWc}W?t+nABO7w za96gwzJhDT@9)fb>cvv;9F(f#sf?xWEx$@h%kbgpW1P>{@U&O)gw<+(J@P)6ekyp{ zK%durJ_Jvn1WzY{r+0LKr)JjM>3EvKy1g^wX-^-X_VVFr?>2aP#oedI(|de)dIUR} zj;C>gr$1BD9&LxGcY>!=!PCRw=^Yd-Q@~RdUK0Na9Z!b_sCnDwCNz3D zQoz%-{4SDn@N^Qt2|hekG(3%TVM9wZ+?CO|aC(NOlTPNh{?3fXRCLex<^>d__~EJ8 z(rS@qS;)yk&W|x3&p#_XJw2_~O*tK&o<8mKQ%{Abr%$(>@y@@8r>D=ict-pFbo%%2 zzM~^NWnK@NGwY?6mCU_Z9c}Pbp7oS5cpE(3z`Hu0UJtfT(&oi`7f*%PCWEJ9D-t{v zEcJk;`n>pJ<`w#JPoGoh7Yn{>a|%HvYr~DLa|(i|{WLsf?>c;drEdwIwjFDnzOLh^ zof~bE?oy&_k%#^`{Um+cAE!scf8||5G@Sko&jhFE@H@2jBu+Q^;dCJOl}sN_SA>Py zn&?Z%=~vEIb({_;5FgG`Fu7Uq8k?;H%$D(u;PoxwbQARV?!zT|9&5n_qbK5fz<0&2 z0Y<->jvad%V`qt9<#?5Op->&4V~a=QD>9qIl|(xBQxc3`U4y?SSS)g10iR`$sAst6 zO!eZk=DQCj3nuEgtl0w?M~J;S8T*A`vdEJP!DZ|T{RO8fzvf#>3_kJscY)7`ksY$$ zQOD;$X-D)=03EGcA9L#4`-W&xjYt+ZJE(nQgp~O3?8^zp2Relk>h-;2r7%=KLi=1N_>^)WyrudjMSJ8Zs? 
z>-PSH{`zerv^nVyq89h_(`_gJ;v^PFaIRzV?+Q<1vDyxc%8~Y6`*8Qq}@nFX|T*R8-Rvf19itnQX-+95|F@sNo!wbRTthPR7 zXTo8_+2Am~6yFsN+Zn%#UMp)&+REos)9H#?Cowqr8#?W&)7^Zp7k_2`>1@B%GE<2b z+e3T&)!R(hWv9W546XYQ36 z-Jt8RyLopkWfUED2Kaj>%$0R7-vVzpe0~n}xbk;rPmiv%eK`9z`f!d{9~PYLFZytU zpFX_8i?gzxPsiDMa8`U>MIZM1yGD8SVdf9S*HwI6OQ#a!A8gTlT+xTc=e4QKXmx_C z&D+tDCxUVKyo!IT*wTIh#=7ZaqN|pIt)*b113Z>7kzlX*sUAn)lC@squOwI?GO7;Q zU5SmVO!Vao%(g|~u;|3Ho_Z#GH;arGoaqNPJJFlB3MM1NHCr6Eqrvlx1+G-ZtrAmO z@Z_CpFgaB)IkwpB!{j%)=LCnvPgV0TLO&Kv*6MNLh5PWq`1(Vl;$!pJm$&rG|y}_X zK2hd=zlqPH$4-TQIzG#OAA+Ne#xb_CY26CSCyuo>K<`5E?L*@2iJsgzF~U~gnzz_T zMDJF7dbjAuUVOfZe7}Lu za}&@;baDqgAbwgBn--4PG>db8>`st(kS&@%O&{pboNKRB;CHun_}x|T+Zcm?r@_{; zP_Z7H7@*O=AuG_9d|N6uL!%ZC(>*s^iE+; zNcNFFMt!S{>Y(HOqOCg^GoxSLY`~r)bTIQyY8N%HKYWj!1%ASwVlP~k!Fj-|2E^?H4u_SCPAEN6r$9Z#Kco`k`X2-(L)~Y=G1DWHi zC{Q++9Z>TR(6>Z4sme2N&RwfmD+k>( zB%rHEpH#3B1`$I>%9p~gl(EX5*RzoL@pj5Ok|R4uDahd6Y-8c793|Sdnl;SJjP&n7 z+Jd-#j=su}{*<9Q)o87x?njNmR(HQZtJtplu|_pHJF`*hEo1Q9OR*av=i<~L*8qih z4y=i{khjMW;IhNd=xoKg%oBbpbJ4_9SR?e|{p1HTL@(df7u|g!bug>OygcH!PwGtB z$@@Cm=FZ$W=8C=XCGhiiU4*}&nQuETRyMU{r_jd4V4XOUwV>B9A8{@1Jjw=7Xm;Vs zpze9Xhk{e_)a8!OL3xuFnH%Gq4K|_GNWR@aQgJKDz$E5G;l(teSxyl5iV9cdlBR;T zc^$mVFwj1<$DI3=;g7nhQQ=xf*g#BAFN zkC{A1@cHKvvCigNQ zpAFq7vv2e^ev|tU=d_IPuS|A(l)!1Z3B*{8REEqx=Go|4kH68^)MGmQ0Dm5iV_%y2 zto53h?EbL+;oaH&lwD#Exd+>CIs7=3a%wmQUy5I^jJxS8n-7c6Fnj>*HXn|Y{gEPx zlN>>OhA@}P7|nx^q=hl={w(3KPQ_x&38Mb%;_)*^FWUi+%AN-q#GQN=ztW4Dk5$mY zz6$nYkLX}Kj6o%e%D(ZPq(26`;^}{qk4I(Q9s9^Sq<(yRzcJJr#Qg+lvo8?(F(!%S zTknT>zvaY6yF++Z>FhEy&bHHzUBH)79P-V>*kh${$v2(YW96GEA%ZEvE(d+*b(d$O z&`D?_bh5)ENsOsW(a{z`dj;N-{{I;D7JXFYxA4xv^&z&Jm7zA*i=Axcj0^sStQBnZ zKJQulPp%KHzs{CG*=tr_&wDrU9{q>%EktipC|@c3LO-&mE1|9|OM0UbK9F{GLJRr+ zA!Lu(%Jxzwp-VDYDl`(Dd5e3Z6JHDukKk8gDtM5qnyf-sC>zaQ#@|!J^m5$E_oOdQBCd+a1>twm^{x#t+futI zc~X9MuVw2bVxx}m*0}sd=QKzC@Abx$sjvZ7)OsKLbVs{EA6*Z>S@5FB#I5}5axwM=HCk_rK-xlR_Ab8;;-hN$ z#&@-rg~8k8tzDZVtx24pEKjs`ggmSC#s~Vb;jfXl{2%^)Kk!%L@<>11{ld4$-^Z$c zB=`$1x8g7IUo~x$hujs*||B8P1;h}ja@fTaKj=$K9ytZ1O4s1rBX~o|l1)uB4 zJi*{^qyHwf(|;5E_1_Bwe^o#IH(1kuo$d5rC-bl0nf@Eu0scahs&AqHUI?G~<8KeH zzgzqzPV{%A|9)NG7Jm=v*FOBM=K6<@zjo$#&V;{jP|x#;zlD^cBm8}u`x^cdv%5q6 z7hcrxmtP%!Z+Y(Q@OJ@u>-d|)xj+79a~)sy=bFs|`{zk3)_W1iD z{Q5(~-*0Juepkm|UH8Qf{qDEH-~T_@pH1+IKmHEq`n$#7@6P^w0cG>oe>>~fKK$*( z^$!()?W{Et{nx?V)0yyhPwDx=U#$%2zgsASKmK~}ce8Hh{<+$pA6#>4{MGXH>A!2p zTf0X8E#zGA*ZbXQYXR40=Z_PAvuMlzCjQ!4i}gSK*Qfvb?9YSOetY~q2*3U)@b~QP z&n+F`Z_9bZ-yezn`M2+dW6{+zP;Ti#~ z5wz=%0)J0ofBvTbmEH7B{I!_Q8~$qc=l`(()hX=H-)8=IK61?;f9G=jeZ}9KDVsn3 zj@7Sy_Kr_BF)nEz^rzufO;b#wn*?ayyLds_V6 zsn7qGlDGDK^xu7)`|y|VMq9t&+VZa-C;qPe>c5V^JO8Kuw$*=U7k+#Ey^MDKQQ+@c z>c9ANUe+FeGY!gR=M8^L~4kZb<<`!v_zSNy$?vialh-TJi;f2VQ%L&sk;b5Cco zKM$pz=M#V9C__j1+k^Y(s{elW%-P|ufxLD6^*q-Wf19{2`r^llzuUk3AO3z{)*tEm z?}|Ttd;A?kyZ$Kf_bl~a@ONeh_&f8w;qQ+`|9uy^=8wOdx&FT5?-P{GAAkR#U;FU) zQLcZ$_^Yiy%3$v4O!)h2>Uo~%Xg!#z2wwn|>o&EaX zt^bAfzwwux|MkCWfN%Y8!MDfXnY8Qo34e3jt^eK1`rmbDv;Ma_`=tN#{_Ix&=l1sJ z6!y0l{57*rysQD%eXu4OET)X0^Js^T`Q|#L)#rO1aP&z(=-K)@U}z(DXo+R?@2ms< z0{P~T!+W{@u5nmBgCBIuS*`=#K-v6p_$B??hr=&$-5!S}Uh5BG9k7N$*r6Y04(g9* zSO@$&>iKPO*m8<>z*-r=;onjQ9f$u&+j#GHvrgc?hQqGxj{TtFMa>VIU)>Kn=IN6- zY;3mvuhq)1GOL9*Xcb(vy4x;DUiY`eiKd);m!_K3t|*r5~>uOdp@C%K$G!>9B& z+j3b)HHml#>xs{LovP;L@+hqI1dVo_3l;>N$n8jCnJ%9TN^ie$=s9We%$P}a2^ zt6AqQWtO;clCQ*jeNPD}S*;K!LJ1&$qZUs3Z5?M?lCo==wwC*Ia0^yd(M^Bj&_6=xg4r z(f1nVt@BRbkRJzqWxe@T9nn|fUwo6kTK`RvI3?$sf5U$y^p!m>p6Wn89AwXx_Vgv* zxyHW{xi6e|`bPdZ=qvK!k&fuwfqbwl=68j@LR<4S$~)&({{D#M!xbITw*&bAeOp@T zyI5&Huk`&9$%oHYo}GNKqhGetH!wHxywdkaA|C|bEFICe1ODC0{IyQs 
zj7av4eU;OU(c=G|F09}hd=(^Kgg%D=BNCM7()EJ?SZ>QA6C$Z5A`F?Bl@t5_^>rb1F`Cq z=zU6{YYK7v(uw0|Ats&Z!9iD=Z4xWdfF8V*c!~eVk;Nf8rK0J<5t<%+hffbqLJwYx zKCSD*d{c{!cuk

[GIT binary patch payload (base85-encoded literal data) omitted — not recoverable as text]
z7lRX>VO@n?6}?0g#rcNnAHG(F(q+G9&qN-x8vAtlW1nZ%XIVETdPp{du}>0vS^-_e zC*qIJgFOBS_#jrmZ$X;!1oUsY3tI_2CeqeC?0SOsgj?0s@;ZJvxC_mAW_=~&@Wv=( z6UJOh=j<=5BTixb)yWuAa2L{2F?@~3hw3LbvfhPXRu?mNqkh%5f%%PrW4)pKt1~G@ z$7cH6M4$C5SS8a#$#c`V0(tJ{%>6<7LNp5GH^5J!jCwNj@9F6*XEYb;<}>J1!DGK8?Zqa)$w#B~ zDe0?=xm(+pk?R>dcr?!1_(5#uGu~QCUnj(nP2J}!oe zKu-vsIlv=ci~b{D`wB3Hv-t|aC!Fc~H%YVl_nC!p?dMScWOHo)o?aN2AEy3w*tl za>Z@!nEd1RV{LVGLb@B?*`NK`p|%FukN>@T;Fv)^aiF6_ziICbG53Hig~-)Fa)iF_8MB`hs!y%e`7(GOm5+-l6-Yv-$#<#Cz&j z>+p;0In1l5XBz!toND{^?0)L_SN#%CC{7!|I>45pYZnk#z&_oQ&Hs5%F8|RUGy9qK zX1KSGS+J`=4{;Wi`t8C)UoYSx7oDRxsK(KfDF^ykorJtPujVV%W-~g(OY?j!)z?6N zo94?3@x+Z67u5TCecw+!>l~l2JbTw4mkxNTbh_t*7y1&d_>eiy6Zl{1&DRVqkF3Kh ze~WRr!?_TGTjP@VppS46;LLCq{M`F|y9<2di9x0gWuBHbL1AQ6Yn|tAjn0#v(_Fmt z#LwWJCg!wb$Sq^}t}4l`VKzRxh4=!tj5!Fwnj}^(e=3Vis(s-{xQb5jc@EqkT!16h zZDvshKd{Q4TS{E+?vRa_HQF^EHU`W&029*hQEq?ue4qXdmeV~~c|M+S+&5;f@#X}G z57bA|F)ge}ZS?*5LL6xwVG7(RKHE76Hx=Nf^bO8!0N#N-_rm(*AttUqmvSCz-~+aD z7ODJ7&5y5VTp)ichQAr`a-ZCxzx(74I7cCOL7sqig>W@a9Z0hPF2(f*!{tme3E2|H z49xeqT^B9OT`<73+WTd1FPY^I5T6lSMVc(xu8zT3H2!N zi<}*%eI*X-lQdS-+SMz^LKApNG%K9PF{~fG8G6ZHiDpC5$I8`vo(+<`Hi^Fe_--&x z0zDsny&tCmzJhOxzkC_F8C+JfaM`_{ER^06vUll~J{js8BfAGJu=9_%A^)Pt1_ zEv?I&D;+@p3j2m7us&?SZ|WnmYGdr;fzlDIorB**3&tURwD>Pz6JB>P=ehJ9<~+qi zs~H=igEWWYur4c#Klo$v$!@yrI|%=$w|7Q+4-O$+b?0>EGJG~UySi&W@xOPl1$STz zCNrNgxbGT!TfNIV*%)yP;XFCo>#t}kUi>n29b8UjO82tf23`<8xHE9m`}iFJT>txZ z_89AfJ&HB3FM~`1moG*HKc75gpZyu>o|5Z^& z?X8Cv8Sa@MT))nltNzIupQIPkWWN6%$je;b zgZh44P+wH_0n=bUaNyZz;j4hZh0nn}anCeUG6nr&armxhiv#~(av8{^ASr5b- zo>SYLS;OY5KRQ<>D0e^4(hd46v_0NBIxnUc%u~T1?Xo+84BD7B@vIDbQ1Ps6b^rWD zhIs33=3qxs(AR}VnN#}Xq)qgpg?_dW-)X>RBJX&)|6Q*HkC?D2xYx;mSe2U4i`CxI1=m7_q(0JZ4nVDWpbhe-N8flGun+Ttj+1Nq}d&I|;OJ^%xaoYj-M~`Qjr?G23u+2_E zjB;xf+fJFnd7%B)qd9`4N5>4m_j0~hNOKR+b%E}p0w06t9vY!<*!+?83E^Gc=o!sJ zbfc$shD?e27sSXy^es$ZG)@ux(wCnENA4)#AfA`R_qz|eD5iKXZHZ1t=$GP}YfGj?t$oXzeDkIb=-gs~Sf%;=60@do^p9K0<)T8zIUUex+1 z*`wHYZ|*Ic_vS`2jJ@ylVG(X6pOaMvetUwLfq2848)Pm@vDWSpan-9f{&2pj)m*@7 zI9+sZ(wU2`~B6!bx|A-M?pp>=j@+c~OVS0)N@wLjf=l=7ha z_NR~XQ`noMyZw$vAI~Cg-VFadZG8~#K=){w?HXa%uc?izq_f%4*vJ^-a$?M7(7g$r zI}y8YX$38$kJ!Id-^jd16dcGdG*WgVHn89nA1 z6=!7*S$0yWz7>DtlghU0+(g>dcp2MRzqGh|rN%&lJ%OK}In0|&UOMH>l{sR*&dNiH zpQ8hjaq)15w86Bmf`4VBvNOOP{i!7WCfhI+9D&YV{1!Z@*gX9W;RAhAUKSgAd8xP6 z=S|_bP~Qjjo)P3FMpuRI{iJ$N4)PMO>qe&rdBtx-)qO1a??HA4(~`K;(DWz@)w}3h z;Y(wqXlK5{_5BaoXZas4zDHCt48H_+p_RKTCU)iL>I`U4X1w(ZvVDOZT;-SfhWwP@ zhw5MbEpv)5v)({@3wz)^!@QzDC)c($Uwb}&@lMVEqYvv|H|K@Whq7HE^r6PIA*&NZ zyF%c>{q+c|OKzoZ=^pK4^7`P-v)=04^}+s~bZ?-03T;Y&*Fv01UuMyl`eq8|m6G7A zP;Yz@7z%L!ZMp&+3{|gzm+d;UO`I=GEV`2Q8r{UFJsbV|JweR1HYt2|`tvXWePV-b zevl`??f&%W|F(I0G=Nq5TJgW2-cSLL4z}qv2eBve-u&DH!EYe1FM~7j*fMxM25nu& zdDk!J+;((z+aTHpur>KMz&+SouN(B$>V!f4RKNSm(T?qBLEp75>EVa+bLAsR?sb<% z0LK@BL*ppHGO_?Zry%|Nz^Ju1(wCjWyKFIMf|V)`@3&XT_r}#6T6T~0rC~g&vRl#j zX6DoD-H%2jg!ijuTgG^{C9o@#uq%q!sm@8+)m`d`WMLljV})miTj!VY=dFH|xV<&z}OEJ98JndF^xMdV$NygSKV;Fxj zrdIviA`|U+VyxE0UPye2y-!Dw->IwWSiyKj??sbNTNz@U-aOn~)~WlCMk0^=rpO;F zGUug$xsrTmv+hu39AtpJ#Z~m@UC!42@S19^QDVJE4RZ;nuwVOB;$Nd}{HqTyYmZB> zPL}rJrS83h6RKbJ9)UcTO*ke|ebBtPVE*pg-g;Drb{iPuXnnrm&3urr(Aw-q#;z?> z=r?}e)t4~$4h~%Ap!4BL%RBiM9&`tt(qB<*fxXqLH9o^mJY0@u%%HWPEz{vs=F#oE3w6hmQ_?QK+a3+2mx7-!aK_Qz z2Se#u>@nJTg6WN&o9wAN*YseQ7k0DPDRknNlX5lTbpBD(n}5;tzSMBg9NjG%bJw3} zxJv{6;4jQyguf#GhVeK2J~Pif=@c_NzG6bM_s8RpNxa0Jc_X53*Rq|Ao2GJAluNlIL6WNpUAjfB8<(e=XLlA{gT~N8NZb@@r1r2mdT-w-SkP+4k?{2>3 z_VvyfGq;&_M+Ti;cD3`!T`QxH07Dv@+Wl8Aau?2*S)ZMU77qMqknzmQibsGet#2Ec 
z(-RCjhf6RpR_^2PXNg$`{CyFV3fOznEy6kEedghU7xerDGX8(U$#y?U=p*#gKJ*_P-9WimaVV&HhhKmoJ)DRkXj9mjYJe^`4avi*e&giH%lhe5o zrdM|7y>Z;j%6({!%%8N=-~GXTpAUE8x_aWBi++q;e4-pjJh@oReA-*+Q_(1cpDbF^ z=Z*WL=gZtlE}FegnJ?X5)pkJv9sUBI3;X{nc|5qdUkNxx^H=_!uiX?bx$(Jhi9I_w z;K#T5u6kdf%qy%Xk&hxBgwAe%iT40c#3k9!K0IBWoRsbcN5YkGlmbVR?{{CwZ&Y8R z$g|?UOGvx$Rqo!PZuCLFeOPr+-(2_zY{8DHBibG}=Oz?)@1@Nc_px+xzJ+3`QShlT zc7D}CRtJWn>`}M->Lcyw(?h)YY1qa~p=19(Q zU5~D-9L>6*+0sQ*%D?fy`E~lvn&S3S?AJA%mEuGuq;oZoxSp55lHbdn28h z=O#i+z$f>Zs6PuBi?F{B*`3fHx_o$gCA3sN*IXa>_fjs>zD>biG?8;Q1RHw23?FL- zZGC&>ad%b$cXG5n|NG7wV|r=xt=E7*$v#E)MC~30ZzeelC0P{iS!{;w*4(7ZsQln^ zUq$b`;36n{7Vp*U4OKaZIwO?9woha|YnSd3ZbdG0>?d`iX5$1i@rmcqPZ_?89>Tlu zFPvw{_W}1034X2Nj@{_v7WiY}R`i$weq-P_34YP@?9GkbIrXcJu|NinQut*1Koy2Tn0MhA(XPv)Z#k=-7>n=I%bv}T( zV^3dBZ8y+k0`y4JAN4iG9MrVMjU6wBr|muy9?aD+R-NhXcQ))})_!a0_$X)6=Z2f7 zRF4xeHx8o?^-r+gKwYKORRu29-tox2))Fi3#~d^^*V`ZbYxMHvq#GZ9jrc?5@atEw zClT68u~#V;oESj9Z@78d-D#|^*kB&?q z#u;)uFT&O?!RCI6zlMX_^1JyR5$cV(Km51s?zD;6-2;z_k62gLwj(mFUG}znRX9Do z(B5tjr?=OPNGGwk+UMN_jN)V2+weksKi}qxCvL*_h`z$ZS>U!XmLNT%efY1TgMxj` zsz*L;A>Li~_ind%pVq!45@G#Cr2QWFK=e8i{xfFUja$OgtnO60=%IbIqQlX|a`c{i zEn(Ml?}K(r4xD)7lCkEgmWX-lNa)q{-gQqK_H$1r#23t&4AATL;Sm!etGu0pAUDuJCF}o0Ph>*dx^PJ z`8jt}Mkh>9|D{fi;DBq$ZI7-n3S@ z^Y?`R#NuE|dD-9l^{D2%RG;d|QJ?BnUDcVX>1U}+^Z^%~%fosZe{M}Q`Znd_te&+rZ%`*8iX zaP3Yh!1cKH@6k46QH$#f%E*-6$bIb^{GUs}6d%C79 z)S>x2?(Zv?egB@uI_&um&M(qA;e}^|NiX&7pZ3oUG8?@$LSLY-fgdIxHpM->SDX%R z@DVdb^Agd>vV_t#_UqKyR{8v!;O5P0#(Xix6HCrvO(A2yFk`<**<&yNP_a^t@8B!D zSK81I`Qp~+9AZ9Wf4O`+!J?j7FQO0-933(TzE6W8)N2S&{^p=&4;@RJSM@T=p}g;9^ser z(mU{pu-C7$Mn&^euEwlu8K;j6ox9F0h%a#}}*J+c{Sfz46vp!TWgHdHZgA z_FB0cPR`fZ;tk$6Gp?k+oL@P?@>wJOQky}n2mi8M>qaV}V@Upj>Ss-e>Q`R9YfYXr z?%agRC4rTDM9LM1URJPX$jsid9$Aeq2JUXgI6*tw@2#_{M!m_pIqJ$4ACu60y6PI{ zr#XillgN&pmk7!Puzldi52Bg}-HXjcTqnp5$wW;99FhAiwp6r$1>f$r7 zR;I%o!FYsw0g0*IG9l^S@&apvo_q1dJ)C2+NItRTu!p$GyZI0H>hJ4w^izFlC|DQefMey`UmtS%P9MHYJkTjM#n(d{ z;a+$+VmEQML39cFo`NQ_nfPkO;z{Lir0p1II5a||8gP3$W2g(D|KK`;^+HwHDt!}P zmm)Kg*>2zo=2-2yIE>#F;}?v!FZ=MV!LPS&d0Kmy52MaZb*U>vUFY%qu{J4(jIjP^~no*Mr&2{UElPq{+>WA$@1kJ*z=393(zNa40jqc zE|Dx|u=BE8qMc}8UO+o{GxqCFcv(2p{yA`FVw5M3+G*KrdVeSQfICE-9^cW@&8Fbi@zW{ zK#M)x4!BjIpoLUfcutQ;sthWM*YHJu7U9$dde8HNaq#t{FO%LH8_u_{*rOc z>jNH|56(n)>BE5k8=0dQj|6#w@8V&Hc(3T@uvP<}ebPh^wP(RNoCOo`!cUR2K#qsv z1!%YNN6G^qcxz{-wQDcs?O*fuNsF)Iv@bs39-1ec@1hO#@47K2Egl$!+}(DUY1STC z`fc@Yn)es`^zOv}XT7Vovaengw|&ZbeKK!W^{8LYo>0#pgb(pLv_03StHvTb;fs;z zw4Ha4Y#a53eT;o3wm15G_dI=6nVokpXsbHFY}^Y!ZHtU--wE%DcmBfn!8|kHuUliI z3;1478wS1pmeG8z@PQvJA6ULYbtaaUFFX#Jk)FMbveFBjtu#^n zQ$H$~&6@WvaxD4zE#C|3c)-@d-RY_$?AMV}9%$$uaBe&Y|0lt>L6-b>y)w>pTjm{b zrgr7S3+4=KRVG10&HqTY;B&jbJmBZI;jMt5g`*1Mm1-+D?3hGy*vD+ZD)L{cEi9|&;AI( zF27sz+B1g2O{UED^Vfbqr;tbGA0rlIarAy7t~zc2-|FK49OXP5-RqY<(3Tz7{s-Qh zpd)^W9TP#z2V&g8Rjs?<(V-bXe_nO+P5gF!8RKZ+H_0t0ZG&gKANBenyZZ7H)0;b* z^T!hINiSc{^Ht`S5CNk zJ7@1Y@R#oS-o_YaJ9cIb^+Y)@dOJKEMKC;~B4bnPTodbIs@Cp}(e<5NT zDfSk|R!lDr8Pg%3qwqZ1lHcPWj5EDDhj*hfn+`$0O2+0=$1|K^E*x<$PkS@{KZdjv zwhY)RoLR}>h$lnSDS)G|N6#D!Flua9~gUM{{3?3 z;Ej@dhjwi`u{r4<#r9qNqr$V2gl8u{@C@+6zA1)VH7&_m-i!kRobW?sB?3Oef?bQUp+ zDtu(%w)Y3y{MBCmD*SUt&p|!cdFh?>B8F2T>9RJC#|@3dic zQP-T-dz$HR#_&#k_WexW%L=|<%KK3T?_c2^diIt7I`8Ux zP~S@4W4nX3AxMb*hU>6@9?F2u=qiG%99>XV&O{_13T zTKQLzH^5h`&bnZn5a1<7%s9YVE3xbVKQZFW0d87_e;+R~)>8#I(0Zw$|2g8B0gN%h znEiD63$*(KD}x2FyR*}aNtZ1S^4mUT4o`oU^p%6tqqEZIldd%!1M(*iOP@`;_5}?{ z&&^7oPI?{befV-_rmIQ&OhH}?W9FFjiKGX8(D{ox2Pde{=5dcsAE!J%-F1Gl#vlXo zI7g++mB+`CffgUnb7VTEJU+j<#G5~sCwWAAlJfXGs(Gr9<%xbOeX#QQeD0#>l}EG| zOd)h~CEw@yFv?DbphuXr!F=@v_)5G<9^wpx_!GV5(r58y%Ocb3?AD&Ml6KCJSf_p7 
z;vxOzR}Jf%zX{HRtwP5tRvoe(-9j#FHr}qi%H8N z^Ba)8s@chSGQ#~$oM~ysE{I>ia~3_=`pZ+TzQo_D-Lz#wx|KBatf5SeO5WGF$E4li%nucr zgDG=gYE;#Iv5}$sQd=H9F_v9*qFWu_=T=AdNl&#L1#Pbz)z)%}>6NZxEd=rAWQQ|h zUObB)S38YgBqnhVK00~aI`}~SmwqF!@DS)X^4`)&z8d;kCZ3b7Gc~<2?A59pMlp9+ z`5Z8%-Ag%xw5g`;hB|c8k65b$kI^T~Tij{u=`_(a4xRZ-!Wq8qJKk~_a5Ti>OuIR0X&q_|4!lq;sxMe zRCg?C==lsfQG8X!H~A9Q*OPujcO1#unFsL?7Q^2T^`+3i$`i{ah|LzY9q{FS>(p1# ze-U>g81|2d2U3%J;;qm>#ytv+rYFHY8PGnwJO^!U`p9HQ_muwhMsU5d%rvX4_{iE~ z#`=Qa;^y_!S>I98{#uncMiP&TUt^3vx}zQ~OBw(EiL+;eK3JV=;^}?B*hC*vQ<8}n z%S>+}uZkZ#DI*;#IjiOlk~(avrTY$W71&yhrD-oJdtU~8;IdWU3-H~j_X1p}c(?c; zgzu2@A%k_C6I_QaR9jK(jOy1|Jb>ToW^Bx#fPWG2TfWp-jkAaAikY+LY<2BhvT|r9 zq^qGp3%neP(vN-By-o0Hiv98i{^U&Ow|K7g^49d4q*;QL=2e#iO>^Ya9W{->6y7*ZL6--vx8{j z9GU(Sb!ktHbW^S-lx9p0JR%^g*8y!}>yIY?roph}j!n-Y{r-Y9Is{&|Uo=F&1a}c_==(7Gu5*zi{3@pONIyJlQ_^S2x#?c`NBgk- z{;u`ltVq92y5^P9Z)^1}mYrw$?J4HqKD?%*t?(|H5@7r(^F(fVWO1#@#+_q{4J^lo zAOjm?#PDa}GxJ;<{YD~pxc9tw`qo75ONKr7+&gmUlZo6C@3~@{lgM!wkMch=KH?Sm)6qxhwk*%D;s=vnzX3m{ z`vmx$Z?u>2A`P`?S7KRPzbsts-xpHm&Yr{DPd+=B7Sk z+s+=^lX$1p?xk&a@zJ8#4&_TuNyh8e)Y!AjzBilu-zJ;6`YwALo92xT1#b?&#{Cho zSA2mrKO$y>tyL^#8|yGNHob*&1RUZoJ2+R$>ftat=<~^2%!V)DQfi#H8iCEZoq0O& z5Hi7-^6?ECy9)MhY?1+2o#L0n=`XssXj09}EdH_TuY#u}^VPYc^b+`|u>Udg82lOG zXe7Rn@+)r$-?ch3JpFz0=i!qi^@}Eb_#rkWzTG?S`UHCONzudHgii%c8nvgF{Qo6B zyfiU^CMX|nTe#YM*oKSY zqv(Q}{4U`hq;HpG=gl{$RjchG(4&&~daqX8gKis_Rig_viSZXs$V;Gmb4O7fp#M9)TSX-mJ zn=~J?7kJJCKe{&wKZP~XIzRpQ8`MwcQi0V=^X^p&(o05lZ(NcdzaPq3f6DtK_3qyf<*YyT{p)q!NWuM3TX~l+vIAT;Fn-9Q`-51Z_E|Nc z8?(X(^o#Kg9sJGUym;0pw>Ql8Xx)(X=-6GueP8%fJ0^zOuEkdf@-{>%PkoeUf8rL4 zFP`TYJlhzkcn*Eu70ZoD|DCxT@rwNewkYe`?A|Y6a)3oVWo=dd`(5&NtxfRZDFvQZ z+6%9kem*hXMd)OkC&K+hv7YR$tF?Z5wE0Dh`)^o_x4a5G+w#C!MLA>Myd|hJcd%Dh z-*eX_e;K5Ic+KXv0ABeXg8An@%vv84tc~dbx9ofC72m$izJh>fXEb9+fld1lQnSr@ z`NK`GZ1;+&JLM&K3?EGEUCQ6#8DH9-sn<~L^}RuB;-QP$x4sZ}s(hceyJv*pv+?cE z$WM!y5An6iRaf`n^6iPqz-NYX!P=R^bu&TPtzOyRz6Dv ziSMBsEDY2?fpZMN^`e)_7w}PVA8?h)&ab@JgI{MhCdFUi+l8*;pCmLD4`%Rb)UOzQ zx`aMe6=mluUsU^B$S3%fubcVx${&a79rEX%4;}4<&2P2FMg0u;Dk#hN#Fnk1tmbz` z!!&(zxNl`}8P%C2pUJ<`Wz3g;M7Yt4xSF67#0_E?E8n4$MdW zbtpb&;T3=7^L=%{;-^RHPf(xA|Jj!B&nG>Qdp=_O`54cI`fx9ODx2i$e$0018gK3( zQoap+Gj}vJK_3=@f8?dT#r0(QCDsA|1RWeU!|WZdGr6x%--M$g6MAAb={qP-Ou)`{ zTe>@At=}fPA8hHab@iM_!d`aEQ+95_(|awCKuhY1(yq>7fgfxCMBhujaC_OEK6 zh3>%8n2&n8GHHSq_Z{H?xD9aUfX(5~${6t+tw{*#5p7iuXUKlM9$%jX z^6ATD`~Sz>xj;u*oqK=3?<5ciDkkAh$xJ}2skJB}0TnYDz*@1MimA4oo@OTDR4TR= z@q%E@Ou!!7*p?9#3;jysr8Q~&j#{For}3l}p3B{u;@~ z=$X7_Rwk!!n79oer^-CX+P}#ywy~U&r*prrHPYO+`@X|`MtoZM&a6F;Z^1ERX0%?C zdxzgItxlD^9!8HmS?KLSr?hW_>}{k7r)|7^j^DgDjd%Q*GDE9a?2TepPO*p z0j@j1b1shW2gh5%O9wa}1CHNeY{IjT+x0~zsrPAkN_&10|5#mXYX1pbr;cT| z9H7reH?L+7I-*BKD-;ZzK0E zdHn`#zi+Tlu$X?)o4Z6~^_$3^-6&&gM(*HTBp-{4L=QxV_T#(jOvAqNW?(*z(^zMt!!|p^doE_qi88i^w{P3s)YrAH!4&?u$#FM!fvy@s3FZ>)UQ{M?I^$&?tbO1}ro!R_I%`~S;I|P@nvP^9@!zE;y8T$vg`7!U8i}%i*IJ` zoq_N0e0

;rVKKn{zquF3-1dw(k<3@JD$6yWnWmaP!i;l>0vM4XaKyTW$}eW^eU* zKQN*Em9Lr3e(F~PcL{t|*`HcRTsNgd@T4G-#+VmX8mix z^2*o3#IQK+TalkI`%Py+i>=5|4F0wKsqf{(+g24IkNchE2=Id}?b4heJMtUQA|G1h zLkGc>hL>fFDyNfdQSE!;8pV+1DzzCj?Y{%zZ z!S^1%{~j^QXOlUvia%Gj z@m6dgt!1_vXW0wL_&^zS%yaC?%GOgm?i%Ov2%mNAL0-)KV7_lI7d;;2y+3}RuXW$# zhryOl9^?(w6Fw4N`?jsS_+E|7e;J<7m3Q%fwNq&C4?W8FW2+v|S<`s-<`H;HzF_Ho z>3tiY1K-~}f_taA^WgwA^5ny`KMlX*pDSVQ_0j>#XdYh7ydLFF)zU1#uRqHqt^C~Y zBqb}GpErjJ6z@Tq{=kIxZraNum&Sdi@r9d>dyVW!hk34^_jR-byepKZ2q-|NC*{G)Rg#(+<29{i-WU(sIP0rHq2Gu1!mY!EOh z*H(hx_2l?`^(4;x5F3^v*FYI{que##kNqTD{gv#vZs!i|6h3X{-UZUrE@$t4#Qvq~ z8R+cSX>`r9D*~pXg?tdqC9~I{V=zi;-wG5jSW{?{E%Xr|Zf2*u*DR#$(reM__kZtu z3+vGVtH0NIeeAaTH--8eb_WZb!ssxwKsi9*)l=h3%4S7?r;a&A>#*;0Ziq8Qv!!c2 zJ6C6SC$wJ(Pg)v+6XDyZh4vdlqn*n`@V4+GyE8X`KYdlH-W}){E03w-f%WDgaP6-* zS7J|`nCCrOYP}gmC+XfSYz=$oH0S)SUtH(+MAvHgM)Ph8{UEuGLJ!_mU~4XH=e_jm zibee!`p`|{L!Sr5k3JuEv&X5J$)WiF4$pGwCO*h_OvgUtz<wn7k(tzhL ze;K)fD%{;UCNEjG;#B^L)>SA@_GhgKP=@HAgqZi;^Mkyfnnm)ZZ0fIBx?v z{ou_ygB-v|ubHNa&y;MQs52=M#-g!5#;15icuNznAz#1nAsE)=tyv`5=%kF`Gwc!n zp6|uC=1rQ-XL?Gw^EET9e7S2DT>YA2NbDZ^TBiVCf4Dfg9-cafa-wf&M0}xtZ$jSa znw72O@OhBCW6$Rd?y5kz?@0`1n0Yca}WDW@QrXT9h56$G4cV3zw(J85q~Y= zy>x6hypu-H78_@QWc;c>W@bLj=VsddA)n%dC+MPStZ$qzy8C&8K z&W|}us@5Uri3s|8oat-jPF3Ok*s2|N{&7CXJc2AOzb~=zicDsU>ezDmOR7|-^}WnY zt%FYnms9b-PaB1u%boRZoOla-qH*+GHGWXtT?k%lyT^Har8b`f1}9q6H?^RqZ65fO z4YnU2%(drPe}Q*DB=zI0iv@cRjar8};^YASP7tE&pik5f{ezxM~mbk+?lm%@e z=&0YVrqJ49{#Xogq}})92Vp-V4IFB#6gZ04uG^@#n=Y#9oC?pZjyS{*1(rtXyNU5O znbDQ{jF^v{+r(PDx`MeK`^Se}aNoyRG=7c4!@<5;8h4uKI!~gx(}9+6&o=AT@4wUE zxr|fifm-Ot!onUUd-Ao~clXB}rd}L)cJRz(|FqsEcHT2)h0`}b1zxcq3vX8ph^o`l+ z#bEmDx>3WLc4ZpQ>N>W{9JkjkK64iki5e~-TE^1_EAJJ)Yi zP7d2A{c5by_cD)bP3Mo(Wt_QvD;Ci2|AU@;^7V7i_GC?0McckjDZBIVNX3Oa?PCt^ zoh6)p`*S+;VcHXSd~0FNG_&F=@R4Uq?78Izbjw`qsjY5c>GHAbHcA)iEIxLkV(KjZ ziC0U?=d;ZvRvO#T$N#ZaKN-Ng=iPqRE`{f~*Do0e>TL3u_SD_zVQ9M-p7%%Rs!@tXafd4%H^Z6gf|L}+3VxAgDn*r*IzfJ<*tAP_= z&!PHV8!WyxX7QQu&HUDdemnFNbf(52`g&vdjnyrBmcj=(^m$#-97;21J;eNt4ms3< z-BmZ5cqGOp+WGr-(^k|!xuzhsC~rmClM&>uk@b8G+=+j%1rMe8UX4#hZ8e~~rZJDP z-q`gTevdWqXE&eHE8Ee3qU-60_s;qu^r$w&t)HnNpSgi@ji#U{#u#k7(>XK6xa-2^ zPzYUxe%{4-7F(aYH`=fVlA$m*z^K$8BK*6fO`G9&Bfb|0cw`5t&VGDlWyG|pJo}cN zq3I?WVJt3j1sYRXyzWW+d#yRs@J{s;Sr~+a{k@E5MSZXOVf?*A$G`tbW=jL>bCsR8 zq<+PkJn*!nY()fq%Ei5jmT>e6cKNH?*FYDvg4@uw#7yw1yjJzj+A4?fRx7pxy)*m{&87L} z`vSw5Pvg4|28OpAw{(x%k)Hb~&m!m!>AC{SrnSbKYm%>Xmim=Rrt{Te)3={^m;$x;Cg{b1tjWvYl*fLb*Y3{-WXG2q`AW1GV|^*`MK3zr_njQ2Ppzx| zh+Wms|H;@`R_Bu2Fg!kmfyC`?X3zK=iFAx|MVF%AV0oO z!CUJ;EQMd!I-EgW8%}u)x}Z z4cH1{W16R87y5c)XuW8E<_YE2tYLgX%7z)AVDQVUy?*#eI5K8TSNwC}4Zim2qdCnl z>%*=(-T1Bb{FN{EbC%J@YYdD{b)4ni`#)vhZ=(;%sQxpnM{u6Nv$KXlQ_)ZQO8bM# ztrSH*wj+-o@8-4F!8?c1UyC^x6k@H{Kn#S*zN3YaFQ+1y%jv?=7y8aCAb7jDp0rQf$!9`9Rw!nWRfZM zX$)z`TFrh}jJabA^V>S+u3q|@5RUI!dRxgRjn6wXJifirDLC{ibcphj^uUw*@Y(zO zo>lOx#%*=!RAiEVw%nSxX*sx3TtQXg?v4BLJO7D#^|y_qT`#6|kLxy{LEZK4xSj-l z8ttQdcUKL~=NM>zTQCEw?xRvHh0Y1Z$SHyjI%v`*+84RJj^_#9&*Rw|o{dJ%L{rht zmW!4oRmU%zLVtb=-B|}N!m^JjyNyp-G!!8w%-BtYJzzxLi>@_nB;}n zf{KOJ{+IMeVpj$mIMNnG{%lS~aN*xIg$=bV1)jBzV_oR=VaC|~ZQEa}Bzb`G+wZ|K zc2CB_%=6tmuhBTs9U<;eEX>ijQ^~m`z0-pH=H5eYbA4z|3f6SKjXu&|E&O=7=pJW` zcQEd1>M|YqDdFw7reo`S+9K zD_ypYG1_(+qvi@r2mcwkeT8R?XQ6o0mSfE6A>(}M3tCe=HDF)JHqe@=o3*uz4i>$I zQy2X~d{<8)OR)9G1kNmYvKet++j^@*I=xP9dw!fjxO_cs=p@IycbDDn~D6@Tp%;(TribbSn(VpElzm4z)x4BpW8wch{!Ob>SSl!aaU#eQ23ZMD#j z#w1@&?sN6=J>F?;9>G=yj-_GN-G9z4$DF5f-=&;nxKuh5yhY&^<=!oYt}XCODR__Z z+m}7juoS+EsthtzLm6M*>^t$9UmiZwGy5Kz4}6DD2EMn9v)>hm<@3F4CGq~H@B}t- z`#f}uY~uacm#zPeT|5k1cON=jHnHsEgV@JI?Ba!|^Uk-6BX<5Aw2fcG4tfRK_<(HV 
zNrATERfD$i|7%zdR1U%-dc99x;Kfqb2XW*?I@;$|^g%f`dU@7|(SFIVG8+ycU+8z% z59D!V{Vx7dof!B~y+4ts_kY^{xqnCdzoh;DDL!j}(OdHzPg~Jd^b~EiKJwQ*uc6EL z4WVmt2rh-s|CjK5Ig4Sl9YD7aE4aK3JNHnC^FE<;QF5wTz41Qe zqUPGc^`gsqQTbDRJncsowt<^mS@;#dStnXt3TMKX@Tc{}tKjT)aJC;=cule}F3>h_ z2#o)~!8dsbe4?k1Kk2E~6X7palswJc{yzNeGs0hfyYz=}_yM@HdgMfSlMeCM*YBXm zecSf`7nokkfl0it|Np7HpZDws>yrfhBz^MSyZ>{2QucSW|EO*MK5IYh`e=Vo@HLU^UlbCq}@fqZe4x=k)u#PwNS5y8D$8G*3|MmPg;G0^**^LKpJ!w;e z8PQdmDDC|f&*sNVd-wCN_3#V$z~qNe9+Gc-OKaOP#GVYUZ^U=|uMT$}TlMFM^66V^ zRmrZ_Sn{vD^8}x(b41+dKtqJ%Jwsk zbd~9Q=eDt%L~r%0m=pD*{$+!_!rr4`N}1|C-PPz&d@`Yc)4Yg$q1j!e5nqg|&kNd{e>28TiRua$K(F|7qX~G42R9*&06OcbLz5pBRF^MS*;Bt_FIv zAFy|g>7CCSqSTqx(+yrU4+$5?`)mGe{Wab6y$gF~5&4WYzTYywe#Y0!*lJ|kVYe7; zeT~iLcwuZBqsFJPX^e{kfVe*iWjc5uq-E$`lK&jxt*A)VdP z+;8%^*D!#7KPAvsc3o{X6@CO#O_?cStv{8^dPs+m|gTm^B{xBy>1{;^eOQ<>ww+Y#UsE0YyQ4!hdeUi^vry7`Lwu6<`X{|K>qRR+A{}d zp8DXXVbDIrJ951@Hz3>9j?*LlUkrVx;j6wH9q`ct&h$e6SD^Q1eBq~$V($Fy=7|s7 zI&9PHrl5Q;a{pIoKc6zR)1~&*)&qQ>XW<~GllSB$TV~AWjT&Q+@%#7{PKQsg>CE7# zco9Ag04M9JUFfETujNhMQ~->tk*{I=o;C{pqy4DK+a$ld0oJJ{;kL!%NtGeb%o)Hu zjef!HuFd%BKaSk^^-eQWdi}ccPhVtAr{GXu>eK&y{P_UWe!gLrQ5V-hF%VSv=ZW9B-8Aj;`7q_z(2ziFH)INypR15-VCqVRc*{)RMvrS zRKBF*_&R0uUB)xpHfzyipZD5UyDIOu9ip5wq^`!S|6|$nHNNjyXTikdnxnNoaTu5U zKT-TY+As6g{(&AQo7&T`eeof8KKRJ|&b=moso}m1)_$G;j1Jt(8Z=EFf&9@+s`lwF zBNrb6dWf}rc@5|Eg{`rM;RY`W9q5@+V`bd^i7OZT5UxKE!}o&)f#@#+kT z^rgcc8Xg?%E#vRtUR&$uxRiSGU#bt`U-dr0Gs#ny~2Z;jt1fr8X3ECa`=7{ z__9d!{RjAVD`ONN>zUqP!ZX3B_rmKI;Q*cq9jdEz(77suuWV-|H79n zS3U5{$9*?Er}6l{?go5J@od~h+|mahKlE80tszIW(;UIaaV6u4p|@{@--H8nWLF0{ z;1thn`TbJP`rucB-+NlQJ4n2){l8)0>X)y7ty1OW$BJ>MyJEuyr^fImXK22W{Y|d! z4T3evZ^0@!w_-zXXa2j7J*gc31MqiJyceD?Fz3W|cN4rFf@dU)b`OR5G|}s- z-MgZudXHe2?WJ-$QzT#be~tBU(XF#B#F9Jg5BoA89U?gqe)kSDhb~6X)uMll;-U18 z{d~P6E-R@z8R*u;Gh=0JeEaWcTkW#n-68!d`Y(sx%!QVhZ;B*{d7s<12c8maIzQ|% z9&F}w!sPMLSRCNjcr=Fb=b&3CC*7ntGVwzgU97r3PE$H3178ZS#KHB1`Aywjx`!dG z_Tg{EDM|O$^G>={y3o_7UaXJWOk?-?{T_v1)W==IU6yun?43bA{K#>9csk__{fpN{ z*D`(!?swol>6H-oKS-Z!fY!SE-~eN}8Q#%*pU BTMJkc`w{e8-hEP5$@8wKNZ|4 z1_|G$#Rs@KhrBMexxQH8vx+zt*{ePt!_wRIt@HYVSv)2fu#sL8eFTH{#w1J6(9h5{ zxj#pifk!XK%FMKiwAKmWBaL4h{k)UdE-Rzn-0$s&3NGnq@sQRCcf%`R zgpV}eHge8%Kl8tI;9_)H8St`q*;NL7i_vFg=o*)}w=(pZ)&tttEJLT|wy*Dc=ldds z*I%vq`$9~Pt404}n=&7AlwbEu2)r{SAM&MU^W<3gKO_L3d;SY6__ZX_0)`4~KVdzs!ip7z$E z=f`FHENmqwBsvjU&r_cap2XJ%}L!|A#AdLRUE_sj=c9N=qb1y z;1gUS_U{${qTC=M_Uc2-6)*D}d-tIByveDI^Z|5PaANbC< zkwsh4bp&vFv~|jDKlCA+Z{{5Az0=66gpIb9Sky3Q#aoS$YMB_0-k=gv5IoB2Y7 zIm;aCD2i`%(T~gNTQO_mr!#=@%fP7b$lt=Um%^QHCNndJUxar0f}fYYFrxj-pD^oH zE=Zr+<7kCH4v)6yQrd&af@tE1uds_kL2qyRpzKrDpsp!MmR{g=1}bVg>%zsof;9@Q zZX5>Qf%U#&9=@LB4h{8@+jl$twhv1n%1RnVvwkQUeF;t&vIzyi| z)_x0OcLgt(%#P3=Uov>P>HF{C4bj&=?n?7v(ca@$?w_k+AJ*cQvpLcean39>qMQE1 ze-8}y{kyzpUX71lQW-rR`vm^u491oa_$;Gs*($PkzZnF!d}5bePaga^QFCE9&x`9Y z@Emy)x_o*CxT|U(^hZv&2)@uE<|FLJ=;@}`<{rY{ihbLo)xNjb7rNV{?HtY|?5mdF z%Cy(}{5_Mrm-H{WoCa*_KZIYYfq7Xlq)+V~h+}L;_I76F#>dMK7`Je(c}}`Pc=mB@ zc`M7~;tkP33wR4S1T}oh37$Fnl{SxnI-#Xg(Oq( zH}gVm=%0WUo(cVv(Hd}Bl-xOAYPPY|ooZ8j7>XSqSFH8QmGGvW-^eGZa>T$F+nAug zjPv@Iok)&7!4LszSRxiwfJn#v~%9DhP+A+c_h@fFON0{ z5BMkpX5q4f_<_)9=1X7~PLcnfOE~MK`AcyNcCQw^KFb+)wIi7;7mnc9Fz4dLH{u`R z@_!RE|2BMc13EAb54D_vj>+<_-*<+q?ooPpDG`;GGMF>jF5cN z)`?`moO-+rygb5m2!6pjA6%#p$+eGzP&!jPf_{WA;njf$eo8sjkw0@5-|PAAAR}S; zF~aFD*oZBC#oEZ3-#=ZD0#nFCOGhQ$6@rV`CE7`eWgpgZ%JA z3zKQT&*HSW{U_PB6GL#kNI0(lavgj!9Ks-0GDL2hV$KF?qAYchnS_>Us2A- zqj1^=E`8qk1%2k?E%xua+hIiel{sy<3^SLzv~Bej_dAu*w(zL7RX4YN>5(Mu?_}JW zz_89d%!v*83#6P;LBy=ab1{`!F z` z_=CLHI!ig<{rg9hZ(lj_z^nBQZoL3l&I6V(^X&rGYK*H9ID8lc+fXpXl{7gC4#9{FBu{TAhta0gsJ@|w)55{(D7q@UdtQ; 
z-Z`A3wZtP6$%7dlj-U6uGb&5XEpI+OvSwwh5_^?BmOMAUa1wUTR^mgD&)O2shHKm( zHs)6K`#JiZLSJ7Xj!X8~wB!3qR}J>XSvA|&Jm|t%X&c9&vE0EuJQ~Yy)PImUu^L=Tg<-oLKAGtP68#arXz)LOJBNJ_LR7>*_3i0$hjx&bn`??gQ{O zvJ}SN8KC`#^!FdCb1Qm4ycCDteAbR)eAwM$`iQ?E|7KZ|3(F) z)|Aku=BH3s_WXTT?+n0-tV!ovS&gyA%{jtn(dNp7_7Q)-k*asA2ne^KWNO-kTDA$WX=imxrj0+!Vf<4wZc&(IEgd&9-jX7S;oVh z>(OV5_3M3@ULHr=A^H{08<-0hqeIDqRM;S$Y}`W4X${y54a|FYa(AY9-?p7I{|wdj zE^UfdDkmF({6p+x59(0y>C4ORAJ^Jb>v9p33Y zJ^M>mH;;n%gTKhk)I54Syqo(jpLgM>ymx2NXYRZAXfOJ6@7)E0{S z1Y_*HghzYLgU0DH_-7iS-97ZbCKc?;GYJQff^lX;`>ofl9$IN== z=#cNCkIx$56wD#+1pT+m$vrpldt&kle#>Ule`d$<<7{8;CwsQ9XZMm9qr4Zp*SkMD zP=4Q)W((^Nt6$>SI&sElMvSjYFpi1$Zryd%+4-hy;CE-3KHmVSC z8|`)MgT~9SIp$*@voFwiE;<)G_?xufc#&rxH(tU#iEWjD*EM&=;diwwd^Xv(UCtgl zJ*FkGF^)|vJZTJq+qa9uS-V(ca%e-mHG;L2Kc1Fs-l2DA$ZuA|84_Z~vv#rNmmR~c zZQOXFH@7uL(AS>_AHcC0cm#*wV~?^+aOJ|Ie0-eqTGsL@Q#*pa%*Jx&LhS2$bi!)% zf$B*Q+X0SEA83824GwrIhdAp+D^E#*M&6pVC8uML3AxNzUmkZG?+lJZuuI*n2T-5Oh<8 z>iA_(M91%{d>P~TG`X*=J#2HrFWf|2fli-21&f01_Jq0gRvJU*hI9K=Ur z-uv-ULOyu)#s2u^>T3^o#(o&B>(%GC;Ma>edsSvCGS;q})E=~U-FS4Chv!!ElC*KB zf!fv{dgGPg@*>umnxBM6>~{4h9G!@#E>@k#u<{tTL&!{;xsxsanVrKZAQ zGjr}W>D$e#S);y%{hDb!rA;=oZ0F#AV>d^!o9jnnH}A)8UWMH}mvfaDdUmtEOMVW# zr8YgAc?~vmeCG73Z(<|ALVVlsV0_n?S%ZIzaZ2AeU_{A%dg{g4 z$g-Vw{%_=v^_uj;01vMJU>-b*o^N~{ z`mpYf|DAP|XSwl*&}X)12md|&XumCiYb33q&MP>zwBeoKlrVN&OO3S6LXLBspWg*E5TlfbgSfJsD2G`H^Ix?pPo6{ zw__*4f8(qki81yUnS0~ElO21!4!9w!1Lg|Px4~D~v5ifhE&o|;d9{~f9d`#hIx`F& zWWApm2_4|Sp>3Hgk55-yH=*-c&&ZVT6pw{jn*5o*-_F|d>RK*=j$@YS_e*aaLF~t&b4jh*<>STV6zQ4AcRv(@9v=>)7)L!)a zN(Jin48?zGc6^*uWnFf>o>~HKqCd@4eDMVDV!Si|mZ`Yv1n(9Jrw20?pW$8Hn2rZI zKZi|e*SBt}tV_CF_g50zWcB|>%C|g}nYkRhFv=YlTd5b-nvC_(4*VGMsgD3Rxpls+ z{(m%B=d0AoeSf>&|8Veq9eswtp=3krn%ghy-!S-lVlu&R#Z&6cr}o&jpRRq{cUZp+ z#8I%PubBB>d}|%Vx9c2%);N8fbLnu`Y)qq5b-rD9iT@JWi$g!`_*pr_fadBlB z7?dj@PK-eqdqm}Ln&|DPU4?E0&Y63LnU`Yh%kAOZwD#t-_x2U`RaH)K=)LOJP~Ju6 zw3gnMvrlPCdY{OJw*|PO$h^+UN69;-y62N?R%IlU8gtEA;m*0UxJ!?D^*Z*`N)#V^ z5o4juHpU`cB+O{?X|Vq|&DiyzVJz4zHs6&OH~O@ViBTSqFgAti3clE7=z97@?qAAX z_oq+x_CfXDA%1ga;NmL5Bv{i&zg8)jeE59WJ_T&G#Ko#@XBzwdeD~vn)1l>B+Y3M`&9(d+T(MCMzk=-pX2YIMSDoZj3&U?K67q zLe}5cw36pA`V2Taoau-nmwCvjc*vfM&^-vqB{<(D93=3!dhsjVO8{Pki(hg+>V&Yw zJy^aCEGK-Q{)mMYeRJaX#;gBz-iyzC8VDA(r@cn?e`y;=VDVv4>vTs#hK zSQD}rxy6C*(yKM(yq0ch<&LIzMtz3R9%SZVA$d`qzITSNtr~Z2xUUMmDn3_SY|F^C zmBL3mb0=r1uh+hi0@^VacO6pnQBLE>?40)jX8OS z#{DRL2#y5#czrm!1xIRX^3R|4;8;%?y?Zr_&lYm<3cjIqEQKcD7tEuZ7VdS~lg9sY zle3~S1|HGFTgD^nejnn&Nb0oYFSC8DqMl&ecKBnvNwcT|`));cgy{X)lS(`|8B*jgRVhzWqbWDuHN+WTA>7mN zvM&=`1ilZdUENRgnM?aOjP3b{DjzqN&%$RUzm&y|)~WTwF0y)lfS0%*s)76F9-L3S z1pKQQB;h>B{e-?O82m$`Sz`Q>D&cXP>x`_<3okRoaQl5(ItR~AF39%f(AVSmcK1vU zR^9WF&s0e!s%g6hS!tx7drDfY96783oh6*v;Pa-UCv7Z59NRvKO&`22+^NsX(W$E7 z4d8;c;7uprTP66+Jbtqld1vH9woVir2iH>P;;`aQx5(az;}g)cD+?c~3hVtHy#E4i z>XSLR6de8rU7vl4Y7{qo>l{Dlv#R@2Fh~>Y6D|-#f#8ZWgYe>*VwkbyWIxL!m|zBIeKIs zW#J3K5h4aqaNTIz_U?KcC?Bt&{JF^<%3IjsE8Qs@;-2X*+4jA=#0JW@%s|IhB==I@ z*RkSJ@v!b;B`4_s4lS*?2kp)*4kJV97(6q1d-k2y(?0G!8lRTDE&Hy*zSI6}3G0L6 z_FrMwh%e3_S|?qWT$Zi#(V_36XC<%6z6@!trPq8}N zhmH621$S)(n6Dd|8(+YNMSr~+Dnu6{-xVWgRmm1qyRGOa|Gd+F=Fh-%(-~m>B3&NE zUTF9sXR*ziT{@%3KIyj_$uAejui@7T)27-TouvInB`tjh>GtSYg?D{D>h1f*S2oS5?r@BpjrIo&|O1nk>xqv&? 
z_59e^Dy17s54xQ%vaZ+nwC-JzO+}2d?CYq#hXp)N_H1lhKF{=NU#O)2>dNT$aNnO2 z)Gha7xCPJe6Xb?c4h{CLgs+z>24C zfQ_RP-q5q-v<6CZw}sA2m4dH$)-DmAF3RHQ3Cam4*a+yi?W{*;S)2B;1^wf+cP(08 z%KYkHN~{}xG3`G`*t@RB*PwTi)aHpU`2Pj43P1PLUVwgUOfmLgpxk*YGgEUv{J87m z_%wx!b(DE5TRyVlBSks~bvMuI@!@&)?i#ril%#358ej zVlIFCHg*NMu;&sJEu2O6dbW=E!O{yqeFf(ba(O~?a1Hyx!h36AXJrgt42@cTSLpC{ zch%&Zj>D`yOE^ckJ~G_ySC=&bCw9&+^2~bAKkM-q&r5RaZi6R8_t;hsmW6!Q;#UFfy7guNCS<8&J?l)Z(H;(%^*V#0em4P) zX5k^1TS;!az}0a#w9xxOy6na`{IDzjetvi{`)nvbbSn=R zZ9SW9>q$Oeq^+Sam#I&15I*v?s`#+1Phga%ub7LOAB(NOb22<5ygx!4cM~HidKUAn zGYj8nzH4nYNWa0e@0wTS-wFehf!Ct&oa8IythMsh4UZ+D%a5{pLHHAoz3Yl^vh!W` znfwm^d!PSQ_d4o|_XhE~{h=P}55n`%z*&63qjk6+H!L}@@rcYP_4rH_!xcn-%jej1 ztGmhbL*}>Fl1tIWkENU}8_B6S4Sjor{dwg`=qJW+GxJs$J9slX)+n}qF+9j!I>D38 z%t`2qXkcve{KfDLF&7iyk39Yx1Cx8o*`NM?U{aU*oVM6-Mqm)+`Q8?s}Jd1}5`dF}>M5B<(Hq9UbNQ>f|R38Pi;YAFM85dRnmy z!qG4>B4*ivi8U*`!JlX&ds()WpC{zls^3|kguw@qp6e!c)|fMao%+1D}3Jt=tT z`|=>~eOP8Qhsj_2Jh-p^oN%sKr!jWCY0m#3i}s#3;a79(RLVD+i9P8bZJzjr`&Vxi zU7LBnnEaK(R~>V$@GhJ`_B!`T@!iskoS)sF!e*cyopaW6hjqYzWS>c%^ejZV-)5ib z-0$2?qB)$-{;OP#hHmTtZb87vqWt$@X(cRdPg^^oNDURH_dO_sQ{7Lm?rnBi0rxQ8Q zdayll)E(dfZ%%#I>XT!uyd2-s<a9}jCUh_>##TKc~{4~QRw(CY~hxs z)CE5U$1U9H8pRfgbBCV#dXTpDJ4KF@RZW59txb8@h?CmCr{~zP!ECv^`RzaNM)$%O zMOS{^o?ly5W6#n}YX62UQ{2A$HnU#mXqUqSyWh=p{0zGX+oSwD;7Ii0Okzixv8#Np zzv6g3{EWrha(GUDjVQ3)O!+W(<-~vyx-XnU|NqWAAD)T){_ol|;M&Af7l`-F`H4Bw zhYoGs%cpRxf6YJgE&9Lxy)f2$+4InDxjT(L*laFqd!NZH;u%U!-_%P@U+`L|Z|ZeU z-%Qq9iPKr572-c;tr{d3nnMnBop-2$XT>jZ-pB4ir|&n($B4I9PN>C~;ak7HrZ4u@ zn!YvUMh-Bq>#kXH`E-%TsjK07r%(57#fhO}?a^-Vo8#MDYw&5-Q+FEhmznv;^vv3A z!+>ke_Br|;jkx^{ncwaT-LZX6!YxQPX6?%ce4G0-nKy~~>8wB79E&x5SKSZ3tR$?R*=m_CH@eH~Xo@brd;n@@c z-HEy^h`uQ%f2hI!5q_hWp@YE*{-zF{>yCoAeViR{0Ef)eUBcBi?v@j-o*QP?Yi|1* zxrqYV`VG_qO4$e;5dGG%H}wCwEkpIIIEL(hN8+z&E639|M!rAF~~3a zWv9!3;xYMw58f3&jDA>U9-C_deJ$^~?BVrRu}&$+Uub3H?JO;$&@2qS%Eo(ibfA}h zPhGoxj%e2UUZhX7GH1*`22OVpJJ@N0!;{27b;dLv=vg1h?-w1T7n)=Jz!Uv}VIR$# z>wj9xZ)EC0p2xR)ahK^pZ`FfYIDR_>jv)Pre+J;Vcl(@ZrFqJS!v#(Y2mKo}Jb5A* z{x}4NyRtB>;rV)Ch>v9M28K0R7=HFaFl75G11AG8Tm-KQAL(HOIEZZz^vA$KeHI7* z=Jy};aBzLL|HV9qZbe_BU(Fj)eh2woFF4?NJ@a(=j)y*f8*31Buby4WckzC1E(zt4 zaPTFF2dwRG*PF;cd*LzH%=|Tdt|!+;beQ58O1%4EO5j;jz2`9d24Tub|AhIELFed| zrc=68aQHB&&+iKM4`{=nBV{j%Z)Lw-`^kQr3&L;5Uw8WY&4GXR(RZ*nBA(mg7<_5i zII=einOn4;i!jMp%C8}1D>v@JLcbGPPj-+pn<^6d%JUX88&ZQ2NB_jdmP%;FJ4 ze&-na!p-ygp>6I>(|2R@YQ@fDEh0NAYs=I!$>}$E+4XU?0gN^51vs2-)^nBj%YKTo zL2Lm+nUa^_FV+}+{jIroEd5I^TbO?&w~8J3GxJ8ODbk0`ZIQgL>z~)D`MLY(JYXtb z`aJr^O@&zFn34!Os?l|pN(ap~;n~^`MQ@#ozh-98(#_27KAP95_J5t7kAhCTNPDp3aP|EWIMppJ!v;es>{uH#RLi(4J~?C8y+rbDLMtMvVKy)OP&&8MD-`+JEAG zZ5JQcws2+K_^uAxReKtj!}&qoQK&wDHAJ6DkGZ45X9_3c+or&EmEvKIuTeQzh@ZS0 z7#hiG)5>@2XJIcgiXRLese486gO!vM>$O2+;A|MaYR}G{jvUGN;EX0O(3JQ4A}k-_ zv(-H#W<^m^<9Xet_7bc(2Ra(MGdJknw}0mObLK*)C^A|K{yzp@ zYT3J0d_;?4nJg{HtD5y02`93DOOSyZm{UZ9Q{YdPH`tF>=EK92BYAeBx|)ZM0E7CU z$LER4_-(w!``mHI*f-O-ORsbL{IS>YOk=OkW4_CdKg2WqYJJK{rha|{Pd;eBm$d&b z+xKaFidV#6>-ZGyJo*j#hl;@=KDM24XUVd-=}=T zLEUddKk7p`58^L+SK|ZU`mS;N{apV6{fK7y$XtLrUjjDWi?x8cA&>UJWN@(V1UJbTFJ z**^gvuv<99v%~pqfVUKQ6TlnbcPad#d||Xx^q;oRjsYL*;nmpij9p{xy~V7z_78Qp z(Qg#nLNYXsegzY@Pi^;t@GWW1goH+w6jcYq%dEaCj&D4+s>|}cm2u09-tXh>L+%T( z{#@t>?ZiJR`tbI4$FyJL;k2E;9&H8o-vCd5v9O=GEXCMz*SEeMmy18qFiv}A;0ryr z>&hYRrE&N5q z>2S{i{FbHP9n|-E6*;JF$C(<+tHQMfn~^p zuPbxut$9Q7!~;AEePtsb;aMGd>%PtUUHsbUj_Il2bvN`bX`c$LqIFa_fz}Y9nR5k) zUJ!rf+@~lT*!LGq$)R%`ItwPrjpd7+a_UR*Q0iwywjbqkJ~^kpbdTyE%<>4|kDpJ6 z{64nor-QPubq{#U^0vKuvWmSsr;wZr=PL)PX)l>&Iz{(GD+YB#!unp68?B`LBSUyA z-5kE9n|zT5{jmRvaNmw1{5x}r!DEfO4I8%533_Oqy6m>TQoWv(cB!bf2c7y!d9@MALU0_z;Z*>xtlV 
zrrR~#vcIxNBRQ%uE!g8uJ3eFC40qx0?@NJ$6gc<+xU_OUjX8xq{@v%9&Zj8nm^HSJ za;d2e{cA4Q9$r23>|)Kc%%`_AKX1lf_!ux=&0M?H4DWdXoN|w5*FQ7YK3$M!=h-0k z4RLn6)J_?T!lo5dpKQpUXJJpEHg}%&`;$IZpR(;Wx98EPeB_&%+kZ@d#TUdomvaaB zX6)*-_`TfC>(V%7ci+w&9|OMSyP7txF~!Y`fhA<}f1E~_Yk%@R_;%<#Fa0+3SuPBD zz>uGXVJkMpAPjlUTL)msw=k%0!J+<@|NKE<5I#cSLon38bKCAe{hcrj;={`?=<5U% z4tMUo5Wl)z3pj1NzY%V;YYcFqI+9u07A}1CIDJVsbmNB(`7~!Plgwu4$s&)(J^in_ zWjs9h^4M@+X@K(p@a$pj389D7?q=>eYs%j3y2+C}JI}z&@0(}FGtZ$=_WrC<@yYfefPI={2j=r^ zo=Dab8=&3HC^+c{hq=7+sMf*wNoc3Yw}*z#kD7}$cgg?GJXxMXXN*SvpJGh@T$w<2 zqLywY?SG{YXl8Al8?v}zPC9PA#~QWhXcp$&`9pmf_L@Cj@$N6}MK*3=uGE}&!!Ye% zmbC9>ez;R-fWcK|q3ICcY;jAPZ#dbbg}>h?diXS`;hy4pXgD_&#LnifGSP@kODXWioU`5I-zM>CJBp4O?2j78%Mx6~dzi?jb@@E@&r-7djN-o8S? z*MZL{1YQK^YrrBnkMMnbU`+EKcTAUX`ZL;UpiY9i-k8R=$BfhbHgNqRTc?#WL*-re zpLls6`$@jd=OX*wT&epW@pYn?4168QGZha&8{O3rM>jXJevI#|Ir=wZ3q(uh$8Lcp zra#uEv8)pv{Kzj0P?Je0+24BN9fNA`qQ$tG~!bt3!GrTtcT ztaNx&&!w7&*c%_kr|!Odo#cN z_ScvI?dN6N&uiEH9crKb-Y(Yd%>lJv;GVGkGT_q~BnxG>U8m z0#?BpHO_JEsD2=)zI2=F%ZDJGsJ?s%_(*L1I5`e;$C<0|wHK*1pzPNm^zzn5Zu^Yc z{Tqru!FA7b6B~-H6_*Fny$k*?pb%0?I0P%Wix(jqGiviikZXPeB;Cwj-Ci z?>6&a=9cZ8Z>&S!y3v*C)eb&Z_>(;l;cj1#x#f@21Nps`^0yf1o_p38{8``2Vd{bV zgC=}IvV?sBt*O3Dei<{rTr%6v zj6<0ZrOSkHV&O#o#bZ~~=T_G&J4&w1MgKVNiq8iM&aLNJ>Js@8paFOycjgu1ALSSk zuGlxI5Kq*D@1cD0Szu5cqWEME-z}ZQ_v{A*DStB06#F1vu)GnyAbEh?H7fV@Y`srp z>xC)r^8HSpm*=z<#kM!Rdly`(%|1R?Q|2@uchC;ms}1?Tp`B>?5&t*v)Nod0CUqrW zW5B~_ktNZv#4iI4bv`6l&UW}PWaTXQR;Eh*`?7To??sa{f#p5g(U=71OMI7ICOGv> zbzb1R_QVe7lzW!%diP5{xA9K4lZ*Ux>ugNRoa9(yIHvI&+n)26#z@^k(7yK`z|zkC!`T zeEV}l;pX0S#kNl5T-{Z3hokq#wG$V$b{XY@#5$=S>o1EBgPmH!zWkVYp5235N*Tou z+P!VY=yIm(RQgpLSMyFXTF0ki_4Oz*-O$_@3+N%?cp}t{$H05)>-o*<<2!n!=3Bdr;fA6i^69PHqD?vb8LHlw)_Y_eY`$HTf`jMeOv=_mc60xA#4uK z`xo<0Hk9^OB0NvC2jJf^CLM~bm1};@G`em3syORI`_lRTKE{Ae10KWhwPafMMkTx= zog&^kl{$^+a@knY+k@-#!M%rbsGGb0@H@r@?rg4pt;v0P4uChwvi2V|-lOVA?XnNy z?>kue%sywV?1g#z4=u>Wu~mb8mJI3h-*WnFWlr_`lupR)^G~#`y^k{dn&G_g<#qTm zO+j%{-uUp!H26$sZs+`r8HrxmX@cd+>e1n4QJ&l1rkr>S?K65X`OoZ{f$=;;+rE!K zIt1R=E04#~cXQx%un~NCe>Mc(M`<$+ypGwpP`+4qJTNQ&G%&#rmzM%xFbltQq2Lo- zg3$o4V9SN`xc7|DhkXP6$j2$((?9FkQd(C!}|)h+jYKkWO_mgZvCv`c@&nn!CT>kA@3ej3=!T5Qy;ZKpNBXKmhm_LANB z#JWK7(o3kb+?}{!5&qcO@a|3S#OB$n&rZJkG1K?%zd6a`RV$xte9u{S2W#}LP1uM< zSFDU<8)$A0F@|671rO|%EuUhFi;4YG4xPR3#Gab0Pw3r`;ipH2?Hs}!d~B5$drhnk z@okgZ8y+}2sn}S>acK`g_fX>(DvypeMayp+c7=&fI5&K-bk=>OWr=pxc234p!}!Q`4O`v;5;2##BCB}wlIpovQ zGyH3{zmZ%q7I=HtCdx;TX1uXbMlsGSjk};0+(@6T<9!Qvc*s}kfJcjGcvCSenkS#( zQ+?>)pX)WJi+^lcXeGL_Z&xmQy};VI1sc6WTMl&6{MZ5yX)e4{&-mZtCMM!@vin2^ zKKmVbmVL0{`Kso1%P1bsoSYGR6hg_WFLsmzP;e* zLh|d)Q~&Q}wlunV%fu_o&;8!Sy%zUgyBFyFG38AfTduQ*KHQZ&5AwX2J08@hY?Bx? 
znPK<9irdfTz2qs#xyW2u#NOvjLvgbKhr&zP-Vx#2J0k9<4aunRdpq>g`Iq<0yLW%K z_8K=smlW;Q@hP09z!mk&#S{3|It}?tG4W!}SQt54PWuWWfA@BV(d_Ibw8zRJBynFEYL zxZg&5zOFLJu;dw@s2u}8>;}h{$KB#)tzo2(Li}qFDUFS-eL5G|*l%7MLT-RNIR+V1 ztjLMR9_G1tVW=*gy4;k!ogKSk!e(GsNS+qs6R%-Rb4yaQxQlJ6a@^J7`%BPQi2X9H z4K*I_x!I87yI@ZVcIbKqe0?13qX%J+LbojJ#9!zRFQ6j{>|0XiL9Qk%j)U-DC^6+WUCsvG04g&`jjlk^(lG-C+*zg^f#`PeNr8y zoZf};vG8f~!fkp^nc03BgRMUJotR-@%$Di&pDT_4KHn)AwJ#cqUYIVPi@EOqpc&#`tY_wSYwdk}@krWORw3vMCzFOV@j2n42>OM|ZXu@K_U^;yw^xBw2uu zm}{m@2{D(Ng3uIf@1>dtRZf0CIsa&n#e>hA&j_%&d_2 zj_>1nJ_=fGffs_rl9|-m$*@Vy$uth<&al)R`;6QvF5mZ6#pk4F@IRa!9_Qn~?+xE& z@6;$w&3WoN&OH!+;kS|Ind*Ow=YsDinulhj=2$!ghte@VNBmI-98Y4G z(Nw%Inu?y=Z}ZNMMQ**v(vy5e1N6*bJ0EG7-&R9D6N9fI$+!&XKX3KN$5_ONd(jcU z|KUB03@{qn9uICU?Br1yKir%u*sl}pfeA}%z_H4|2#$SxIFnm0Q{5`c>|(xokxscz&SNgY` zC-&hxW`AqJc3}5-A!>OcKEMl$r$}FT{1BVM8f6GSd=-9(PLVtX3!+of$X5EQu$8Y_ z$W|WX);lvR=Dp+lc%I+M*`De2IY4JqvrBZ|%`?l};n0+?={w~^;nf5IxnwaqC6QfMq?oI7fHs+rd6X`wZ3PYGc0f^8K}ZI5k#Om2Y2TNEYX;`~3~y=TLKXxo3DC(quq_uB7z z*Sp@^de^(w(z`CscdK6;7}WUjJjNoxjOdI&5BVQK*?GzN^u)O&CznIEy1;Jdb5C}m zgFD%CD2cNE(8RFz^3=Y69)1Uj%`E(i{10FyoGaL&{9MWij>UVO7wU{v9QcAK`2J7< zzPy*E>7j}?;0jOR?$UW*Pkr%-eI{y)io7pf5tjv; zzkH7zVCboD1>*od%i=f;!&EQVe)0#2f4M%bH$DzYE?fvd$^H@pE^JZE0RQ#| z@>;fdG8b=pY)$Suc+;sPJF)=yt(-4d;~7(~_luFk5111WzX<0wp3HLBzw|M-$xP$i zt@4ugO4_QNh-Lh*_xfho{8_`wH+_#;#7is3V+8ScNRq@0R`RxOSxp~}Z zU%9j1(H&zXwyzf+()nZauz5b&Aoh65`ubJRI=LQ>ub+t@dAAMUiN{;ua67*>HkEt{ zo^TQTApWRmC|Z9XTHCxzd$ZHB8%y9H%jVB)n?F_VR5JZFpfVqr$ zJJF{)$v2}k%}g(g>oU&^6B?7iO}sRbdNkiJIoRg$HcIboL2lj#7rifA)H%z0v;SFm zKb-Hi?D&D_g=seJ7JS6Mq4xLA*?2#LKh=)2*n2DMh3WR#D9C%|v>Yol=f12*AQ_&!6u$-vY(n}V%z zbTMZ^1(DTx$ZYZV@C}jW;v1Ng;&;+H>~DuXZb28Y$KcQjudK?IKK0>y8GNo9YhY$R zdVdk$lcQKD{x1$W@QUxnD?pxQXY^dL>;F32=|bW*h)F zyDU8KV_!1&n-6$i1wKA>lkhGY52o4|28KG+)CM^0XO4t_rQP8B!66MEOOX!!con zhaaO2b?o!cIir`v`PBPc&-%mr^`C$D{rW||0sO2bZ71#d z;pUYY>}R$aH99)v=9$7l^=i)gOva*;C)F?c5-yS}$(Ozjf7fJLXO`IW39aHC=Ex1# zPWV;$XKIZLutRA#GcCa03$zVd6w=1gnrl4y*>;|FdRUM4+d1tPYdCEfv{NvG>~l7g z@5hoSpznmIVCk90lQp6RG#I9}Ptr-j;&A;4^Xh7HO_K3LSYKIm`y&6%*t{aq;stVU zmyI^_>sTATi2JqZ$5q?%XFUrn`}X{dgCj2y?c z3Ao)4ES-%)OvQn}G=Ekr&qN2&;K$r2wnZ>j@l0(~yMDyI!y995-q17g#w|P#^jen~ zV{SER=7L6kA6@axu{lTiJ<1m*uO^Oqxg6;u;JRX>+?sn_lc!k#3kC( z4VmiK-l)XOt_#9b@|Ch|yXl;P{j=8{pGkJJT7FO5XNod@bM)^RTmQY5;L8s5 zvCe1+V)rg*zo^rAF4}cX4>fBH!kvhaFcyz{%ezByDFy~)z>}a%h}^)sC=;*-tj|!;2B%)#_oK?bjOm@ zqV=GyDf4ECw+!_NPkgM(QsSAieZN94KgzvgZQH*iJDfv3+S71+q@CaTKA1djW+HDL z-;>9o4|w&72Am83`a$wI8t{PO>yWKu;$JD8tXdUI)C}n3>&^<|Hs;PZgaY)Q8W(XJBh~C z^pD-nx#rm~q8mhS)%EL_zStlUF9zcUTnMP)i;P+?_=G?o8DyBj9G0*8Dsg8SvTM742$?Pd0o+KzQ!75C-3X< zvXr*N$a$2rFuND9M&%df$|X7%pS-g0bCf5xKVn)*dy8cT3vV2lc|N?8f{q%0HqVA) zHz?OK65o5CsXTw4VcR`)=n~`$KX?=V{hMcxocczY8C*8hghTMb-bT}IhMDj?`J<-> zy{7T4OU$6>dgie4XWz-Mpe(tUm$2u~*_TY++ZbrS6B`#y4HN}A)3fdt;LhRPS>i)= zw~|Z1xuXVuYyx%mdee0#aA5}bzfMEPbHz8Q7-dE+eOIt;orLa!Z~tiV`$BTX;NvO& zK>NbSS6|UGk+mAtrt)`>JW}D~yB|H$zEpI*d*$z%Ia{d^IUF_dXzlLed#xqv*L=$? 
z*_n&qFdyD|bmXWJeDfUt`LptuECR3Kz1FD5^*hJAB*e8L6w3WGaoWVT1kdK2Fyb(3 zOEtV`@W~B6Q9eFJ$HnB-ab~FE&m9+W#V&>d%G*pYRNIv2qjt(4+FEKV_l_c$RD5+y z-spV%FF(&7Gh>F%#&?j-CBJ*|Pw1=LB@ zBWULZ^4maClWb+6RA%H?M{_mtS|@ESE3eIm6-0K6sVf@4{ee zM^QfXdoDe&{0~>q?|B`wq)+wiah@T&U7fp*y) z@%|Gf<~+A9@l-2kVNIFr@Kh^jHjAhHlTPg8k;qfjJF@VKy|MH9tx==J*NO>Dj`zD& z>GY-guMmCZ_T^4uZ>m#tU(C65K6rJGHKP1O?});U@PpbtU^)EIiOjgXGyFXVhInT< zezanCk|*jMEm=x~pCn822lB^zmk6ILrq$;*a23D5f&V7`dS8iRMWJ1JJWm5x@iabU z<%`~|p`vNAMJ_tOmH%b)JZBXvPVgU;*L#&ynPHLx@O{j9hSu}%=e`)+3}-)!=g#t( z%E?}<{F~%%?_f-`$CxFlz({&2uv|J*&rav}x#QWVV!=_-06_Mkn$vl`H$?U&C|BCH~PY$>MP8-l2M;4T%_ZD=#L-ZpCfO(RhM8MMCOEF zH@NkJ*CX*dx~ZcV_*;PSIJ64E^DUm_#bfBRwYzT5U0Z0*dqQJGp6vtAC~}Ej-zAu( zkKftUJI;aG1Iz|s_Kv%RIyjRKm4lYbiQZ@AO=-BwsDW#GmSnRZ8c# zLbvLdx@Hvvw;X;7U?+Ocu;(rxTrTcCdVc$$Ps*vM_1%8_$~2N z>BbxBd-d66^uPK`eRt!^&CQ!XuHL;lWz5hU&6x69{;?mUF5DHHF1m}yT$-~!EBiz5 z^uo=zXZJ$zLqEIus=~QrQd<4 zetdaX9M+KI{TQZM@XC&M9%%m;8_oY~65_rgt&F8tT2jE}Hl*YEpI7dl+7DT6fj`!Q*GrvJy$-PSpy&L4WU?=L3USLmPtR;IW9Hj$>gInySqFFjOu$D1{6L0rM+;Kz??N0QA zKEBSZ)mTCP*&J)^8S;l_4+$*qfflYmbS7iztB7rXe3i$63;~&jh!@>()g+xSn%!=`RuD~yN&Ep-leY#Q}7~jc!}dN z!BATg`!=|9`mA!`vT^=&&HGx7U*{0leA zoa^!kyj%E9p1E~>fqRR%oV`xKSmC}e99ef!f=oZ^ynn!c-&!2*=DmDG{m06ZkL5FH z-g&^AQaF>bv6I{2toQoxO64{rvnDf@9KDpb)4=KQvedSo-+|82;H~*=&5cLlt!O#2%DBTK51uR_G|68ow(kbrBv zuR7_ghrlWH1;$wP>9s#En<}2FhIiheuN04KBtDqT9;DYjgugwU_Vcc46j#n5uS$jn zT$3}-%qvD0`8VEuMc`ZZxTnI92jU@51lvTO$OQ5NjGY(o;(rXx+(3Rv3h&h~v*5Aq zvkLN3-{69_QJKnUy;mATV z|EePySnEyNQ2)vj{;kZ3_-Css8?Fd3zLs7UE-oJV#>r3UnL9Akq!r{vsV90c@IM(o zf8M=w;Ro&~fwO);4$V4=U8S*)`!O0tPEDhv9&`30Xp|qPQ9d+kWn7sLjiTU@l=1Cj zH0oI3;`JS9W-8wX=V7!P8xMUJ<|P56-XtHRPXY9)sa$KcEly&=>kt=QVsm^hqjYp+pu~v`2Td=6(X#eWu-}*B;aE z@{@RJfcGKzDS8v@pzDUTDYg{tMwj64`q2N1351X-`PaeO;6}Sm$Jt>cSwB5=L3Fn> z*2y6k$$UBfo?IIF$I1Jv>XIy!3?RzNaj7!S{G3y~6nym7UTB zWgQnRjz7DX_=fZS* zH_W4~VSf$g9hcpJ&mw2!Ax9=`n1D}pnQwn;17ntC1-mmH%6UNY<|uw59j)4-aPZ5C#T${O^7_hXr|SE@C>W{y{nyi zRiEh5!v4*)xg5I8&rDjA8|}sxpKaOm!Ml+K@#GZFVOL+v-Z3B5E?tlQf2Oq6A`dWh z!7JDc+wP(FwN^-azt@{tIDqb#-IEQpZC_bZcoyriqIl+Ito zcwg-o9pW^2K=xm@e;>T`JLsS>-9GwE^dOebxX@&sj7#F79qOGb%&0^L*O2 zg*bC?OAH%p73N#Xi!b^uIbPt%zBi**Z(W@m0>_xgySNJ+%U4z8T&sKY0!&+=u zyRsiU{SG+qR$JoFedMRJ??$28@;J{Oz1(rZzy|a`w(|pIXzNwvPmLa!5&et!IdgH4 zbDanNhHOC#_~jMZEb+?^DA!3|>2qA~;+hviayQY z6b$(C8lX!s5X^gttMXvK#dE>B3!dz}Nqg*t?7SRKV6u63KXlO;!S~zP^u+ecANji@ zGskG4a(dqJYtEp@zxiL@Yb+)IbK@{y-pqvG$-^e)O*{7cJ9!Pmf_XP5{7xFSFz*I_ zJGy^T-rdBEF0g+~zc0-D@@uimdVZ^|^Js&v#o#Kvdlgp~SJ7U+(N4;)rridaUCjLk zXd%4vxc@YHodujvlh<$XPP*^Y=r#kK#53|8#^@gY)ct7gV_Y-1THu-n99_@m8uN}g zwkOC$@;o#r?CyQy?2D>AiX?k4kU{2%TiCzBhYhU3_mm&H`XkQiq_2Azr+06+$RACM z$7`}8p%0z&0<|88eWb7fV+wmWSm7S_r|9-(7fScH@_jGw56vWxkGzYIQf055{77s^ zWN*A&H{b3Ax)(r@$TmjZ*zBUK*Rsp9dp*B;-#_<@7> zE@+@NDg(&MXO&kx;a$pzHziB@KXLsL>zRlpC_jsJ62uS4+k{>nhI~rsnn|oi`E2A5 zEnhZzb(4H@6GFb}&nWpP?$~_at4?l=VeM-JWkT^X{H_ee*LigygL^!geM4xY;;zbx zQQm};E2=!0?N83Qnp{QC*qwUj#%5p&a>bh%I}>AjnLL`}2j~O%xA>>z4&e7G z7Dv1(9I8nzY5`s~^Rvb1%;B`7dY->g^z+2?DLv$C;0xRN7@{rvkrj$Q%UH9vhj`lc z&BO5F216)~P%#%~OWJrT95#zA})MJKCzRuMl1?9d4#v zEpbZCnTXaFYj^bCmu032_wU7@+utnA9j#}Je#sYcp2mCXut*z}mLijCbbs$FhIHKWB3l9^!j9U(sDR z>Em1FE|RYpQQnyHg9fmDifj4k3*{;6oN@WHQ{j7TZv@`nBzYvSsqkUsn!OO_Z-7sf zPo;g*f^ivWl^ufjm$Ao};^M?H*KGETjogNe5_|0LAeLXBuCY>mZjhL(%dg5Aa&zsn zIJ1*nxLpU(G0FpPp%2yXH5W~%-<|mk^`}L@(~nNSr)M{+o=?=VE&o~hf9HI3Tt67j zKGPl~Z+5dMLOiEjZ9D)Cv?f`(byb$9NP7VXEY5wmIQQM++;_|Sl;VnSL!%n>#(pnn zZL^lv;(v(oFi+>~Np-r_QIz60uf`1iun9GFt;}DrFDE~jU?gw7zP15C81d z7g#&T+;(3z^W6*C_d43$QPk~Ck0?iBJ#wyg2FG{I((ewwS)X1uwU>Ey(cz1n7i!aw 
zb<#&y#P>6y4Gri}>^%Hpu{N5%q3@XAiF9{+!h-j_F{9@u?O3x9e6Bf?Kdbae$?o49 zPh<{zx+lV)a0>X_3}*)f{NcoMIj1P6_b*Y_#kH)&A0Bava>7UFU%CBo0KB!H&Fu&E zfoOgfdg9B>dy2-f4OWrnpF6!n3cIme(hKRxQiEC5G?Z(dXXs+Z9W?i!#{Xo?jMiM> z)LX609ljTG_nDMJL^;wd-x(OIBD_;hnOV`ca<+clYSAMD4!`>3AS({ z-k>}j`gc{6p8b?uTBCUc)^VZ_UTvT+6Z>r<_&PMpwC(Fo`sjGyIqSHU8#(5rz6;TJ zz3MyKsdkHAKTb2F=M4M(njPSEwcUSZyVuiy#24cI=hX4@Q`C`&!*=?omcD%Q6y+LN zmmGuVqU0UQ7OSjiukwGib&7M|m$M$jTPXQapSk@t9J^SJjC3Poe&|82$SeEMUuh+u z56|JZV389}-a}~D+yi@Nbqq73_dw4b<7e+~;Qa9+F9v6s7q6ZbooHsg0Zpq+a^F_^ zB6M5b?z%<3Sv_%C`B&uRdndGShnVHILhpT|cP;d;b?Ds%ZqH{t zzoulo8C5mWESb=;DtE_`*}EmrHPFA)q(nMdCz2==3rRS9$;O&J^^gOnG%&U`;tA(tucXJ<; zu~+hG50E?N?s2z~JQn%1T3@pTUQj-ylLKiY-K=l)p2Jzo{;0rgOC?Jb>RUKU(m#UP->T@X(&As>}7Aixq3x;0Xh}O=Y9_%Wm0T{BF10wv+%i za9m^><%7%xA0Tfgls<392h6SNx#a15`kQ#+rfTv(+`bIr=ULFqve(4RKUaS8xX5Xo zAL)Mg`BQvn~Bts{Db?yb1z!1A~!_stoLLNMNgIEe-%z#zv2WXlGR8Kx?~#mNku7ulKnuF-EC`Rri}rejgV==R^t2fp zZ_XQ?jK0~*_>Zy868ytT$z0VR<1&X1b!EqTuUBf z9dev%`KFV#p8i0t_OE=VtIP^t#oX@I_~|{9MYf6;NH3+Q?;L_KlJeV=AWG5J89&UnY|;=nQ_ezID@vp3NP9l7`%gZ z{J{gI_P*c21D-XS;cwNmlX_anfw1VKlkvHPy+TxGo64{*q0`1$bN6ElE767W zv-4SBvyJi1YQ{JE{xY*VSG@7pKfoKT!A@LL6GBeMz!!q~)Jg4onew57F}r+@+8kep zV`!T-k~PKIq4tUJvf5uy9+c{G>GpOU&xe7zkiC%a|L|KmkG-p{ zaH@|plKCwhq&L0Uf%YWe*U>Ncu_mGxpTYwFRjiBj#Mea@vyPZ=D!(GQ4sW-AnS0t| zpZBf3IN{~@gp)sNg|97Szl!Z6UcDV#;OaN&GuhQ3{(xiOE$mjHJq3H^=swOuIGc64 z_ouaO!zYjqtO6&KaeJ=xU(W)@XjR^#;w*BW@MnVH95`F!)11hS=muTUr@OWyQ(8Bz z_1NNvE8r*c>~_iK$j>vZ%f1OZj-{Qlql#NC!G?(`@(7Hmvl3|x^XsVa)&CGvlphu-3@8JW`Dqh3qn-` zWl5d^Uqi`&G2Wfo-^^~-J~E6mDr1c0wLdt125a%ghdYVaM9Den&~NDd5&ch&&aO`U zHrv-=or&z}0DjM5;)Ul!w?pXU54__G6+e*On9M%^@_YVlavG~x&+`&8;-ek;^mBkY z3g;~Lari%DBGuK*?IxS^h?R^weOid~OPn*@W8fEn*3t+5`)9YU+P`8q=TkHW7u5_* zP9v_x7%#YJ!T`3XPca1_JkSt-wlT=Q2WlUEsPDD^R*rWZxUer1^9HAiL&~3sV&~v- zTgQo}%L`cV|DhAZcounxGN*9(A$&sGsQr@aq^Gb2Y8&U=jZ&Wknd5ka_Nt#+b$>4R z<0;4WEcpBF9J6UVa@xgyCwni!UTjYa%MM(F-0Z6xH}!ehh-GFl6^wFicD6M*+h>Y;mU-FNVRYNp(2vgdVIO>)@$NGo{=NJs@t@3p%2U2sp$q14#`|;V zkL1zk%&121KaZV2_m8SZ_p3jA;P2Q6;>xw&a0<4g6+5vN9FO1^_#8W8pYtwVq*!Ao zV?uH!+9fmKK41D-G4n+I)XjV8Ye!F~;d76T#0os^Jzw&)_ZEBFe}}&!_#N0C<*{~P zZ)_fcZq>l=sIbp;KJSAnZ@49!eTSf_BeT+xlG(Ag%wEn}#Jay0-Q9^VqB8oeGRmQt z#=r2qm~xARKl#X8C*li(pW3|}J^vc-T)n@_*7@v-?^$K*{3~B(Zzc9W6Fpr0tvsYm zbp9*A6Mm<{>s(+8Z>^tETcYeit@6TWE?4o3_$Nr7UKg@o+Un8SUhM}MgVb{#rgWGe z+0Zq>RXT*ZFuToaA90h49O@AL1k0H5UpPiX_^`7NxU|vn+r~z|!FiXO59^>EdR72$ zPXvY`*4Rp{Q7~8W+Y8Jl$~iPlDvZ4qTlWKgi+6fgC)u_pk$=!rmA*{khpRK%x>u)# zWA~#c;G<|k(lK7TUhu%>oVe_8reSsv-9{cx`zzc-%l2C-<3ryj(^fyS=TEbSo_|B$ z0)4{`_E47DbOb#cWqpqHe_%BBf~#!6M|w`4Tqk}%{n==L!y1j{{v&p5 zpmXR&#PE|MtBLV-SDC>%lvm84yNWdnBZsw3lD@w(Fu2~B#?I`alqFtEnJ&t7^1KV! 
z-Pcft_{R*McQ#(s<^$K>HRWw()YrtPi^?$go=L}9v zPh$afWl_%rWU$Jdws;l4i=b60W9XXee6xzT`}@ksIp4@Ww}bTghgW{3jWIHO#5!^2 zC=dLC*fY2?&lGtKyAdK5hTfl^kNqlvZzsUxK4O+3&dZE~Z%?fm{9&9PtY;2)fp-D4 z70zAI(+~a1z$bctdfPiy(1+)(z&&3+05tL~nsD>qS+jcpS{$HXM86~0-Vpc=U}q0{ z*(FpB%R;Zjl+e1V88axHX$t%qPdK-{t#Lja$h=vU^VYQ9in zuh;qm{(LKQPlEqN+V9fGrH$s%Ts)n4O(y5GAaBrFbosT-6JsJz|L3Q~IudY@Ke~}} zish<4!C8MKHbY~pqwKGvv6cI?(}cOjG0X#Qh&z(KG0h`6ZOGeT&ugxz!cqMNqfws%*DxIkNGJKj;l# z*+)N?qF3|(~2qJ)n_EWnkTr}!By*}u^yfOdhu;=BlM}ef?F(xbb|a26ajtd2 zF9zDHsf&J@UJQ%xeBZ!ZPfQVH|qx;Gk9K_BMZ{k;%%Kp>CTI1(}!BCC4JyYclIgm z+`yh_aX#tFHiPWxvCj4@vhi`zNs?WQHhQSH03XW>o`FTxvz)$j=6|5s=p(>2@jtiy zZag&W+xY3U`ymhKT+nWfe_o`|I<%fLkQV;Om1Zrz_}~v#dBVgGDt7WN^kaOhUBn?6 zBZPj2JsoK_wd}$V;M)$~>-sF$H4*J~W;R#jmpkvzKxLEl)(lI6Jj_%c**IVDD?rn35SVxBMIVQ4^Is{ifksdXa5H*!T2w=2bi3TI6d@V_|W*xq3>JmuZw3G z3$F*JzO`ukoWDuOXGQMh8+*@u+EPzldbW~h^^^;u|3Z(M!FqUDe7Z$A^St`2?3?}< z@{9zRwr^E@h5En4cYa{|_~(qfqENck9x6==x7;?St&DfFDL;$jdM&@DOX<^HC-aYR zsOJ4ofxoUveK9sNjo+f7^-*l|r`1tNnfv%=;Sb8%u7)l(aavVFt6Ihs9q{undGFHT zhByu8^4&8$UsK}^d+A@B58jP!cE*;>EASrwb-HYz@b$NP!{3_3-dyf)UsCuR{60CK z%beq!`!zbxIVWb#cZ_Ji%LKjQ8{yG9_{5%i>-%9>Z1R zo8W|GyS&F3^0fnjG_mhi) zbIs9izEeL4w!8nl=Uc3ow(YHBFC80a;YWE2{lIhO%^=&qM5flK2cEKsNwubTT%f!N zf0}tp?GsF2TId4wMSIHxt7tEE1(C51*+iV|eVMjD z`Hyd!tQ#We9QsMPCE}NePaIbp&-=m6#)o>oZ{ssI@({SxgUj|fF4$=smm42sOzz?X zE{}Pvp*7I28r)pG9^@PG;;)b6$NYPz`b@srym;MPX|vl`_0Zh(wJEN=Ao>3D3iL>H^Q&wH3P3^M?|L|eATqi zkK-HtCp=Yf4Br#&)rE1K->2VPpGNpj@0e4BV~K8d&tckT`@7QJ{+mnMYkrQcnaCJ+ zyjdE>H>~@m>`hjoHM&|yeg^tBF0*m|b9`~pQ}h#khKermv|S#(9=yl7zX)7!xW}~Hc*W)J@9;vf ziFHQynL+cX%4WT3`@y9}t;8vAK$l1_20f)kKCNv6zI60o_zoO7XUO?xD87r@C4D0t z-s8Ql`?=moJN^uO{eF|*iTEe_4l_1!PB(oa`byTSmzT6R$LSoxeu>U5jkVra>oXI} zK5&Y%vQwfRymSJ++`f66_ZkOPFIz>Kv5`7p2+t1i6mIhA+_H*sqW7OMl$Ra&Tfy=5 z!LC1I_b2mc;v=^|)i(ir*&FB|`5vO*W5B;D-UlnV7k#7ZgY=U28?grlT8o~N2g!lT zbfjo+rP88m?2+`6;QWB+qA$A5K9fK=pQ^pb`yeZFH{X0#AG}686Y2Uzm)GG7!B6Z5 z<H~e}(ouE)m1j308_H!!r0E@TnnH_X;|$k#WKRyA@w6-WV`6cl znPX2j%YK}t-_Y$0UE$fOxed_CrQf?SjD4YW35Ab0-XF72;(^|6p9_Y}?eBF6}RlIjh zWDb2{$D6sw_V?+#ng>@kHSw)@B5_~o&%$pEbP#?|{1g0K+=S2&FY zfFWj%o>%UM;^#g?Tn~PaF2MfPI_LB$mxBqP!S4XK1GX!#;qz zKRcxLjwi%pj;?t2SbnbKW030!EX4|$FCk8@yd-Bo&#sw_v6Ic_RrE(^H#T#(70!ar zhi(eA6K|_vUe}(J+XL@sLTBw&IgaaaWLEoTP-b)|aqbS!xW>M3Fn`W>fmG%SP3B_d z#eXUFktUtB^fvMzEe;Hhzj+xvHa>DYv}%F>PBY^gd)V8Jcz$K|c3GBi zTK_NhkxBTAd5`BcNe8LV)K_jF)uBJ1V*Re>+*Wo zLzNlTA7ua7yP1cr3iLGkFS-8^1}}`8aW$vhjho zxy0ahnADxyhk2W;l00n<#C4Q^QFgC2(1M>_E&FEMKXSoxkF4`~%^*3-Ha`C<&)o5w zkFy=0`n1S)bmcb2^tHZ>aA`+I`1G73>n;Ue-DuXNuy@4u(3HKL`xy&W9`XbR&xhwj zO&$C-Rd5?{DSZzeB&TZT&dAt`t9H{7Eo}1>QI8QzY2YeXKiO8Y#7Il@_Y5I-sXk3CN-DRe=)7M!EV)olhzN_W~^zVzxrisAL9Nk z(KH2`WL6rwJG7zD%^R`qnF6sCi-fzd-p5;6lu@ z|EX?dV#Al)`f5|d>}f*3nyh>2!@AqezkQbYtOkcjp@t*<&(E&F^(O$oVI^AFqkfQ^dEbn zL;7^=S55GzGblCpZ$neX|L-jzPQqCAduT{*3Pgj!zF%g~S=X)^_lN)TcdUA!vE9YZ~M&e z18aW$neT5~^K;(IjY`Z5m(QfUJ^4iPr9NP;`+zqUUq!LDB=)sVvHf7}Wq*V+UH7N7 zJ(uqI{UzSO;2X$#$ro(D=>gksnrY6PHO_3RUluCDmwb@%V|Y)m+*8Y1at!Kn*mnLN|9}Z?D!k7oGm( z1A#W#RO#&oleRNpOmjVRsavenK0`j`V|?=z`a|=x277xLKRSoJsdDVgssa8 zyX2X_(>)~oHhY4+Rge<)VpmeAQ?^BUN*Vl1PGny;qFZ9OOdjaqU-hr&+Q2{K^zasw z+SiKS2rjN2DB<7MLtN)CsvW4l)oPxNeIf5*XTFzpUg#jTRq*_A`?8<*Do^wQehaqX zt*0F4y$u#yra#&k7>uH;rCTMh(dYSHOe}ok-LJn_XKcOt)M=)2Hhpq0|4I0+U6egc zyE@4agLeJBb@=nFO*5^lUZ3*bt-H7AyvA$Dt1ly;2piK~T*5hJJmdZlH1T0ql-CkC zGPd~$w7Gg^N^^9z84RpUZr+Pc*vt4NdK0+R1_skg%&Pd2f#t>KwxpQ<_M^K$+&zyxuxxN=4;N?>^_C2-klmu4>d!}Ye~IvAJuCaK zD<@7oe6R3@#KYhkv=acL+R8xS}OKw~NSHF*( zT?>CPZog#5wi(Y(dmi30gZEm=jVow7QD7s<7*L2UELbfF{?2&(lmPS zDs8jID~9ITITc^W;FHS#*yA~EaXNmNNb(~2Q6K(vFLKDU;p}tb 
zGedU10Zrt4X@3gG7aA7{y&c>920R6i^j9;EI*5PN1AiTWN5ogs*X!8_;GpuuiAl*f zV6XMYLzl6aXq>Nl;4AUeV!>UFtU>SPaUM5s;DaDPl7Z{rh*fG1{7u1Llh$?!dHry~ zaPlHGW)v@T#sa>x6W4Y85Ur#CKk8b`SIUJTu6uW+m*zHt`1SieK8 zh1WPqdv{B=HiJ){C%f+};DMj?s_2oQYW}+f9lQ^J>i}g3yCrM65&10GoECypC^rz0oT##MWq=W5*b0QJ9D8y#VfyHxIlEC=>~P|jcjB}?hhEV??i?d!mkZ0ech zQ_oe`XUQqK9obqVEq`nxw3vfG_Bj67B-*=|x#ZJlYPxkRR58ciENNX@wZVh(bX${s%@3N#>1f!DiJAD;fB>EIoV# zyWU?gEd1Vvr0~0|Q^N1mriGKSbxFXL?8-NkzL2h1wCJjV`lO`Bs~2Bo#~v=^x(+*c z_qA6I{P@r0?W=>b>n#;s?qm4nwEwx4vPgN_>W#ow@iZf!(1d`HDq+|LAFsZ}|pv z>A^;!U;E`(mf$DKx0DariJvc@k~7@2*$+*-?<}!n4!vcp?_nHn z@DaPqDK8%p+FymQ*^ACiCq@xkNlaoj&#}jEVvBn5%c}7$mv`zB}$`jBs%lul_rgKX(Up zy8fK{PrSOHx*otb@1Z@i`#X@Q826oAKSGbOK5;OU_n+#k$M*ztX>u>|A>lm^pHq1F z-i>W?@om*UsL;X2+2%VF8H8@aSw5l4YdzwLe%WdGWjT=|Z;tJkT|yh`m=BU)CVy3a znc5`3Y>+k`h_}i17r{FUubs;^c4takC;CJ>&ejd^&$pqUbi~`tQAkIqZs`l@h=I5- zr}dB<@Qv_~UUB`S`q4+$Ji~cjrZ_DSKh|ecGzmYim;~7betsI|kqV4mg*goH_)Fs^5lY4NN z@IptPtPiEb2M1!C65IJ3%BnBib{X*$Ft+miRK8d*v9rm**=WuUYmYuR26m!N?p#KYN~}Nac>V1s)*on3tp6nc^Alx{)ql>p;SUo0lT-OuiS_TNtV7dJ`bdfOvtBBJ z{+}#+tbX6Q;obj6{V)8B_5bc)tv}=3@GmGU-${Qt>@VsG`18E4oRR0wg?nC}khlMI z>-r(=^?0eTAa4lvz+_C|p8Y`?x0=e;6~PHlF4cJedd5L+l>zo@_p&df@9^4+H#5!A z)(4(gn$G)NubfA+-EWS+4r zJ#`0;hUPr6G>P9!W@qNNURYn@t9`=8HTakNDoocCOVfDwy};=FYsQbtzd9!)|EjFi z{OY8X{GdtBUzlO#uV#OD;ha`fke7DaguGnhE`jg;sGnbJ!XL63o58d?S(%vkCT85{fjp!np z1fZ1B}g5WL}=j-rT{w zbbPY_`}BL+Z&lCLADMrhdsO$e>?J(v@6P?`sB=y4ajqHroomWl&NcNt=bCiDxh4-d zS98RNE?4()!bwTAl5nF^lY z#@qQUaE5>{^z`S{fOmD?otH`;7QvUNfR%LN(Q~NlxGY?qXFl4uOJ`{m4!I6lxC&Va zA`92$-3XisMb{w<*C7i$OS>-bdpw)Udn+Rtmxb#5-I9fm-(?=)njf92PW89-sDp26 zYy3LCBtPwO=RWyK=bAQUQC`YVoqO*y&NbzZrf z{Vs@Rvj@T@W^D&&6WQ-Wv%=AA=ggF1o`*Qowv*q=pUfm@Nqe1-BAt+W`-_spT+7td*Q=} zRsIO}U3O_FNziadgF2JGV+zy6?BrMwM~j>D$Eb+wJ$E zWAEeWnqqwD0^=E4{Y-tX?k8rHE^lR=@1LhJ+S+m^e~G!Pu*z6VR{OR!tv0J)Pcq(n1dn~kBlW(mODv z9~k$t59n~qQ+^R}hFIS6zJa>k`@!|zNq=a%%Swyf<9n%T-u%0ouJymz^lf9!U&Xtw z1*@7GCT?$z4PzeV@|I|B zlR2O$vhE?qSj+evdiO;czqx#NobQg;PzujXf)6Kig-463eB`mx=Z0L4x_q<4gB*<#%Bh^^{{-GGF1$J5uqDKvE3}_@ zT}7$y_qoI#=J$HplTP^YU(Y`{wwsANa-mpyDLlH$OaRZV{Pq+E$x#$MXw2R!$s7-R zT)u4h^m!cqF5ou-|5e~efIX->;_wH7-vs=7fZqiCdw|bxPXzdbarh4cKavPv^r(VY zTjA9?)T43Wwm9$XpspR4wy3`T9VY+vx2W@N_z3T`O*}c3Z!dx`@8mkd8dA==0h~LV zTK*Gw(@cD^X+7t-E%g7fDMX&H@6u4ybH3-BEHmt$R^BZ>C(t&PZ=(xBP0S_EpF8OV za2>USd-Ts$G*^>s$8%~tfi`r&U@)bG z_0NWVEUY$S8}YndXN`=j|1{S7TY^l*%F>VR@1Q*#zCm5~UYwM3_Tq$=nViSKK3(}Y ze{QLd{8{bOr95AgQ?k5=7(w^thP|Q!gA4dSANjl%nJX@1EhO`$EyO*pXTJIn@sd>^ zFh@#U(}HiWWZv`$G|(K&aAL^scvB1CA`VC5R=A5;r`O3@tMFhm_7R&5FyFbC7+f;f zdMl-|;kw#^Yq>ssZSBBZlhTNMhBvMp*KEwloee8bYtBbU_?QEIj{mf+|CKw5Ini2l zLzHqcGrq5yIGnwI$2wwMibD|}ZOEu^>KI`*<)g0-dRm(POzz)iez$6ED|xQOKhNtR zzx97v^SF=i))Grq-f;`_duvCBnqpiRlEY8VbLC#*$<55?9imNxv}u4gy|^-|dH<1= zX7xpoc~JJ#JWp}ygPcpa%#&)jL2DG0(-|Z7zRZ)>*plkZlSenCgnOA^v}56XU(I~; zS;Rqy@n6b6x{CY_a*k-9;*lFIt!vOaRKXd>S;5JT*xSAmV&Uv>9L{er0~^V;Sn~&~ zxz0PZuO51IUv9@qH*KZeHKwd>y~o<=ANIdZqAha0GavnVJ-n(l;N*Jmyoq>wCb}U= z-3M>;H2;M>?-?fBZeJI4AxHaFt<~3>gx8q&+{`{Sx6tQx#L+ZPJmp>)Izybf1j&H9YdhP3Tvj^Y~N^yD*d3;p&I*AvoBJs3>>zjFTw2yfZbNu2d)r5uo7{s8i4S^+-NH*-sPzVui4xq{E1Jxb$?rR155BvAK~oJARZH} zF-KyXI#SBCuNF4V>}>H^eXVbC{v!Kpwh|LG!Li|UnTHkb_;x4YF8$j2W^A+82vHV4 zb9i_%WyNPlS3GtsUtc-gta-EZm6QXnIU)9XKQ>pS`K1}iuVi|U=B(kv%a~W$_WrHA zC9~1(-tb81?n7ovH<++;#`kYX4l7q)Ir~L*$<1Z#50`|TPBxrXY$Y|O(wLN`;FM?N!Vq@53xzR-lZK!pdIV0c1eFcnz^Rw2sjm`n9beL ztBn1gJCVaKtvzPVM&(MY*^$9DP00O{iQ@u?N{Hipoj(4E@|L%(?fYiz&euk`yllbC zWBV?Kmo3_7c$b|`eaiRoikGeCzp#!Rp0)RyaOWt9r@#8*#b&K|*-IYdmN*}uI^X~Q z**f&`#x}D@QzXK@`p(CmH_>=|)kevk z!H!00GuK^F+WZ7*2d?&`CTi*rJm)+}L{8 
zPaCeNv%uBZ`|+QQ6YEZ16;8@y5KgNS`r+4ovCVbVBY2ZotF{;0s2ob!t&OaOcmtW+ zkDP5}tg>-@Qe+EjBBJ#3RPfoNH4(%g+u+45Az#1723uGYQNr~fcYmC{CB!-h;sR6o z-@-bFfXcDXVJq){e>?93zJB#zH}h#-*oRL1CCS#W8}u#f64;Yy3+oarY|{VSq4KPe z;MvLLf4oZNJ^caIA4uxoiVZ)x%p>*sp0xwuaPqq!g+6BN9bkSf@P0M8cEs;3?%CIG zlVP4r@9IzSZR6^Xvs*IkdC|%#yl=t((>z%Vb9EN@2yTZlGX*omb>3n0AMniK)EOt^ z@iMXB7N6;FWsk;|fE#zAZu0Ec!-F~v)k4@5szJqKg1V0b4mC5Kl%H$@%ezn z@9*cCiT@q*Cy6y^ZCj^w0dg|}e(d6&Jt+Ep(3~~X{ner!@94{C{2l4xQgi@1W{D47 zZjwS*xN9|KYx5^E&j;<}%4coB7I*^K_s`kjaLm4YvPKf;`HWIQEcrnT!hzdR}|R`0*Wt zv&!pAnOFTu_&~#ShQKj>+GqGi@7=m}b^KjV+tJlp3+H7$RFb`Z*oWPlPT$C0>D*!E z^T*CN{bv?v?$e5tz(I+xUZT;S0*og*N!NPX4THT_L}7u%mO5JdJy9nJ~~hBDwMD zVP)}ksv~!9;8`7h&3a(&rS3ZH#h&&{N8+P zNFO?^vispcn{se>81K$vbpLwz_(NcA2hQf(yv=nU*2p3oqI>5!$M;ZN%LGmf>x|_L z<{sZ>KIA3J7tk&@&*N>{^cHP;jdohJC53;r=?vPGNt-6oKdWx>+xfpSu4>zBHLNXd zEp2;jyfc4ZN?vj=xL47xjg;BOzv*bq6)weAN}qJoHt1f;{JiKOA6{+I`cA=88Tpk# z{A%GEU5DHQE94;`1E1?K-zpbCd;NV0+uK=Aegpjx#aAzqoy8A3NIO2F9ooOvg5NqB z+jKJD>pOz4kKfsg{+B(ki|6zy=d%l+PV=$aV{&V`S)0t>K6Re)HlGTnd{jF>0XPPH zufTq~xv`z-V&x^s-=D2{Q*g>h|DA;l%OCH>?-wmQu)q7s`+S~%59M0vvo!pC!P|$d zcrCB(@3&%un@lP>uwHUtJ$?1eg^Na8u*Ke6O>@tF@@l;)w!g2lq)&C`qU(RlcYY4p zeSWDaia|&1@%tS8`xboM2i@L;R&PM7>#?cobH!mM@vnX#L!VEk&!^CbioLXO-8&+s z@de~_2fQl0chJ{{KA+1w<|bZkX*F#@?sM=NZfAU}evc`JL%(xIHtTr#IHPy+)eR_%`5Q7 z&9qgx>-Q4wbyYrvk7t6TcMZVM_sS&_EY0;_M_K5O-<@O2$c33l*4e)U&hIjQ8^&Bw zN1mSPPLQYb-LkFOVd)puIg@9ueMnrNt-U?m^_vDg`h8>&URhF){TFS-H!ox7b?!w%fCP9^%iDfo3P89s_!XnBa)9U_v|#ow&E{m_3eb$Sckd`nk;z=TC47bz}8x& zAU?Ky$y#6;#;;~rRvqD~m#)*EJyFZo2eL?D+1-*i|nKo?DdpF=e`5xVV zK~R3IOS9m(@L}>CTzU<-_Z_+654lgmpX#OEspw|;RSo!44Zv*}pBibwkMgmO@mg?h z!H)`%rz1b91wSevKSFCuc}L7?P&YqQIx*=tB!@TW>B@0PprG0Ndr zbwW!$lTW4ZCF>dM1tX&$`c}5!S8e6$eiz=L?^w$jf0p>&@*n9t$G>Wc`&TE!KI@12 z-qY`opn|ODwyg<>Txa{#1xI&f|Ryz6c>JG5o1&{5rvb zJ~Q-<+Vmy-i%uSUq<`(YxUc74`%t>={l~_Sv-PgyXzI*A*!?LU z1iu8osT2F6edb+$PV{#^capDj8o$-gPY_3N`(1QDRlm2Qi@qH1_YUq~RDaX&Mpyd1 zK2DdUCq?(;K6Fbctv{$T^tnsBzIcCcP3UjYWqrJ_#V?6{UGTWm&m{@{tn$O5yZYEC zyy)Zg(Aw=|z26$|-+Jm#e16lTT3fs2|6}i5z@w_py}xEAfj~eI!bPNHCcz7_YL7w{ zm1ZYF(W-3?^m4RcnIxzv+7_{jf@Bi#)@bX9mKItG;Dt%`lvAN4r{ypauh5GIYkOLI zoLsmWwE`+ci1~j1wf9aYAzZ)&g{#&yz9NKcdhlVcO{b2A<=&J9J0?^ zIk0xG-FNEqr%$ub)_!E^aL?l|t!pXsUeOvlI?zdURQ0#U zV`llnr@7LmKI(st<}SPPil&tL9PfPkg`iUfcKu??|08AfhQA;1i{^c$C%(uL=|4Q) zjh7)C)=0l&zYd^Hc_vs>lI1)Y#9mp{kVTS7n2q>p))eGw~}2Q7lvWiywA z%`4asHZFbM?9{m-($0(#+YQ_Zo)j=^{H!|kUEiYcyTudIp0yf`k$oApvs=DF_eqiH8JaP%^k;JIk92{w5 z-%B5^369)Wh5s6BPQ&s|h`9^F7O?hI9%~uuXMH?Km&cYYxiUTGmapG^e+_*De$H^% zV&^!v1V?NuV{a!uXQhhOsPtj(dzYa$EB=M#SuhLFL1f5pvl}BNST~%$DTQtl4&G;6 z^#QUohj#U2{z7|v#pmi@+*k_z>Spn+92g9Cm+B4>Tfwip;u4c1Ur43bH@93jp4eCH zH;teA)lKI<9slX`_1IhMy)(kRbIn3^Um+GlG-zyG#R&9XH1O~;=HYE$srV|;z?*;H zvi1VX*1KB8OZZGqJpG&KpNq|Qv~LdldOkR_>EI9bh0kd2k~%g2e=+oZmN^v7!<@>y zC%~QT#WxtIILOaQ%#EvEN}mDUjc5(tsc*)k@OK&4N)xa=srRzAo;O+etY+1y49UIn zs{Ty!t~9J)<+$UjEaqt*r@md-VCv74d{?>vys2?&E3o=;(O4I^^)n|NFJv#Bq5hKD z`@7_8q8Yxek~vTOW@i*}zL?@_%dUR=VmH3})5dHN{rqvI>=n^922c1j*SB348yj~> zZT9IYocVBQ{?o>U8E)F8KbBnIa_&lGyn%Z6!;=-*vh#@1D;r91&gQ+#Kg2ty{#tm< zji0k+?eo+%3AmMR4|sC|N&~uU3y}%&YmKt7o^K%Xk?bg1@G8mWS_DCv$+5fsOf!@|+A!CF9CBfpkaY zgI=Q3F7oFxZe5G6^JyG~w_5QRWgPqp?;cdS)d91?uS+~87^|tXpI1kiI(ES?o$yZ- zeEG18-vqDVFyKdU_^@lv+OM-S6zp1q^3Hr=49+=MjF+?qbxXgZgKm8BV1d@2Mj|i! 
[base85-encoded GIT binary patch payload omitted; no human-readable content]
zf%&5Hi`J}K3s7ZsO-;+H^QF|>T-(%A+0;Url`C7TE2~>-TkBhRR8xP2QPr+q*V52j zd0A}>S(|E`n~AUee9Ni=DKs^$ZDL_neQV3A=6WeMtgUXTsc%^oJ81SEtXt#aAXKGX z)?$SQ7t-KDR#!KFt_jYdtkt!vS2kW%xw7tacGL{3+@rNME%j2b)o`EIaE&WX_wQ&V z7mD?3+!8kPuwv7imDMp>&Fd~T2`y_I)?QgF6>M3%uCY;z>*yX;wr+Jy@47W>o8Viz zrRJ)J8gW_u>gvmCE7w%7ru_lv6|3pc%I1dpniy8;SFT;R2F*}?>Dne{D_4rwt5>dt z@0r4}%+TtRxrd+EV`dH2O{1Ig&?mU=oFhnHzeJ{*v*VQn)}Ya6d>uD^`6RfVcg{0IQ6!c5Uj z#d}PNQ8%q@{ydvRGJ)a{q&4ecY;Du3l_={F67$u1HGY-WdDY4_E%3mqRSoOdeydep~2t!iA?0$;6aszp_pqWUsBtZHtlhDobhFiOyeiM_I^ zo;hT%yb_*5UVB-+71p#g!4C>tbA{&X4CxZpt!-}6J`2`H^x)?DFQ~jmqinnqJe6n+ z5Hzi828k3^&}udXk@d~hEiKS>c|PJ&M0Bys>Hp8wH^2!E^=r_8QV*9yS`W!ma7c!l zOHOiv!His?EH`1O&`%A`wY3zc&RA_dixb9bG>uLilCNHS1#ATXa|iOzp{lh)vbvgt z>()rVrkZB1slC$m^4i8)7?1HU(_Q1*=K9vk=EmwP32dors;#a;=fU)QY17*3nw8bf zknMxl)HcKM@P!XrzlPJT4|HX<4+P0by(QyVhcQx& zI;Jpy8Cn?`(}vnwbbz>89qCL-s^h%5d1WAdr`zI4!R(K;>@yhgHslSE${D)~3&cw2`dJOND_fj>vsOCc7H96-M)u5RXZI{j z7FS^P<-9PfsdgpS4V9~_n?8rt#mlo+S7T;bcd3EV6)wZ_?pL!`HP$+RoKxL&S+n!b zoY_wJLs(djFGDiPWZOq@QO6!qzveRMud`}vDxF(D2ZB##n6Ed^p6!f&8bq#xojYb@ zRZ-he={$A8Y^S}-fpyNSvzzPcSHVU9ysDma!OF^(wUw8xS}A2`=reWBQ%#NQr2AAl zFEvXyda%U_t#Q5*!jh%Z`Nol}8>^dYT}vKlXy#d>r_q_k+KMgCmJpX(X6vI3&a9=> zqFrJnROlWobsi1Pb_S|YyKps<(W^4B2wFLfX7#^UA-Dp&2Q#7Z_NogV{I8u$Qj=tX$RA`E?lOEs+~LLHm$8$ zw-O$ToVWJMO6Q(`5kuL~&dyd;rWrMPcrMP{S5=}{{sUGvFJA779r{}lUUT!>mGy95jdSx#ScIYW9t_vb?Qah1H9*_w&xI z2CzOh59?Pt2X*<H8&MilVoPo#4vlXSGp9u9cLj5As_FpN_V@DQ1|8qwcB7Nz| zBBZYziFR&qo;vbEq_2Z{@4mChTuXs&mzUC`w0$ZB?O zSVmt&78E%978W|U99`t>J{lf~%z`hj&%%Vz>U?7XRtMb!*1pqgh?%@Yft=Fa1w zW|yjYX#v#q9Yr+}TvyjSzk! zn(I%N+HA{&npddicdF*GOsLr*YQ8UO#xk3nH=yQTQS)kMfpY+Az5z88P&0WO)$k&` z-uWvizjREq^Q2I=A64M|6_ndR`D+?<2Pp@Ha_dn|P6U*{5X$d>vhC0hNoB-u@pd10^??KsqKPd-=@*pU`56X^@(CgPPEO6RE`4}hON<#teZgK`%rug*gI43l!ZP>zCfA1Ln=%9lYI1?A5`xdD`uJ4pF3 zN~4dl8&3Y1=@9P7!Lnzh{Jg+_$#nD;fxj-0w*>OGl>fwZ=u5N7bq&*@>zGd7#B{V% z;C%wQMIc+Gd?(YPZ!vA(CgleO{s`029|-)10(nv(PciL$j%oO1rsKb4y5%*d?Qb%j ze2eMmA0@pScSL6wI!|R4IbS?>>4na>kA=E61?~m#%LINGz|rdz_!j`bM&J(sZ0}Iu zO~({D9R&Umz{ySp-T`1Qfj=c>zXFE={5pY8Q|7G-d=kLh2z(yEq1zPrDu8zr_zM8T zg9`jJfZrtWR{&0K3$Y@&g^BVo)1gP1wm&ZACj|a9)6r*{4!p>;{Z|6t%XDOt>5e}! z9XiM~+%^Yk+x1K*Z(us!!*t*l>|e=g;GJVJaBg12@%i=R*>%5mT$A(miOo*`$Jimi zeq4dG=>$0Rgd*oFCqmwqIp{p!J5hKaIu5))7TzBU?+?LyU3Rl`x9~m>-dn-@9C&X4 z@5J5YeN=h>K;E~7_buVQW>J&#b?`nUygiEwoIAn$5_r47+x`G~e+=Gw=Uz~51LX@s z`3NZQ1?4NC><8uOFe#rD%I84&Ls0%kC|?KV{h)julsAKN+YVAbE0kY4zR7tJlz$P* zuHy@whd}u|P~JhAKOyA{LU|7;-vZ^=OX;8Qf$|Yh{sENt(9Wkx`AeaE7L*%K!W1Nw zuYvM$Q2qs!_mT1iQobgXUp%47*#ye(31!C#1MOecDo4h%3I+RAj>eM~1GWIFK((}7(~$A89j?B`7PPcR*No$1J1 zOn2ZWHCpEtG+6SpyK8)7>00MpTjnU4LCX*+IEgMK&DfnPEme~oGQ zO{QZ9nYMpv9@5dTG9Bu{R<;NTgpHyHE zfO`nM3Bdi&DR2L-QbQZrjFm z=(|iOe<0-_GaVgc+Wx$hUuHV_I@6&ynGXDkX=mF4=@^?97dcNXhQnWi*wO10_$>gR zBJdRe$8S>L;{ZNK;OhWRMi)@vtxQMnW;$^{(}7{8TOO72C>hr{*Pc@CbiN;#AzVz? zJ2!q%7aX0ZaKZ6to-8y@7t$f{_1%*=gqDh&PtD-(&W5;TC?-? 
zN7xk4o>Jg+yrbb?7SkPzjbPX(+kl*h3IQegV*;^^g|~4N2k*2`_B=ppQq@JXF&AtRrGHt z`gIZgnuz{0M0Y{-+amgF?=En@4$)6T^j9f*f}$TejrF_j-A&G2pzQf5>-TL?-UiC& zL3tx6$6q1k<3jmUP<{`TcL?P#KzTPPe*wxK%6y%aPYC57K{*D>ZwuvDa|)bqg7R0O z+(gQ^Ncogd-kQ_oyavkWgz|n+ejAj(0p+dKzMquO3FQ-@{3|G56UvuB`CU-H1|0u z_M4@A3)7*kQXXPD`YkDcn`!%qlpm4u<4lKsD&;4cjy@yhmzYkDOZio%LvKiVQp#^L z9sP@x-(fo0b`)v8#{LyszO3xL}Q{561+Pbly~0KY@vB!HtoRp3tm93k-c zqs zo>$Kj-j~VyrtnS*?<9D?3Etlc@6~4%I6ne!_^cx50q_pIO5T0SyZH?8M$e?)zFg`Z zJfq2Z47^{whbqCV1 z@R?1{Bq;9_%A3wCaQ1-mCQ$AK<(79y`B$O53zXNqui5#5P!5CgWl%;y`4do%eC23R zww+DwW1#E<9jy=S*9Zzo|`A1AA$E5r`)6rit z-Txbb>}NWDkmIQj<#z60Rz2)qNpk?^si zsPlbA&L0T8hwNWf;GF>eg~0pBexm{(2JlOH06qxdK)(W?0`MyYeh~Blk%9 z0Mmg-n2tZrbaYf8&kN)iOov`)+WrToqkoa|hT~Z7V%i>M+Ic(EiTjuie1~cHF{b;+ zj)VP`&a-)3w!9_FmOsd{1pL%W+`uFI{r{$qV4Z;TQp|C%rG=3kA9e9Z8$PT9AA4~aXOvj&RI`FcTUt=1^ zWhW^A$h7^7Cm`K%-3j8$_N6$3Ero}EwiIMHD)2S{A0u!#fFltFeh0u^1ilF1_{|hD zu$Affy-dRoG9CGzKptm0@uV`o1;$$oz<7TFSavIL1D?;^N#H{OhQ}4S3BY>^d<4Lu z*C=H2w@gR>z%=~D6OoR5nQ8ltOgp0|N^?D3(ByoraHTU@NWcB60De0kemft2J0Aw# zaw7e9K0W*nC~pDfH5ZWb#`6oDAA|A*Q2rK_;k!ur#X?fvd47}gZBX_Jc98Ljwv2F2pVOdgr;Krps13Z?9y-U-$k3=fho*Z%#&nmD#&_ce{IIr_q?;673bme)OjtOFV4f{SN<9~4xC40n~1)(==&IbU!yNQ zgZiF0pZZ=!-#h606n)>J&z(zs1JB32Eu{GAB__)%vf(=O_(QV(7P9Hj&Ev1ghToI* z9>*tpr@9|mcW6_yV|wSea3-IR9dic41|z5|Gu}8%hXV#tb^h7Tn)8dC^>F+UKH==9 z7(|Fi>4oIyzfLy&rg{7pX|MyH-`$zf5?^7o?Ck7nNl^y0Jlm;gS>%jr*~0N!BK-#x zWimZleinTY5|aU71~E{e{0V4jbU`2{~+sL)81X z=n8)%o8EGYarhAqy)S@6{|n%-v>A1+BkOKzW{Mks0Y1N8P~^OLK?`Rde1gxLVOWdt z$lXbP!57B+OY=A~nzqgd7o5}9dHq6s?=q+ZK7G&cNKf$Xh|uRkq^i6Svi;5Yt5#0SZ(;@KouoKwAX}@15~z&cIXA>p!Etv(OfRikwAw6g_jQsp+i?apaE(L zy)GgJAnX%(6z-s;J?P7HXLhl<0HKpJ2wz;{I)P$mO*f|=PE!}-{c$k{vAMXrv$eRL z)8}Fg213~HjqQJ$nW7pveou~Rau{(MIZQG3B{&omInQ2W- zwU>tEY=q}B^868={%lUP8;z@}v3Dyh=3H*t?M#ct<;g;9J znNBV{5G;Cy8x~T-R}}qKqw&cGuaWhakj*S5>%B=fy^?Hbth<^XN5%$gj89cT zeTVr6@RQWP!9>_(B2=3QJJ@5)UyGb2?eBhvYc@eyt zBEG=<8u)Fn`x2?r^Nn^f*_tZ$*aDCKmm$X+%wL0_0#~hK?tNL2Go1S0CROkb*~~{| z(_7eM5S~N1>kqU;fKsCz#oCVPe~PiK{nV!c6~40 zB7vFP0goY9(8!p706znsGn9GY6-CY%a47pxJz)rmi&-Nb9=T}kKHz0fZDhzC;=fm<5mb$wT;c@FuYPGWV|oSA)I9r1FhT zziOhr&K@=J7USq+TE!;m6 z{2cK%QU%6(+fB3`?6D9YAHyRMUJ3q`_)G5p2>b=u`^H2tHl)wyj(>nXeuT%wYcLtY zPhNw!_TZY{XPc!S1AYPQ_s55DCo;C2K9D=%F!p#A9$VqzRxqyx?*NyNVcrb>3hYfK zmG^?N^G&oR?D2RCbDhG>tz;eoo(Zm4&paMH8=Tops?0XB!Ixw!et<_qy|(V!j_rFo z)wkm6XXo47=%Q)7A`ZB{hvVJevpp^%e))D>hlHl-o#-QxGr8y4xL!#viN#rniMS+* zE|+=wTKc}{`m?b|?7E5GuTH(TyEEt??VK`*kR) z*Ex7&`2BT8|1RlYH~RfXza9GV(3iLA;e2BB-$VZ<^c$ex1%23$^yzHGWxi!kgQnw*hjD<+UlSCaLolP#Z3Hkv~= zvyg1f60+e^vU66E_1-zhyiIxbA1$1f|G?|}Mhg2LH)hcI7By}`W3Z7Mm!k1KYWxh1 z;Z_O~d`i~cWdiOokN1)d_mi#eTVzt~hw%9}5c+K(>CcVB+TiO3YJ3BY6)rbUMdQoV zxC)JPM&P`LzZ7+kfA zRQ?*W74Mh`8{vV~pL~6HXWI?!oE10raAsgVasKQ2vz@Uw6gin2a6bJ86t%esv%mfZ z@?H<`AK<+S-iwU)Hh5RU`z3fUhj;KPd4FiUd)?68ac^wrd=2ka#=GK%vz_ViUIgzo z@XmZi-XZC}rBMY3%sBhwEQ}LAW$ZI#gYjgeDaO8F+82;b`(!iA$p-8GWTyQa#GZ5` zCUf46ki5^018&4MD%7|bjqbnN{#xKw~m>NAaMl-mv*G(;)3Tm8;#_E?SNP02Z%u=$IYsvcWkaahc zt=It}R`#Wv@ce-$^A+6y$Uz|+CiFObU3kgc3WwqmY{W9*#y@W#21=;qS)=F&VmwhXxi7sumdv@x0Z?}7UIAdWd^ypIz=o?cDP;rm9o36h32hh@g@BvA zCf?e1{V-*+mmXw~#rDZ^hZ#wdfDOrk?a3>gjsVy`2zv)2-d)lH?tCz+p`bbhUH7xYe0; z8{)m!B8Fq}-dpjEGMBcFx(!EMT;l58hBr9)d-82~O~&6d;OxKOf`WfQ);Bi&p?UnF z39|S$4YFB+Y?UB8B*<3?vffB4J|f|IV=FfBkkbEjmi`kX++d;&{HI2n`A;6w?0@o* z7W|Wk^eUXIH&PsL6WP#MZ?k#4*#rq{gY1?ddnL$z2~vJLj=n}RCnRBwv2#AAAr(0l zx3_Rc!6W?%^#!oFUb+3I^YhX-JNJC?d<)E7 z0M44WIPKV@L;LRdj1akrDryoNJd_`r1@ zt|F)Xwj=i3NhP@0A1}})xVFC&t{`^aDQV|?UUEnG?sq)s1b39;!sh$hmpWUq8&!=t zKh~CSCYV!#nHhUNqJ`tm&y&WrC3LUDx7Z_nkGj0^wf@!c&rCQUGl`G&xbXvt(|2_1 
zQr5Zs1I}E8_9v37Gw=?4JwZ1`jk<&Gw)1G;t)x)t$+e+gKx4%eYRpWnP5tH_lzLD* zR632i{15P-Bg;)pO8Q+6}iVeuti_xil!cnP>2wCi6xNKbQ-bJ9<2bOP$96 z*BhVva6aOtJ8}17x3=_f36=R0h3eGBSpkS%q8#2lvK5nCN{$`mR!d)jYM#e=Hz$}) z)?Wxep3y!Sd(}eIb+GYSG_L2y>I>nY*>)kFhwk1HyWr+_ZJi(5wa3k2`|hRt$9mx= zsE$tNe!4sCrTgi|k41F%m-G7lv?qPm4kxD&%|`@G>ZfGWpCKAbP?%kz0&)B}mnbjUwg0+Wc|An|NpHA|Wx5uSsXz}-7h;vSZ@hIGHhIimX zIU6?tZ62t|nL&OBs5h+b_0g72UxeN_$uE5ojRnlnH=!*wz1zv}aq9IR*WP_-^FjD7 zzXSZxOJg0|;cUn6fX`JOa6K^E=M1?>#+vDX*VT^j9739x(dP~|`G#u5HE4@K_>_i6 z-Z1J7hH39kw52PkcMl$ADyTOE^H>KUvOAo5EAYr4PQ4MBd#9q!dzyM@;Zg83kM+%t zxMI9xku%^fTvT!wRFyi`4IS}D4lUJ{q}fKXCz-KM(um)qtr~<20paIPHocRzcLZrc z_(V#*Q%vs^?R|-QLAbt}dZ(J+soJ}qdO?^&>YawM;;h)$7~^+$;SI+DH2P>nSB7jf z7Y=#tAL!Y>YY*Ic(-!wnRdzb~x}9`}{R@Km$Ct2UvXqOBaXVtpmNEOxMkgiUB+~tIOv%27YNL%x8L>CMm z$NMQ===MBkQy1K8)TzUR&KF(Kf!nE^eO-{`tJ)aPAY}UMS{ws(58_1JMbz2xyL5Nf zLN=$S)^8g8{BLUU1z@0;P9kCP30 zk@b6<$K_dIO7NxOK+a+!epe;Oyy&J6}t&i_XV8M!ebtxNQ#m;1k_& z$zM0@gF|TXT+#(YOpIa0W+$BthZ*EB3l83J>WW5?^+&@2YxX)EmZPt7yy=@nHuJLa zc)6IKf9MMhyNgJ1b`sy=@9F03qjt0);cJ{~NEvqUGNgLD3|-PV8I8-R@dGr@S;n3V z(YTr#H>1&CMM2!a*muchJ~EFtlMOy4>qnHjt5Z`(H!flpw%}a~-VopIl;2}+VH|_! zDel9#C+W<%2f+>$%M)QVC@8-ebxg)1|A~t!e4mS@_hqz21F5$PkHSG0BZ-N+;huZD zng^ed{z+1$E64_;;FF*1;2gN;tPai--Eo)b!0re++H@Kl!5Wtd-QhBoT%L!EJI3TT zHh^{VIyj5*HUM3bY4pcl9P8|?Cf9M)jho<6XmxkoAl}v)i(7??oL${pIB7gG@6W#P z?hNgLQ5$E|GN^|$0xiBrnY{62BUogjo35V2uQt4hZtq2E;`f$IdhmP86=;vpjsj?z|L)zk5M zVrpI@FK-^%ngy77dT%)!9l^`gRK8G}-k_$16pOy|rZ*JkUbSkz;lKJ`3=qAj_jNOT z=lh_K}bp!TYDWJ6fe47#s{GxR*8Rj}!nbPoJJadSni=TW`*~?A??=U3OpL8$GsXtShTEv6+SmvdHR2|y`<veyGnzXwnkTF`6Sac$uP_{s^F449!*KHw}Oj~%s|mmY9l zM~jaZ>=nkQchX}I7T*>k?8XPoUK_zJ*ol1vFImp#fbO zONbV{)vqDz!O|!LDEF&!!$Zyld`@)ng(UUuM4wBeps0Hva-OHpkfzVH40za?ik8g5 zmeCJ8bI{_W1(RfKjj`Sj)KUdYajPE2Z4??ec-YwvO=S8^+)o~Mc2TDNCJJsyMlUQW z_e1IG=fwe!I1?}vUN7o1%F&NFQ_&Kj1yA(eWL;QN&VK~AgP<>CTBCBKF$@0QBQ5wQ z(Txgv7GzPke`IOmtK(v zPs1xR-HmWpm-an7wQKwLYg)B+{QqnJK)d#QJ2xffH&YnIo+@NqTs-M|6#tP@HzOjRW z+B)U^V)%KKjg>Ln<^53m?%kX%l`))5q7zYgxAtXajDAg{Cr$?72C~Xnx1M+5GyJki zF?^2RGAUNt4L`B*)?~~eetieO1<=;nnU3N7R-c&paR<8TnWB7F8FT96%d^L0ptjD7 zy<&KU>=i3@KIk1oN%(0N{Em+~Vb{GIPJXR;f=6iXncnvVqT_cFO7FmJ_|D45WB9Fy zw$A>?WB1-kKOI09Ic3UYxXHa6KC3$09*^P0Zs+588QlFi-XQFKJcfM%vx1${*o?70 z&67w*_9Dr+UXV=c1<7;0APJ0HXl!I`dgwvPDkE7(lJ`lni6kx@kj2=Hu|7%iy0vw_ z?iK6W0hhJ+t{vy#-juYWHzJJcjR;!>4j75C8Do7*vfoI`pMYe*6Og!1P!{7B8XFm#9)2*(i%-zN z=8@itb@-zv3V?$Ui|Cv6NWlSGPCia13S|4Od zKVz~O>l+)w;5=5=!s2r)c7F2NO+I`3#(25M z_Jgx$+~yb?7+Xz}JUo+!_KUT@#~DFhk?AQPB|S4t&n%-y8(zUp4~}~)`o;Lb_jZ3g zK@T4JtOF+>`KI-c9X#~?Tkk8`hrWOF7%?5QOYidTmBZb#{;>|7@94-*!2heW@pv`1+77d8qbw}r(SS9C6w9-_+Hvm(;aR8=z%YYakfIb^QlW4}{ zK1&mpF*YzZg2gZHaTH$bkpoRJ&WeFCuF-jCAR@S<|IKrWc&@GW{D?d=@Wh)zW6Q_T zW8c^+Sn~q4d!Q`TbEwZ+sM?n^vrM&QUS7T)WUX0kR}RPR*~?d#^Xt=Tt!P}w+~K_T zWDGyO)z+Et6h1!fc`AmVLKqTj=j^J8(Qo8@H5{)F!HAgo6t!Vw48InF)yRy(0^(dk z+q3ZA;5+=PMg2Q_;MB&GcsbcIIEG&ea&{vIeur(vP!q))#=`E-`}jSM{C3U(a-BFh zhL^CxXuy>zgRz3o3^vDr!5Cd=<}NZeJ(kv>$eBw%3ysff?EJi^hU;}Cr}EM`1sJ+UHEI#P!o38P)J4&rBRI=3W*Ch)M#vmBnUf?d=?v@ zCFJ9q-oV(9d?@Vu9Cjav{hTC`k?bK!WSr9zP06qncJMIxR1BlkqlUr9GrbvOedB|$ zFAX!J{cu=}ekE!CNHgf=q+DZ^>q!|HO=xUnl=$31KD&+2Uh+v-n%u^EuykT(P(`dG z-jMCX-iUo*RE0@Z`Lvn)$rYIUH!DoO`4x!uN(GIyssgbxCW>!tK(SEG2J-pD_-rSi z(DX*erYBLpZcchQj-lPne)l5ncX-kdk9GYQej0~%Z4Moi;$QyV>1*UKAXvBtMS=EKECM$~VS!8@(Cm+xBW{ma4=R@+rZ~q|9Hu4EfZ)j{JKKse1{2B5Y z@CAGP_eRjzCXYbg(0Hb&nMsDFn6HpemGOC#d;-%O8S73r zF>&IvdPIzVS*I;ddX2&LJ;)PLo(kS`dyPblej_Q*;E{;oL5GDhHl!HHGnss57@t|> zlYY*O!dM@ceBLCV!1%mtd}f$AFxG>myxT`2?;!l968yg>|MV!z+h-JFW{hWGY()O> z89NF-mBwc}`M5JpUSl(`@M-HTg;QJSGxBQdJc*qc!uLqH(FnJY&^MC6*pP+J59IUs 
zvowId&r(s(n*kf^!BU)Y&%$St@p+DXeABz$*hqYqkk1O^vzB}+UND0+HUmp>J|&+o zjL+95j_D1I4aH~RX!r~nP2+iHG~z_2*UM0}^~Sm{B3j`c&gRklO8xej7=6{ae+*u$ zUmc6rS!aT0US@ZAct?d_i^9r{H?P=uGz%xX&8zd1V{ka10u5fBzav!-yviBblyUOv zjO!BV*W{pjc1)}tt`r=D@nn!4GeBdI*Up7w%xfo37?C%w&mDsbR+DdNd?VwFC?TRa z+o&Twi{!Wkh+Owli-#8MxuzXI05{gW>J>Q)#>xONLTCI~lFnx7%(0NprWOw^kQ$qw zL%rtpYRy;_`2IN7e?ixO)VL;V63K{CTqYrIuPMn{&DDZ=Tbnz@zT7C znlsd#PtCqGuNy}%1o%pHJh}sPW9f`_U!unuSUR$AA4f-a_<1y0ZJlpL-b-e>+`~Wz zcof4ZaQ4f?Ku1$cWFmR_ z+CI?^He*LKavAF`Fk?41Fg7$cGS+?B^cow$(u!7%N0K)wPGs8C3(3cYbsBeYmQRS4 z;y30x(2pWJnIv&LLU+5V+G1iH$h;UX?m-oe`G5Wc? z4hY%SnKTi>@pWBix|GOSIx)t-OIO>nl9GA5E=97?*vybt^!RDA!H8DacRF_OcE8iB zGKSwaqf4Xu)fVLprhL(}lvFKM~{8aw2Vgx{Ja@1pL&&qbdH}srl3qCo?u)j zkxfrAkBtqc)8laK-(aha-EQnoSf`#j%QS6L4Ch1n+=TN2&L^33InLZkSfLKiLY!KX z6Bo{FlW3SmUt{cEWA_=G-iC;IxM~3>xt#a#TXP%9yBw_rPCwJv@eV$D_1&Zxe)F)Q!$lJt~sNoV(_@|xAwE@J5%vV>Z7TMyn*86)lPc9sdOsM8BATFNmOm@ zc4K#vbG{j1Ta$3f)K~{+#Z(?DH?1XUWZY|v-D~VVBQ+nfoqZ6<()OCB=bxN@(;!TL zN{du(Y(Hb&^6N3cgC{4QS<~dCrTI`gpHx%(Tu-|e?4c(GHcYcl3VcGIBl?nOU~IpB zS&;`#w<8aw52pPFk^dmrLnFU5-HN=3JYD0NF?J5DS5)lfW%8=>HsodUGkM8)tCq=< zqUOmmH`c1TAM^4f{Fh1>_<4RTUgE~eV`X0Kk?L5OKcilG?6y=Y8Bet-xh)mMyjU1Z z$0}ljVr4b?g9@6JU0spa)Qc5+v3R_=BG$CY)kTft@dS>oO%sW9ESU=Pic3n0-9pd& zmnl5-k;`1qD-2_vv3@<@JU(dsdf|WBQdMs!JlTfz(hU)_zKLUOP(PwL|JD*TC_iRr zjBV7wKPHS-H<)pZU%$G+XxMb2+sLn9(_j%E*EH~sSybOGEN`^vziROcBdEN>^-y6P zh?6M{(UK_)C|co$+Lk$mt7==qhW=0d`c(~A!Dfz$j`izTHW=LyEe&SC`VG8V8#SnD zNaJx$mI`DkZ@7;h*GrojBkR|zEZiw`d*}>%#)quVd3H-xy;bOC8}cd~GJ7=?`QKWC z1|epiZ4G8JjJbswjH;rL=F>Ct=@t6saV5r_G4uJ~w5%`O3DtVc9;$9wja9%Jqa`x) z88oCB{`VGS!c3MQ6Z~XR{jgEZv5O8fpI#x&BwM#(8Zo^FZta}g^B**rgZZ~xyuxy5 z!v-1nvke-SH(F&PWDfCU)xWQ)TbM>gfr-Gj2D7)SzMs$kxdrJ08c!z2{=oj+Ymds# zRd;V#7#0Q%y#~JdFKCc{IO-N%>$Qxo%v|gKJ#%^Vpw~XS({1cF_8a>#{x67S8pjif z>PGQI^AINC)uK<2ykiUEO{IDtx&~oU1UZGb%-N38w=KPaEyZ#BWVDx0b)Ag`_=HN+y-Q(Z>3mW=4 z#}FcN_J7Ee<3H@y^)d~jSXi&B0S-{so@62<Vn%Cq-1v_Jz`~U~gOn%rX zQ!i5xFZT0-Jb2af@}gKxEEC(19~_(hb`XX&)f$48iyx( zjWfr3F)uIuizbhhgoWvNbWG0;c^;JIcxElapD@3sUb_D1W5fLN*gm|zZ;1JL)O1eX zhFA~_@;$sO2xIAddSQ3*G@|SXh!^_N^rWhni51W|Dq|B!aVkLeIq2uvPepxw2;^!rc%P_ct6y8D#sDXD<#Yv0uf_lZ}d5zutIr!(KyjV>= zjIW?^lo!>{{5+z{^YA_;HYe8EkKw>l;?|>gJ6IL~}ygLZ<{lcg)T@TY!=+`SNqd%GY7+Z|1Nzt%Usqk3* z)ueG*Dc~rG5H>TOUcT_lksMF*=f?WNn`wJiZ`cbD@wG)(9D)V+u`FK-XqB_G{yvHp`jw1 zX{Rhrn$l3PC5u|Z1udu!|C=Jsu$Ya8n4UMZ>aEr2A7U8Jk z<;C&D!}`%vuozBmepxXR(m0b2Z5^Iy74ksgs#c5l;S=xs`Ave3NkE6z;!AI4%5y@EqY$!lCf>TS%vT7rOXeSi zBV1HY+X>@%yd}j3og(3KoE)OI6aJHM zM))RSU-)6+^p%d;UQoTT&W|r#g?^f^P^Ca8S{C|0QP?@2xwCNM zm(0C|i<>f!682AKeo;95E%S2WveTG13%mczyhk|kN9LbQ=J6LNnVSg*=PF)tU6U&R~>d##zj6%MXtK6VO^x8!=}Q-$4| zm|F@bZewmEoNmk9S^L{D_YyAd$UIK_?_!=Soa)THPB__xdAo4QJD=!ex&$za;EE!Th#xx)1Z`!kGcgh0}O^ z?m*^~gnjzec5LShdoJ^}!a)UdSK-WX<|l-!M>0=T|53~@3zv*zeoHt}$-G@SoXUJa z*qh1xi|Ml7vzX5iPRwKet8loG`Jcj`&)ieEWGVAN;l!KF6NRHS%!`CG>zLmW_TOdR zB^-Rn+~7GLU$}|+H^R{;%;yO^pECbLIKG3qi*Vvg=03v7ubIaQ7k|sVK)B=w=5^YC zfO&`Z=QlFzSulggmu|p(qOjM9`CMWDIOdda_;cn?!p;fIy@eA^nMVs3Cz$67mo#Hu zEA0M``7`0DIdlF@9$)G#<`ac8e_}pYIDQ`Ub;4!%H+X65EF4_I+($Th8S{8y_bTRv z!qGL%?+B;bFh|10_|asyM!x#WbP?kc7VCB zuwVaJlW!E5-VYQvVV))2Q+Ta#{CMu)EnFhp=mpkS{15jx6Lx;d+*0{h%-4gdp15#F z;lv-f{~lqlh&e5s{uA>cVITi~F>ND-o1M!%LpTtgulg3;ze>2|Jm&X=qw|@+5Uy^? 
z{G)LBh0O5`kH6|7=HCc=moQ%}fqHrFs$2p3<=JVDsKf%$pi z>bA^_gnPDQep5K=#QctM@Bs5y+W&vdjb7ySOY~v>gRn1rxp3H*`|lJk8OHpGa8Kc3 zU|Qcuc&c!!g8k=+e+K{N6>Te|KQWnky>Nx_X7P`vaR0Z$?o{SuX373LjrkPeR@0d; z6s~-Z`C8#{26L%!BE$TsaFf~070NF$PZ3ToW_}rL_UBibtAs0r-x79x?hl2VyvDpu zI1>I^IJ1QNkDtx)i>jD^C!7*)DO|pk``ZYYyusX2IJu1ZL1AYF^B`gGP3G~!!Aj z(N^Y(;_q%_UMT&|b}@e>ocNA;pX%!!XZFANTpn+Aeda%dDPMRD^OeG7KWF}@_^0r1 zpwsrKaQp=3vEpCyE9Q)Fcp~#+;Wj5RuN3x$-xe-AnfpWGq6G6UFs(=M8|H$Sm_y-a z!jbSL!cH^xzghV-=6i(W!UKf^;pxInPG|qOgfqfB)c<$f@66-z`2WlNOEAr6^7qWA zOMmzW<_m=*;Tyz1)tvk95H1nETewX4Ug5NGZ(ÝG4ZIo>GYBH>EmO5x|#{|xqD zAY3NATsUG_-z_Jl*>S;B5>)-Mw- zPBFhPoW72Et8n}u%zK4B;rJrX7u?AGCkeYZG5=XOyqWp0+J6i4jjI1Aa}VM2+nM_d zC)zU45U#j`d7*HsJ@Z=OvYyNz3zzq1j=;1(`{m39i<#ZN%qIy4!e_ zVDox8l;d9{oO+tMwfH+8^IgKxc;^1X&ji!{pZu2ja_RT$#U0x8{CVPd=9{IzSom(yr%&Ym zKB_;DdAM-T^O+~B{#xdjgq_=%ec?=J=C_1H;jO|YUAX@T;dBq?dauj+-^<)YIPoC! zslvq%F&7C}KFoZnu$yLX12(UBPcZis&Infsm-ONOxx!vw=68gB;ctXP;s05}zl~xPiFNOXZ4q4^_OM!yIK8SR==Os9|)Jo{JN8Qymt%x!uJb@!u^B`r?CGs z!o|W9h0BCz3%kP0gy##tFB}SgChXu}0i^AF;ok|@UoOuV;hze7!p93w75{T=Xj;UX<@CepRm^F z3U?8IPxwLMQ1}Vq6Q1MvLxfYpV}xDd3BsQ6bHY=F=L#ogaJ<#R&4u3;P6}@pE)w1@ z+(KBN&n<;DUMpdZce${}yINS|rGzzJ8{w}cUy1Oy!k+Mt!rO)OR`C8+IFsvZAe?@l z`8eTFxT*TT!2SCC_A<=BSAXF@3rE722s@;NmkA#!L zn!iN2TKawA-Ksx^_4`yW+~7?azwpn5J>g#ohr*`{#~X3HvxHN^we<_@cmrY0?=)t; z<}Vi3`6(0D`N;_Dd?bFt`U^C_@Rh=8;TwgQ3%3)FAIth~!Z!&&EbIyQ7S0F{6rL|U zOxPEmARG$MBc`u1i#gviVeew*)xuMS-xkgYe;_dv0xhwjcSxo)S@TVAmXd`Y7Q%r!3nDl!6@(|H0 zQ$Ohw^Z*X~lRilgu+d+nx0&>Mee}DcS0;bbx1tBw=r7XSOnSYZ z`a97plRxR(&;xAr7wMIMvet1vJmnOQ}P)bHTaRN3ZW5 ziGEriWeQE>%SaCTi^gX&)#v;Mf3VFHy)t!?-kwj=+e~`Bo_vkym234B92>FG8)7DX z$#1#gOVKNvNXnCi2c>eK7_cZ*(G^(9$)n@O+N z|2KMz^DC=f=YyE?+e~`BU*KfXE33YFR(_l7n!h5%v*+&uaIX0aPvHblOMYd|Z_gKv z-)0)W-p?>y^vbF)%NoDUq)#_vhgU?eta>*~Z?nmNDm$##{KBfY_h%Zv&7{}+D?S&! 
zvg+;mCcVw1*ZVOVtdsT6%I{~5-)7S5{TnBXURm?wx24QRe^GwrBKXtsQ_n|He_i`S zG5qcQ;Ray?+f4e@ncUx9^vbIDxw}?xv&o-ihbJ|^u<8r5^vV<9Psi(<wA;cxFx zuY+^#PYKEYoAtaN$~vBstnt`P^`u3Aspyqe-!DsVb6w+ak9hX@yMbx^UuC_&i=V{_ zR!Dwj&0n6C-)0)W-aqxO=#^DpmZi6u^m@Nlwdj>q?`7$2CcWODwM+EMs(&O)Z!_uj zey)9@S5|#>mfmJF{vvj0_ztgsfw1aBdVr1oY7JvE>8t-_x}D=iuT1{U@MlKW^U-F~ z>-}Y?iC$Ut_Igo%n@O+tqqPvdvg&W+;nj}cX432ZYuAZhS@o$bz0IW8``x;TUionP zA@Dz~5rWJ4f$_w6ewWDed$O>8UVdI!KQGS{*3ZkY2tOeD<-+=TdA)F2`Zo#d=jBg@ z_4D%Q;Dpppe^EVkJzw|3ztr-t|L%WyzMjWUx1+xqg6jQ&I$ueNK}_pyGtIZ&KX|X` zl~o_l(%VdWz2ERj(JQMym8G|t^m>2dMA0j&zD<_iX432Zj0;4stoo8Hz0IW8`ybyF zy|U`<{kt_9Z6>|mFZr?Pl~r%oOM070ulHAeCwgVo2U+8{ne;(Rc8I^n>#wYOFH3JT z>B}x)hi0NzR(+VIx0&>*3)$g((JQMyou#+gj9>I^bo|1qugKEdO#0G`*r81H%Bmlf zrMH>%dOz!6(JQOIEK6@Q=}U^)VVdZbRbN97u+d+wVQeOSn@dc$vsCoT!tly^jg;h^6Xd|ZlHj`fO*LB|K{YP2#cD__;9T!N_5SI-G9G0ek0&v7K5VA> zj9%dcS+YhrIsEs<)rNG=7^&ulL8FCVFMn7iHzQne=);{pF%pR=xfFqx?3L-jjN} zie6du_WnY8n@O+t>yHq<@_(!MCI3RvE2}<{Rlm)YKNS5Y(JQNdM3!FpEBMp-5xqW# z{OfvuxexyK@g)DF+{csjm7MSv^waxGWgSmJ)_82DddfvVSoF%OxA$LK51UEviGG6U zl~td}%5O92ebLVoy|U_0$kN+PuMc{?)iTX5*Hfw9uD3P(ZKl@;z5Xf?y|U_)c7FIP zhw!J@2Txufb_nbDX{4`fefPk>RASR#`@y+B?}su!H*DbbRo3yPB?d9AkIgi{C0DUS ziRhJ8ul1K}r5cKsv3IqR4FUrPQ^So4?A18npc<+qu} zU;KB|?Zh_n{M9m=o8eC}ch~A|CVg7;zZAVPx8|1UzWUs?12E^Gd6u512(k9cPV2T6Wq&2R5- zN3#Eo&B{LooGX8_qr=3@AlyQx1*o(D{KB{S@qjo z*Zkjucy|6r|3iNNe$M}a7{r7`&t$*|w=YL!BD{FqcUfTa{t}Flcto$|qA-_}3`BT+gzw-ZY{`QDx*WVqS ztNyrrfwNZfD{FrH`F14F|E*d1YyLz25s?udMozyKCS7*j(58k3qar(b8Wtz`5o>7{*SGw#oi4toiMFbJbt= zH0w_hy|U`Q7DN1Zng)LsDhUf4KFJ zCb52m=#^D(*Gubfvzfo?tbbATNnzE8+JMgdg58o|S@YZbJB{CFT0eI# z>l=N>^QWx(lC1sTX3|FsSbwVMl~o^d_Yv2B5#rhPw+2)FyR*LU4_@X3GbO*W=0Du} zmn~-fQqe1`{&4G`d5!g(M6ay+kh_n#{%;Y_uD|~0x$Add=LGH1PtSj4&40N1lS^6u zsOXhdf4KU~RwguTYyDGia)Oh0$nj5D^V|D-uKhp0n)NM2 zudI4|ebRXeZCVvg$+Ad*t;uLp;0wbHKUkkK;TrZ9^r$vgSWr{TZA$ zqiwqAl~sSZ`lHauoh71IR=so|@&3O7@$C9{f^*d$e9ZZ;{(|RUS@R!m|97id-(K{} zs<-#|T>F27*B#m(6TPzPL(_ZY>pv3l?E0sHbJmaJByA0MN&Ui_zbfnfpUw3At-!p} z_FK^_tG;sq_Y+h7Hk01@iu+rNURm`acOUWk-+*{_{T;x$>JKIV8)3v>W_cN`tL-qtooy^|L0#GT7NTeuKFX%{{;H!^-sArf7bek7E}Gf zJ|6!#(JQMyowfcplRgzAPse##^vbFaP4AKK|L-84z5dnUT=ggOIRB-Q9Djv1ei3vc9M2l~sSV^*@PtcKxHlx#}-FhV%a*`IR-lU2m@X z-Jh`j=Xf5O_fNvAKV1F6Pg&nw^vbF~+WP;Bcy|5QgLBoN`5EV*EBTc*e=2MJZKm~i z{)hEzM6ay+vaJ2zX404aiuKz?udMo`tv_$~q4ob1oU8uSuQ`7i`f2@?HUHt(zs&r| zjN|kZy|U^LxBi*ma{lq6S62Pe*8ei%+3UXooU4BK6wd#&$1(&h@DbXvd-maJQHrG}EG{m#(e;G{m zf1P#yBa-~T-oy1PYyK!}{cWc4hgXp&{yhuPE33XZYyE8|ed1cyr$n!;`p`r_xc)ZR zRsY?HXV?D_I9L77b)5eL$*-*WtF!92nZ{pzJ?p;`y|U_?XVq^r>EpMu{+Mrh{*_hV zA}hbmb=7|g;@S0|4bD}6>UPdQMDi*j;w!O z^9!rqu9x!LTvz=cA)a0T4sfpe-G6cZtM>BzD{FqIfOkM*%5O7`-|b3`$fbH=YHfx; zZdUy^lRnXt_5UY&W!2mDwnn4Pb=5xt@$CAifOFMfazE#-)7P$@Gq~^Rw{aB)gSHt|0v?w^$!B)s=s6! 
z=iedul{LRzFOA=38h@Latk1`J5ZZr~skIsY*!w%_Z6%c9UPZ60`l77)x4EwM z{}bZb_5T%|tNzSf&Ob@=D{Fpxf2Z-=Oyl<#u>KX%E34k#-$`#X>07-)7QhRb#Ix&P0M51k(HhQw!hU)F3u}J6UK+p6H2&&!tWSzwS@rh*PI{Y3U+_Nb zuMoYm>Wi}G-{!j3|4ziS>+cTERe$kEoPV9m%=#UoS602fzmwi( z(yy;(eZ2!b|H`Vj>m|L-b=7||;@S0|3C>l2d^_iVO7bgf{zQQ}JUPUa-)0(rQhv~2 zs_2zfUzYXyXEW)&ugDYsKB(xGRe!Ym{|3af>)#2^RexB+`L9NW^!!)W{C2&$>UX|l zeS6U>tG-QE{WeqmZT7SNG0`il-d=CYZ*yJiKN9im`lo|))$hjmgnlEOho}0LHGgr| z`rAz759+c0RM9J|KFq4$X3}RGvA$UJ%Bnxw^}iMI?E3!&&Q*UzKeCH$h2&S({K>5P zZKm-je$M)jMX#*-lC1h|CVlz@)_*U0Wz`>T{Xfq;wEo|MbJg$vn)5%7ep-KJ&2QJ+ z8jUv7_@k3qKT`C{s!wIrZ!_tOPi6fq(JQO|XzO2zc=q~#1g83Hvd;gcf5-XH&*%D; zHGj{n^|zVEU(=lRZA7oE`VCq2+f4f4Y}R)by|U_$w*G#IXV?D>I9L7gKXd*)l3!W# z+w0vLjW(P0JCF6p7V!KP3#;DV-$`#X>Aj0sf4bm{^upX zvgR+!T7R2q{7o)n{TrfJR{hmk>u)pZORi@9X3;CF{%Gspi+FbZh4@}FSN+~Koc}KL zH$zfo&2R7Tw0<_5`rEMnanUysR=vHylip_1M>n(nSmSJwPt*81B_T{oN7IuD=&J zSN-wMoc|LYzp&=7%BtUH8vpt-*8d=SW!10Bs^4bPd-t>c_y)5739J5S>pv6m?D~tr zx#|xe;{4+!zp~~}W<7sxHtU~e{X)^lg;ihH;L!EAne^$ttbbSZ%Bnxw`oBUvyZ#@+ zx$5@^aQ>SMdH$6(e`VI^FPmxnrGr`DRrJcLADy-T*i3q780*VLudI6e{cdYC+FVLb zvSok&H45?U`X_^P)nEKH=g-4+7rv8?~2 z=#^D(*Lx)OUy68k{nvnV)gO)L{Le{#WzFwr)o-&I|0LG?I(}i*ds+3{O#0Mx*1s!y zWz`>T{W}rQu758$SN-lx&ff;t!O;3EYks?4ntz+k_+MmwC();bRd3fzdYeff&1L;# zqE}Y^(bhj4@$C91fOFNKT)_DcNPcC_Z-3uOv~v#$U36^}gtpYxP;LA90IGU$%zz z?}}bo^+&t@I}y*We=j&!{h7Bof1AdUu~xGJGt;qgB%dS%ty^^)FZqyLWeqf{@fdb?iI+gw-uvk}j(e+f9}`hU;)PdSe3FA>)K zK6lryzs+aBDe)sav=qIv>YHV)zs;m?72_TJ2GJ|4z8X_T8*ytGo9n8-4Dsyx9|PyA zKbgn*H%WeF&2O(a<+qu}9~7{DkLZkc!7fO!-3sY{Yfd z|2xF9>pvHqtNx%N=YLxAD{Fqc-dy#UHD>+uqE}YET`%cvruyAD>z9dMS@j_mi;eh5 z>JJgmu0I0js=w@~oIi!@km>oatoiNzo$}jE<4-nWeJ9Z?tG+mE{cR?F*$J%gt@(vj zA5!tyh>xWHF^Ff^KLebr{?soye9)<_=Pq9;p%r!;qiYYdS%ty_2#NSb2{s5M6ay+ zkc!84r1k&g=ZDsRA~;w5?(aDNqv)sgSJwRY{!Zh!nbt4$2i89=dS%ty`#b4vCcS$m z>ob~PSoNW){>bZp6Y=cz{{Wn;{<0+JZ_z}a|H7L8aO)pGhxIAZE35u+>z_QA^_@ho ztooy^zZc@!^$!8(sy{Aoup=G6u;xEp{h9N4{DsH!{3)ycaP)sS62Pe)_)%2 z+4Wxu&Q*WrBF;Zu$1kk;?fpI1{$F+p>z9aLS@rh*o@@Wl{EhV+HNUXxkGB4A5znr_ z0q#@DRlj=~=kJJq^ZXaq{Pz8BG=7`u`P=47)~7|Utoo}9_z6r*dYeh_(T}2H8zp*W z)rY3{$e;gn5YMi^3Y@F{^fjEn*)O<$WzBEbo2&j-*RlQr(JQOot~Xcx?hUNJQS{2H zKic~5K|H(uUf^8ym)*$uw@7~F+WcAX|3iyu{ZhBE{zuU(tG*;_{cR?F=62SfaDp5^ zgjIjE^`C=ycKw%tbJg#*<^1C%zq00+w*$4Wzc!ok-^uy~I(}i*AMW`dbzuEE(JQO| zXzSmBcy|5YgLBm%bmaUu|B~llS@YZV(*A2R)gS$f_4kNgS@nmjKiQ4-14OT^`lGFX zBI4Qg&j#nJKVHW9e~$a`==i6s`BU6oyZ$y){Utrvp}FXlRd0V^O7mwk>DS-S`oD=@ zS@qQpZ^Wdxxs)1Z%l`hO1o7 zk!Jk?(JQMyl=_G%zs+^ke**4nwCg_|oU4B4an9cd{j~ndn%}NBSN(}{){hmvvg!|4 ze@Q>q&lA0}>g{@ur2YW$?D|7+uKJ7nbN=F{T)(pBxA%9-Z}S;ocM$7u7QM3S?fsqf zHj}stRO5znrF44CTQlXdAg{`Pl{f-R&Ugn-r~CIzY_86`fmZ}sz3cK=YLu9D{Fqc zUK+p6H2%~$*1s)!W!2mD=Bhtg$@(uuuUxCQ>NR?c>#Dy2?n}1oKLMPp{`e%${~-G5 z`LC?`Yq-01{cNWC3#PHdFwrZk-oD?B#&0v}lh3pMdC@DYKD6pRlKNL5o?ZX@;GFfp z!1>QTS?U+o{Py=h0?#NN+RgQ}bDWm*|yMFCRy1=ilbK*8g$D zv+H-kx$1Wpa{ezRzq00UQosv9Oyjqi#$Wj=>l-9^{*+bUH0%2xn@R7>1qBJwE34kF zm-5?OSN(rMJiGoYz`5#omhg{@ur2ajKXV+ivH@WMN)^q++^waZSS@YZVQhu9h{P7Q1|A^?7RbQM{zs;miZDRd! z(JQO|XzPC-@$C8+gLAFF`!VNl`dhAFS@YZV=BhulmGwoUS5|$qtom)H`qzKX`oD`_ zS@lKS?GRIbo9o*DJ0PB2|9#+G^~ZN{f_EjqvgS`_?f*8@_=|V5{!7s-tKPofgT`+& z>8rkFeZy0D{*_g4*Lx)OpMrRH{bz%7)$jbk`G-h;WzB!M^-mmN{WQ@ltKQzN?>W_B)hasL_|3q-E`V;bkmVYYOudMm){hh{dGmSqbKTzCM^vbHY_jl6U zO#1aF^8Ec-^vbF~+WJ$7XV>2roU8u$Z#e%el3!W#A8!4Vr?UQC(JQOo-ruQyo2mY? 
z-?RQp(JQO|X!rlZ(+;iw7vNm=Xa30f??XR!5l30`AFlr3EY=Sey|U`<{hjK!nd-0k zGwY{_URm`=yZ%1nmCAc(`YQnEsy}%i=TDx_^($+Bdw-|#+f3sxxsdf&h+bLs_Wn+K zn@L}NDeK#bURm`=TmPeoXV?EEI9L6N%Q*iRl3!W#x8d&E*I%1y{5`K?hx)(c`BPSX z=d9O%n@Jz0Sf3ERvg(g^|344$O0D(35}dPsDQLRnPY7#%yIxv9n@#;U@%WdBzNfJ2 zQ(5)fO!}&SvVNoJl~sSV>;Em{+4UFxZ|?e?J2-zQ^waZSS@XNxUAz7^)A-%??9f~E z%BrubKd<-vg|_N%XF;>W_B+zX$P3t@_j8T=fSJbN~)ad&O~ zHvd1;&IHb_vTWZY6xk{StO$M}QHMp=K`J7$1T4xr1gLC+1Q<5i0u%uS2~uRSVAP88 zu>~lL2oj(O$YS`YvI#^es~`c&Ca41xS(X2D=eh6UnfHGt6KBrvmnPSo_j>Q=dC$p7 zCNphOe}N0~7Jae&c)s`B7bCa(KiLCvtNZ#_$@0hA|L1MK*#DP}Gy6Z)^Ur2}ldSw^ z(tl*sUpvDGTuWapzm**S$nYyabN^5D#q!76{|h$XKsf$y8fW$2ILq^=uHox1R(?F+ zar`2q{=zx#A532?Kc4UKBg3!N+&`JVSpHc1Ki}qy{aNWZ+8D3`eONG?f>I8`9L`TKWCiPfAcoaKgssv^-rw)Lh}5L zjQVqP+&`DTSbih<`)`rqH}7%(x5_8WA8Y?jn=kg?GS2EhcfaQs*7p9z%Ab(*9~t#e zddU4<>5Jtzll~*apZB=?N75I|AM5M?88%<+|9s=D{_{_H{s+t#D?gsE{r1JksK4hq z_m}JQ^%KjVwy>XsVEB>YPy3tulj)1)kG22pY`)n4H;uFUum9ciZ(zPy`TX(B(DOes z>Yw_W`+uS@mLLDVANr3BfACHBpQSIBKi2-=w)tZJ6V}P@zwaH-FWG)P|HaDZ$MZw| zM@Ieg-*^8g`eOO<@B5+u$na|)@h8}i=!@l#wf_rkzS#d&##zU|#V7QlU-16L%5V6` zdqe$4Zr8tv*Z*nyHL`rZybt-2;TJyU{$%=M`D5*WE1NI&zmsuR|COBQU(0;4@?*a^ zev#Yt^Di{rsrt$CW54ht!*4C?`A^Xo%O7k1Z`gdX|DJWT`_J`XjgN-^2X}R6kjMEjfQ8!!PXZ{tNWQ^2gf$ziqzQ|6*Ut?!U6H=kIU( zasG>yAN$Sfzi^=Y$I}-gsn_xw++&*wi` z`J?sUn&$rM^u_XHzghiPj&*-C`eONG?SEIBFZRE`aaR8g{)OZ}Fkh_v`1@X{KQfMA zl^3)p>5JvhPR{?x@TZ*Y{m-W_mOs|(|HLm3?|(Vtto|#fdj5B8fBXC=D}S`(Uzp+k z4EkdEqaFWN)%}alf~CKwm6>wEipS zy1zYrvHY?2e}K&w`~Qw{R{xC)Jby0p#mdho=Wk@3zpabie}%qSe*C-y&!5Qf=Uwjp zf?x6V7t0^(`TsvQU+jNXTllc{+9H`@?*d7Bg3yW z+%M4=%a8rSkKC#Mqiw#}|H;N#{WtFL{O6c2R(>-%evwgs^)C0{r7xC0-FFYY|BVcP z()~VQ>5Y8-#qtZu`4_oU|Et-2vHx|Av-&Un+4E;GU#$FEk{=oM_dM+W`Siu|tI7IB zZm-|t?q5T{N|wJyk{`KK|8s1<*#CXTS^YPj^!$}K_VpJlzmeoeM*WqR`|Hye%daK< zM~0t&-u%=i4=Y=198$;yxYX7%5A%l(7ti{;0Dv-&T+ z@BRt&#q!76|4(hc*#Ei4S;xQjf#?5=`C{eA&wEjSWE{VN1^ovcixzzS#PZ|kz3?N$ z&n@Eq3iQSD$J+loHec+21LLgzi;H{y&zLV({%FTPx1{@*sD85i(T;!pc6&u=l6Wo`xh%e_M6pzd1LpNrZ1Ks`-LAF{pY{x{z~-4@?*dMP5KNL0>Gtki7qj-0Ao~ zYV*bZpEJ(tzqq64Z!*P?zgYPVJg^OSSpUeVKlcsyccw4K_+Go+hzGXe4u0fL^&e*Q z#rnTzoK=5wC(nPB`C{eAdb8@U?&AJG>5JvZdjFgH7v6k${gaHd>M!o*`G?#7UJDf~ zKNtSJ1>{Fw%eb_=`=`?v%YPdWY{MP=$OGJm{prsSE>7ydYJ~br-}3y0zvlIeF*S~V zEjfQ9qyC<~-TxGQvHa=;8)F+7^+$$Z-Ov5C=!@mY@0Z|5?$rNgHeWpdwlU5+evJb? 
z|5E0Ql^@?PB0n*q5zU+n)CNn{*l}DAM5@u^!v#2M|=O&Jl_3->5Ju$wf~CE7yCcmIII8MiJm`?`C{eA zesTOFqyOe~_uo?eWcj1@Up~$K#kb)4ljV=K{}pY%*#BzAS^d{%c>c-E7b}0X_Yak- z`)AV^%a89Duzr!-=g-gG{|)^;4Ax#q#6z4*f@lU#q#_PhTuQUhm*X?sWaz+vbb?A8d^N-%UP0X5Ju$cK-KW>Hgp8i{+2?_hE@xtl$I!q(ouSox#%-@MKJPtq65AFcoL z9qzAAUo3yD{cmFP#s0T4&g#GZN6){6`C{eA*LxiQ$TplF)@beG2 ze?NV({IT}`oXr>eA2iPDzx1HzZ@Y~jf3fmwqg?-b9&vvk`eOO%Z}u`;X8U%O7k1f4BK!|L+=S z_22WR=kIJM7q0(e<&V~X<8Aj3p)Zy{TK}bgyFZ=2SpHc1KgZ^a{r}21tN&V$|APC% z+j;+D<;UxtP4WL-{{|+w|7rSS`SE%OKk{0}xkcSykG@#`So`m{`C|XO8fW!ioap(p znJ-rU)Cu_R^~3L9BX_#~+?C`%G(vto=lN?Def`Db&EL%Ci~aW-XZ4@^gy&zue6jNH zO8Sr7ssG<6`S*^H-&)S|SJ~eC7c2kVvgkJ*_V5xj(7ri4p1JL?gvo`Y8!UXS=u)|2nDp68N!{y9QDl_eHw2X4KSua8*u)RW^SF4=gjPoc|t z4z}@eeU35CTA#izc|CJkk6880Sb6yRh+k$sr7r7vGpT2RowMs{?BVt7Y5RMv2(jwv z>m6Q?_$V8X^{I4O&xtlZuFn~)=l$gRGkBuc^D^rZtDaXk9A1z3Kdh(GWj%}SGQ6H; zjB&iudRn)6JxADntdCgr{PGLK>k*$~*Swy8vmUYP8CYj{ zJ>rk=>h+YntY<|VACK4S##!q#_}N9#?bE=6bB>`R6h}M^^qVNq*!` z`Bx|TzZ)UH$-GbO?){6$o4=~f7yDn=III8am%RTonJ*r1{)I{YRU_mV*Z2HI_u%m- zD}PS1{*gPa{|Yu=?0+@mtp4*~_WVW>V+#POHW z7t7Bleq{I+`u|5?JevPk8;|SDlv;PM2QS+AxSsQkv(~f5dbaxA9odZMs~~YixX6&)*wo zt!In%tgxrAr&#s0lJ$&?ei~c*dagoWEI+Ov)*~|fCjE`+i{;1lgC80GApPCwi%0X1 zw((fcsa>vT#m2|=Ji|C^J#*Xmdi;&`h==M)p6~g<=%;T7_vh0W%a7}a^@t3=K!354 zA5XFTxPI^>!!OZaiN1I=e^VQe_1wM7_1wzF$MxLVIBPvCtY_+GxA16{7?BpV;s^RvcT>)BvE z72A*ZuVU3x2-n6GkL&qQvx4Q*6-iR_rEpfe{>&TKe6%$!g`_p$f$q9 zp}u}g(ih90n55IGZJFVZYHeX!7{fx8LuaEh6Fkh_v znaTP^M*VY+^7Z=@eX;zwUg$qE{3iWp>5JvZ^@1N6evAIU>5IGZJFVYxwll8Z%Enpi zH^}^xY=5tXij{v{vVM_G8QRj{zx<``jQN)sXXTfV_w`?3U(XkhH~*71U(8?0I4gf} zy62z7eDQem&rb3$9U;Fw!}Ay1kNuO!o4=gR7yDn?IJ5tsv47?dlE<5Wc9MVD2>Io6 zJb$76**|%_`ODjUvH#B-XZC-Q=l_uTgXHn%pPS_WYJ~jq6`udm1K2-#y!p%9e6j!4 zj5GVc#`8~Q{vcWT)05|a3b8Z$CC|Z@9ztGjgZ=m2AG) z{}+t2`fvWu^MA~IvGVO>*J1gQJLUg6$-i-g{JvW}{}bQ#{>9_XU&rQ){cmiX)qnL? 
z&p(Ix;_>F+kmUbig#5wVJpXeCdH>?^<`-r z&tKJpXsh7mqjpu_XWb5%SA(Jb$x8ynnIs?PJYh&%el> zo_~AVe6jz7jkEf1{=xI_X1;j5`7b2-^GC?C3CGtOH7+MhiC2eu#mi^rQ^OY-X@)-^DG_kl$!}{%S{f|KjoHZ)x+z{&zLb>OcRW=ikJ9 z@p$tePx4I&?Jl_1{ll;?1$S*PfMdpi@e{Rx$IF5hhPWi{%eDV07HbQ=bdCxOnto)af{v&tFe>chh=+W8z*PrnI_qP4$ zUp(IYqiw#}|MU^^Yk%?lXP7Tm{y_5gha-3D|E(l{!DF)f&;8Z&_q6@!U#$H2d~09- z!tv;oKh5Tg{Z~fFFEjs1=8MOh|5}p&pAqt#%>U-G-oIG+@%e`SBX{cmaGNjof4p(l z^{4!_um9uB7b|~@u-?dz+$sMbN&dSdMn^ z{CK_jZ>~SDB>8WRkl$qfj^E?)Co4Z*Z;&6k)A}D^^TqzB8E5t1c*gs0GG9F2{O6PW z*O~u8^8HW!S~`#-`M{ipf)=RN-c=8KiTOLG24?v($0 zlK=V$`4#4Gcf9v69&i4>Hec-jNaL*jTg?A6^To=amGmFEQ~xg|`EQPpUwy&Xe~0gT z|6=7&P4Xjm%0Iy7i~S#EoYnsz^B-isc)a;9CHemvA;0*d_doRn?_aF^yORDRck2Hj zn=kf%jB!@~`IkKZVdjgKKR?Nj+$sN+B>(M^@?ZA+Z&bYhJbAqN2ibhF|6`3a`~REg zKdOAP@{dX0|3vQ8|38xaw@1jYz3TZpoap_FmEY;#Kj@UdpUoHhFB@m|Umo=Qdzdd) zKL2~&9x$%Ikvrx8HOYTzg#5~U&)@6^-oJRf`8(NsvHy~BR{zzvJ^y#i7mqjp{v`kL z5%O#Ad;a8;c>KxZ&EL}Ii~a9toYjB5XR-D({3_;)$D4n9l7H_A`HcylztVK?Up(IY zFWY>v|0%{<{Wll!{0o>b9&i5aB>%P%@>>%xe||a7KZW^X z<>!;Xe;Ii#8&p`{{qyOInu+|4dT<-$$rFzozGJd8)6!Sox#XU+8ncL|-gF*8AVof3(dP*MGWk*7`Ts_55d; zFIN6&^$)J+{yX%=^2b{LQl|~Ce+A>L`YRiF{;{^d*Fwe0AFck{M($VXi{+2C{>zg3 zuN$HMzD+%U$-`(>MvHf`dij`kV&Y#Fw z|H_{3pG;pY|J=ln-0Av%UQ++BMyh`w&tK>a)=yS`euBS20b~6mqyEDF?*9*cvHZte z$17*skKC#L^=-a*{!B5>T>k?+zoz=h%I{0+kBs_@2fP1U`eOO5r2fdA>VGJy|LGCx z?>ofvzj7w8pJe67da?eIQGe-h_qV4nmLKbdAGuTghuVB`{l9CRwf==8JpU2qiGZ|&W4-@P{g)^8&mN)v(s7=jt9t!n<&Rc>wc`HD^u_YWTK`vVzIgs@XPmYEtG_wZ z{j=zc<&U-g>yr8#Bh+92x#zF&Gp}E){K?7dSLC&9(7dzVpG;pYe_oGoU|fG9ce;M= zVDrWEXHVm-^>6;K=ikD7vGS`){gFH6Ka}LRM#wK+;Q1TPOX&j=l_BE;_>D`ndJX%g#6acp1;K}c>KxZ z%`e$}vHwGiv-+UERZ~opkU+lkZoYjB+9?!p@ z`C{eAfA0_1-^iV=KQAQtZ;p^Zc)#aQJ=^;iE5DI^{}{Pb{vkGB?Eg69tp4i{dj4OS zFIIm1c^mqV+$n#4lE2{pX6H8^@%+7Qf3M9i9&i5lY`)n4>C9gs`Tn8#gy+A^eDQem z7dU5l{?f)+|1`h#SIGHZ<2q-2>H2JJb$tCJYPKC{9cm0>%3t2*i}`C9XXOvB;rTO|FCK6H zrAhv8N60U(6>ivtyo8M>i#r_M%=zqcF{crt?o?m0Wc)a<)OY-j>AwRc)=dW>@ z_b(oA{?~23*#FMPS^bwc_WT=|FCK6HpOgG&M#yh&=J}gm?){6$oBvIlFZO@1aaR9* zU-$g`m@gi0{!2;z+au&xxAy#9e#PfMdA#{Y*nF}79~fu#Ke(OeKg)dac=O*+@;`P( zc7AcH=O1eOdo5I~{DBF>uYZv{UH?wA`C|XS93j84i|4<^e6jMI$?qRWUdskmzv=#x zzvl5H%daQzpCiLB?CJih^u_Yy-#5YaKXRx3H?{d<|3%}hn#KQiji z?c@HP^u_W!{rmm!=kDizi@sR?t9*S0qyNaA`hUmfi~TQhWp@9K{XKtw+mG{4to(Sq zkslfLSHJE43G~JCnvHe~rx-`)?R$9{DN7t0^@_#fqdk^Wq={CK>PAGuTi2iknG|6`4_`fq&4^B-rvSo!mk z^FK2BpLMMJuhSRHza@G8M}}WI-u;Qc@%0zWkH;JRNAA@B$~Ir@e?8-@{)^xD{Ii%Z zR{mhpf8@1nP~k-Pucj}S-%R?C3_o|W`*+h9%a8r`+ZQ8u>i;>LFZTbsaaR95KlJ?V zulDs9D?dKpkso<2@Tu+}NM9_!n)DwTerbmLKcFv`ANxgqaYC5{m1Ex<;UXIqttsUo1Z!Z}^cr^}qNv!~6e~aaR95 z=X(CJw!aslWaY=>jr_={f6n>t|Af9+emvgrBg3C|k^5KB7t4>w8-CncYkB=8D}P>c{zpdrQ!aIX8~S4T=O*WWWcaOLxxXKMvHWpBR{!N|J^$Ct7b`#hz6bInqyC;7+`ogqSib)M4>0`5@N>7i|0I2}{0)=!i`?n> zziIQu{wK`N?!VaZ{C#Xcp8sOy7bjTZwt-Q9WYk}qf3f^{ywQK;PW`W7^TqzxG|uY3`I6_K!F;juc2L@^B20&`xh%eKHrfa z8TC(I)cqCci{&q$^dA|1aY^^rqc4^pk2mrocj|vTn=khNE#s{It4n+St;`oIznC2V z$f&=yjQbDM7t4>=d(Chqs#%YiioRHWJl^O(a;N?$+kCPADaKj-=eP9y zSA?|0sR2{CK?KNAA@BYc^l(zvq_h{s(vR z{5@@dugx!3ehv?8!yWP?qyC;<+&`MW7`uAyR>lL{a0fp!{PM2uSLur}9{#d+!zMp+ zr~ZFs^TqyeG|oEyRpx*4R_|Y|{5c*!lph)OPuGfYgUyS|T^qn~P*4i9X@9r7ci|H9s0|8Dff7?1ud zcwigu;75kv*vI{8^u-tte|fuMlOMTL|7Y5KvH$apv-)o_|3AzZE5GUSL-~fo-^h9~pk@0Qdh!UySiM{_%W=AGuTi3*J7w|D}zy`tLc=^AETEy$B^M ze~x_#8yNYKQU4r{KaIXvegzL~!yWv{@TVN)`4`a_WBhpfzuD#+Krpv^jI;VLG5_=b z=lzS7pT`5+aEJOMqyEalUjL@_#TbwJt9W1=?%+p;pF7n3-RO%kemwmjY4gSYe_))| ze;@OoVZK=T4XXni81***wt-RqApLjfi?P3({wrngf9X4X{l)U*`HuX^ozDN&ZNAw5 zSB$gzuQC4|=8Kh|!vouJhy2LszkayaKbyW7t9JeS$_O^A?l9|zxG}C 
zH>59?zlNOy*ue0`JKA{se}?so{r}h9liVOp1W475ZZNt;CNEzj1>53;faZ#qzZt;J7}^*?6o^lk2e(xz*+RtYhQj z`fO~RwLXKa?;P8Y^%1MS2}ysE(ckf05DOgj2k~qv?y)-;;?S8Gh?D z_s^s+mOs+^Tw>#~J~^((HRODk>vOA(kLz=fan|{hJKgJBpGsdWzmoj?O?;4z$NKbfJ&q<9x?G==YA-Cyu-_w!`=&18Kd!ymlR{ZG*s%Re<)AMrXi9_v%$ zdTc~4cey@W+W5FWI~ixKPvsJ??>gI$^%1MSnPENp?TeAo-=r(tzni{Temq~`M}}X& z+Wi)Nv3#8`VEE#$9YdkL$C%an}0uUFY>3Yx}W2V%1km)+aLh zYu@1gkLio$$De1QzsT@s-s=9P^u_Y0C+Cm&RvVA?X>dL6CO5lWpNDOHT%YHRv(_hn zyVtkry?lOHln__e#-FVh#xe-#%$+rU`g$OHHyxAgDd zoNDvM{?9Vb>c4uo=g()pSoyJE>}`!|vZkUo1cNo7MmH$K8K~zF7WP`~Qc{7yEzT zIII8i6P~}@eSZAJ${(%&`cv*7PG2m4wEpKm z?_aF^smb#{GS1)0FSx%7eX;z5lk-0^{Mz5#FVGjupU3AnxZnPW-0AthtIZet-`_Z^ z|LQBA{|Dxal|S0?Z~nvmC+Umj=aT*-uVsT;ue(2=zF2Mr;M}u zZ@%IA$JlW_^6^Y6KT8GW(**zbSS|L<+S0lhfb{r$#S z{pa5I{56~2zgYRj@pV)Iyg_P@Sy zR{!~rdHz|<7b`#Zi{lp=^$+CSzly$Ce(V>1Wcbr3x&H_HV)dN8`udBNUr&yIWYnKu&i#Gpi{;Nrj(=qM^Hy;GIQnAwW9|RvHec-jV&km-o1gal zk37WdpV*$2y>^So8^Ywyk_m`zFR{ecR|B>NOS=s$Q`eONm?W@Cw?|&nAy8drr z^TqymGS2G1u!`r;X1-YY@%0}4M@IcKS9AYf`eOOTr2ok9=dI=bbM(dXW539c+^PTf zY`)n4qI13fk0zi0w$}Fi{cS&9|HMQ2$@xDR81*;Tb^rVH#qwK8|B>O>*LVLc`eOOZ zhdlHjxl{kYvH4>Ew;1F2r~Nm-;`yI?nCCxP`8`Sfk=y-m;{H1F$?^viKQjEr6!*8J zFP0zs{crl;%jS#yA7-4@f9va>e;@P3%8&gbKQiiXZSDR%`DFRAU-*&XH;eASPhTv5 zQgZwwcRK!)9vR;MXN|M^AKcONE4Cl!zgYRP->m)zcXt05^2zdJzghjac6a}3`eOO9 zU-*$b^?#?$7mxoV###OMlsx~7k9z-N<@Y7$Z)6<5z5(~Qr7xDBPd;Ca48M4w`}@%s z%a8s3H~pVr^TqzpFwW|~e6Z)g!hEsv^GW}aQGd1U{sNEr`ibR_cKj?7v`~)qm|6&p)5}V&%tvvHp?Kf8#j!uct4TKU)9w6Wza$zF7WP`+wf%i~Y|x z&g#E8-Sc;RoY#M{@+-;lkBt6{r@4O+eX;!bc{hWEd; zaaRA0D?I;j+mF{jvGSYA@r#W5t5>^!8hx?+`1{VNKQjE8*SmiaeX;!5?|;+(%{E^= z{&yQ^^d--GwM{~CR<{9@97Wb|Kn(EUZ9;`N6tzc=YWa;N@3 zWAnxS*EY`Tzx=T0pTT^w@^g4#8}4xaMMnLVN8F!9UySj+c2kY=Bg3EfsQb6j7t3GO z=D;RDvgX46^yfcwZNAw5v&LEd=N|L?!e4#;#mb+A2ev6cGV1Sr-2I*Di{+OSKQjE8 zPq=>=eX;!5Z@+ypa_s-C`)sYf)b^jJ*nE-CGe-XlC*S`zn7`oDy#A4uKMfCT!yWP? 
zqyEWHdi|fHFUEKr|8nyC8Ij@7`m6h2r7xEMs?C87jQq&(t53UMq%W3V#{=7N2R}0W z!ZYrFi@q4+QGe5J*yKlspL^E*L+Oj&&MkBw|mCx z7b|~5Jg^OSI3L9O*m(R`j^%|vF|@x^J%`!&SkHHjv+8NS?(6w5>k+G-Vp30JtY?0{ z`+uV^mfx4WzC~^ykGI@k;8|afB3XVlIlm&qZ@uIGvh>CB^LStz?y#Pb;rG1f{+jf~ z*oEuEpxv;^kKAefH?{fV`fp{Nwf=+5znJ-A<;VXI9Q8*={qx@U`mdocmS6Yshw6_E zf9Aj4zk|M5zP;^fZ@5E#WccL|+<$<+7~`>iMf)8WZ1N++Z+vu#_Woz-i{)2*`XN6u z{K6#n-=r^=AD_RdKQjDj^gs5TpMPTc=eDc+u>Q#KbD!}1)#!`m7ZX1+{1W|b>5Ju0 z#sk}M*RDS>{F%#m{(khu7>4yfIC=g=hTpTC`zO#B%WntUFfj5XxBXAK{|otK`To<> z4}Edn#EPza_c8 ziXXM{?bl!S^DMbWet~>>m;JtC<72;X8)x;KU)}5d#`E5o@4v=!?7cd)D;&Z=^5o)-TX+s(-TlNlE{a z(f?GAZ_yXakL%TMUyKZYCjGbQi@Wvb(qG~QKYzsC`jgi3^;?O)SpGIi|B+FDiT;N4 z#qy^oeq{Lb=xVI;duisqy;%@zE^k1Pb?$)18e}NbM{1wZO=Nsye zjQ)Gp@%opeFP2}lA=tq1Bf~G!Uz5JLTfatsQ~F~06@=RcMt%ig8yNZdFL?bs(HCQX zuibKX!v=;Q8GaxA{ppM4_qFX0{mAf3^iQBK?$)o;{{?+aMx#?_5P&BdS=-EUMoth`dXX=jP;4!UY{>|f9KGzkmVQfz&81j z;WyTE|1$bw`RYM_Wca;ba(^~`vHUANekeaO{3iW7>5Jvp6F+jh|MflpA@xs|-@*gi zaMy2tM2275!2K8Li!r{}ZdE+64R`P(!yo*L`>)d%V?6x%cEct=GW>ZPy1&TFe*DGq zYj_a+$nd9c?EbR!#qtY@9~u6fP269dzF2{2n|A{YP&5Te^Rke6svmi60q$@7C_0OkXU2w4X;#-Ol|B>5JvRn&d}Deqnp} zucj}SUratvjSRoKll!;Q7t5cz=i{-a`cl)sS+ZQ9lZyw_V2I-6C=ac-%?fmb%Kk;w={2L_8?@K-}iVQ#h zL-$vvFP2~P(7WI)Cz)`uaa-`*Hq@wf^yZP0wE(znS#^L0>F?cRa8Sckm;_ zAH2---=QzY_+Go!?1oK#txeAGtCzdK)Ze{+vHbY{4Ed4a7k=e_FMYB633y-|?%+p; zKjjMdH=-}bc+_9Q1KV&1KQjDTzjl8Y`eKZSzabvjhCBF?;kT}I{~-EejPEAD@EiBP zul~vMTX?@3SEMf<^6|hn+#$ai7=F*)?yo~%j9pm&S{M&MGW^;-?oXjF zmfx57k>NM*b$?g-V)-qr0UH?kEr4xc`1L=ze;9o+_M`p^9@vIE_>tlF-S7S>^u-tt zf2Q5A$&UTeb_xzdk z#qwjl@FT--(!ZL%SbjWT;75i(@Py~zM_(*I9&h-O;m@HzkG@#`#AN*dEJk{ zSbkh@1?y27R&ozQm6VzfAvY^u^u!HTwI~7kBG7>7PVj zEWer59~t%6p7j2Ip?tFZ-sJsvWcYKQbN@Q}V)=tfeq{Kw=eggcFP0y_U+T9nMutE5 zy!(&S7t60D`H|sQUvU3L`eOMrll~*a@BN4SAJ7-ee;=Po*ak-Zk>Tgxaeu`({QMWo zZ}JHvKQjEmg_dm3U!X6RKfk^A!~Dqb=Y7=usr1G2-%q}OiwwWDnET(QFP1+!`TQs{ z{7D~o|4jN~`D-NgM}}XZe<6Lb{JzAG48KhOdirAdt>pD5GW_B)UjJS6#q#kz9X8zc z+aHnPPy3|%FVPoce6QW&>oxqy@N>($-!tFOAF=!f9@vIE_>ti^mvjF!^u-vD{Hoos z$&U=b^eOi@pf8rcOLG2*x3Tf<&;R&&*v{l8xkT=n=%1e*L@toOLoSmmF#Dv4XGX zUbY|SidgFzU;l7CBjb3?p?^Gmv3y=mhK^_C_W3~n0{Ts|{Fs-OKbPZgqc0xKf6m6Q zY(HD5ZR*G4W%A`@_?_1GpEf?O@4t+*J}+&tzHR^I>nm1$(|C-)=r1zXXX>Z@`0Pes zEdSudj|{&-{{Z@8`Srw)48Odh=a=b=<olG?T^Uto1bz2 zNA$%Q-)py=-LT1z48KMHLi*xv{XzP-(-#l<$@MxH82LTDUjNhd#qxW?c=R8+?XT?q zzvPqU$9~~QhM(j3CEnuoI`JD8i4F6?Zvboq!*6o@s`SO!kM*BoH*8?|k>O8T#q0k% zeX;!b`T{>P{5t(Z>5Jv(@xV6R!H*2TxvJ-%s`|+okNOAghE0BC_>IrIe;$3Y{Icsq zeq{K4tGWMM`eONYJg^OSs6R6N*6Qxhp)baG)St5(Hu;g^57Pe&eX;yf+y2lOzhUF? 
zdG#PafBpb$*ULJuj|;u+d?R^ja)Z1g`7ZM6#(8GpZd2^H7lRgG&$cDMP2P?CKKTH0 z&*px;KEgQ8cS^$NCH&ij(SN7w`+YWk0EKd!Yn*j`FRtOoXZ3gd`6br+#^ZtG8yUx^ zM1MW{;%@yi{Q`Y)w|<5GR`kW)`c?Y7&=O9K ze;9qS{Icsqeq{Kw=pRd8EI+=UqW;M3`q%RMPg4D4`K2U3GW;^fpGjXVzm@or;TPBT z{PXCG<;V9Es6R6NIrJ~5FP0zsg&!GyU!Uh+OJ6KM_6t8U{3`uh=!@kSll~*aZ_)oF zeX;yn;zx#GS;y=DGkvlAQsPH$=hJ^$`DFQx#E%TW_65&>oxWIpU*boG-=zOOeX;yz z;zw@huj~0Aeb=vFHM0C%;zx#G=lG@Qi@Wt3^gm5sEI+;;_9G8FY{MP$%U|^R*Pt)P zeq1ZccEbjS9~pj&{`&OA@~5}$5B5DNQ{pXYS*OB4(Y~=pW=!@kSZ3s3n@*~49(!ZX*SbhnzZSqTP za)w{0e;<8uw|;J8um1)5V)=!Thx#M8>!<%8)lcr$uX6k+-}CE-SbkhD({bu!FroRt;vHZpaUqCSY$f&<>isv7%e6swU>qEysGW-I^pG9BX ztzV>Hr!Vf-FVVl7zF2-EsXsF6&u{Mizd~Oue>xu6hC8fZWcahb=Ke?C_w#3nZ9Ja8 z@7oQV{K)WYUw3~6`eKZSKLFV_`H|sI*uwoU(ih9$(DfldGW-JlZRv~UmlHoS{5t)8 z=!@mo6F)Ni(w1KT59o{KHxfTG{QOq#pGRNZt>2=5GkvlA2}yorlV8TEDg z{`qPfujd+;;09xS|D66lT9N%P`5#|@vHGtj>lYd8*P{P9`eOO<{UVNkWcby7KYm}N zFYeYa6y4v7zF2-CdH)d^`IQ~q|0aF0{Maw*j|@Mzqx%QY7kBH|=zovCSpH~#f26sy z=l_hpSbifpev#Yjzl-}9(yx-`7xBP0+@b%-@LRjOe-nK%#`oH-ZZ~Z5Be(N+bN?>o zljRqZpC?6z-`vCfC)7V#{%nsQ%8v|x`nTNw7k#n(a}z%@{QREoPyE22KVtdQlJhSz z{F!^XzczibeEs_e;C}lfGW_Dc?r%(AET13$5A`1zetDYvJJ1))pX9rT{K)X9pX37u z=!@lVhzGXe4)sTd-N&8}5)F8TqZB`ubl&UySj+b}J{>@5u0rXSx4>^u_Y)$=^SW48N!5 z{-5cKL+s=>9A8#TbwJXW9*${K)X9U*!J8kNEj7mfseGFJ8sQ$%+P|LB5#{KU$yChxZ*qyFj@?&s)>bEaOhTn6g`&+7ha<_hs0^|t+?9~t?@-+2D->5Ju;6F)Ni2K^R&vHT_;*oHgQ9~pkN z?)mT17h^p7FXDl1xPu=Ve(M_dm$LsD8P`8C#>0>G!WVzu#^ZXQztXSgn;PeByf*mp z`LV0*AJ9&_e=y;vjWfTWzt-#9a3Qa+NLGDwIVTwPMedZpgUuJ~+08gBzwbKFzn1x8 z<#&2N(kcIrB>$cf@(axW%tyU{vGQw4|B>6rm;QS6TjXy2%53j{Yx-jOI7hI7vHp>f zKa2kE^u_X*#{=7N2R}0W#`T_m0(~*YBdcgPZ1N++&;8c@i|C8xm+&C?k>QtbaQ_zi zV);{CAIgv1>G(fl^9|6&UCTJ@{A)0OgN6P0itk4=pR8}+^t`se+GSVw|x9C4gU)-(V z)9~Z>Dt&RcexCkE7xD8)+^t`r{~7w?Zv7Jd_2`Sc^(*v?^u^u!HTnn97kBG7=$}Ym z+^yfDe=dEo{P=vs^EYy*>&J~YU%YM)?{a@t z`eONOB+viI@LTjZrZ1M?m-vz4=kE6WUFnOv^$YZmqA%{&FVp`KeQ~#bjsB(d#ohW% z`u|5?+^yepkFVcj^u^u!ee~a;FYeYa(O+^gKmWwt`c?WX(HD2?H|Q7Wi@Ws)>F-Wo z+^wI#*Vq3@`r>Z=BK;rJ7t4>&H(Wo&SK4^IKVO6I&#x!(S#jK3l?^8v_^`P*eujiKZ#qxWT>q%tz1N8T#FP5+M z1IP6Q!>`i+E`725c>RJO8GeKQS@gy7W4-Vr!=Fe0O8R2?qt%~#$k*>q`eOO9-mLly z^q-?ImLKcQs(*m~JM_i!N2|X|f6_!hf5h@ftG_|NkG@#`X!Xyd|26tz`J>gJo9pYp z7k#n((dsYIKc2o={%G|N(4R?PEPu56tMu#i#qw88o`2%|Y&@<}Vzk6+b zT)(G`v##fbC%yhnmhklxV}6{AlkJ90^+)cMznje$^Y=2&%C9j0M&^r^pY!;k{K#1U zD*b!ui{;1n<5>U5o$CKnu+|81Ku zuK$t7S?fQ@{68^Yto&k<9~t!*p7QhOY5L-B{VM&}>5JvhPx2#o>i?s;;r%aToYjBz zuU`LmY=5tXij_Y#$&cJAznbKqJwpB<^IvDaSov|iasEbbum96t|H4c0`AhEBuX6nI z^u_W^N&k_NKS+Na`r>Z=V$19Q8hx?+!6ZL&r}f|6=8Nb5fyPR)hauYb|x-*0O%e{b85^H;3=R`U868TB`w_x=y3 zFP0yFK92e$!|!|9{U6X5%OAu8+i(XzGW`52?*Ej&7~}E!Gs$k)o<%3LX*6HvHX1ENA6Vr zGB#hVe-&d~Kho!4lliAIU#$H41TO%L;~yFIS6=h}&!#Vy-#+ool^?n7zwZ8x^2zd> zco6)^@Qd@^zlXk9emvh%e`NS2`p?i8ck7qwze8UvzlH~){>aGB{mbiL`V)Tq#q#@H zA6mc2@C)?&=!@ld`hKd@`LmtP7tf!4jI++4GV||bzF7H5Ju`oA{C857Pf4eX;y%;zx#GdB^j&p)Z!7OZ>?2 ztMvD$FP1+m@gu{Z{I2JpKwm6>M&d_?-=P0f`eOOD#E%TW^q%KmLSHODu2;W(F*5u? 
z`nS**%b%9yM}|M}zUMzoUo5|b2e#o3`H|tz`M3N3pfASwUc235H*E4F!>|0u{Utu> z=dW0P-Sr_qGW@;|++UNvSbi<>Bg1df--^Ciej)KA!_V=GdJuiF{CK|g+ZQ9lpH2TX z`eOO<`365S{NhJ)?FuiVFP2|U>W>V+LH}m@V)@m?kKE2*AlDx8nDWW;I8}8so zhF_lO{*Cm-7?1kn_$kU6L@@M7Tf1bWr{x*pp8UEnX?tegEEWa=D zBg4;s!u{nw<>!xBe*C-)^+#UI=B_W}pQnDFzF59pBHA0c-^NE?%f7eHf71O8l~0zB zV}Pyg+j#LdHXfhi z+uy!EkyZbo)rhTKzx@#z>)*G6um3{-Yv3A4&3G9U(vWXJDBa|3;`{27TK8Gd19 z_wS)EmOt9{J-4d+Pth04?@2x%jEwx;>h2H9C(ExT^+$$ZU(@{sKkdg;EWd>Zw&AYd z{)h~}y0-hv&=+HTuiXk0><`=IM~2^A*ZtM#i{+OSKQjEn`tEN`Uo5|p^e^7Q#<%}{ zQU3kt-N<$F-sBrc`1{l8?^}P{#>eyZd&XJkYhwei_etB2>$zC<&i3URTCd3HciLCn ze}%qS{_@FsMQ$IjP27K+ zKA-AI{!Js~7q|8Nr9Z>-k*xg3lH(D%Q~u{|zS#eljkEgi>-YS#nJ-p;KFN=a^QlPx zeAQ3x)-QAX#q`D9`c?Xu(-(K^*XdtHU)-(Vq<<}aaku^;{Tt|u&c8p~eDVBy+&Js}YcYSF&-(chTkKFFR=>2a=zd-KR zuW|fd^u_YydiUEGBO`y1{!#SB^0!IqkKAefPP6&q`u)T>YyAq_d;c#pU#$F6^8Hn0 z)L+}h{dee#<;Q+A`=`I;=lu8$lI6$a1wS(Ki@SRMD)hzO_?^~o6Pqut-`9<^*00X| z3z#of{-osiMMnK|=wCx$EPq1cNA6VroTUB-MyS8Z{MCB-`bk!PImwTV`U|`H`WNVn zP=;zx!*o#R)c zFP0y#SN-s%W zfBFva{YVm8SrGwmGi@q4+v7S@yhE0CtPUpinY`)n49>!VyH<*7t^To<< zdi+p+WYk|h*z3QCzF5AFzv_?Nss6tt^}o*g7fXI$vCjNmKkw@=R(@Yne`M6(qW^9B zV)>QCkK8_fhj{(w#M-MWsI}(OGoC~35VJKUJDf~e?pQU8S6Jd|3vy?`Sz4+Z(y9ik>Qu= z|BSv^e$0a(xl{j_*n9)b#@%m>v-+ z`19!>L0>FCKHrcZ8Gg@qa_xkZ)jwJOvPpjAPV4_mn=hU}7aC`sKY8Z8%Y3o&Y_wXab zZ_>YszF2-fIewAh57M7YUo5|r_>tikkM{HDRr=y?{W|>#_KQkbe{r{d?ikO{(-(K+ zcRGJIwE5!sJH%XUQ*81m<^%cF5`C{c)ll;i2zfS*d`eONg zi66P$KmBLvx5)D2^S$4`7#aSg@8;SG@6i{_kJoGXk>Tg*=f2>_Pb@!Pui;1TwEipE zd~yBPFwR>40`pH|zF7In03$y#>Yw#J@BgRt#qtlv1KV&1KQjE>@$O$pUySj+cJsX- z`jO%1>EB3S+^ye7ze!)*tzV%3B7JeUev$qM^u_YA2H3z@zsTr+8vUi$_47w8e~q^N zp&uE3js8mX#qwibR{k9NU#2gXAM@ZxM*blEZRv~U_u+wUxPu=Ve*OeMBle{)#(2~o z$7lJ)iu=dY7t7Dv9N56{^8nkx$S>3X34Jm4BR{_0!;h@zk`3!ge_!Jgn{NO+xc$aB z>-k${{=$=a{U&$zW`C{calKLZ~{_+oe{okiAmVag9 zNA6VrGG82C{|d%g_2*CW{Asqo*Fwe0&nNkjQGbE{Y4pYN7fJlco$9|JssFMO>Mt_? z1LljBAJ11D|H!DnLjRNNdH>>W{W|@%>5JvZ^+JARK=f7C_^&~$s`mfV}pT4+Tzw|@TUw(bh7t1f;fo-_M@sEuB+9~dDL|=^Y zy>`pn4I3DK9$*_7euMrV^u^fUjo+ewJbkhJK7`o@Mt)@E=T7zd&!;bzpNDLl{Ct}} ztiO-`ZS=+Rs6tDEq!sfewF?peQ~#bjsE90@cPBw`gQs{(ieB@H|YO> zzPMYzN&g!9;%@yG{T6+3xBejgiC^*c6L;(PoaSf5m*|VT^>g(1rZ4W+&(p8c7kBIT z(Z89#xLdzK|9Sf2Zv7(tB{%f-7kBHI=x<10+^t`xzaM>Zw|<5GkLioM^{e!6q%ZE) zuhD;jzPMYzPJgM5eEr4U`VIPDp)c;%Z_?kFzPMYzMgJ`N;%@yx`g7=uyY+ic_vim> z^u^u!Ir^X8*wxpfB#$FVSCmQ=Wh1Zv6_!Z$)3+tzV;m zJbiJueuMs1^u^u!E&5N=7kBIT%<$v4_-4L-V)?#z`Ww&}%kN9hpUCawPyayr z1+x6n@{1h*3;JUD_?`_L80TMPOY9|Bb#_{%G}A=`ZnBUw?78 zevSSY=!@k~YIpx(|B+GuZ2CLX7t4>Iub}_P@SF6%M_(*I=E08)e;)k{=!@mY^@blA z{viGP>5JvZysZ2QXZR6%o4#0n>=%Ay7zjngACVXnbvlD(Q;l;n6%%5;E;WHEd zQ^HGZF?>F(f2YqkiZ(uezA<2&^?6#6$M=u6AD?fCb$my=K9uM$w59vvZv8U-&FG8e z=W+7b2FCfF2iOM2`CX-dD19;Z<64@x8#Zv3U#EXIeQ~#bll~m~;%@yx`ftz|ckAcQ z^dq$5R=)n?Zv8&`+t3%wpWI%(57#d;)_;KhvGm3A^NAlB{&f21(-+I%F!3Y9Z_vMk zzF7W*#E%Sr4*kE;7mwyIwe|4lvfXt$Mr(2^@^{5{q{w1!^Z2_f0+4V<;VAz zSpUeK`hPvi|IY~dMdt6aqxb*+*n9IhIg0xKe@Ma|V1Oux0t0f%IXQp`0;`1EzyJZc zXD7R}n@M(eCOfk^IAp;f$T>i`5f=z@2(sKLAS*-#1RX+@OJ%`;AcxDL9HM;R?|Q%9 zJzdq+lO_9m{Qmmv<59c&tarVSs;;iC?yl}uR{txy{VQZIT>oBTU;Ufm|3&y$9;yF- zJN`#c6aQR`17~{VRf3Ly6vic8?H(zA@8GWSf ze_dj3f4Ba9@ZU*tuD|j~{h#ajzbeLmKm30L|H>ov|K+3X`fow(tN#G}pF(o3zw$`^ zU*Y&)6yv`J|Nn!3<&pZ|_`7!fw;}e`e-Qr9AUW4xd8Gb-?D+p_jQ=6{U-@Y9uRK!! 
zo0Glp{|7#v2^;cH^D?0veOZLL`pG54de+vFD zfPZE6zoO&Mt&acuV*ID!KY6UwUwNedcOZM=`hSDiSN|sXUkLxoBlUl$41F3IEC?^?$G9|Irx#CHUXuc&Wd#`d`uhzdP9r*Z%-w zU;TUGe-ZpEtN#`4|BpKUYcc+-@V~FlD%;K+lhVkAAtWy;a_>A{$Fi=!W|0gm2hv0u&v(#UCr2c1R_cBlgulagiMVUxI&S z^}nM1?-R%WDmlOZB>d+{&hsy2^}nLq|9fOFT>o=p{HNgmb@*2vssGW_ZU5^M``SMZ z|D7b~`YVsr|GAF;D`WgO!T-PDUwNed*Ke`wzd142f2?!e5Zz8d;{w4Uo0RELn>i?&X{|93H_rm}Bd8xnhNd2eDUby~y z5&P<2h5xJIUwNedA8`EtCdPjs{BO}F^;aIL|0c2*uKyv#zWVpW|E=(^JW~IUJN}=G z@jn3nliH>J$|LoE1lbGMznR!q{~G*12>;3>_5Zx%|FszZgYe%pL+YR_! z5&P;t1pmKVv>^3Y9;yFMvKOxZ$;7_;C*l7&_*WjO|9?6D z$ISHmPr-kV59;yFcvKOxZc`^Re@c%meE05Iws9CoEwTXT0-vs|NNY3?F9;yEY zj{l2d{Ab|*ZTMFnssGhG?D~I+*jN8%_%D*2>#sag|K~dXFOTtGg8%p6UwNed*Xp$E zzcI0|{=M+uLvpUavie`q`Txa^|7&CXSKe?R>H5B`-$>i;W6yZ+k{`|3Xc|EH6j>#sag|37m4-xA}$2LGc=;$L~B z{x>6g;qi9|Vqg6S;r}f7S62Tk+W!|h{`+J655fN$v!(vZ>VHN1|JGzLT>lBgT>rJ4 z^RL7ua{NCJ{*_1S|7OSky)pii@Sp6K`YVsr|BhrYT>ssPef3Yl|E2J+JW~I6IsPAs z@t=nOjmuJh<&pZ|jqHW%zb~<`{!Q?I9sDbg)c+%n|EFU7XW)OEiqv0ur2eOny>R^x zBlgw58UAmBf8~+-f5P$qT#Ww`{7>wW`YVsr|6yb=T>le^ef95!|DVIZ@<{#v$?^ZU z82?rH-)oN4UwNedk0X2G`WJ|O_3wlKU&FuhNd5oK@&8_o|9<$NI#=qito~Q@_%oC2 zh3h|$*jN7n_+JYD$|Lpvp5s3;&+oqm|0j@~=O4=Ie_O0)W9IcAVQ%G2{J87iP9}Td z`kx)+Ke14be}992W%a+J?Vp%$`(KOL*ZxWPpFwi2zw$`^pXvC&B*uRV{{IR8%Ibeb z=O1gGWY_;I#J>8c;eRg4x&F%Pe?{B>Qpf)dG5(w2|8w|P9;yFLPPXg61F^6E8TkJp z$+`Z@>VHMs|7OSky)piq;Xiqb_*Yi{E4uz`N3s{*|8^(#)xQ`1FNJ^Qk@~;O@jnpb zzYqSudaBf4S^aN`@r9YkpD;HXe|9H(;rj1S?5lqb{;!9BW%b|ad6`E3A94IY72|&h z{i;>%|KDQ#_rd?3r(^&B|ET`QkiBsI+laaT?)zQ_;O|%PudMZ- z?6iNF8@2ym9sh5~_)lCW``-a)Nd1-7|BCK^ZDcQ8{|d3M{z>?+!N2lI{lDw@|8I={ z6#O6ieeD0>k@}xc_QLf)lh{}PH2l2`|H|rrMaQ52IQ~~%;P;<_{~XD;B&W(F^?xea z3)lag82=^se+~YX)qi-r;p0!38}%EQ&KLH|PND-TzH5c;=6 zUwOFtL(qQ;`pT-GHe0X0{ld)cm%2h)^lj)XtNtWMKg_H@4f|NZd4)j1fy!RkNUU)g_{ zx%~!EeqZP-tA5I9zc91@AoTOlS62P7KcD|0=${3B<>Bflt`rZqKwnw)!|}5JFmwDV z=sy8{Wz|nR^$#=aXQ2Nk^p%IJ-wXZK&z0?`toq@2eew4}|Lf3KR{e0itRH5MzXtsS zp|3n#{lryC(?SL4E35wU;!i>UT<9ySzF4o1A7SSB)6l;S`pT*wuCK5D&Cq`u`pU!A zuR{M_=qs!K^6K9Y{dLa6{tF(iehuZ*&{rO={t)yJgTC@`^^;f27AZqtdARy%=wAwb z<>BgQpnpH~m4~Zeg8p;RS62Py^`AcIe*}Hy;p*3*zrp#k{gqXJdHrVy`ny42S@o9} zf9e|9e&2<@@^JMt(4Px^Wz}C^{3YmL4t?d}>Q|xv0Q8lItKSd(7oe{^T>TpKKZU-s z>Pzd!k6(k(|H=ij{gsESKLq_fp|7m^%WMDSk0inqp|3n#{WSEy2Yuz?>Sv(e2Yuz? 
z>X)ED0DWcEUtat7LjM)$D-TD%(fn=Hh4%b$J!0?t5&lag=kxC>SpAP3l`ucdeEd>A zi^}u&Xq%>d$;dBcrjwnfnD1X+PV(@4_QfUk`$AT&N_^`$>x24!EG>8j$vHk{jc<{J z7o4AknUBv?`b7U6=qs!K*^Yjg!|m(Fe-+sa*XR0}_-pXL(M1?v!RkNl_zyG3pSVuq z-v;{1!_`kie=q1O4_Ch#`iDVZdARyj=${CEWz|nP^$#=GzaRQt&{tOd@b`eZe}>t# z-wo1!XG1>$R{cK5f0$Xn59Ke1zOw2scJ#x{`mKw^|4*T>tolzm`eA1M^o^qb3+OAW ze$~+rGwTmP|8eLmtA4YiA7<81{Y3o#8T!hq-{k0rnf05Y{}1RZtA57O4>RlcLx0r8 zvi~Tn{ymO{pX>t zton)3wtkq6{sW@FO!dL4A0BVnf0$W+=;xyUA@r41zqG3DKg_J({43EPbBXN#%BtV& z#2;qXA6P89~b@O)IV7D$2r?C z%&fokDbX)MUs?70RyOVf|6ykR$`a8(6Z*=k|IIJk`eA1M{x?MbGUzL-ev1F0AO3R) z{R%Vd*WMKUpFm%k%Wp~P=GEyJ{iuGJ8_mBSBzvu*YJUHQnCG|d{IT}~@!zyi`j4{u zr)it{QU77)_>=z@{Uf2T9Oxs4KtIH+-v|A6=qsy!Z>T)CUzk}x^^y2L3Hr*a-{k0r znf05Ye=YQtRe!9bALd5w|2WwTw||Y;*Zw8=-~Uo+e`WQbcKnCgv>)`3f_@)Z^;3?1 zm|1@S}v(;p{Ii3_2xtolj*hkp3aA@nQEtl#vB=wAzc zWiHSAPmL1)sD7APKl!QX{}TGjsy~PguKHnS{h?LldqYn^Us?4B_@9vfFtdLDYNG!< z^p#b=$=Uyu-=*?fuR+)M*Ov23F5jp>j=s$9k82S7`s2_V5>GG5wq`9lLSI?+!|i{6b3Sp3|G^L258LCz&^na4=&x&;66=7g zFAHx1ZhA#{M{x30Vct%xUt1=8D7g1^;p4&0Zwq&-{y&9JWv1(?$#2W??o!o1J4yc~ z63>E5;C0E@I;1*wKPi6%cs1~)%w(^5ACW%`-WI$rRhZ+|?^Emw*6&3e3)b&X(DG$d zUcZNMGg!Z`u!Nb;<+i+Aw(kd!XYf6U&FMUi{UyN}W-6aT`3m@};OoH~f`11d2mT10 z1aCb??2QFyz^j3~!DGNzffL}z!JpxKitm9x25&;=Ssd?&;O~Hkz;nQFgRfzx`s(*9 zegoF;VGM!w`x=|ABITRVK2yO7@F~pX{|(sx3FP{Hqd$T5dqXQzS@y5rGnxdh;`>7F z;AZgU;9l@=z$Ng%z!~ti^k4|v*YArR1x`TzeXxE%tRJl3GkXQxhwqJzrSm|xHvrxh z+z)OBSHTy6o52r&d%>@OOW-wVF3I*Y;N8F}@N{q!_&jhLd>=RoUIy;N_kPCFzGm73 zyboBvAJh%*h5SY>kNx=>a02!}1MBw}x1o7D+pnSgQQ$uC55W4p$$P>5kS_yQ!5h%n z!S+(vUI&1i!1KUq@Xg>P_|K}3`h00^vDXZq3T^_Q39do^SIpF>mZ3kt1|9^Dp#hlV z)9?R$9bDD@1KbOq4c6~*{0yAI_a@$9rkK+hA9tc*l{pEX%}i}x!}!RT9ht8Hzr#%N z9)R(Jr`^m`z>}FNUj3fK3E(u|FMkeLzqi1jb!Gh~=>HM?P4Fk+$>2@b6Z@^Ow+}c8 zE`jxX5SN2XsQ+W&e(<~CDtM#y#eNNZ0C)&oVWxN|A)c$i+kzibeZ=z#Gqvv|wD0cY zh^akS!~Md!%w+FuwFh1Rei+;f{ww$taDonNtUnLD4Y&g4+mI|Tfjht*;B&w;z_){2 z!B2sk!9(EV!5h(aGv40myQyQ*&vxL7^w_p1I%qJJfL zXpZomDxWL-1h^mk8aO#m!KI?`{otmxCEnkI`_>R%rsc;9e+tgrCGDBqQ0kXVi@mMD znPcfdNI!ciA0d1cxOS9q7r6O&;j_TWBZYqqt{yFXKX~9p>eTe}9Jntd@x28eJXCn} zMiSo;$`R z=vcA;y6U6;pMjeoU+*gt|CWzP{k8%34G8ZE9snN=9t2mwP0+sp+zh@2+zb9CxDWh1 zcmVtXco4kWS0%m#{A~eFX?)-&@ZsQQa0j>-d=_|d4^3?7=SScg_yO=BcqzElCgtA* zSHPp`gvK1Nz?*`{bV>P%;3Rk|I0c@e`X|Wtnh&mlF8~jLZvsy#NcjiA=^4ULgYS7< zw%^}W|4HFb!KvQ~e|b}hKLef!9)$iO;N%lhekQmH+zT#&uLk#l?+4ewPlFSP=M8Wg zJZ3YAry0C4xC-6{JODmS`Cw`98Q>at0eA>}9XPYMl)oR`3|<261-}bku&h?9I+a8JA(exl2M>VX1up}yzPZGgd{+Ez1#SZG z0d55!1+Id-!2{s4!OOrmfRl*#5pW_a?eiy_wMz)j$0@bTc* zqosT|xB@;Oybyc~c<4o`|6=d};{7W)`Mk(K1~=jO{^hSpe1lUH=4T?f+9CVb6!5?- z;bXv4io&OW6D8pvgKOZsz(e3)gLS?BYhZmIVC5|(o_?(F-5Q+4deH;Gx?X=KSl8d5 z2iEnBw`+N5n*^>wex%9~&s?ys-@5{=>+yaL*7dC~fOWm< zC*T_FZM3!cuOi;v!AZn(EI4(gwD(D1UB7=7I05-Dlo9`*!Mfi5GqA3o|LQj4zZd%7 z1ZUv?c(AV5JQZ9*{67Ngdg6ywAM(FwdDQQJ;56dfY+LcK>)D&Yy1qUK&OrZka6kBZ z@Q24s``oSNaXfh(+_WQ|IMUDa;A!B0f)|2U`MSij6ub#|+)koD30&P?cwcbS4#FpZ zd%->60r2_Y1nl1kE`3Sniw}cqV}zH2Q)>yorTi;8QKz3#+ey5GuL^Gru0A0=9-LSr zd;qxgqHq?R{7iVR>c1s?0l4;o@QuowN<0sNo3;^t7Cdk(;#K*@!lSlFf4f$ABk(<}Si_rXJ_3SSK_L4F4~b(+Y3 zt2{yK^E@~W9s)OmSKCSAt%6hF0q|sS^Axi=6NiJReOtH;PV$8t`Z*K4=pNY~H-iW6 z75<%;zfbrzaOr;G&s9EC>}|cX#Mg}D zxcS6YjQ#t-{pSfk2_7g2{{>v?5gr03PZu6N9`Rox{AFCs>E_2OxhR+=uaKl}S>c6nIncV(2t4>p z;k&@q9b|j{7My%j_%F&ch2IA!t`}Z)7m0u9N#U=8lRK|&{7+E%7Q*|36JHlT9z1Y@ z#M2G#U0e7paC$f4Yrv(ejrqjA;Gs{1{{XJtCHw|B^Q7=fyQ06pBD^tpX#bT=#w2k6 zdcsq+{Px0a;DI@7n)0WCGv5=w0-Qci?Dwnwo5D|M`5S~^QTZ=~KT-J-;dOVDcuSMS z|90T?DB*8|GhY!t7CdmY#M7;Oe8oZ>ju6;n9;N zzVu$=|Eu8A8p4yn)vpPE2RzU&@wBRbQTP-sKU2zIs=U%D^r!Gr%6o(E1GF7~V7G zjqnTL-baPs1E)_A|7+|n@eQplyahPPH@4|#cW~)0iRW-|Wo79-PjNHTCIH 
z{TAU1z(agvkbZum`o{?WQsu`9{|P*BqVT)mT95FUZ%TZ%lY}=0C(jX1gZnNQ-VZ!L z&!O|rap3gNg=d4CE))I%c<4IetHIUVh5Ny&zY6~bocXozOW^)zgol)Iy?E4~68~cG zm%yowWPY?0c-(5j`+-}N!Y6{87fb!-fG52!d?C2@l<-a9rauZl0`C8d@N?ki&xMCn z|6SqL_L6v$9|@bNzAL=m-V$%-hr-)~t0m!mzyqfU9|vxIShx%x`knB(;NCw8-vHk71>pz5 ziRm&Q`h%9Q3NHgM=oJ1KoSrW{wh8@tfp7|(yheBuxbmd%f#B**!Y6>!cMH!3*M2E{ zCb;=i>5tcfOWz%x;GYMS*A;#ST-{dqAK?DIg-3r2{ozL8O_WQ*yMmj3D10cm`34y; zW`JwA3ZJg>wU^)<4JIOcPaljmH$Qfzu?;0 z!e5>u@ecM2?+i}vA>+dV;M9S_IhEfr+Qc(o<@=8@d@*=vsmN~yC;lV+C^)&flz$Oi z`=#&);QlV*HTRYH1}+o+8n|za#Ipyux}NY6$`6XaS>V(((O;nQS44gdxNlXF-vjR7 zQTQov^7q28fomrV{}()XgYcL3lX!N5qL`s&JN`yQ+1KM{Em zJUB+=3&6ce;VV`DTf%pMo3<5R4DO#K`~o<^3(e?f2;4emRl}B2{VYZr+94)DUag?|7Zx>5MY;L^jw52$>p@H611$%qe}Sy_0@K@wkS zQ{hd()#D_dUBSr~;X}3jWnzB@c+rQ#XMp=}7rq*t{IUVtVQ|k+lnkw-%?JoQkaN>T6XA-z?vG7#zr0HV6P0N2G+zVcOmhhF}X}1gC z1zxs9_;=u?CGr0ncxW@>|AEKtFTBAa67Q5H5>Fbu>}BBt!A;Z0n)Yo0_qPh43Qim? zd>MG!vBGzNlP3s24(`ngzXGn!75*4Jc#7~khe~`4E)d=toVrGMPjD-}SA&0!Q2E2c zGr?o767B`}-za<;ICH1)ZQw<(3O@#JenR*KaP5!6{{*N0E4+F};>{c@$G0uP>2bn) zseF6kqrs(b3wMLZ+#-BFxVbER6S#Vw@UOsAej?lJC2;@k!v6v%-j(>)K1|}9c97_A z3$Be3{uX%XE5gTulM{u@;DJkp&j1Jqd;cZzt#LT| z;;S4i`dfpOFNl0^aO!j6qrvH|r2K3xzgYNOm6wGVft#)tei%Gyb&2Os z;K45o{}a6E5%IU$k%;eT(cc2x|C-4601vDw@@e4Oc;Qa)xMzjW0yp&v_klAv3;!Iv zU`vT-33$s1!f%25XA7@cO~8rKNI;D;J(#FJ{jCJPWTXT|GvU` zaPtnrCxdH~g)dV5SJpB9Z&Eq<5peZ!DgPWe(J%b2mVZ!q<)bCu!!o&xs{ihLqC^|tVS;DL{Xj{)~>E&83{YE&j=a({GCZ9&qX{ z;d8)~ztp`RncgRcp9fSaEeJ_B6)tMC=z6w3b$-1nTw9|I>*{&{dO%D)Rv!2akH zB>oc0ZwSu(UF_`)PN2T~f|IE4ap3CfQhqkL^s?{|z(bH<10I0-*Q9R$g7?e*-616aE-n1Fw~p_!lKbz72S2J>hSG$DJg66u5uBa1oq>d;xe6 z@*jcIn@IV7@X!L;KEF}M`s0_trBg*dr1Cq3S8bMfhprdi1YEjRcp|t7{tgB=gIl%y z)l&Xs<@1Cu0uNmxd=r?joAb{=v-q!p{|s*3o27}wr_Ca7&Pjd8=7jr>6W$b@22TPH zZe!FFQ^B>b3wMC47{AT|CpQ&)H);8l@UOwmkpCULfX>VL=RYdnLU_ID65rr~!drpw zf&4&lVsnuk`52!8|~_l)p5EfU{? 
[... base85-encoded GIT binary patch payload elided (unrecoverable binary data) ...]

literal 0
HcmV?d00001

diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libopenblas.0.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libopenblas.0.dylib
new file mode 100755
index 0000000000000000000000000000000000000000..bebeee53dce7d0abc14a6e36400aa38c4b064b61
GIT binary patch
literal 20952288

06!u*|SsfR%V}_H{z*N@>XIKjD)vW z^30XnIo`gQbJymrEWI*s3hx@hw!L6V-Y>bQgLe|ES@`qpDR~FE_Wtl`=HC0c_kJC7 z@BQ3+Klk2Icv{}P?9=kDV>_E|44ba`Mfmi*L*XfTzu=yc>1XF1EId7LO8N)Q?=SQ} z&-i)x8uN_V+1N5{E4glye=vfX6w7ht=6m3~e4**=w?PAo(BrAO_bwA>cXm!K=HuDX~1mw?L)Q#B4oV_?!+ ztnLv12?Ks?Ct?|Uw|F+WM2qQsXKZ)%SL-*Vr8;|vw;$OTTD2h_*Y*Fd?-=sgVD-H` zTQ;h{t&$o0ytevfhhIOcO|D%WXi=Yr!N_9CAAYs|ZL-3)|v6m;IgZw&1Z7jkby_!D#KAX`u z=mhBpjZv~TM*(tLV|s$lDS2~F>Tffz^lawa!H@n=W;54=Ddo`czl*QsU0c}B@N{4! zm}sOO-PSH+sd)$AqUTD#39EXxlSI#1T)r>wLcb05E=j2-Atc& zcDiJ+Vglc$KN4l0r*ERz3t>xIS+>ZD*{bS~U3`Pp*|B~J}k7H+X`vr?muD`&!hTn1g*6^Fd?^1sA>My81wRBoRYUwn8 zzQfF+KrGwT;}8SaSU%0EumIm{Nq~BJ;^5b_zgqhj1Wa9hi}s00KgfBre{q0Vfww<~ z`g-=Kb4`=t$*TMF*1b+=-@m5s-e7udL}S&tZWX_ZOZ&$z*L+lCGfYj&TIV}cqwC8) zQ_8xG^H|eEV;cOi0M=s}&ib-5|I{}Q!ntAMb32f0ZNb1zKS!=rv%Y6ToqN7~7j|JNaMS!y!-jB(wR*WW ze5W&p8o`sRKTI9YhlvTKhL_F3Ca>doBWro)vX&%a)uD|Z&(q7FAVc+`-eW5ik!gb>rlUQ;@V~885SM}-yjeJQXmZWovC#i0}CKxcW z`*&*FBATmtKXnMwE2?ds^nnO#AS+Hkj+nEX=TKkDE`dj0uPmFjuAF+czT1f(jdfbz zsdb{yc!~KP`rUgw-!p?g3*@@%ee!Me(Yc!QVnGSGVqI2zv~`SwAD=aO-uh3TGVi$7 z($zdbeq5@r$HS|$*vjF*yZQYMTMoS2r+R9ws~8H`7Q!20&F26Qh=1%F84+(^JW`>3 z@-^ZU^IBa!v9a;~k95;Z|u-2R%2TpSg4W{ci z4yPlAhtrAR6yA(mclI4H`t=J^*B=N?WDQ^skEy2~d0M_}g8ftmk4Mj8>xB2u?tT07 z17R^YibcDImhh;!tuWXD2H*aCA_n8{c`-Mjdg*o0sC5;PE9e`wt#p{cp!(_eb6(9uNF^@%a2O@fb?L<8xqf65CK~ zDTJ|Iir6aIzKHC5nC(qAz?kV4ejW`R&;Fn$lPMXCuQfkwKb;pLpl6RblHd&tu-!NL$<|1WI{7CS23k9 zGFLXlUS#o$$YSa0kApXjk>( zqE7r;yM{bI{WhCxLNE3DY5rws{peqS=I1BS{5uIWSFO2dp3gX;^j~Uz@fgjeUoFki zC5GmrwP-FpSbeJ;e}U#n*c6j*r$h4_k^jFPt=i2IEsroqNK8kL%U%CW;jVC9_Qrhl z54AIjZLZBHMsuEWhS*5e=%11dcb>6NLpQ}A&*L|Y9cZsLF_%DTYk2g|oWfgO+b~?_ zj1hLyQnNF{fmJ!OvGkk;e?mw9W8S6V&CZzoIoKFaH{`s}I=J83?@?P39lis5%G!!Q zIG=Iro=w;=k~tx~bwA8lE!lpu;mgi%x#{L1oCbq z@~(fKc=H2gkrvjK%z%#87ylCUyNB%!Hre!_Wo!2Q?8Ldx^U*_wuIMc@4jH{w37^X+ z&PQ+6vR|Wp{Om^dS83n#vm@Ha&;AwXcWQqOW1ZR0WnXk|mj8gCUB$lUK#>1X6dIS& z7If!_(|EfQU57l|3tbxc&F8q9U(v0c-#U&BUOw)$M)8t44c!)4Cp+#iedQn5t$OUY zuba=Bl=B$#f8kPFoBbAe`7N8)5Bu?Wv)<=To%q-3KL7gQWT)5Wk?Ns;6c@6;@wZDE z2hc=&jIRwnV{yx|aWU$yd^0D7@wGe8DQ)e_=YAhX>+!S2dqw!#6?JL3MflHYJGDkp z;3j0|h7RtRA8!5aYGk$i?dl-@409Vuj@|m<=$Nf2c56KB%6Bl>ZFjNz&c8X4&fGZ> zd}uJ}`m(Mc>W({=Kl{pL`OH~h*p+RHyBc4%-ElEY9mG2yXU^dKIgz&~C;Adv=OeS_ z0MC#=?BaNA%Xi?fdkl_;A&ZDFDbA#r(sXe9Nw#V*tGLozY>E+OgX40~?h3cBHg;J* zxg`8}xD$rbJlwJNT*4mmy72E<2h`w0^_j#pyf}wD-=^rRb(Xaj zQVtlgHd$`VWN7@vFlj9NOERh)x_*=2U$dn_Pubapo}KNd=lSRv(MK{xazu2qI;GD? 
zx2TNp)9s=d-Cm}R!_g^{5d-Oz4%QNjcsfPxI1ioD;OP{#F~`#>-=bg58dE-*KIXcv8+9(-gtLCE5dt@!BQOu?{*`GVZ9-^M_QE zT!LQFIy&-)E`pA00*p)Y{GkuCUxh9)G7X)Ypi{3$r$=VR^PtxEvo!Os z@8{Al#k1WtC&GC6X{^Ujl6T@Kmj^vtA9{}%*7dUW{u8_mN$=YO%$Jzlm-YLrUxdz* zml^PxL zLJVRY^panOEH(D?_?FvDzxVf%ShLTJyBuuozW-8{Ne_Qud^Z9d!o{*1y z-GrSYe@EYN-}k@i*`(P%n>07h-$}Gd_u=2fVojOGCKX>7Vte8X_s0Iq%B{vOZR5Y2 zJ-c*1_>)~)^*|~)UT0wgK8<2XmR2TD$2sNcTAtLwx^cb}2t(*xMON$%Z;-+m3U>ATofADlSMSj+q1-x&WKyj5B(fwyY* zCkj_!r^druA9TxLzlQU&nRana5q6pAY1iX2c9UOLyRn$v&e53M$TB?n{`fnVe&RJ@ zOYQwj`a2KJi26Ib!>l>|Zv7p}N8|5cPY+|e|e%4nl@zc)g*^|zU`l{atpVnTIzgSA$TD;u^@0oa5;HEEovcH-> zGHZK!vVVkRKfJb=TByx7F6ym^ESW9eMR{@8ZyD?PEv_Dp#zq6~ddNdvFMnn&eVfc) z>YQA*6ua2f-T!mqn!BAdV)U2|J%;0h4jR`q>lQ1oGTzXx54m{@wDZeTH^v#b=?Raf ze>m6mS)%-xEXK*2)#00K@c(17bJ#J?z?gRx&|^KQ0!+Bv7Zi~AEeli^sHh(vGZBEVNb{Acslk5FrgSlv>jj~Ax5z~ z%$nFPCj2o9*?5XK$lg}GL9y$&7{%_&z{yEs6!NJic=92KdlYAk#VEdk+-VjT@P#B( zljZd!PtWz_>G~mL=`7D*X@=G|Ze-=A^iDtjgRv-cr1th)Z8exKV>tAk(+JE%@t9(>8@j~ar3U+l&`7$spKZI6ni9U)l zNeug$Cg&qsJ?_!!htr3H{jo`~KV*Ck>}Mvy{^0Ss=fTdug8k#E$CQkDgPiWG#6jL9 z#%%0(*1CC;^+O`~K*SiM^CneOr)hC1+iR$L%m!k!inZ5K=cxHT3slF?oTV#HBp=M& zwPv1T=DwCKs(DMTEn32y#L5-jg3T#;>e-@KG_gMlTXYrY`><~g%f1O@)jrF$s->u7 zT&#Q_Baivb8PzjWHIS;a?9AwyQ2iD&XDuxgH)$Y0RLA*$ zQhq2nVaR&SXq}r^k6Ea>1E@LFxcox;v6{Ze_uS^MZ#@})t){-UB4Mm+0N-;s^?Q2m zKy{vf$1@Y_JhQ2v%MVeHM!U+rI?rpU?-cfrhDNvi^c?1mM}A6=TtmI5^w~b@Hzy%? zJ8nBKTIbmr$|?5z$KCnf7!;keIo`XebsMLeKk7ZD@9yInU*j3lhn-ioQR?fj`5bNJ zv})_T{Mfci*1fOr-M57M`n;OEuB)7g=0|y%_7))DIlnIYK2z^GE}`BtyXD`IPqyAu zYk=L8J#T%0ZAkg>v=gm+QN4%eAbtbc@FrO9Wt@)s=SkMVDp$UT`ecp2rp~dqrl541 z_RsBODIsN>=M7Q=+Yd^uCozTwOOc8Itit|>d{%&N?%s)bxx3PCr|JeZFOax1H z?B{=O9LKKmu+3sOGpD0)+Kj{VW_r`|Qe)Pd@K zVdiGtz<9r#*$TZpt8_Q{tc6cd_fk$hfaMj{y*$dfLe6z~amSx3c%ApVe87oCh>(p49&8ew0p%XLAg_YWVU9-12J zF7XHRE|-DN@K*w+?niZ}=!Pg?h$q@z`_w&$zR2GgSSA@4rru9=u;I?xy|2I{*YccI z;*qIsk+(wWrZ&2OJaPb^Q28i3hp~7?a|HYE5r62ps%KGcD)Zo&XSlrL-uwKR!1^%v z4fKEQ$-}8v9pCatu0jP7k=u2pPopk?;87vfX6IeOCKmwHaFxpY4h&_7|{U&G{`(Rz@9sq?l%y^|#v46^Q|W_j0a~-;bQwGgE6<_MiiqPq($>gq|5w=u_E~vPZN&gW6=*XQ<(v z^qkrxyY{%+#5<#Hq8+wPwk@`8-$_@Sq|4Nl^Y_1jH0{>)N zctw`6x3+`t0D9!$1G6Ki(EfWlzJ}xZY(;EughsV={#g2f@uFU>gyw*kzdRXQijL|( zqk~l!_6IjtaxGZ*&)3~kH6Luxqb}@1>cZv{PYjxPB6StmaN~>(XWMdU%k0Q(uPxGZ zm26YlI;er_W_-Qc)K%p~Y@LQ|JNfF1kPk1zSHr2je9PFesu7Z%=B}swI6j==C;DD~ znBgaCTI7e7BJZSI+OZ+DUX0-<){B`4KZ#eWJ-(@9KaG9ixQ%rRcG5qJ`HN2szCHdh z{Kx)8@Tj#AgulDN-edSV1H`PNHi7&c^_AW`_p%KMSQ{c{g)O+l^V`#I&CTt6hOx8; zJDPJK$Hc0tIL`KBRZpMo)m#|NjAw4ruk`5Mo7uGHfW?aHD=j{h_fbCB#->zz(F-PJ zldA8k>60{MjMlhu`Fxe>L+i7u4{iQLeYlJ3izIjGL-nP2T5{&IZ}!bF_@n;}{(QNU zUx0<-#CwM$do|{%$H-oHT@m@GiY-2Y{8en!uN%o7_R0Rv z^i3!Jz?bl$HLfucTQ=7yn(95qZ-$mJ*)@3mo8};l;VCmWZC82VGkk$Tl+2e=jk{V%09Y&v0vFR0pVgU745NG8w*l7Mb_^ zOxY2G=fq?`xbiNgB|LG{$An>vzeRk%!5=vE%eM*O&*sJQ1DS4Kti9=qHu^e3ze~=U ze%C%c`DxZ}l{_mS(KF+P5v~ne&$BH?G(Ipf@6~cvH1B2lR(Y=ud?l0j0wXo5DOizh z^33Yr@3F09v-vN@a8(;Aopgpz#>kElmLzM>Q#-xkxVy@3hCYOnHcKY}N^hpD#|i!Vi+Xv2;w7BYr&<`~H~UQU@J7 zJvt6VQ^^t0w8N7lep_W9`s7h$prvmb^j$^We;YC1d(8X{W6c<(I*$Ltivgbry(I&Z z@zM&gV&bPOCPev3^uC%+wt&?!s@GJlrs!_#Vb2+W_8*C(eeB!yy^^J(z4{>$2mR!T zFQ0g(!NJh+bL<;O@loDB16&*$CN6BuNj1+=d&a}Y zzLXd~wlklp@sWue`uRN9vtx{}?CF`E9xnFMH{v1j#uI5S7Mjyot5)_3v`^G2UXIO` zljy$pVy`%H_DZI)SCmia4JNl&zIA52EYX;f&QPwgecO>GF1B)_*pdx%VH~d99Gm97 z7cC@Hl*hMri(jUk4{oZ^nf^X|#>12P%=A~hjk4D9KjT}z1D|yWKZE4;qz{gQ8R-M9lV;_J^tx~|iT?NFBpaMG@omj~ zr~0DJhmCJ*y(Z*D18Wp!d9r^y&uNb4i89?et)QRH-A#pFUhdA|MY+2Ue0$~Y+VNFf z*`I4<=_bDWt8A{^SFKwtcX!$VG@1pC65A`f{5Da>`*M&o4PD~v7yWrXSH4divQ5en zo(~P&^;L%+gCW_j@iCTv{c^n&Iv9F*YpGhfF21dU1|1$f^sN!`s$`yzHs^aXybu}w 
z$jJEEQb9i%o@r#b_-r5<{?lp6WOyQ?mR^a}mC&9ix@a7%jy)CW1k)UdqT0)tNVpoDdyP+J_AB>ZY(C zKM=LYtnVth@7m)rS?|?Naee-YYY3+|oWVbvIhW^XbkpqnbaQW8F=Oyy&e)OB4 zXQdyHhi7{R;aNYw%75|mtJWp;%fvPxU&+&659gdw_U>5Z7bopBO$kOHIBeUU0c#?RYboydsU4 zDcN8#dTCsuYa>ezI?!Q1bQtdZ5aO#hJ$*=CJZ`QCle;GFYIWo01l=h79}l0sJqVvE z=IiG*`HOyD8_&Dy#5=^`6t`>u$E&GjsS1oU^Mfev)HGHP{Qy;u2ovZyO?vuXO zeii!~UrtqVr4W8MTc2Z}@^czeH0hw%U7~U*aC-YfO@N?>EOpv`)pBcUP=73mnU?RGgt> zgvKv1W)K;qn38G&T}&4l+42;az8YIVaccQfs;{*3o~S0!y>{x5*Cx&{q5OJmeu?ZB z$;WCh7ydQ)BzV%i^$jPwal66ohFIJ#QU4_J#rvs$S`+x`R4+6=G1xfP5dMKP2P1{14-94ahi;TOeF?jceeDb8v1;4pLG1X?} zz%*upH4tgvCFRh+iM8HLK8*dn$i)cnlJDThMHA<%Imfe>?ckSZyZ*ySVvdZhFnIXv zS!PW;%{lRL@Ss@VLShW(vN@ca0By9U5HUq#d#Khj?a18L^f%_R&S-jW`h%r4$=0Z< z4_=w4^@TR`>x38QwR^wO=dj*T+Qg23pA@+6Uz~!)d1-evZP5Bd9qHLx&-KV=YI9tC zon`Q)wT#2J-ulnsz|B93p062TJv1xth8dSAGn$| zvC^my@1RCTwBJzl?r3lIq3B=Xtt-_$!d*XE^>{A*SeqB#)|$S;Y&q}K8p^_~@o{LI z!RMD__{CmlYIRmd}*K%)JM00jn-oF?;+`y(f@<-U- zU`_EuTbu~}+Ik|}LFyN}N>U@$;6pMZjEvCSYO6Swj3`1zOyxKcKhN_2UjA=vzsszT z)(=m}3WFzP#c1IRoT;BiBPU9{xdvuRc7QwKP_pGQuB+qNuA_Dj$Moq`( z_4JwoCw5;CCK|!r(pyWvQAd5;7BE(ad=XYEY42X>00u8v@{o%e7bjXjP3x$QFJ1s% zmVlQxz{(3AUQQSWUM@^nKQ-cHtT|ol9J1zl{;`oqE}|Z*koQvi(5v;2CA);x%Ac$@ z@~epUlyJ_D_lv{IdKW8Cas5Ptm4VkwvvO!8_&mhB6w@dJo5trIlZkyU|7yIaM9N1EwM8l`XT^b(4y{xU{z*_@7^HlJ^e=_rkQj4p3y}y7={_SaQ zTr>A{FL(7foWAcky}HnqYaRH@Cg+?)r~Mgmy4%?n!|6?6=GgLqacbK*44m#RS~1JW z)O}zoR%g&3r`N-$!^Ek1W-^?f>EU!4zb40;M5i`SDT`>{qp57NyO`U$_2@Y%kptLU zf{}}niB-tN7H^)1M7yiXlXX?-*q=z&A@ek^itH|}(~8`> zX?3u1Lz8c zpqsyz!q$3OjA#wVPVc$c7|T2x3L1VS!Gl^a1qhuSk1x9uAd?P0=_GsKr{~P*Rev+m8aQ&n%PX`TREAK7}d~`Ur@@^;P?5HpIpU^Lvtnk^& z?sEs@fADW?G`~t7N=9h-^1)NR;gtp6bf> z!(iRWcE&5&H6xW1&T#5a-tu(*V~T-jzPvNg%O&XLMs((#=;aEIWt$aYn`zFVHfUCi zo~%q=eMK9-X;%o}3|%eVU4pLG8sG9$Kh8C;1@1EbYYqEF?8^=dpbtw%GoKwX#qLmA z@d=I2>%U^n@~=D2NxN%mXZ=3yq&52j`7dMyN?Sk1dX#m66Kbb&t=g&gm9j4%QUBlI z{a@VIw{R*tU*Dl?6mQTwH1G4P_8p(F?^qgr$7t^zZMY zo}MwehYRNS&f*;a>rp@owJZW*~&YjKZ92RPiig13Bie_?Q^IxO*rd>a28TE+0g z_3%d>{ISN15m$lzGoW2#YU34E;GrkfX#5oun}I(xpMD+J*!lG9xTcotzY=IP^J%T1 zf16IE-i_Z5>bW!EkJ`oOt^+qGEv>1Z)tEMVO+Eb45G-xo#g-q)F#Mr*>V3-bO`>n~ z{}%6m@kb#v)pzKcP2f&Gh`mSrqIZg4y6iib*>^69zH_zrPVDL^zv!IaqjO7mu6>Wj z42oZ#0k>+W{l9~6RXfc5W3)y5vXg6CxuzsA!SIW{Mlm1DFWMKstfHQ+2!7EwntM1e zerfQYtN-Q0{U4rJr+fIOhEUH8y<;-?P`{Lfa%(kqhxx{8%lK7W%c$G>NV+qLhy!0hx z?@&3H1Jt(X(7rk+r&jZsRd3&rqa0g#U3jB&wCJj z{HD!ZE1bqX^PBT~HTKs&Is0hC#fk6RJioUn@g2w{ z$*#C|MdtS+pX0w3nRII6b?cJ!336}PZKhA8E8^aL&-~s3uTS@b56L~nC$GYni|c#j z=CI#G-)y2^+SzVrqi#Phz#7P!7g1x@4uDZN20FgwNO<1*a^7Wd-+cDE&w|wg_~Lh* z*EmmWZ{T0IPGTeOLN2P_tf!Yw?!P3EZpJ0mlJClAKOLT_WQ@zdvC+s@wtbuNwHdlI;z0jv4%R{V+9BLXDxBTbLR{Gq*5xh0(c%sV!W}_dgm~ zW9l=^+``^T`gjNd-uBWsj9B!~DQasR`7r?9u4_aCAyU-Gqu_(yLA?_ZM8 z7S(_xzQeXf@3C!3)@Cyo@_=p8`eLD$Cgg21vrCFEl6~bh>wJ05Hs~DTn-!O9 zVjrxy``9Wq-o9jQg}2XlM{`FFZoYe)DV&gw} z4r?Y%nH^#LTJHgJoUam-v$$R4;db*F^x<^3hP7l?a9JjHO4TcFE~<)p*es6Iu`x_8 z5iDu_(vAm0k$mEDClGhrOU`jA`gt`p)is%;oau)*I+5$aSU1-mCQpg|cx0N1-DQ}3 zJnV=?x{LB`x3 z=Gt1?qIT%L%B$|>y%(d~bv%mq52wA)ai7|&oLM(*wfWVneEHQi!4&dkS;Y$&ztzRq zt?H4^!xe18+3a_TGb_)M#f$>j{uRi(q7}^lOnyoFYOrtCH|U!azv1e*ZwQ%h$e_00 z_?)yoNv?LA2;**?(k3jnu+1}h6iU?z7El#kp(8IWng>l6q>X3mZw}6~opkw50CEtD# zGPVx+CR_c>*y>Hxd&{P8qTbu)Tb~VnYXjrLjyy-cwGEk814fFFb$1e2--h^&9%d;na}3U6zAa=*@XYEP8wRy4j6;EZCuiP zaS8fB_P@2mRsUizD;?s~55_+6^2MrsSqs*N$io?3;LpR!7bwgiPn^nHOXQ4QzDpf4 z_M`av8hBsvxA2Vrnxmn|%NqmgY5zL9d-4anYVPj5tmW0AQwf88_V^Irb?bg*>P%~5T$@r&WuwKm65 zcl`P5wf_9|OoN~P^;_7-rac4XIJOn77;W$pvxiOo+C4v@{gB!6ArC*v^4Bf0gRvVP zJfGT7?1ozC+{o5VeZs*9SXYXD)+mdpUhg&8%0B1&HGWsD@pQj?KCSx<&c6x`RYNM8-l_eEh!J8hEZS54wrJX*dMB`B 
z=BETV=kktx@TB)neIxLq|filkIJe6@im~V@-UE?FK?hm0=){YPZ&~WDAp_;Z{u)n@%nzPKVQBc zY$UT!&HZut@~mQi8xrNr>&O=A^KZdh1Id`b`Cfm0zSqmxOsrPCNwn$CFJjL0$OmohqY zEV0ti`O~B`r7ydFNX-vxU>q7tT@%M0XQ3}=I=x%}+1c~pz0RJ)GeeO+;^B&q4NG_8 zUkyula^3r;J89oQy7Myd@~(9!HCb`GbIuU8UvA!Dt)uz?Z+@L>L(>J&p^~w^6%kv@ z9;Z9K=Mr17wO`Pgbqa_B^wXU>FZ$1k)e73Rjx{goRnXtm2jEw{d09H|h4#xwMUI6+ z>smaY`$2qe>CUOd8){S0p#gmG+dt5=>FJ42N}q0h-q~~DoKQr1_286%(W%`(PKk6f zwxSDcyb4wrBhE&)TzAt{nV)ua!!%Z2Ois$QubGN2|bFZ_Wx^n1RS3N%vtUdgI>K&YAzt?%Ld)1r|)xO60lY%Ft|6RbDSsOT! zSlX!cZpWFqC2&Ie7S?Z8yG-7ISe)`Xw$4E3)ITM>>xlQR!`?Gr_1u?(}{1+d^T{Rey7`K>pRS|c!%1#*9n?--bI`CIwA8dTWMz< zb@6%^`^8nhebpkg@$L4%GlR~|Q9rgiQvtj z%Ne3(!RU0;mRQX~>>PPEuVx{3j+|H~ZI1hnSj|H0x>(IZ>|Cs7A$E@Z;J#3>cz-BV zT(7zXWTtYyiFFIR6zdtrdb_wf?!G*9cEbFpME5}vi0IDCF6vvx7Rde?ri{~y#8s9#E%JGP2^dp+@J^_Tbo zJ-*iZb6-M!+Tb3P_4nnz`XUFs5Q_W<@%0EwDrUAwZ3DM{9n}#P-md;2xK}7 z-{w26pw6J&S7-3Hcl{#icCL54>sPpS2C?s^&fo^VJIA}e)VscbIs@~3@=|fnhgVhm z5!ZKj&hJ(JDDFD=_pQ+Q;$CV6wo_{mB0s(!yH#WB-SufT-%)IiV$*i+(frVvrNMV4qj;JCI^YoPCmyAFAE z+-;{{RC5>iY-$ep4%4^p90sa87#poShD9;0d2A=B)`yk(peRQJgda_tnX3F*U6mg`@QEap_Z>R=uAiF zt7gH_*w!rU3k8ZNp$`_J8}3CnJVpINs?iV1r;lt=?LwHg6w*%B@R{%T)kIv5t?1W> z$zo#C7g^L|%wT(o_@~A>R8HCyL2xx7Mp)>^Tk`r zk-w%57k$vwq(4QwX3~a^k)_N-7G6MoS?k%fw~pW6!0(T+iB89IqJCBs8IA0$%89bj*ZjWO`jg-_hPTE){c3q zqK4MQ6sZx^7;)9<*|FrRDU^O*$hc41?~Q-S=sp2m_e0LV#uysufy#&OV!ZgM?oE3Z zyj}Ra(zg#l!^8OGmDJTJC%=Xu17GxVLG-!#^`KV7mk-hSB%_`s#h$?HN~XZG6%?qACXpQew8%LkG#Vfwg; zP4Y#20G5q>>7w6-=L|1qZu?ePwsuawaE+a#{;st0WmD{X^Ao;TdP;PtW*wjkHevd} zge3AMQd%>aamkW18D?ICjNI_(($=)mX&HrqLTt+PHH!Bo;!NwF@1rfYFUCdtA|9*B zWo=4}l?$dXz+Vl0!I~kx{(Bd%2-*@K+1|x@p4}__-E-u0rl@ zMGxt_xpvtC>`#671IfO781p#jyUXF%Fl)DkKmEnzyyokJ-+=Ye17Y*Y7_A~xR`qz3TSq^Ah|NeykULoK7~8PTyu7rS+V8aEqmQA}(EoAj%`OZQyF zGa7lu>Q67}>7MMg-Zz;z&eTApFW-rjI$yoyAoq>ro-^@5-S3)1yw0xk8QRlR6()w4 z7*ib?;|=D3u)ML0@00%RqE<sHAmghw3acgP0BB^U)k>Z$jNx6*z(G6-Twa` z*Dv(-zie8`e_^H}>`bp=J4#<@&M{#p8|=vbx3VMI*tW|O#`SjwGuF%kJJEYI4s!Cy z$WH1BHpUUKWyNh4)301*Z#|AQ`eu&HzfOS z3v)l|Ii<{5Gt_g={Clp&t~?bC3!Ac0T>8c0u02)gAY!1~Lsc*QHSqOA*-7Nt@$mJE z;qmoeFfbrrzaHc3fA#v#+IVjNC9#p7POg*RZ<(lhO(YY~@ML232v-kW<@MW+LFNER zmj8Js$#_la;}TE46!Cvi`&hG%S`z)d+Zpwt=orno0_KB5L;ji-}FDGmZZiRZU?Iw|-rX&{}XvGb;{%IGWd8;QEv*nmF0bQb-k@kD#E znfQKVZ;C!x+! 
z8DHlgTNmwP`usBbxWw9^^!eo+%MWY8236gMzt6jaS;Ur^pT%H4+D+lM0UBL<;$%V)|WqnuJRF07p2FF<4JIa)P8)6qvmk)7;| z&pyUjv?U&YNoQ;R1NnQ##`{EI*^SUw{+{&sje)P{Sby&__M4#H?^S0CE_%d!9E&&e zx!=^Cf{R9Q(S*D^3xALR;3X$M^>zF}XBmT#E;;c>JR@*Icm1A6#V=L_)%O>?_30agza)+1N@w8@RJEXQcYY& zyeIrjLgt!l41V;#=749NVE*6EbLIaDGw_31^Ga9}Kb8nn#5+BIaJRE4hNIJcINHH8 zT)rF}M;+r_KUQsdw>T<1QFWflu#^?UQv3J;v7{Q26D*G6Y+A){T$@(*>lR|2iZ4}z zm+!NY3s@R1Sd_O|u?&7UQE&E)6E0qYKT}OC>L6S3ODiuaane~wj5snmo?oz8JynVA zTLt#Zpjq_XiqhW7SLgQjg|NfSyRH4DaaUn%?2*QPmCdXA@l%{D^CpieT^b5sm3M0R z>b$FFv4yY9dnL1U>FtGA<-P1U$MpXs{&)Dlpl}htSLSWvcVCKgOyjnSQ=CtvGhbZg z&CH|uT=A}JsUtXqZ1JvN!DG9?Sw3^z*3!mW+IXs(KC1B@%hr(hD`q`4OZQ@EoF53>begkzLoqn8v|qyc zy-wh!>o{K$n7u5esPvYfWtZNv9n42D>)}{FT=bl9-0Svr%!fnX9TERL_DI2R#3h!#-IP}o%8?5e4e{quwb!7C7mKJk6)5cCAP$Jtmn^j~p&><}TA%kLWw- zPuXJX&*(Yc8-8PMukPzF8)EZF$sd+~D>*KoV5i5kCV$HPee@@`CHgm>XLV1_abPOU zH|jf!9O_KKQ~`5Es?GXleP_yjizQ#ox911k?~AsD?-K^~ebIA#Uy<4uVjV8y?}rnx zG9v2ltE~lmpYRfgH}Cs|7yQ`PKKy>krTt*U;zjqKHB7u*240RWZ-b|tbrv_Z?*TXA z19N*tmt)J@n}r?6i*4BB_w`--2Y;6X?ef8Bw-0yX2jM&Po4LJV~z8A@Y7FUaHUg!LBMgAioW{_uFZS> z*=zGsp?CO{MR_Uw7SOjk|FdVW%UhZKxxAIw4kO^}m4(;lt=zsiFD3ozywiF9NH$$} zko$M=o_)OMK=!qH_lK{^8_D(e^Zxs}{^uOuQFu+>yzFc8u46l!Z4BEl!q?^<3SXD^ z3$7cP{<*w^Jbz01GV}Wj?z^A=XJ=#6u&w0!x!5zOV!w>Qo-u9VoNL=$nO86i+lJ$- z`L0sl=loyx-aS6b^34B#W+s7|1UwNSB$Onjtx3=xRynC{0*Hj5c5CTcZTB0{b`3DuI;w_{gH@h4r*oPkRWb;@6Y`_ z_slbsnPfn0x4+##<~2_y&*6Tq>vNs2`@T7MnZEIJ{5Z}hc9IV#zrPe8?oMcy=ktMT zJzF>ro0k{&gCfQ4{9c8P#76CvAM`E$zY=*_7+J*J_%Qxiq)Nd;1Q!li5EpaCF*WcN+Ig-m>)dA;F7yAh&W47tnS=}2PzMgx_SM`ro`k>1j zt6wjWH;w0i^}eou9B3C!!(d@KSSS}J@Qd4=P-Qc*N*H?P;aAq&9xAT+CHo}eThy@D zbnWfA#p|K(PX7Lad=UKR63=hOhrRG;uHS6vXZ>cyFxU_Co9vs$Sb<;q5or2Pe2gtY z-v{{|fIrsPC`AWr4TK=R^MbR6`_7;BeCM^`NB>XfJ2!zT#Yn{Am4C;xw((q9>0X~ zS^e?L*0s}4>tNx-w|T18f5$Pd_)`ToBHYF&p$|qTKZhZHCkr?U)<2&^u=($UN3et zH1d3L>0;#wuFR4?pMpMm|6w)>U()KkWlq9Z)w!L#pS9}=nuUrNqf2FP<{6reDQ-iT z?&t3|Xy(W6Te0W5_jSC}Px;7pY~>kN$3?lnUk~_c8D25(b=lq#@BPu&ckf{|#CK0e zLwscu8)z)^IaZ8(3jS$ar{tcTf4cX7s(;!$y<^o#d!qC%2=1%uU41vcU5n;+&?h&T zhkmIg*Has)9$a~u!y0V6=FWy+ALR1`K1cXWC9mzrXnyy(Ez1JC-o5OcrgtyP<$Ds} z5x$G~9?$o*rgzuBrFvCqZuKgE?Zd1EMn2p0+>j5iRsF-uhp!AU7n1&`8;P^l>-gOP zGiS10$K<6S;CFL;1$%1PZ_u?3_t0L%!>(P+ z`^AKBLO;V@_O#wXUZhm!kD# z71jwO+6TE zv-0r}^6_!z=Ww46?>+qEP@h~G{oN-cAKKS!kUXa!w`3}phRvR8&ik|ny6mlR`yFuG z!ycCrVw{TGCl#;2F7IcY7n5hLF9M52KAiG*GdK-{)A~v7{|rt!Ckdxx(&BV)Do&?@ zQ)IJ6&)5Smh0_yY^fK^yFf_IOgonq9Cbv)ZF)#@hx0ev_YhPvMIOief@!#;D!f#qQ zc>Lx}cr1?N@p)>rl5=5Fxmn?`2OJ*1ITeS=*L;0*K>g7zBk#2}wf%3;29w@iTTw81 zE_mF1_>?dip9l6Q!sJHh+N%V6yCu#pIW>#N-c=D?cXh1dngy zGuk|a{jL4 zZ}rTmj$eh}V%7n{Z_(PW@>{Mz_U~tnDETcbIG)RKomaQ7!)MvpxFn#itjf5ZsgnavM~2&kK6CJ(K+ve`25Qc zBDb0R5W6?9_IguawB7G*KV#U9P3pkb){HGyzs>fF-9>J{jp+wbJ-+~(Hm2Rb-<#%N zCiZ)qR4iX&KRVg&XPN`%w11%PsXa_H-&gBPYOl9KZ+=H%`@7+_>crXZg&D>s+c+Ql zH0bB~XW;cCe*&IgoWk>aQh5FWkLM-Ksml9#EA}+O^Ah-KdEV;rT)Y;~g$HYQd+@99 zOE)?-7Z|%Mzw{5#>L0KH1LM`tIi~#wtm&4pm6M7$Jz3rvZYVwY1Rm|BsAA58S_f$S|F5h8%LVK>s#RRH5!s8z=D!<+J6T|Ov&K7}F~{O_n_>xK?mCXOo|R&W ziqNF|oxw;$ME~2({Ck~;PTLQkn)xo_?|Qz)w;H}1`P<;-y-r6QFZT#90TY97g;uF+ zyB@z`QS8LWE{pX}U%1NlY##$JzvFWlfBgHCs|UI7$9svj^L(-PbJLwcTUTmI^}~t1 ztI-jyjK{>?@G}v2Pe{sL_m!A6!%W=WdqH(aUkQG^FXnC{=9cVL5O*(bj1*T8OGlod z7DId;4y@bU!}W^EZR}l-zENe$HiEFpl&U>?qVRF$Y-pd|O zB@1H5r>Djg+PAu({UG;HY+Q=2Q{C~e(X-z$IL<;Bd25O&$8;gM{Qw{3iIitL#z*<0 zLU3H;`CegsFZpJ}^pf!7;Z7J5&fFMwV#DLmO_(>q$ z9u@fdQ{0@6&F9FDBw>lU)Xl`Zs_BJqpIlr{o@NN2Yc6@3#P5a0%HNcLp=5BvHwnId4IE};Q=}s@ z*_56!&ii7XP0={!VpEzuo1!rmc{XJUbd`^bP1$JUdMlUR*l*~)F}wzP8=EpYPG@&M zy5=?)xw2$zic9A+Tv^;$yL-ATiy^1Om1Efw$?-(4DS|%*o-HZ#WU#RPWAJCU`14p` 
z-$?mi>kCxEqa}Pk0dK^kr}<1q?o$1M)ZA@qZ?j?|^)_oi7_+81cI9&TXx0e#Vxo&! zPggOK>4E0_)v`bQZDXPj^SAhHa#HYre{f<$pRj=JiwEr-%SMR))>jr5B-eF(H|u|X zY~U;J()tK5C$MoA*eHg+OX;gstv4YvlZ$(ZUnENdq`G?LS#Q zjk_=&$Nm$vJ0CuMmHFc0)1J7_D=>U&g-?SXpH{*rzYLDWud_UppFcz$?_Sr>hktML zeD_J{J*_VweJ6R6zLPw;GU)mGoL`>R`SSHPq6Rfu! z0xkZS^^@>f`ZC|sm-U{$v~x*TVAqtJ@XsNw^>P!V$>E$7IlLuB4zEq4hpoI-dGc1` z$(!UZ?%!K^D^Kw8LingPbtW2l%V}>zuS$=qo*@5ReEfmO$ALU|fB5pTMaUa_z8HC{ zOvyt`f?tZu(51#EO>X}yL-*k^($HFOF6RFPnqQir`EQ|z`uQgs*-20HM?9Kuf##b3 zY3)}ovSaBl+1ZKgERW01d7dn2j*xOfi5OYD(E7J7&)z=bd0qGyKSO3;TRj5LvWbPL zp%F80w=r|?xz1n&TV~_iuX-}nl~cWHBD&JPqk87@?yX>-o?Y*rUmg7WXY!lgJ)hd8 zIX<)L-Trj}Or6rRQB}mkrcQZ{J8#F-DQCL*zDuY(?nK`y?y10bNk2*_E+#gz`5x6B zr5m*$spQAh9QjtwvA4vnIU0Ml3|`*s)*L?qFMG~Seoy7EyYtnDl{{ZHT^v-@SbcYq z@lSK{Pu=yQeoQQq>Z5Lj$Er8#9lrffU-A6XLZ4q+oD}n<`lVgOIEkE7f$>Wv+vWJ5 z#KVJ$|4l5e$2Z-{|8DVo)8i4>H?6xbmzuA$w3)cZtJ@i$%GB-nO?5lP>Alo1RJX&Q z#eY?doru-r_Lz5c_4yS1S9dOw7pHHm4ZPFT?s|#S-Fp=IYyrM+nrpSMP$8K8fAFoI zeJRU)%V~&lOpFfRYAu$)TRq29g)3pm!`l#i%jb9nzsqmx4_9yvF?~40CiWA*@ zf?^k~r$rvXv&C_rX-#(LE2dchZPWQoCcb{muBDJgJ^`8a>;6ASZqvxpLkWHyG4D3I zERC4kjMuy;8{Q3}H*bY^etqiZIRhX6vB%SY3di%D@tAa-mn-t}oEyV|zdF@C=f>JK ze|1WE&OKmlN0zw*8=uF3*mq#ytL?G*xAWGJL|5CktzSPOl#@+ zVj<;!WM`HCN&G&sIPBTjBG1N_feFP(@o^yM{yasP@r45yq{&mr&r{ApKDTlX%3mk) z6sBiv%>`-l6pE{+c>1A;Ym{qD@&)2Ew8(WP^|n57L~+uV`W zo3cA8G0I-&g#@qj;q?rB*AL(?OK&=!-fZ*HS$b1+{|lLE?M;aM z#^>qQ)4OSLX<{9EG^@QH&1$blv)b#?toC{|o5|5C=iW%~qt-YqRi8iWCaz&W)cBgo zcAZ$(PPYE2)^SzM=vMqr=~T}jy|$I(armR__4%yn617@^F7(_A&e!_QTBEsF>(}z^mHei-O3z+0%Cj4h#~WFv(f@4w zOgn~)!g}^(XzD$G?7*yB!p?-PKe;VVe@p?Z$O3$b8e>gP0{bOTtH@%1sl`Z)wJ)n~7E_yn($7#)e*iPQ1yNsLq8lrKO6A)us-kf3+srt42QctSP29RL={uF6(CI0p7x=+^e_B zepA<4`eS-vYUm5FvZ5ZCNBFIr-+H|KiBuug8jA6-%J^{{S6raslTaMK(r^FRo(jl05D30io59;USSpcAY-v?CAS>s`3%4nA7nuy=Y6eMheIa&YGq!O}J@sD2(NXVM{+x20`}wV$?@P|qy4$(nJJim4x*a`d49q$gnrb}K zwc2A?W9ng#VU1}8zscTdO!99}YD_#cJ|@Ot$7IK1$97Mi#w44jaZNIIT4M|64b1v_ zo^#=+_&h>?S1Ueer|DPvYJ6^x_K>lDr}~x9-+TQz1@$Y5hHtt08zz6B=vSJY|87pG&_I{PW9uHSQ~}=`KRTcAKHT*t-=pIh99cB`50v71>__? zKI3~$HNMvxaDgwfHyoI4{H|T#JAf^D?!Ni4H#7ck@%Kmhdl8=sK1V|1+I!i{nH=Sy z^~bapyzI+#cqu-Le%Rvej{3x2aqA|N(Ea{3y9eqPf$hua-C2mey>u0rH*0(1%k8ZV z%rU;49n16E=EvrHW09?^_)2nO@a@Tm^|PjeN9`ja{CySd{g8MkL;flWYoe8&d+F7iQ?NEl{tBOP zj~By7ZZ9tGeUf>!20QCmC#lHft?Kx@(92uh9CCA+E@tMkHt}r(diK41vWLkRW z!$*f^8T>&%gFjy#xg|w9kulhI-5trwT7WM*k{L+S`(t7fu~8$K4_vC zT=k1zXf7GCeY<`mwikUwSJgRB;M1@NeMc{`!Ka9)HSaMMU$*urzUnz9hK8RB{WWq= z+*a~+2}~6fM}Adda&=Q!^CS$$YKWhOG209G{|4QB(EfkQz3G0g?F~(>e3yp}$%x+3 zkB!uK@8=v-XXD*FdG|)%UAkXA8G3iU_iY0y*icQ^+av3bV6lfDQ9s{IOpV^nMrXzO z>eH*r>-P|!DwY);rAI%<=LYl^G2kF;55#-VT=|%Se7L=zs*4&uxMr@=gZ^vDIaaEl z*}aF`FQZ-=U8~qu_19k`qrXJ&ee-#DF3w0bGWig$&dX^JPu+T@aBT6njQ2PA^Y+@c zI&KR1vvsnPK!IB)+ueHYPG}v2?$ULJ?m9*;uNmX&w3;ykvz}%hVA-}N?rkxm`Gcu- zuxr4C>R^_(s)O|qH<>yZ7$K(HTM@X(;{6lxOCx;j_4t^Hr_v*`Q$3y@@%t-X*px@mgO>{J-!l8yn-w_Qi1@$M{&hi*!RmwzcN3 z>NTp>OpeR;ACQaZv&4nXKgmYLeHsrNT{#JS>|*Ux6DyNL^!qZ!o-bo!X3x$%?%`rT zw2=%+Hhvs&vCtM7m^Dj23%pOYDPGO(wD>GelFw3Ld=}Lg27~E+mdn7FUzcdEZ!i0Z znH&qc#KqPnacs$tS(tdZ8udBMg^=z!ML^DILR6oU6 z=e+!+Fmy>AL(+Yh2|x5EtAE_>yHO8iA+licGr4^U_~{XTMyT`29vlWUvIj?e_CR)B zI4Ohvew-A7lUClXjrSBScKWdKb=Jw1o@i#j>WQB2-^G2};&r0}cik)aXKQ)CcjC1? 
zgBR8EdWi8=%iB%7;_Ci)8{NN^_r8;ltM}FamZ;^uH3N_4z@yahiZ8!Q)$zWXho{ruHdYu(&h%dP z9reDcy87b5B%8g_U;F&H;pgbRAn5AtwQJrJmow>xkMc>#Swctq>*+VaV$j zuUO(0FP2ap(c0`?T{dHI_&Gdg1S9{n`Gm zHWkFDcHUMY^R|@h=p4$ix_-NIG3q^U9y=vI$8-oCqTX}lb$dmiD(*kp_*Ht}^~V#s zUUYSJj=MgI{LPWhm;ZJMS&$Dt+I)il(fs$#1NkEIlFo-#JtiCePxkn~1pZ40sg7jk z6$hqPy1WaSzV-CH^XDcC+wX*T|JSpTndSz#d~<7isWRJ*ee=t#?BmJE?7#>z>z7x> zFMfF~^6aqHiIPz(m-0!hTuK%fdvaOglglJL*ec^0YaG~Fp@A3L_?3x45 z|5uLbXU8TOd6X@$8OxgftUuuIXLos&8@K7wv$m(*_U5Rc-92yYi08@1o(&Iw{Xdz9 zzBwO{K#Nr0<@gQAmDi(`iHA>nvDv5Kq5Nmnk__JhMV?GWj7-S~OYlo`6%%tozKc9| z!-s6wjgVX&_3U9!OY#~LrjAYS)!N2;Q*5K~e=>4*d;~dD-q$Z{iWmK|HkoHNN_NP> zDRPeaI}Y_5)5xlZ<4$8{W=HO{DCuVY_QV;6mE zNz}kQtwlg?YOlZFqg!h@El>@6xNM&I*uR=}urH+-Qg!!rtU2r2%VO4^sOVgG+gr9& zvIbHWdzfkMiFQvmFGDsfSbJhIYftPCJ2#Cdhjkg_RFB2m_>Ot!Wm^d`sDgo-vx TsZ z3(6~beS+kb@{N9Z6kLACKv}>YxeS(}je$;bQ*PT$UJ+bZteg^VY@72XW zhpcRWX5psK^BLWDNaXHPZBVLgoK73mks=!xrO1Zjk19_#Ogu=Ajs0ucAJwgYXs)r} zHc5`GZIV2svrTcI(YLnF8g$EOK7-cQvFqxnuVy#2{}Fvm_1GZSmYf;aVTwm&a}J<` z{Ojr{-;%I#Q_Orv)z^-C`tj{xDxn{(ZalLk94M~T9$7Xs0A&hH&k;G5C1k4(|oF4>R->Xr&uq?NAJst)iSRNoz&6y zt=!nb?*?P}^p>8-XBG1n8=2?5jX5X{%uBqBzgzkH{@ojjHt=^N^MAGXZ6ouAwfAi! z^M&o+w~gL96rX1A+g9(qR`0x4&U-??bIxb@M$dhGL-4DcOQ@GV5&X(#mhj`EmEY@$Be>U1d-&$wZVbV`Nm_Tu=*nm5XPw~I7+-^q z%J(fL&oGmZgUc5PvSlp4fYH>#e`{Lw1P`x6)-`v@D-_iVNQ}DWJ z+VAXMzMb&$X}i7e_UqHp5i#Gq@(nS7q3kD;$BH&zcmq$9%U z2(1ONj=!ZND$o%#_&XIpzvTb>`Tqv?zLpLdh9`7|!4tY-yzm9iM5poSi7Iamg4xm? z;7&M{Zuy~f4}WVtw?=UI4gQ9vAK!*f*~va@_nUpz&g{PtOl$ymt8TBpyOCbGZD6bs z{UWT?GT#000SsQX@5Pn`}DEb+7j@x61*G%D^GiP+3{*zS7gM?Lc8Cprz>Jk ze)MhH7m@wXOI{m$=yL7d$8+g{7}P$<(p|!8?ZfMh{;FU+Rs3e>048B&s*9CJIsX?% z{xk2dh8{9w91A|5=UK{Y%mbSy?mfGpGR%I2;mBm!I50KIV6|&{V!!nRX&H#n@R5vpFO#91^eqb$X2F%z6Jbub5Hf=YTfTopp!p8kYjS2kMnKn zSO1IC(~i?~{@gg7lfY@|X~n6x#^ES9EjlTj7PX|q>3ADo*Bys1$cY`q-=d~5xQD;BpJ)X-v=_VhY-=!f1v;?~o!E}8szoQJ`dxLNuB$`0 zJuF>^&eM7;^1HOJD|+kW>w_CMx1#H!=((0WR( z>@&HJW93|XusbnqA~IjO7#&BRdgdoQ{r1`8qwBYHzL4zT(8bOmx&NE)myY3C(r?-e zR{ht~wKWFl4?W!gKm*Sjx1#!a3Nh5llFKXgsPhqAVk-$$^O z8M0*7(!-OylA0+xam5j*szqp8XDcs$3q+hY@f4&&`c(;QIyfcv~>m4v%-j z<68VE@%Rb;mK}TmJNN*9i^ng)Q}OsW@N}ogQ~PB1(p6Wq{{Y6Ix8v)+(Yk&7De~`i>Grk+zoup2m(?Q~b^Dsq^txTYkHFh0 z@b125X-M+DcF{JADSB|>6 zeKS}$x}CX8cHcIi{?w;B5CC52y?}~;we~g?J?Fy7Uogb*~xRO078v|!F%-~#&Q_rjBSTUmh z|M%YiD~E=b&cNpDJ#>!p4SELaK`(vLK4YJK#;W);#(U4$$um01n`o}3e$##R+e&`3 z_tn_D`TGg}r*YZ;_w#Ogu44ICp54v(bk2?DoN?4Ocu(&f`J5a0JHm5x{36Ha2l6V< z4-CvI;x}ER-*4o7qTV(7|0-%6)%>r7+KBQ%+kykLG@j+q@^SpsDvs43R26Y4anruOi+iw5WLj4JML{mwd$6-&1UUchX73y%4E|H9x-y;zbD z#WenPwJPEHjmSqM@)7lN#C2f*Jb1Sucf+-H;9(%N!Ne;jH-mg=t^G#Mv1{!&a!v#1 z-xk%ocES)~Xftb2dcAN1Kq3=3sS4C!dl)zL5`& zQ_oYKuMFDg|J%I(OFqissoq29Yz23ULF_e>7d=z*(r2G}$Ubvr{F&>$XX01KVWEIng7_z+#HLd6Z5!$ZG%!#ut<)_L@9 zT|ysbE5}Xjh1AOW@4HwxKy;BTHT%~-`N^WeZ}a?C?q9=m6~mQqZ1PgPn~|mPY||?* zS$d0+rIGt|4$|LV#P}MWq6V#TR=;a=k!ox;jo~fMi?==Xk^fk@$(iUp#5{D{cT@o%!Q$xmT-u8TeLl+S>U_Vb%DbFOfjbkB#i7u}Rfdyj>w_eZbT|BZTo&fOJk>bRHBBJR`07_LZt-tI+% zT8A*{9neYAT}k75e9<8KIr%(v(pyr`doT@6(0f^r8JfseBt5%h(O{`Z({At~y{G)- z^~7>X<3?|0{Tyhs6}s%^vzCv;-d+LrLDqVSn!9!ojJi3{$?dIwSh%T%XBphLy|X0` ztd=4dU*LDm_q6^7@pVTTAM!5LqMFQ}dU@3Usi~X!Ne$GyN;u9#W@?$MRE51;!hMvF zmhG#>R<&cR`pB7@XT5W_u~*u+X$iia)*RkSJ!K7_^&XbZ-i!afXs{Z)@Q>_k{0!qh zV6ZmE*oA87WOX46@B3Q^JDGISJGM~)D=6Pdn6beBA1}KZM&pL=sgTDx9t@*`2z4yhqNK;Hia-c#7y#PgqLET8g?h5X0hVxGS; zWi09gNqvSLi=Jc0l5WgqjpU4D(H>->_Ez+58@vkdrr(*mLoLUuYbswC;n>u;;d6|4 zR$i`^W3b{Ln9-74mTwb;Qh#MRD zJp6>4>#B|@rzu%J~XS0o7 z)%)@BAGm=vxwkKHZUbY{IP_fARrmAUE3obQdmPWtX1q^uoyJ?qd-gL{TVK82S6__=bEuh3 
ztX#qzu0G~()sJ-^TFgf{d-?FG;mqo@boJE$zJEmPbp+mKFeH05(mU%N^vZmR$u2qTLDHY&~8@pRW8N>M5CyQFD{P23#w>g^QJoH;W!v8xH z_}`nUzaUe7z2}ZeY3s#R*n?*7ZT)cdzZlHQh9u*4yW`c1wdd4(3|=$Wi_zg`jQ)DD zVuA8}>cqM1!9>m2%zq7>J8JGn`SrPATlu%}$ELnNTv3py6{ophY98kk_&g_G&e`-5 z*H<0#aycW^iT_td{&HpHhF9V?;?E-^UaeX(@<}lJ+UnCGBQvs+k$*rxWG5pthUGdX zBNuow@+rPw$lw<#zK%_A>bRfYua)#K9{heV7DdmhCy?C4U=PQA@5qU%POTcXwJA;b z60#}X-rR1pe3Jd+#ZyrblX_T#rN z2}C#7;TzR@^~=Sc-M~LLeLDJGzOej_4Sb@+Myh2uqkHQ;yAcacKhj+|2?DsP|)6m z>}0BOw9601Z+PHhdPDIW8sPH=KK=A3Ja-@aO>xZLWij>Zy(C}R7vGh7e)4kiAmX)p zuf%Kh0Agc4eq&(6=0G(l%g!l`~ z_SGC0Pn*^61a`E?nQ$|cXOw^^J@-r_JU6*Dx>jF|S^J3HVY?#qu*9zB1W!drfLhrI{WHm)GwcRc9#IEE{kA z)19gI-d8W5p8<=yKDk~#vC=<=R9pN_bc^iyLa>%e$NX>X_1`b{YQb+B4Xf#Td5FD? z9nKVc8LX~xvHB;By|z2P+J6)*rqi=F_KJ@&qwbv0-hwQf*o&U}qgO}hk7eDo)%3`+ z&Xlp46Udc@-n~jTQ}$9eQhi;|tFMc{t-ZYeTh6`*9&`4+;M1R3*-qlCtZXOe{V8l` zE_nHq+fHA9z$C-d#QrF@m)+{kjJ8vI7o_xlx%}t%jQlTXKO6oV+eut8tUua|xgQ|r zmhJ3hy{3j-Y-oTOeC?YCwuYvjkUhOW6xb(!N_N%SRM}T+S6>7x)}OLAbsn{dxJ`w( ze=3{$Hf+ZO2b_I+_S>i2R?`H+i$ouPnGrJ*hCb_h6 zdHs$vdt2a)ylsv%Q{ysq19EYy=h!|2{U-Y8`G>q`z2M#RMekW=3_R;4^Lw!2kNp0w z;F)>*xc`R0S(O_C#`#6YwBHGtce$T&HqsZb zXK`Fv=Q~y}!cN}Z{_pKUXZEb%t%K|L`p3`L%AHdKD_D|FrU5es;6%S=i|BS@;Dw@Yixi z=~+mOCDF5x_>DT7*RzoLjha{iV@`UHM9)IvyhP7J;?Re{44KyEx$EV zYrfaRq;OM5zO9GvR`yKn1t)dXkArIj-Ti z8v3g>E@6E+xDeKbW!<;eyS~r6uX!H#mAvVBlD8+p=#$`^d8EW7d3h^1Ucqk>>VCHW zK>Z>1SkLJ?F|<_gq5JD^jk6>)*^F}rV=4(vG4HaNaaQR)cn-&vL(Idp$F;0~Bplkm zw}Jn4;J=0(XBGFlmppt)sGwmv-+ERz*#A8}1)@teYsl77Z*L+WEqX~FtUgxE6sD)( ze#We4OCHwqsYM?2{05Fy0~LLic*l(#s~##DsX-pZWAgE=*OKTv7-QsN zx2(Hk&X(}Lb(_^|u=qC%H*KZYKx>xkJ<1sCMaZ?@W2@p{^$pNxp!WzAI7>Gp*Voc# zP~+<}IPRUlOtzi#9q;@#Zl8gX0rTGU8T>u(UF4l#?VVprpMkkQb*ZHLBdeyr#nUC` z9uF)URQ)LFJmmLSXmaHsy#l-FH3(53--O@l?m??PF|_7UVy$8`cCOJ{-${IVc+ucx zsrN3z{9S{FEJ?DQJweUi>YU!C;W zp^JL%lI~5<0q#k#6Pm5`E6@a_>sUxi;$OjANEMynZXeH|ge_E}xI9`Fu}~$=#&6GO54kv3JNa z#;biC9>?EwxOciZduVNN-{JR$iI-V#d_k~9bD$VU2XyKW=8~&V?@FGfdE)9Rlzm>xG2{2 z4ZIjFzDSoN5icyQZCu(Da_9ffL*9r#_g1hs*V)ime!h65dYx*X1~WG%F!Mn$@c}=y>#n*N5r-|K;3tIdl`9 zgxj9b#L9`#PVzR4cFhUeMN;HV^GAmBWuoTW-?RofaQSj!ONjH#@jP!2uN3(ZeXSgf zJqbCGehEY43O>>=k^``8^h+Of7oPLIoVlf~ux$OD65$#@NA#|>`sLe+_kDt~`1OnI zl=x5|axPrVN4)rQI$p%8S4?MKvh+;8S=S)HI6S_(BNAWFATKIRr{e5a>;>J$SS&4m zioB%5YIIZ)f;IE%CY(t(K zkmow|-u>7iy*KCHv;_ZC@BM+qdvDB?E660vtRu8N%zkd+Yd)D?)_gQr3k}An!Df@@ z^6yIU>bewOxpb>V#uqamA31-_mGdF9Mh|<6hXO6ngP()g%RL$o`KB=Wrna>u0|AG< zm7v{qeAe=LoWFMty(cES{+3wvt;?WC7{f>QR`026Uf>=ZxX1cyRu1$}cRId(y|b@( zMj$p+;>4CYcV6`z*Nx?x-{FI|t*uiIWV?SYqI;1&_x&%8@xCViF|-$dh1_ep^-Q#; zWn7L}FT%=EC+{v>-A7-Oa57G{Li&^X;eP}3Ut3j+;<$FV8&9W`t4id`@ajKYUi~BI zAA_e>uH^s9RtR6stbe|O&td4KJ-3CkANS{MQmlZU$%wP7Q`P|J4dzGZxHiMRMsp^o zkBvP}uW2uGA-NNlm&30BbPV!2V0us8wfUdPapkayShAUGdc14WoxA#7xtq@69n$^p z9PGa2xtjHDM!Qe`UvVz}=FgEU(MJA^%fm#DcAyR%vz~^a{SKcE`!)R5+@t;Y zSG@nQdHIGn=efL5T=QK|2iUkKBdw{WWLk4ZzLA}G_W_UQN1%B#$MQ*MKxfUxYuLT6 z$UhG+PGe|(H8ifW{wg&8Ab%@nYsX(zkBFb<{lSUuS}q3j;rsU|8IcXx#Vzk zuTto*b@406=T!Xc(!oi&PK)#&c}imNNoFopxOCaHZmus^&gY3s$pz^DT7P|#$pHw5 zqGt(tDof8HU#w{8S(BjWhtkqBbKj9<^i3W{-JrXaZa_FLsrB0C*U!?A01lD>F^_=*y9{a&aPzM z+DcEpWV5waf?`AC556yO)5qYiVnf;Sj|ILIj}5QpxE0?0Q9Y^PVnDLT-;&J|t~WiZ z;9>)~Xhq+>gV^z9?s4X$Um|97ZrZNn?{Sa78U0QB79VdTete18@daYX7m$;?f`ye2 z(AW6@>j13+KbI3j$|pcxIu3&$@;w`s1JpB@fgfSJM%Vy9GYx(Uz(=mh;V706e#+3f z<{X0`{a6ck0fyPIJ~fa%O39G%H_y7 ziuSrOuf}q!I4VC=J)!BaG&_N%d2=&jN&O@bf@gyxYN&_b8zh$yqW11kdk?m^EpS@Y zoAw**$ELjM!7l1L%w-$IcZsdR&n|14gN%mTfAHkOO-=m%40BXO&zfH`Z;V{~xA|Q; zir(oV_DIWNAE{i{!5V|#Jr@5vk+(RGpZO|vgv0Pkbp)+ve<0r>e-~^&fzPMhK*xc3 zG0h$R3Os)}m}AD(Lrs(1^&tm;Eu23g8h)*+1JH+UfhN4 
zS}#yo7~t9gbjRVb>{H7<1}C_Cgbgr1ECL1+*G~l_#bBg}IXsE$dx8_9?9JI<^x!?4 zb^Q!*RgAyb!x(#l;pkNG_l=`>Jk`SRz4&x+vWXcgD5 zdyMt41L02dy!TZPI;&UjaKh!&hC+#bd^BdQ>nYz}V?Q>=*|&PE)ADq1N)#uvr7=|0 zpa0s>(r2lYx8}I?(EW1l`!j|&bNvD6@Y-rOzj5^HMeyP>c(Rbay)LCzzF?Jd9$TFf z<|d7Nj`1@Wh_6BT8sZ)~+$$Hpj)AXZ!4SL+1zRj%C6{r&!XNQfzP#4<{Kr@3#X5X^ z9n25J$Za0#qVKUezrgSm{tr$_;wf^=H3vddKE%%g_<1w@yeB`w%iZvCZ733rgv85&{&kP~c)8f6 zxy74!T7T@0r^*`Yca)ZwPAhFF-;v8mfZ4XuBl#evysc((8Q=;HV?oPzkD7w zYu4tOcQE$=2O+NMLVsBP*>U!nbMalkMxfzlgA1qOf*nWSJ3n?0&+P#Ro50>`VS}-* zISw|Uv#>$zxO5mTrR%t-!3ek&9bH>=dgNL806zw_hmkOF3|Uqz{v*cr3Fy8Fx<6M; zUV?G%Vw^Sk6O9}f!vE&DUdDIheAREX7m%gNaqP`;-a-6UzT!B%+W|k zwVK~+=Za4WN62$bAU;&p{95q7TNPd(pp-1zNN=xAd;|hf!T#@~S;}-MwO*_SNud zHNKP99oXsdX-}TZr>T-t`BBDxSRY$_a_OUYKF0A;=wIdW=_oW)3?M#T0-utvFG5DO z=TqW(ox23tH0QySUBT*~Si4sWO%iz%*XC*+UO!KCeKmY8gvN6@-`octxobdsGT?ij z?k(BxUg_%Ju9f9GR`8A$JbNy9*v0Sm`MSqQ&oAZu3-hN$cLt~Q&+~XIy_EdUlRcai zkk4~#lrDcq-^&F3J*sULf8{R?`1mV16o2I}-3EW}fxpjUZ^hp(v31SP6#;qPb@~Eq)#D*Wco;;>v#~wz9g`$gO=x(JHNu%|dS1 z28`Se)CG~(5b{dCd37$eyMXC+wX$h#R~>v4z2z$j?<=9V);o|)_xSYa0sgl7Q|HaV zcbNp8m47tnfpPZpYf0dHk`Ldm-8~5J{PJ~kp6uhq{#`u3>DEu}2$z;lyBGTEJ{0jN z@vA18yr#X`M{li{SANWW*W)(?(N7`XH;4D-U00){I7hv`*}Sjzh8pdCqr7h+?|T%! z30F7gO^FsF)4~uN= zw0%>FbzfZBRo?U#WLJ3+@qah;AL1Q$!~0ggD|y$8dB<{#M`X5}ck;(3@6YJ#I z^CG2NuPVe>RZZFTx3mXWBAz+IvF;y52E)*@!jqrL_`QYrN4r*j#-|@A`|vZ-vzZC} zNQTUF!QIx3*I8SexK4KE=Df+#9&|l@SN+}e?YKDm^U-?l8%XOV&{}rX(yHsSY9EaY zMx?Q5Wv|s7O>UC@TH@)iW5iq1VdmTsb=U+Cg9S-CY!flI)nUs{MTd>>&WdOK@0_Z${qHQD zt#?k=*~#xLoxO+morG;tAK}gDu1(TCxYVyI=F{;Kxru7uv2=)+2ST4>JD>($wV z`2WW|zfyjOyH{mGZ`}&tm5;M}s|1_u*IV)nYvO#jddrQUgy*C9UAjhk%bq8_H8)9b zY0uciHAgs>-rDBrtpRAb*r&Uu7~OS?Pp(GSUFO+FcV)URgIwYFpKjgtdTB0OVQJ~= ze%Xoy{az2vqtLtudQS$k@)a%JoJA+6$Lw_{t;djW*}9X`W7y@9_W4sD(jPYs5I4!M zR^1tV?-jm}fc>4==4xzmg0_>etExpPPD=QxM>u{JAACO;?MKFygP2Pk@+!VcVgBT( z{1wIeit!6WQ%p``EAl=>yx&7^La}QX`G^vp*NI#xSMgEu6d^uwy4`x)j!gcna+lXR=4VV%YGun&E zA6xYK=$JI-I37=uj^sl|%7xgy(`B>$bX3l2WI8JMG0BtZ1Rm)zY(M(`Kr99wJ3TrU zoDHoa6VuRgV!LYS$~VcruQ7RsvT04^AF`qE5+8jPhh;-wi&F=APtX@XD{a0heLiA{ z{JqH!XGvROG?T3RF% zldf`otpDCL?LP&Y zUV-frEr)_GU8Q$LGee`WOCvv>`^goQ^Z#|^)yu4{g3hX!SQ?97ei}b>8fiSzK898w zr0P}K_>uKNX3YMF>=QI9M29Nxdo4D~v+dHM*rcLpE4g4NMStew&l?$qHnu+CkIMtf zQ-eY0v63Axrq!FS-;v(_Bo~7awhs|-^`QKlc$e}I#MqlQE7#oh=%%C?dmZxX_glV#|9O3i@3Oq%lRHZ1luldLFms2E zv4_j7>l>a-e4VXrK3%l))6LRs0yK-q$oMAEB6>X^>$}MoDDF1zA98+DrWm|%cnn^O z4jY!saM+g;|8N=C{HG6leqEu}r=*m~?(sVq$H79^*w~xVT>l9&ZGX%3r+V!()PmjQdn` z6Vwon-w>b%I=tqqx~A%&N4(w)*$?%`H4>*bDR%eIH;?n$dKFj{|ud=T|SsK;SHJq`hUZrO)r8OM~ptlGWoN6#T9Xk+uQ%Eg{6 zZPeo_+DJCLsFMYxrxaJ9v-Ep4_Coz5skFM7_|0iporYeM+m#V$@mV@E4)pR|Sfl7}k3v(BtN?C^WV zVB=HdRqn0`OpeZZm$Q#LduKQ@Nxc^PikzmqXG1e$Yoi-ZPzy7%rI_Pabvr`XW#zTB zMuF|S@#kKJ$8dg8w6M(CN6%!-wd8ybs`rR~$=&~dp)K)9DsBBVE!|zWL-Y*rZ7{aG zWQTYn+FN>`E?PUt$Qn;hnCteFOthc(-+=b&X|sBhzKQMXn~;vEMpu=Ol53RSRKNJ| zp<@&}sz-DW_zHt><^UfGf2VnK7+e$M-OWC|QRN{IgX;nMpUgb}U~%*w@{#Jrrr%^= zGk@0vorl!NYxcj<^RZLw_({t?#9$j;qrNoddDNHY&yOd)!>QV! 
zQS5mpJ9H}alpij7D!zWb^rRO2pzK)_Se$5R^htyLu9Y))h{o%&ONKsK(71ag*BbiH zG`U>qV~d zZ6CebiEU=wGq72r?LFvJ)oK*q9HU=LI8=61{Am z@lQdo(;zKL?$yUoW2& zTXRx$mQN}>)8NUv@@6Nf3A(y)s%w+cg~WWOf6D4Y!$0|**lWdm!sY>VqQ9=1&c9cD zot93FBYmFYjq%-2l}?>m&}plu6IFY$xkb_K7uL>Nx@AeTj52(RG&_F7kBOrL^b!Z@ zC1#%DYInVkOghmQE5_F$aOX5+mR@fN0a9mB>cGe`~vb1l23mO zU6vd}rx{Ns99_!0F5sQ#^X~JQ@A0!X)#bCHg_Ww_;D2?i$5go# zFK+IFK3?3MhBkKYL!!S}v9?#Hn|R!)iM(v0=w=rnRJb=3Gc&GU!T z%EreH*wNG&PI%f14gIm)T;k*7UVKdN+&*mY-E;8en6t4_b2gg2xSFB4QKNdE6%8P}GRhL@Y zzFxZ8e0C-p-a1vc$fTl<~eF!beK6^J^ZgNK5xXWkGXM1LQV&XPY0d~ z#9m?UrqPA0CrbY2*SapkKE>*<4WIS)X|l8KZ%rN7QWvRX-sDQ=bNwP%)Tyq^N%l~XZ;ecuL1KtE5loQx#xwEaP$K1XWmbCYW2-t?ruWd z&s3kIYul$L^`n_NNc8SD(%X`cp8kb$>C8D54b|(DM6)lsbM`bIx7R1J*2zHKBKUS0 zJr)a@hkNNN`04ggEeOHaG4ORP{}0302ztoZIV@+S`FW8f&FAcwEc>M00urRi`F=vbT{rtJrg9pPtp2M?r zJXfEf<#hvmuEB2f-FIoMA`p&#m`@Mm)Bd!jx76?O=cpRF`8Ap!Z|z$GKjggHkN(!x3F^Uf&IJL#2vbcOW9{$Qkk?(VN{cCd#f$XnFI&g1aL+&d77 zb_S<(HrzfL9OXy9z?}Po;VGSQ-2oPbBh|-J^@ea%#dzPcZQk+mU};aDv#;k`S6^I& zzUToPx#UzY26IP{KSKv}$5!mC8>1HcbcZ`f+26;ie9N_=3DJYltT8mj=#GC);eQfs zhw(po4X@)jEJAl&2LBhXim&ID1^+d6YZIP<_p%AnALQFNs-}4mU48&QTmHBC^#}i} zm%m~7@7EuR+|v=RJwo5iD-+m90p86(f9M+VUp(&z1MiRIce?zZ+L= z=)DB&`1#%ybosu`&v$e~t;hFgxp%|u(jocLK6t%|-;?-{o=)Yz)gj`2KXb^)>F(3o z8?rsRuf`j`J%39zI;4kt23D+abLuN9cIcWb&?El={)Csq=Uy5szum>mOfYj1Sa~Ek zHTrcvhHl`+SzEV*7;;}OJytdSUe32tEdUFuntN-Q|CgPyV%6v}@y~1I#^GmYeFc3ZjH@1{d0NPADc1`n=&4M)js+L|lxZ+k}zB z=#p2j&y5KuP52<@T7#3qE!R_PS;yKjNjSM69Zm|9aN<6{se?5)I$P-@KOf&$IB5+E zC#(yTf|F)FcQ*DDoV2Z-GVo<|#^HnYHJ^({k*N=5s0c^sTkm3`oxi>;8Uf z^{jV2by-Y!vV42>de-|v|E)hdwDhOKmCw&L?=L$5ZHN0RSAvf#z{%y{WeK>c#a1oG zR@LC=dcMk9;c6_n3iJO6=jCyJKG)&nt_E8Rl5y1?WnMUXMls6)lOq$N zUhj{o(I{5G7T#A8H(kLy?++FjeUbER`20ICbpTtVbw|3XJBr_y*WH?1jqd1LImgrt zd(OQqrZJ6|AMD2qa#0$X(z!xD)t48Dy9$sZP%`F$&E zTOaXZrxbmc^gPR>VgAk)VBvBwu>@??f|12ws0JP^V(qE_o*0V00T}8V0YlM~z>x4G z90@~DfT3#gw!(|S1u=?nAzY=?AK73^`om)CuE94RQ{R81FqMV>qqS7zLo0?F&Hrik z>!)GDt`9re=(8mM2Oqgfeva3VtTp*;pU(k&!*n?r^+2ThLhZ+fmOdejr1Ooj zV|IR<-r3E~oE~4ukBw}6p(K0^^Mx|t!{-a-j~m$+TAJhwy=U4HpD(o4`a%XX))$%> zO~OnDU&xOc>kB=MFQhqjt2|$5r}2fpGRzlhOYw!ij~+ULFVvR77t*|azc0i(2lzq- zabM^~pD&~wujb&T^@YHeaCJTYkQ;j?)w@xQe8HlDjl=$^yA*j=&L66~4Q%1OFjK9T$& z`9vSejVWhlj?Z-ljjx1%bfNK&-edivy>VaEjjN(Z6}MoAYO!s<4HkAzz&{!aPUyeo z(XVZug>N(iyT+a`#y8sh=0bdofdzx0XxqcwbtKA}Hc*ix@gDz;7rSDFteTs`6XM+rK-0l0ejjlxwHbtjt- z(3}&0-DxRusr4RH>rSaLTys)BpwOFBpU4NOziq(FF-uQv^Xg8zPI*SR?(`m0cXEGA z)SYTasyh+;`0GyRW?6SSh<&oLP9yp&Q{729LDdw@`^ED-6HT1vU!S%fd$`7{e>YL{ zR_);7pj&gA0S2|-qCXb*(`&fh$)e^I8oh2{d3x0Ic?XHbeC)|(b2>P?JK z^`<6jKV{^L)dNAD`6sG3b=rCp=fvwx*acH>;vHvt^(I5FlfZ_pJzJcp#$>Uwjr_3R zzQ{-M^JBPO$s#|T1y)pd*1RdzmTiqmy{sQV{arsw& ztM)=nUbF5M>O_MF=fuQ9!c8k5@t>z<#eH$+ovOif;` zA?o2JQ;q4P)R^u&jWwoXYD|})BMc1;4z#BD@HH)$GZ#XA9m~L%<|q`CL zC1YI)-C%g_)qhk^Osy-)$4lb*2>#t@b)|k}e;GBUd7i#lmZq+B#8+2R?}F+|Un9@= zLt9q@C-iFXV{Xho=`4TVPxT|=#G!uFO+CreOzD7wP$Go;p%PDNLo z_}IUY%MFmrby^}a_R)Do*p{0df(83ynW657S5%{7N}<5tZ4&Xjm}TH z#)e|gOmJcczLpbvo;+>OD}k2h^7)=XO>u!^Y98CD%j97vwEpC_;_43V`Tk-kzw(9Y zc`X|Pp_XS@lje)i>Aiej&UMaG400&sv}j+;kTX92*-&}q^-fOy_2W8=u4nJc#8<3^^Owe1zrvfhw1`@z1If@*9CI-G4E-xoAGGhgZutX`)vK6fpxrZ1@BwI`zrt1 zjZRkn0eZARkCxX~Yu$}UP2b-1A0+P&8Y-xcjSWMquU{I=JCD46z~dPn|A&Si9>?FoE~n)R&cD)&m!4cx zJ@_QP=n8TM_Xfsw=AQ*N1Jyt2gBM+)h^d)rzp-8MoXI@)K4d*)dJNwvURSBk53e$^5r!RzVp zdWA!*_C;sl@8R{w;PpSeHZ$hrI4#$~>oR!V_`vL#o>OaheRezhrN(*P1F!4hHFJOr zug^aPKF1zD2|mNG>r(i8m*M?zK0otcBk+0oY2)+WG%~&SWM%r#!sj&;2TqmG-~UZq zp7&-e&+ibQL+aZbc|CUJmeuDT_+WLc0C^Ao$EC4CN-)oA}l~nev~*$aXP0<{EFFTOD$w^`W)asQNyaP=}eux#ieUx1U36tGerf 
zDkrLQeu3|ED?YU5`D=Yub1wOmUCPf!O3m6YTK_tcpH07>sCu5BX8lF_7Y=o!Bc<d^Q$>*bex$b9==3T+-zUhRQ zO({e`ejHpw% zeA-6d`;hFx^kB<8j;EjL+NEa~IM}O+>^tEU{A)13vo6i;mA3;Ywwr1+U@5_QMoz&uf`qB5qHn8qi;dH0v=eJkI<~wIb zD+1I<1LNsCn%w^TTxYN`K#mN)wnp%qiD8xpuKVVj!^@`y%deX@f7kM9^X4p{_BQ7; z-~88cPRqX@zT@uw*u{U{wQ=*$8P|SiZ2xn+KEL^Q_?Nr8oqhY)RmFz#i-?mZxBu~} zoJ|&M7AN#O94aOMB03)Doj-_=+Q<8Us0@FA`f25%+n-w2uyWuM?CN5^zXg6H>_zz< zzLlS~xGH^9>`LWl{kd8Dj9%&%S~EgDw(1=tKeJu7-qgOqDt#Jvv-eC(#VwyU_jmKY zi8amDJL=XH3fi9o|27BM7P>~ZI!U(IATz?QvDItO!B(fAx6wQO=QqkGUhoj|{@BQC zh-y9LiGN)kn}D1b{_fJ)@?7Vki?H2urRS0H8oqC)mVzH`=0n#@UO&Lz$k4|z`+@rR zA8_T*?gwh$#ay#W>v9xz((A=O?{A-`c?s@*<%O(6SwL^YB=i2~@_vb$Nc#8Vos7+w zo{>+Xu|9fy=;7aji&16v?ldyHAPbqLXW~E!GE2Uoo7nuB3CQdP@(0t)ton1T+*+BH zyh?5}$?TKJ?6aY~7Rl-XbkLWe#mm#rX?dS$;>qeW$d>Y*Q^1-l8(-SI-^uO&4suZ& z$c>IcE+5?(h^@zWlz%stdjDf5g0UMu>kRzd3AJoPPPLcMz8^aD7=>EOoHL@kkn0L$ zG>ok7zb6#4GWvPu$Nx-n7bYhTzVCHhS)D&;*|Zw=Cwq=K`$c-zB)4UAu1k_z?MX0H z7aZ)qKFB_a=d@@}a!H_Y09ozq=I>A%c`XTrjJ!(c9ShBFXHWT-i;&e`XmYV9vyoSB ze`;mJih*6hyw1gZR|N8+lH2d(~y);%f#+B8JJy~7McbL5Yhmlpy%~*%5Rv@dDd{@)o zGbvJJXe7C{y3OvP;g{J`>~FK=dDj)7(9sYzu1fO zi1CoCyq5ESee!-`?0fS6N80a0{$RlPgTby#W7Y?($37I`1Ip*q8dhQ9GEhB8|JHW( z-T8gKR`%zR&!;u@dx*i)`FuKOCG%pM?yOn&%ft@_4_6z&6UvJ zM!F~ZZRxUGKM~&-MEA*NUy##@Lz_34;lrWqgM&_$O%NV`2^NJz8&?uP#&LM24~N?K zu?`&02Zwb|pucR_otrBHf#^T{Y-Y^Hkd5H4k{p7tXX8i3m~|fh?gW1UC$F6Zo45 z-j;*GS@t zr1N_4zxK1fTA${?fxWrZk)OJ1$`;LkDIhFaG}d$tTwms^HE+eAzf zX3rMw*AkZQ#P^HoSlAE-ZoDF6^p$vi&i#vC*%c z$$Ys_-F+DB%1?$teaNj7f%q|v>*?E7%Pt-kvu=eB8d=$%CSSBYHD5H6tXN&X#3w7#<=Z@Ykv=D`opvMg@|UE~B_pZ&e99@v z$XB*!FC)cEJsH8DRgTHZ$RtljF7ae!s*#aN?TYpAFE(2LVmG#3HMlj(DNujvM|Zyu zyjvgRaddELU~1>O#^9Fa_^sFHO*Q&nG3FlmA)fra>dBApwF`f2`1J)_?#18L_1b$* zHbeI~!gYG?dg^zBp(&kx)IZneZQp9oaOaa0`2O8XF?pakJSor%?5e zmMYdQXd+Ls)w3a8$ccGAa-#94w;@(mlJ^l>X77RR9{nD+pE3C!%CER~=#7)FVZQpg z`0WeQ`|8AXbx!p=N&x$CF7bJT&jmy9`7>}4;qwLad-^ZGSNOem zzl?_U+BYx1%dlSiBpK@~E?}Q*-j)77DXy)K6v+04*V}z!IlcT#)=#XM%IL(Cm7FD& z_m#xz30La>IZK{G=Y1W0d;wfPAHAm!{y&Sqvv_7*zl^=M0^33H-8xsq`OAW((OM7Z zmu=k1XVv4|=Pym`Y>{SVb*GjjzF(VCcd~px@qCisEM2}os(;P#iEm7>v5ikWo}=yh z!#BolZhUKxC;G!r;ty|I6fpkq4fw;Q+h_Af8os!K{A==wTVMEo_NDaG1Yh`n@i)GJ zU;Nb?N8f)HUH?Qypz(cZ`)8JZps^YoZt5gQ-#=bMyaMyJ4_$Q|{^otYjOhI>4>X<1 ztnT0D%V<62_w_4|_e$yneF$wsKD966Q)|JGh7H>P6MSo7d}{}u@khiTC zk2-oB^=F9D>`^Ws7-sc=?fU&h?)%EBM|=2=!|=(DLSO&g++Sm*x(wzX2|o0AeNkKY z$?K1k&QJyy-K9KC?w_SNWAX|Yfk)9?U;h`#dD8qT{=oTt!`@ynN!Lsy)-)TRtBpgy zhV?s$#!-)65+c@4`H7w6Rd*3{uG&Y+p}I1wa7gZuVk{H~Z0pu27UDEIh@NdRnxo>; ziy5~+4bl^vIFu&rKi%tN_!&%}durk+SL<5hM{~4Bz{(ctNY5qDKzZTSZf(5|wDpzL z`z`YqM0b#w3Xd+3e0|8umU3-{}R5b?roA^Sc655-@Kw7!`IgZ~f|m z<}UJJoLX-+kt?&N#PLpeI3DhSKgNr9Cpiyn{*8T3;a-l5d!Gigr1=Wtt^xCYv}%f} z^>E#9&;S=;*E(li2g8wz+8CXYj7pSS5

U@X20E_kTr9Tr~&2RsA^H1SpN zzxHAH-_GZT;-n9IBPIVMw#xFqbmOZmAM~ELUM{G3$hVpc_C~= zL+EC0aG+gkb;FI!4<(sVrOV+tuPfI?JB+`kFdCyzngF zS0~&AufGaDZ|8TyO>;kR)84P=DqsIW{7p92(sFTe&1CE>!(KfCW}16|nZgK@7Z2Ww zhs}`f3QxRNdGlAfznd9|bn?9BbKtAiK;Pobh;Y@pk8qV9#^k=6`@9D-81@*sr)T&SH5ieGfiOE>Oe{K2sTLP&Uul_`=&I1Mtl>z zy&8<{%yn`R&G*9d%E^6Bj>9_v2i3bL_cJQL(e0JQclqe6j(@GOH&7Z@&uV?A`epT! z^WVWZ`~2k9!z=YYCwJCF)_=mO`6(`}vCvtdXP-t5q)NW4GsyTcmpT)K3ts-Kb5S)k z9gaDAy`XgP^2^CNN-1pTx`dpi)VA|wymaGH)5OcghYemH_1epC6JEyi@^7ei<33W` z&dZW=xyEWc@7ykqnSt$`MV>=8c@8<$x|%@EtBKUUqPE`3+{haTgN-M_5cPeN{oFAV zoY)S>Bz{PGp0S_7v!V9$WP_RT#-o;JDi*|WL*DB-_8&f!)=uKEqU_Bj!c4f+Qt~pj z$Ik_`jcsYK@50tR%r(}wKFjwT4CUD@hAxC#nfrsG_WoU9Yz_8oIv5HEIx7AtyZIxz z4*v`ZOTj;bk%i=dgAohB(W}7G1z>6?+*kNuF;w+@&CkSD89e<>FmiTIoU4YYLDGwD z+y^G>d&G0Iu+J@~hQQR`YkmC_(1$v}|H+x~I&{d6oga_PucWpo&myjMJBA-}{&9_QzHzRmvP=mxO4gXa$2R}Al+7ZC^TxRzXT^7Cf%bN7n5k*CQw z_;DaVZ26<=PdpLKH{+(*BYXCw!r-&P31+TdMmM!%SX=*m%?r<+Tpy~r8O;peNGc>Or&MwWh&p#pB%X8jLts6a8dmi;>|0`MrV<7Hs zFq<)uUZFXdMs2tlX0w)r*~xfqpYQKtxcv#%`X908Z)WY^#2#2g{^LUGye*)vZHAj$ z_zb`I^SJ$sZw)UGZCw&pmRDgc^*##rvaxN+6}hkTi>bNt!S8(D+@K>X-K=$l)JY2Hdqv+@SMwSQJ*-ySEP zc`3O)`p$6=mh~+pZ=ljYDQvaP2RYAe-oPTx-mOX6rt$_<}7C~x4z z(m;PCA-41bjXj4lHlZ!1iH~3COUB2k=yy7+om^MN<<~H$^O#%ZygqAkUjO1d>FJ3^ zORPXk%m5R?AjRcNQ_Q!R2%i!rCdQ`b8`~RA@n~vI5!;(}+YKkO`3B+`;wl#Ry7{i{ zuFYMi{|D=+OB@x)s~|=-9h@8F^S67o#9UWP1hYP$LQA9$c39UO0f)M%OJKFcDzwBG z;eLx`S1*B2ai8a;B|bhcVztC+JX35>@?P4LPAp@*2DY`UWy999E@Y!zhi!bAH7`;M>saQxw8;E8|=!L2B2|RptDjn!ELRA;+Q78#_gxpKrc4+&l2|NXX)05 z(eiHzmSRg!i9U=it=?^AL8r%R!##IBb1sGxqicu*cU;DacLGkl{F6(M6JPkBY3g&} zfZ5Q7zP2!j>uV`+5aPr`|7UEhOxo3(R}&Vp-M?jw6s*pjHz(dU^xaE?8fq|+I` zW%h*C>G)2g)A1~pA8iSgMzyypvP#p!j1PlZkHC%gab1+XDBiM;Gg z!|HS0@FHVJn$I-sm0l%W)AiElb_7ZjaAY-mf2WHh%U0*a^tn#zoK52yV;I8wIvAoF zvvCYL#aLJzvCpdK6sD-Ix-exH`@BvX9iJ7Z+<9NA@ktns&fp6FhDQijdZ=MrE<2d# z7qW-~<#bD9)1Yr$^iaz~P4(m| zANU6OR`_P8aUMMjZ)t(E{LDAOpyheG+JIRsU?B*kX zhxKqMFrjr2e{4OR1%GlvKGtdSxj*9JEYtAEo`$oWMvIeXYB|euNu1@mXAKs`bAoTs zIi;ALR5qRAED1Tk>2Q{+ZRzP=28X11iL+o^7#y1IvE#s)q1xAEgGFdxyHjajj-BPi zS9hVCq|&|)d-%&)zSnAB>s$|L3)wR@EOXIpdmBs}}leTpw+Lp$9n%D0EMhRC_X~gOD_zE%(J|+{2QNM5O!)`dNz&R!?#&?WD1raXba4l}H)bn=LBgHH ze5iaAKY6t$k58I=D9CvXhOTFwm-`D^Bj`uc0}SrKXO&<>x)6J2L|usI8crAL`9(1D6g35eKjwMRg{WT~?t!!D zdu|8co(mMj@JG6kY82=A3u3y^BCjsQvq%@ZNxBf<>tA-;Gt!3w{4M>ZT6)W}4?pvH zxY}xeY4rA(CRA!Pp{f*`(2oP0p&D;6>WSb4>iQ4GsdK>`YMsROpC~%rT(lz-(*bUk z(<4IKkDbS-i22d;%;o=xhckLmm8%C;C+k6QMRA{V@FLBn&z}?RzP7r51Kdc@B|Rwe z``DYjyU~M$Z@l}J^xx6EPGeqUypgpbd`s4Yw#Kk+^!mtY>i3B_XY~7Vj8P_i@W^3x z>%pK4d9bdy?|5@O{_RBJxQLB?Q!N;;CS-k{UTvt|jY&<2eRJ!;ygI(&(cdZ#&Gmbx zih*nQ_&zT|3|#o_ih)~xz-aort)?%(xZ>biMu>w`U6|ow;A|Zj_0UT8d+OfaI5^dn zK&y)LK7KE5mewDxl*Ld}-wWHUScYFP=c$ui@4^hX*3)z}oGh@Iy#9}2`_zHUA^uj}8u5f?EM<>^*QS<} z@N2q@$$B?$j*$6(aMp4<@4H)@HQA4;nDXc0SHs1vOFK9B1(SY+{p8qWwufsxpLILh z(Hg~4z@HlEZ>u^Mimk2X|D1X7){U}$#dscEIl5afz2~~&V=q!GWC`5g;Xp(CjxxqZ zCsrPB7B;;Zx1Pa0PlGLQC1zNk-;q&bB!m~rQB@p!D$Fo-XoiWCh}EH~@zkMt7sp8~ z*`FFC%14Nm5JsdMCt)#TB)m|p#5(~mzOz-FZsdAV7BiFwW8(Y43;6-J=J=kjLc_WS z-O6Id9x%h6`@#(6a_QU_e{<|cdLt6U-){11i(|a47yZk84(za>`*wD5S{zNev@^)X zJq%yy3|8%^%nXGq@q1BsJm zN{4f7GAFM~n6Qoi-|BPwpk42K#ylsuqUT(Xc2iEh4aMXy#h)ddv3@ggQtukj^Q|Yg zVj1}d#AFbAx-frH?m>Ic0{U zIDL`N;pbI+KK#U!6AwRm%)?J|flXRGQ9O@@nr zDI0BVmo+XAneF%O$+otX`#xSSawqrdV9$xiSZo<9FIhMOFPY6=*7u8dh+}xUhiX>z zVwcLUlZL+!pIIw0?G`&!-y$7OB440-gN(*Q(&QYMF(96&^R{8Y+!?G`wH*9&Y>bwz4v(d)VhzoR} z@jVR}n8fFnao$(L1)k^cYI=&!aO*~#i*@f?QssS(gP_NxiV)0gD52&|Bzw$dW zecx+9w^~4MaQXhl&uk;Ud25TKvu@_*k6 z_(Cq6(KapxzDO4rHq?SE!kMYyjC?f~Ys6uavF2v5CYv!D4r@lk8O3|1qjiknSKc_( 
zuRKJ{@YEWZf|e1F!99_iNsVA9{&rD{I2IefGLnX&TKhJRaFgR%3ZBW>6msX=)VOzT?GfTx67u_}dVLr*sbg>- zcye@GL0Z9Nw1Ps_7(s)q;|xz#i@1nqwORojBIdI&wzqlq#JXly8`wfG>J!u;vl@X{ z#~bRi$d68hx2?m@AFdXf@;%MH(HPq}r={CD+G5;i(Z2j{q2YZKm-o**WB=Un&zFAx>}h`Y?Vp=R**^ti z+&}x-KezJ7##@fGkJL9y`{?Z3ypL>bQ>r`=#Z&Hb<0-8Tt$0e+xA{4E@GAcPSLyrm z8fVd~#8kfeCSxj(dtxe2at^g6#Z?1;W+A-NxyGN!UKDW>u(Kfd&P=wuUn zC>S_4a7TcgpdkM94E*Pr_|M1TKOavHkmElup+}O5qa?Ok=a4k&p|)yhM@NpYU-oJ~ z=aG%0Y~U=E-6?-qXAZV1+IBg1s(fPmlYC-5_@c~bu}$d#9aX+;1-+lsIiJ*vyo>wa z40oK*cgPl8HU`^N*JKfQB-@l;u-K+~Lu}JpxZ^==QgKH;+ehJ#YYsT}X~O}>K2@z; z*{2)WZ&@yfsKh?4g`~08S9!*`FmN^?OXX{P##moCN zb?kHUK7DxarQfILzxm$VryDNeKHb7T)gCKnpK8A@;cwY^-aTuy>ZCpUkrDRnNc%Nj zPh{l%nx>AFxOeee+jzRCe?B*|2dtQ%lpFuj&+%K2C(jc6J+(PAa{TM#B7-;j`qe{m z7crLJS|Zt;u{&p0?|qp*o=5(0Xiqh1)9UA?+99Wjk1_q3mOAxA+K7ia4!)hFukb1Q zzr8fU$+=YRmMVHco)6BRQ57u7smg6FsajqA*ql7ynW}?dr;ki=v}*S^n`Q*F!?tHr z39+6(#_lT#6oo(e&%V7M5BmD6=@)lnQ2m;UTWbab5!IKQNAIS3dP1sq)8fs+i2663 zCeH8#afW)o4->Pb8ga*yYlQGj?L7Clew4APn7khAf41>)!g9qKcJhv@5vO<;)r4Eh znWXc^8^>kiTatTOOjn!-x?^%o9Jb?8X$7`FTJrszm^fS8ynuc}`8#YMG1ZG(@FsKK zfBOV@F=~v9Z%09$6AxER?}jNa-w+8+Ax9k>vywAheGb3H&wqwS{oJbJ<~{s706$g# zk=hc@N}uYNIX&|d-q#ZgXYI-1;^5SWQt&;rE-+^5c8t`&U&WSSRCA&*MxlGV*-2L2*ZLj_OgE8*sBO~nRUG9GV zN4T=>k!bhx@nQC};(rvMc@lp_@}9nXggw2QJ$?5W_ViNr^gMS@s~=n9o-QzZ`Z#-f zwP#Ncsr%#Z>0Lj(&^;ZGhrBp@I)1;zJ*_@}7k5whKR)`N?q>Yd(^Rpp>IEf&cTzDmQF4*6y%_F~)>OxWvX2&}AUpD+!x$>U{t4r8l zw@@p<#>Hy=hWPvf#he<+Ke~a83yyQ{sVxeaRY=`qe!QE_YBn>|W|c?8%KfdLEozJ>5??R|S*YoFKNGy56UUQqm* z&Z=Ls*Y#aHIq%caWjiZ=eEV?m#aTNZ{M3*=UQ=-FEa&4n^6Bi^y`7%QI=j8t9$8z` zcaoY5I=hdLr)FGXW~3~`A3c4QuRojItR6I5<(|(ZA4BK1;^7sCCOg?`wtjlBD1XcP znXN{vT*Fs`*^P>YKaRbida$YiR-W&i+3gj+MnviS&BV3dPc7KZ^l4t&a(~mw%T@|4V=@*u#H==hxh-Bqs`t%?jf~;k4c+7`9N7;J?FD(Bq-N#AsTZA z{o4C>;WCiVX4)StAqZ`;{}FY#`R z!Ljdhj`T7{CjTXg6I3wvy}_(jXH7{4>C5P z?;e%DYqVAP#$Yym)QPcTEg!2o5j5PTV*E{g$vh*Dafz|~_C8#HW-f6V$=8h>vwzDF zezGJ%?=GO`#88d4v*M8H*ZR}H1;=WnIRw!hGSD0{(HzF1IiNSJ3`O2x9+g8<{Cm}r zQgEx_XEl+n;MCP{mC)+a<|?>>a7x_mJm*#+V`KZ|NNd>spH8o+zXi`aT-`x^J=*E5 zZfi37z=TfLMrWbV)XVZ>RI0vD$v9O;{O$AP7AbdGxz-(@@JFh0^1|oy==}?>)P4Vn zrVu=(iGGE%_)`|li~jY2Cz{OjFn^_7bC&$<+T|;sc_85QtMu|sn=j#wr8U=rZ7tx} zQm#20og1owKyW{NbGhp2YAGNI6yKNo~u%7&BVWDvDHZWqJ@~XkQ zpMZ7G1}2(1*Q!ssCJF0Q=lW)_z}ABC*11*;g5LRe9p|pz^WMU_4c|ylv&pA7SRpt! 
zxy`}3YyP}CGM|3)is2Q`ssD37SXD}1(n^jJQ)VJ>pu1hdFuowuY5( zp_|XB9;^9`hmR#jS6t%;t=8@lHJf9Y3Oz!{p$1 z(T=Iucj_@_a>dE36vtEFF=5^&a;U`j?tOi5kHhgs(`H8Cc+~rc;~h=5pURP~- z9U9F@yiUC4N_fq1ysjSpBfq=lb(Ytdx>sP`{b1n1K+c8pI%?cnytBHLus#|bKjUZM z`6J-@E4=G2-t`pk*~UBmKIfZss6Xb<8QxER<>N*J^6j|a<#iwO<>ValO_WF+cp>)8o#6OxaQ+{__wV5cn(7NBl0b6mrY7OL?=Bu<%_X4scga`a0M+dmY=!Cjdhsi=0!|*F|dKZy?GI~cA_xV zd{4{9Zg6fT_;*XrWK-W?xR;yZ%OqZXX)lo3TqT^EC80m#2O0d+&Zsd#Q13m{#iYw8`+a z!Y{{pS`j=gYxR`ol`c<{Cf5NsFNUY#FW7DU1<5>ZzRS~E(C3EZY34KVG<_yro~GWY z3&FInVq-SL)7}rBnfn-Cb(hh^9-!WZ?q|7GDy?k~ens7@O?Ecl-x<7mM+l9l%wHI7 zJFu=P#J!VwUzw}-$`}0FTZ>_y9QL6$=Vc6Yw6*sLhHbnUw&s7^Z7|H_mxp8ry?wT3 zLeBkIY|T9z-x3Vl_;z5}!xs_5l#8DV!?qB&C>%@0^Mqx$p>YVyetYq-Y^-NST@*~4{!j0IOl$c1*f8w^^)pk8MR6R-yllG5%Pxg}W_THR^E$Sr^fQy| zfc`ZX?Th%1R#UqSy?-gXy^Y-{H$F|L)^r24q*Zs?sWEWfGqOc*pL0OBz|_;)_m{SnjOR~UkqFGz;`n?^>Hs7zxFoTnh81DW3e?$ zHjE9+E<`Uo<TOwq~}+*37kf89Yq3-%_-)p)qE0J~o<8ruev8Fm8?8FGLt8KK8O} zTV6yywlNLc(o-MdvQ9Q!4BC4^C%ahK_1MmLKXygK?_KONHl?feUnq8sq>l-|Os^wy z2ZdqQ_VjA~>GH3^wE_A_1nEJVLA-4yakt}$za38;E-}w5v#3=bXgp8O!0W`6og=oX zhd3GK4xAu%Om#yCh?hA_jM?53b0gb{Z|ml~*h8OY#o~1k$E+SuKD542ZeBR_#+=4; zd%PW8DoB#K|Uvv=foIkO!wc|g3 z_;A*l`+jkd7#Ampt1y*w5&u-^cv4XLAWqvfT=a^RtzU}zd*}0Luo_VDSMnoEo zd{FCh>9NI}GXMQ~33Ez~6|Ol!Y?O@w@8{e1tsxy+BQKd`pI&=KdHwW(_&m}lwZfUlnVLuDY3MxC6N5Eu=8<=QV`b`C z_RflVw=16Ana7boW9Bi9{dtc4shH2#na2V4XFoB4=b1w%$H&>9-`2WkZ-(e2*T-iE zHHYl6YSv=i1ZQt*4!viT*`KUUajoO*MU7P-_tqM>_mZDfIRmSi)9zpRBIT?L^(=_( zZRY(rb2`9vcC1$u^ViF>C_f@h3~v{o(Ogd9&vfRX<2U}ur*kKh^Eie0v?rTF?8|Qw zkM>pcxXr=I(O(~U(#ct1KUTvBALE+pWh>0S%tU+h?#&a-iN4q7Ec6g7+~@W;?hppC z4uV|M27cK2v(MROK0A4b)`q`+g;^KA`l|+d=v%7VfE}y@#q+Mz+TdB&oM&w?&RQGN z{f3OCu#S70HNv{pI65)FBk%ci_p@@lylbGxT?4)B-E{4VDZ(=+2zK#=U~L;~ z&5W-*?`DjdKXi>Zw6Cil@(uhFJW&jG72m&kyzhH{c)!-9k9Y7s|B3N)Bf8f~#vndd zY3$jcu-6av4qv~d#}lJ%_iG>Xq&=&-dYySvTzxP5d7nE^hbKAnG=+JplO}G?59JW$ zv&S?~lVkJL%g;TG{~ULodKj~P?mS(`JSARV!u2|T&~rkDe;|c-;#Ko@ksIbofUKGN$unzrE|7we#Mvi zJb0{qsQDFN8XzYBN#wUz?j&z4ANs5P3m>KftwXS#Pwt#r`TK1+sU&C4n1@pt1 z#FTq6+>76uTjj$2Z~VU&Yt6p3@2Ig#iLq(y+qHhPZ&UZ{3C8v&XICt@Pvb4Fr1id> z@%|v=raZw8500uPgvC#NZYJDC^$I?~XU*qWqmRYd@t8;8TLBoEbv~ZGjPSE!f*%Awwc_4_E_HL+c(x# zmvF2XG1fXq?O3%vSnV0({1L}kV`Z<~GVFE3jd4CbkPCNEd-BXw&M)U&8fA<}z%H%f zbDsT-?tRqOcd^QCN zV;EL@As9BoJIkK+zH>5$dEZ%>t#?ku?8J8#W*_E#Cv!HbU(^b)Yg1ryn4SV>WJA@u z_*Lz}t~hSR>v7n)QuUnk@}-w!xOL%}-4Flob#Y6)BN?~Q@?*6!gj;2t$zI$NFPtBn zcZ*w&{iONofp_T|;g+#jQTZZ~@;TUMK7-HrREyXJ3tu zUFN$Db`5vkF#8JLf3dOa&5pUw3OkmzHm!I3-prVX8S@UtyO1?2u4u>2_t4mI%w9KU z9Akg$tQ!-K!Al12@S}UB<|Wc0wO7TlWH&wP3qBof|i7o8+LH_PQn~?3=h8|JI z_nE#ztmiMHQv~@F8@GGz9T<+!YG0|Z&}zQFJo$Oy{EjV~Y@-Z43%tF_Mys)<`Y#$o zjg$18{uE>Q`mkdtZX#RL;iY>1*ZEoFHO7C5GQGp|_`RKx0{D z>RAmxj`iS6M{cOq;zTXiS5Pa0aZS*6#HlTYmh&8^h8sto&lyM6lk(c~Z*Cl|4X<%L z?-@sNMC|vYvfn#A<5-t~t#%yaHn(yVdRU9=8Ot0ymJ{>C^H*mzPtOg7i`mELb4yx# zcpuenfjb|=zE=M_#K7f3yh(ms@Powy!57@wX$@kb(zW2-xk`BNFIi3KUf zvS5e$21#$y`A)we&Y~siTRM==_%8B{uk5gN#@DV>ANxH%zVNJ+deg}D0Yz2Y~FZ@tEsvHe%ZF0`0BIIPpY+#+f)Og3v&b?mV7 zj%m+#fT{afqel#WmPR|j-`phoe;>9)+ktNw3=K1$m0;+@fvnc4)FCn$fZmYD|E&#X zC&pl+$@-=uXb z##OkhFd@|OrJg{>MdbGm!Y^FtOcTw-;rMN3nM<6&8lm*z2{!VWG<lGfaIU{ivPP5X~Ca1%IOcqxA@chgxQyW6~f(D%kv+2GV(O}4?yIVXl_8N|YV7=BpG zHUHPM_Ppmwy!KhLEjT=72Ny8Q#&aVDQ5*EhnYR3K-ti4p+aLsaJ zMp%Ch%dZ-UGLByE9OpaOePgvxMmaM^(IrOU&(eHH)+J`TV^}ndE}?a4-faZFtT7QT zP8D}%P5QoGW^8S*j`0dMT#Q#L@0ax`{X%_$hH^%p`$Dw|d*hU^c; z#;r!{Yhd3H-xJHLa$+%3+aJBIJ{DyS>3py;5$Kg0Wq+G}#d*+n&)r5p7%jd*97Hh@ zig9?67zaO`TVql?>@y9ge+}KMgC76zNII1^|M&6#mE?h;t2|oiFAQJ3 z#CHroduwRoWW`z>qn78Rvl(M->y6SZ2hfJi9+u7V^~D20&SmLZst0J}ZoJy7)?+BQ 
zBwRGtsbj8Mm!}kaMBMBC_sZB}k0g(+cTCIIEFRE!`uW?eu{C7_cI@qVU#zkAu}9Xp z`=o(=GThkDeFu!a7dMHUFge~@-_>B%^ii}%;ilrnf6W>WvxXHTdYJVcVttpfzC+un zdBeKK#-pZXQCK?UIo5R_@lQI(*HJI$N%Tm?Vv{#?O#hxA@I9?~>>21f=KDFP*7N`C zbF;!z(T3X4!X2$9Ueh%M)+jDbI*;Pgy!v>;J6z})G|GAI#o9MJp5oyePubURc0BO~ ze^2LG18cFsjL}!k+HYGvYd~ZCFy~UbW8A)+Yt8u1GFqIT?$yX!$ zrk8jvtwY&3ih1%eZW^P7tV45e)*&_2kF^t5@KwgLA~+?+<#qq5*fJWQa%>r^GirRi z&-HE@AL)#vjZXvP(?(2%#=^WioIvlcG26kqN*=S3#>?uAZ^d|B%)M%{Pv`oZ-K)aB zb;;OgI45yq=nxhIE5JbKtS?G9>yzVqtvy|yv_~(PhrfjwXvcNYMZ!S1s=+{ePDuCB zIgx;YuHG}&aX#s>cc#ZM2s`5~IBP7g7bhL-*)2}0bEe+i>(b2z@CiCtIK?@W!9r|4 z6F+6K(7dB~C+D^7J*~|XV4~NrD(;!g?`2=>%owsBd5+Y+mrKLlFLaz*(-@~ME+)$N zVzot$+w=CEwd0oNm<_v!FL2Dxf9aozCH50b93YlBNGx#%eN+6VZ;G`Qz5K=6Ifu!0 zD6@U(Vm=_NL&W1utR3CTIA|=a9sO&rkJ-_jqlq~NaUK`TFF=3L{`A_=szI*xQwyG@ zexBjh(e=D*4evaUcb|(-^C!lCm(Ez&{pGcrQ-Al+v9NZt&Qc%zt6eds(o$TzxsCCW z-JE)CZ0>`TyCHs|vGFVSP1+2(Z-(#ce*EmUmA&>{>SrC>IE}2Oj%**N{{Eq3W$oko zVeI2AjG@<-tHM4$@7l-2&K=|Iee`O^@;_K-%Gp>a9A=$O1;0|+3+-QHe`<{dd~Zz8 zEe#iA7prc8VjL+pR@q*rE>HxVGtG822ki9ME9*k0!Amu~oP ztR3Y=pDOkBD{gT!HgJphJ9&=Ep$#eblHYBQ<&8N0F~`n`@6&$l)4pH$BWKCol)lhP zZzA<#(REq=>b;7u4ZZI#uh2Q`{9nV~Tkwl4CU0^%`CQKjN?I%FSETyQ*V1oeG3W0h zup;!0x%~_1@6uIP-Cr{?D|{t-P9rT#|p2vyxS;+h>V4mhPUpF#uA1B7*2J)$| zzn%FseWeC#g3RkU=5;*35AnGyKA+8XIn3(>&ahx(k7r)nb21~n-Z^ECPfYWf=h2)V zCPyMRr~EGde1)DE)Og%q57+j%b2`YJ=Fm&iJEvVU=^qGx>SBF#{XupAXYpxI275Hm zy%oNGX&v6V?yVrlnz+>G>DAYjV!mg(^WBwqLuA3~jAqUE>1oydrzUvjoH5_|w7j&0igAF2B2Oz^hTM&LO^*{8+7tPChezS*TgQd9AG~ zuIuC-Kg!?UHF76wxuO2fwAV?wbTK~f#5>I%(7LG#P7ZH!*Ulm^;RnO69p+}p+PR4} zu!yy=kTtP@wK1Qyb0cf#SQqQCi*xKxsCDo#@pxKGT0e@<7FIZ)Z`k`w;_ieiQLsWd@nRq=TDAXgn|z#y zW$d@GyLO&qZp^)@KiwKAZmqwskad(3eu$j=Qxl3?W7xr3)H;%XEEzYnj^^;ZzqfPl z`H8Hh!`bvmy2ZhZYru;R)b0!7ZuXxU2e4xc=c{9*mU^(m$x-&=hV+(Of_Y)} zomoxYY+=Wtr1?)6+adG6hU?Oy!gcXi?$X8yhS5!ar&xwf0QnX}Z$5Pr-AKXi?mf95>O8u*im zIjzoo({qct7iKu0iO+d2EOh7nB=@erPZ*LD?qaTQazNMI437K<>rd)9-WE%q-T-HLR5%(tGs@{+MxNz4(HQ2e2WJWfFtstecY9x=Hq9v;~N5Wi$9;2Q^eQL0Q{Tc-llb2J>LJ1Gw(Ye~D-R=h4&IIpc)^<{tir zy+y=ylm+Px55H9J*s@?KTC)F%rp5X{@IgI*KL>{n@x2{?;Om#RAL8G2w{n*FOIkNg zaxkM{Vo~@kc%wD0+J(x~;w(6}$IM}2^cBwNLHvI50hsSLHlu1YSl%74J+f7K@93`B zn{P;WZR6|`4r+`BSg+ZAgP%S++S~<2YJ@ex?>bWQ%%n9TPVyUPO|*h3<{H*R7H57Baf!=W z6J0AZB05+8)Su<75wJ~b$O{|witeb_LRSP()=EHN{vudr@)p*ubh~H=Nxoke4!*y`nm8`9r z#C6pkCNCVEkvrW<8=uzI#%I_u?c)(W(?rg%g!`~B%KgQy%fzW3tBL3g zoXH+r=+6n?wn}H~gs^zOIa}$27{gZ9!Ek5m4?SzA+|jh+>(kEB5WaIWYvCr=#3I(l zLe|Iv*3f+B;6`dsy))NP_!6w4t`XLd{LtyHAyWsOb)+?Pgf&!+ZmacT)&>5ow&hn3 zXkDekk95|Q@WZaD4gHsXO+9s~)>In&N3~R(9*sfj62QNy2;_|G~oJ`LeN)_#iX z$^+m)GXIGV%YRl4g0J#cLtxoZJRC^4je3ceed1H78mKCpzH4AlY7CPITg-WZNqZ zPBdohaXFF4W)~+)SX2Bi;e5B}ZA`m~W6wE{?JnJA5$Boul_gv+P9z>APW1WAh%__v zGg^Y-O7NqP8h*4YB|pNh3irrv;T&4X+4ieIQEMLjXfTi$y_>$TAA=jsi{r^qohBJiWhBu!901@nF}ou|OqY2Ny@ zb)1K5T>p0iK5zLB_5>WC(@fT2tLD~Yi+jgw=(&@I&nY;%-@wjk4|6Kc+m9{IeC{^$ znH!A6^}gZ+dp!PZs` zp|imGoHM`u_yDDiJPa1}GAA?8=92x3GuhkH2)pvGkNKNUVdHu+*x*<>H1caG&( zbo@;V6cZO#u2FI?MkXySs zB)>Di+aBBhif=7u|0mY0JBy#F{{*o;tclIq|ID|pF+K*y*Xv{YEIy{?F(1<^d`wOZ ztn>M}zagCDV`^}H4Pke^4CiC|B0i??TtpvJDL$s_zz8!2W*w-e_|Te`OUQ*#Tt_YI zOSww7z&SSK6IcX3JePzI-tioY4;N?7y>?HMU&-WnCHs}ujp$bzYWH}#sr*W<;6z<8 zm0wA7JhERY%6b{ruLL%jxpw_O@+T(ymBjH9=6nQxH=185%HFTVr!?2ai`o=^rEZU3 zNwEv^D?N_R_ap09Vx6?J#>tI2Cd~5ce)1ogII3XTKs){b;Si>i$=PIWu0a z$ZmYZA8b4x3^aa?ak_&)uVwn)Cq4X3(AU_GeM$Xa+ttCt8O}_el zzB5Z(zSdOc3r2&x*ETKpWrt_bC;aPc%6_!bpWoNGepVzzjkwKkd@SPg1zR_NZ+1lA zQ=jSkUVGxV>r0j-`}~^-@o3Ud5pZL{&t?HEg0JI+@l$rwy*u~eObJ3CGT6w z`$~Up2a}~g4CYoh4$}Ypjg_jqalpjw-LQhb6l;CQ>=*{~t0Ut>jMWp@N3uU$-M_@| 
[base85-encoded binary patch data (GIT binary patch literal payload) omitted; the original line structure was lost in extraction and the data is not recoverable as text]
za=yF6oU%(*=5)HFyA+84W_5xX?!g$ew`C8H1%z5%#G?Hxm?u6 z&R=J}Y@K4`qx#lN2M!y3$E#(P;Cb@NsAr_>pL&fk34f{=iN>Xz=0`l9_XpNBge?4F z+qI5j*2VL(LYbyUTWou@_CWYgQh45(rZ;R7&zs)2H>~A(_w&2z@R_lVaJdA}gZGi! zf4^$X@wY=Sc>Cg0sF>Bn~L57p%Cjm1xE$ERo6 z^?$>QTethE_`Fs9*gBdf8GmwAIi4NywG@}juTMQ2W~{8KTf@V@o^D*Katlpt9`p6D zsreprl+LMQee7B4|E_b-lfI+#B)iFa>?Y!`UpfwaZUpbTHF2B6iM{LM?@ixzfi>y7 zE+cjXcio!wU8nw}C*`hN6TjRX1zX)WH0)V6t!We)Zs6DU4uaKHzjO!S7o0U0!Eu*n&sI>%I-oYJ+EO3yqDb zXJ>tAY#g_mUIbvoGW;w};NkVe4IY3SX5p(-%%bL2Lw^}@Jn_0Vc#n9a@yUUm6eoCK zW<}R}>Mbj#YaYJ1Rl&Y~IK?yF=Eai5z{28iRd6`fo{&E<1XeVh1RsUP!{R-qjHex( z+6oqK11oodp&G!ajl4VZKu%rbG;&vX|KxCfLru7Ud}`tdALTvpE!os(h_mq>{?0lS z*Zr5leSKpHllIxesH(ec;``wM$+3iP&ge_%Qtk8i$#1m#R4#iwI8`;X+E^p)o6c^# zi7U*GZL)QjS&P@oP#0JG9>XsqnQFdsrZa*4w{z3?g`HKC#xE1f z{aA=hXg!39B_pfJxu{H$jdo&G#hWA_U-RU{a38)vbSQ=KH35 zcbLAd$U+%;wc20z^Ioi>@wK@8)_i9zvV@$uINl$_rra6&2*Kz5_+4_S7{hkt*7W6- z&4c-5Ap2S)&Hn)3n`(SzCG{no8_6fN-lC1qjCLjmO9ssKF>!{-rh3DOKV2RyiAqj) zcyd}3R`1o~y4tw!m{?NIf?@|%qeHT`(q~)Pfqbf8jqw{985NhYd++ATSQ(W}DewH} z)Ho}rchRrl7}~2wWYdi`#D9!mOfjX6%(v^AnZz4XEAO|{`I#AQWDON>XnGv6_KV@9 z(if)h4twU85fgLByH@$++r}JLg>s|Q;(I1u<>y{r56&=qz`pg3J0ynJ?62~g6Fk=J z^H(@$+1Ht~;QG)<0a*cR?oNpnET8kS-m~W-;p!6MdqVJ$Dk#iuda5~;W zc5WyA4*E^{4mwruAis>9_IJ>VQE&GA^;l|#j-z(ycxs4Fpq40gjPF0o`Ecf_wdxtI zS~vK$OyAG!2zM&=ZW_V%MW3kX$~%*PliwZ;2A-c9pq^{EsB`KlxXCkNXKx_z{H>2v zbjrTIjsK53@Dk z*^H`ra2CDe%gRs2r?unhzpE|#aQb6q#D%H%ns`$ieCIhhlWO9;5H9HK858Jw|HYuw zdVQik&S7wmZ0$YR-1RQi;#7*N__1woJqC#3K)MA`605|S5+68c&`p{k~q`j z;eu!*SWR(=+d>7g_g|dubbv?R0CU%e3K~lAH7p!m5_^tuSHd$Fg2^r-)@>o>oBb;{^f(Msj9kK0lECFZF3IsJc+>K2dteXV{s7hHN5zposfLB4!eLzJ4w;O^+E z>dd;b2TJ&R2K9}{#eRiOrpyt{qTYw5w64JRoir$_nYF5d2CZB#qvIb=q$R*Gl4*~^3L1h;L3 z%e_V{v;3^xtaT(@G+;j17rcBhVaz!f=9*eF&Cgsf|5Ssn zmxJcvWM}HV&P3$%$!G3!=3H1@C;yv1znA&!yWaKL$@lZFa5J)ZfO-8FajS}B6_5Mr zr28DnRxaz)h+LLZOR<&jski9QZo1d`HhcFd*fFt2jK!=^ZhT$r8b}_**%pN>ej(1~ zfcxDX8Tx-1tZMVx6#MDs_5TLEY~Q;~{EXZ!JtvR7EFYa0)A~pc6pgvw6_uOo$KaEodr$AnuRfU8Zt-ZAk*VAZl&}8y+mKYXm2@v(B@Lk*MPS?JqXM*!TW)Ix;s~HEaw{F@4#5 zc3kxmYXzh1z*sV?c33FFkc{0=Ixcg1Y-n7zN17dzuuEw;H)e24IJZI1u(;*uFw1Ar5uZU< zd>Wl`6T0I@Fv~or_f-$GoVq$?XX(VS!4vW!9S5Bff4;>lX`PbbLZ^sMG3V&0bjoFE zopP$`6z=oH*>h%=l?OAIDpoP0tYRxX>?=+A>x-He2DU0DvX`HkV3I6sF?k=Kg?=vT z%mBwYV3-VSDr0Y$;i%uyL*SK9fMpKgCl;RR<)^s!C;09G>=yg+;qC(i)q@LS*dl(! z=jZTwar35di2PCPHvGLTh))E}kbOpS?}X*tY09TiO%=vV-X=ZjH&!_M`{%dU)ZQG- zX%Ls6OI^1@?$QH%_9gtt8^Ja;f0k9(jK05xd$flCyP6(N-)wvEt;pvUBU>0YL%K9dKpE_hR4M zDf@Ptun1$9-69YB)dp+?udxRDX))KVv*vZ*08VggNeq5(G%ln*PcZP=#+w3LWnUKV zY6pL*cc}2yCb+-eB|FOM59QY!7qfm(ArXYDfA3G7Tu@-NAkue&d8QoM4@1)^j1Uf7MxawZaPZ;1#`VQzS6|DKL#{9nU8o;5q)kj6B0ko~_^C zitIW3UlrI%HF%nySzpwAQ^nS@U}ol=P~dXmm2Z--x+Q!|=Z`W=sOLPcVVZo$$i~Mr z^1*eAs|FwO4%u?_(=Su11*z}b@98;_M=OXc#}A8)UBfdvn6G_~Vo>#*e*jBKt~9pA z7o1Ti`Fol?fD0M>Zg7$t+o@iEMc}5A`l}}d*4<4mqUNsnU@N!Um({GL_DN&gj%;t> znU~-@K78TzoWJbYGW^OL;%gMCD`L!=xAJuB8Lx}cG|xga&;86-V^m(H9cSaWW;=`N zi>onee8QZF^BcxoY=G2meCxhrdWBrDHX2>3aRtT4|3KvOgE? 
zVa>d?x%7cZU7=4N+}MY{ve2I#Xx;A&Ma=l!e#Xkf&97g>xb-f*Q}5gK-@Wtaq{zZ5 zV_$S-;Ue?*fo0)8*>~{xpxi!bod&uy@AU3We(KK5QKg%)qC3mE`FX8{sa)*wE z|38a9jNhB+C3A1`Q}<>JT`M0)Om`>#kA3LPdhT3%Z_0je@6BHRZtqQpHOt`c)KA%v zZSSE)x<9ARGWVtb?z|cO^+oQ^a^mWBcNVd)F=YC1D9iNHtNCPR-G3l!HT?S;^zrB7 z_hwOa75V+;!F=w`x?$a$3%EBIh6@cR_OGWMYk|%b{!-N4NLrMf_hiF*oshlzUIL0Cayi_8u+m?;g#G-=j7E%yYp>PFT4(+)v!q+k!c< znpalWs{X{mne07SO84kLvd`ngK?5MjvxL#${CK|qx$<0_o(_p zX-so76ZdG9?ork@evfiKrMq-bE<cZvEadDOp3&mFKmD~f zq{Ad%%KdN{XCwD$JARa3ntL?g)2oWlO5CGW;{xmMA+}C&6gnH+qnm~QmU(yR%ep^# z)->IpPhIb*UfQq5y77Xgtfyk1gbgaOc{H+*`oAT_EJ`*btcU9T2?NgwCf_ygxV?LT z`&n^`S=2_f_e}@3mY*{Z)znbDqIxg*YY3S8hIP?;HC>Qra$=y~BRF_fARuMApc^Pr7$hPfc|W?$W)(XYIOaJrt9u zb-zsah$j-AH6JuRGL^r&>4d~@`#&G*mKe`osV z*x%gV^X_{)Keu*8cD-cy6>pAur)2Y@|4nW4lsQ)V?#+XKzg#~U>bH!uH$0kqb3Asf z3D~#J#LjgV_Acz<_h&mFV&h8M=Yj6cYVY3U=UDE%V(!h1Tc}Tjp3H=6>fX$PXUgBN z`*JG0QTL;{FZrqa@*2MP?1U>F-H|^I=O^yTo6waWzwG7j=AIlA=(;3@Up}e(5&apv zNo$i8tNKYzZSxrU)H7qNo?BHrKbY099)7fxJMz2ub0vHF?2_{{>+Z?Qj9$U-3+c;t z6?{>=vK$-Yldt?B~w0@Ev)B?~eQ&`}A4%>oe@zr`f-ouuI!rc)b)9q6K_3Dm-w zsu+pRne12_ck@4TZ@(cfnqSdX2Dkiq_~y*~-eS@-uP z)a_Jk?u&nYpYw=x8=S)aj=4r(%D&HW&%XHDc{dc_yoOAb$_Je-T2d|p1RNQ z$tNe>>ny%G64&$3@LYXXwPo~K$)RFapGDW$ak}GJzEyE5z0~3tx6mD!9VAZRV%>4s zu^z_TYUbhUdU20mp$k=u$>oy8(fXTN5915b_*7R&@zCzQ@7IOmk<#;8AD2HU|1`&p zQTp@8tdqvLf-!y$%&GiEJJy55D@ZOiX61I5k1mR38l6auFl1oUAUg3Xc;#;xulCr# zm;Qa;n<>mvX!@wTd+Oe!iN2l%`1rW%wLjP{6!UV0>?g<%XXPvExv~AM$7}VR$Fl2c z^(=RvT;6Zj!Of{HFmrCfmgCm_DKYu9!a(YuAbZvf)}WCy+Um*Di#Ofl*tO6eWFkve zj@0kr-jLgavy|Ft*KB{>$dN-lk@D-cHxBnt*#m`jEe0!SpNJ0uYbf?coZpUb!-{*H z4d><6T}!<=H&20iz=8ESomq|VJ@%-mA30Kg(CScbT#j-%E3J25>#RrTcJMC6s<$H_ zzd~0Z{y(n#_{V5*iR4FT!^)lN{mQ@9;`M^EV-dMvv&3Al$^Nz{lRBHO?y&19yWoZ|WW+JF zjg>zK*rcwiju3{c65rWmx;sgO1TY ztRDtmvAEOV=7I3aJsw`+r+EH2;gyM;SMIS4a<^n7&+>3f9=IjW@xe0&v+z@xWfL|5 zV;|>^`|)?CInCDxz#l`~#{0!C+;QiPW)E;r>0Yk<$+zS9MI67G-`zrto%ZS^{XY&kJ$!W+RE>@W; ztdj3y75yDtFMDttt4v5>m79fC%JrF|*d}2V^u92PY~8AV&~#75)>iI{jPZd>#rqzU z?Su39Ja_ed!Y6PqTRR~X#|inyru$<)BmFKJtfu~@dh2fCxw=>WfA-!zysGld|K0}< zI|<>?kOK)CkZB=7w0#xv)QbQfAYiS6qB6fh?Q|p{wH<4fS`CU$30qoHX>}5RMMZ1Y z23lG{Mg3ifzVAC?Y71klJ-q{n)+C#NOn~eSIC($cXRT-NwL^fQv{jowxB_JFwVuQM z+|T!WAB7p!Q_$=Yc%+@2s=48GZEu4|?(yuY=Y;F^?BsUy<#;?7! 
zwH)CV%dD+L`z*VNaBBzm%ek8#s*^kjbL{xc!*vs}lRTKW+~{^a$NCw>Rb(UC&hu3V zR(iaR{Z8QxFpOkCdmy~=Ip*8Md6Nxeb}$lu`j*=rbgs!4RWFHyJZDLUtYm{ZTv#wgOCG*LHL}; zEDS=Eu;p=NM`O;A9qbjSBRezMKk2g-oIT0Sp);8?<*OUD)-w*aF?4ytV zAHFiHekb4G&ss=+270`!4Hcu5rMjJhdOKZ-duaYxTUAE`&4c$jP- z&n_xR#VxAdm9cs7USX@;PBstYTL;6HD_^~@K9dy}HYoS}0;|C%jeJKrDO2coe>7}S z7FOR3_MNfS=-vh+#;|j!FOK9}>r+C_C5=(*Q5`IaYfbDrxi*iI#$Y#a#1w2E!mMsi zbOYN_ZDvUHanACij4Kfi#@+_w+IvxFZa?xoJ{*b1(TU=l z&-1y-)e-b}UhFn_mF6LSu@b#obyK)ry?0%%UmQCdJlclt(EVchzV>-vIIzLB|LfjK z+XLRsawDURjVadfeD_exa`%Qi@_8GrS zXW_Gb&Evk>({p_`6o-3Oy>v|61GaL_t?aJi`g((1bnh2&_bUF;v(F>Lx-%R8rO;rP zk?_G$%(2PLN4X*Hd{!>H+I&Z`mjB8;GRL!UgLts!GL1WE7BYJ$euf`1*QZC^&Nb={EX~EC9yeSY^;N*1vXmPk(gLvLH!*WI&_r%R!pq+yLr{cj%uQ- zrh#HQupdXYPuh3a598uA!}Yy=?~sj(b)Tob&za=t&S}VozU3?;QrC zWUeK9i@vkIce}Svs36*c&e8lFFvte8uR}aPh-@43`v_x{U_)?wp*D;t9HognI@gDEqU&;M!@BPAI-23RreqW{^5_f*?xf31vPj%;?;obT25$n$XDfffP zYvkU?ew2KN`V1||?Jn@b8tx(a|0naE2JU`)-*4pa=DvrMif@?vo}bb&{`_3~}V|F4+2KZbG|Tad|yGX`@`uk-Hv9CP14c;GK^-+SY=_x)~UTkl=} z$8R|=?7YKi8BmdYi@C4o&!b>J_m02D=g&Cy9pA=WRAaOW*_^|@qn=Z`_s!qo_H%9- z?B4(2vy5}d{ol@Awh#PeW9yW+pz+McSEJaH%3!*-zPN>ob z0QCT_3YNI{zVv6Oc)BWbAdDSJwfMR7m9My}ZJd+Ey}vY+8=VZU(>yvtxpA$_i=0cX z)n(|2=3s8O>;^OB<0RKXS+dOrrp4BO73mhMjU)TFQXG(8#A?Az&CEq54RRZ z61}LLjvVAk@fC~F^{O2r%s9%QXL}EK2=|WlXSbl^+&f=&im1cL{#d-PJx||};+m=% zqZ*a2kK$c0kgZeyRNuhzB(~2L#G1r;ZyV29AHFtG&buq|m#HRx`y_H|k-7QY2U=6z z?@PkzUU}vYV0>k>@=R~IGVbUy>iOK6nzwYn?z$3era7)n0zIrV_vh)F(=~}vqx+b% zBOP@qST8kx^tgKu3{K&KZ!_jNbFyP}$Fy+6dB~dPp*ui!-F9LIp61y_*u2-7d%h^X zpZ$M z!GFT3^!Cs+Vxl{~ICGwMjPb8Zc6xTnFJeHZdp@Zwy*SbDKKHb>B!{qI# zm&E~#Q9LcI+rwgLm-6~E#@j*s&@}7_!ehe4Md*xY z&b-~JMrZ2$y@=aLawO?7|Y;Q*C1jYZkI!5|HI6$&{ zKRzVwr{p$`0jzFNzENK6amKxdb-e?*mHk2EuxozVZxt)$%I$~71B+w_7zno<1jqag zTr&^Oi4EY+d}S{ZCNe0FIa~T)=)uPQTb`fZGWh^!u{wFBroe-^#sj z{000JHfZ5~SKR$Lp7Rbob@iF&IcYpl?85`aE*|I`NS+P%U&vZn9H2M{VM*DoOM}_* z$vTk78`#-2{8}UWg_OGh`rQ!*Y^o*eZ=z>#Hr`>LfakA z^G`2uS{SSR1TXSOSkT1*{$fVE8>}?*Mo~_0SwJvU$ekot^JLcQ)^OG(lMeI z=W`n4d+0H{F8{<>HBNU-Y0f83KNSCv@<=*DL%H|xOV66@tV37+k~;MIyz&x_KZlwM zhQpgZ^=c~E{0HBjUd^5!08?r&UH=6&B}(8&?1{w$cArwee`}KC;v6^5Li@3eJ2SZH zM|C#d+|{|NPoe!#-F4NGaQEVKx-V8ck7k*)-xV z2Z3Yk7@D~27jsVuU-{*u^Lj}x{=hhfl0&3Dmt0smSxTL|=Freaty7WVtVLeFgL{UL zW!pa&TvZ>=Y1FE*18_HGsHYUR$Z z>sPrmTp4uzPgY)k!aI~da=&j)wXb`SmoeP=T2KGH?7Hea%r$>XPUJ= z5FQYF+u$&FeXS45zqT#JZ%k%wU+7|OYdH5iM^E;}I%!>Pj)%LhKfy24)w(XewNF~! 
zeL7O#muzY+Cz!P?j{Q{ri{s!H)ng*I!D-^o%2qyuhgk-E-m_)QaPf-l7`r>+l{k1M z27k49MfqULc~}G1*@GTB9(EZEb`j3&E_OKw?4letVV9&IyM(Vb*rgckG9CM<;u|hR zo^6d<`75eJL*m2e;ei|-H~&v{{iSc`80=Dkd$rHxl7z$MRt*H!DnwO4z0=!i=k*~EkR*oA3aTdV9D?)|wROzOWwujZ{0X~$WGjZb%JUFt4SEvK{&x7bE@`Z=GIz23FA zruDTruj~UFx9;Gy&h{{lS*Oo3evQ+Qb4iK*?mXG)#EbWXb#`@u zb98pPjdj4%+S3%483Awd?m2^VSd-N6eti)*NASb z-6M;0)Z0yQ2{C*RF>p@8hjCQz>pE|LW?PKISgQUhzy3zK=TBF)FrCd*R`m)Y%ihv3nw0)4r9$H`LH& zUrMpv$bZ%j2i=W~zknQQA07bb^w)jttxp5kLOAD0IkES$u%WwVl8+UPrxVsO@-qKq zU>){d@?Jw7woI&}J@#xs-~ARbqiLM;ZTButt%>3iG*9;mi9b#bW2 zb|P5kTGmDT7wckekHk9enp&(weVf#pYL9g;cO%zJf0M#HsrgyFbHZ3BwYK5<{n%|@ z_FW09eceknmr#R2@; zbCw3)kMxGa2J!2GVa9O>s2}And`OGol3Be2gGSvIDTydQ@{57m$b#?upqcaXb-p){ z&jdo_-)s&CYR?Wv8drn@wH<-Ls*BJI@)Ny^-6YE2XJdC64X!Y`Tl{-Ub)WhtBkH46 zRDT~o5AZzsXQqb((M{BV{t4LX32YZjvAZt*2KwXfqWakPzvWyc9YURL#cS^i_m1fq z%5_rTF69A9e>?$KdDr|t-{b~~hgGY70`EURoOo6^O81KFB6_x4f6#q~tAD7E&)mvb z6Xn#5hudw+cWW1HWc@z4^AgtdV%GK|)_4+YJrO(11gCk#f84D6l0)HQQxh=4vt7Tg z_@n#$CzyjByMMg;yT&*gpNk#4a<}EPFU!JK#accYDT!+AIp{Fum^88W;`*=k3e^5- z-`#I4$_v!~`)2~R&+HuQ#J+#FBRh-TOZT%b&wFOkRSEf!wN7gi9W&GuY*qi)>Ztc5QX8-sN|;R^!U>RmeeJtf3y?AU>*9?Hw~ByvKR^cCcK2)M31_MKcnW3e-Mu*WC|3z|YIt?|wsm>odV=if4@w3$%wnzoM>08Fk{-`@5Xp0i~w|_TGp+ zVj#~|4X0&ajnrR(53V+QQ1trKBlW?uKJ|}9g3;1YFdkTV#lgwQ^B_LEeeYdA2=cto zzkk;cUJee5R`jY^a~eM<_N!PE_|0iw8vdIRU#jR8So4K@1GRx+fxSDh#f=XPjN@0G zp1Mh!o8Sk)I^9N35R7aSNLPJPig_}s65GHLm7b;2vIPDrqi zPq2@Q>ymy*q8|>014rnGG3W>7X07IId|3VP1ZS}=+$T0RP;Ba!=^W~;2yf{u8y`1! zuUpU5i`Dv)H1%#D;BS9m%rg0#(>$ z${0%weeuN4?r!Q( z*9M`HK$fvm=nDUg(&w^5?W* zZ(_v;f|rA_?e|r$x$%3|DP42@zl~q>-a_IX(Kq+;{2=!@p z2aR90NX0QOLPyQdE{SUV!XE0e(vVlNq2ZN+=lt)oSR{rm42m1C0V`_^!V9gOtCn!qRL()R^g_466Se#*DD2hO_%eX+3W>o4wrhwC1n zz~6_B8g^<$)#qRQm2e08X(N8IP3QpOkTv{~4NQ2Y65S&#vI;wx!#RB8iEf<3N%p?) z6MJ&Nxy}=uL!HNV?)x^*qj%?T@pA&ZZ^t>4Tl{>$oJsB=dnWJW9DI{AsXC>Se^)LQ zn@;>2d$#hyJDewx6WbT2tFy4FsQYI@_dU&TeeZJn+jRA=73^8m-0#BN)bh~1jy{=O ztr!COE1Q20;hYan-SaKz48>Tu_k1>MmmQmpj+x3m|9mL%?Dgc{zRq0j+KF>Zeyhll zjxpCsMh9I(Y)T$-n_dSyf8~BxGp=mr@PF7_J$oK!!;FdZB3>XHNhGVdu>pLpdaios z1I*(E@4Im@s^YQDd7V^v#Cgre4;{OFtdotL{v-1A9B21b&Z*AVZOr{V&hL5nI7ivL z6J1xf>FmZ~-2c>X%F+2OzpJobadlrowv{V;QC5DmY9xG(vAOl#)9Y^MrsoL<+{yW` zCU1HTb`hQV8Nm{B=I4e=%>92gXWp4K^SPQs9cK#z{EjoMv32ME-vC~DPZ-2I!=3j* z^K-yJ^8ZibJejfkut1yaoStmH%X2IiC>58*4sXwQIqS9xOfZjLQLCAAG%JVugk0a9 zw>jdNIW`q#?Lz7>?Qt-VgVm6s*m9_}{LCucyvu z$AF5h9pSvK2dL{TyuY8buh@2iGKBZjXCHrkgqqKrvI6U}Mpro0W@)^Q_=JY?3J0v$ zIDSxI?_BVvYD0)`o{w)@dF3tq-|R5G6P~K8yN=p+mC5QKs8-HA<~-$4^&5)S)c54C z(D&r;VV$;?(DOjO8%F6l@b3A1CX)My;u7TSR{me-;dJzfwNttA(u(^kz!bUgKU;s% z@z8I0-dPs}l8-WvKp?wzJUTHDeDul$YxGh{U>&jbv2twTEh7TSVT)e5aQmYBE*u_y z>5Du1F8t!C9O78=0?FZv{_hKSEV}=~QP>EZ#_{hQem{WUGp_9``#MdFvTGy51GVu< zq1uDfgULhqe2S>wq%~@u8C0wRCDFikyl4G^j`h#5N5YC7{O%6Er?V^>(%wT1f3S<^ zwQ_e|9xTpzKYT{{p5VZoJ-M$Jzt6m{2o7wVH$1Qvy?@28k)?&X{iD0_!;PmeVdc-4 z)u~=iR)idy@Ss>fWCP#x*4fFx-X`=$3(uXFJ0Mz<6-ah`5ZJ4JAnF4b=Q+I?U-PUW zwYU~uDP49m|ChDkbbQ&ty#dy^neUd}5U8C7Z#aFy=)^pJU(GY_5jJEWa(GtrE1#(? 
z3x;be1NU7y^)rEi!X5N5WG;EJ@_S038yy^+IQ_A)&bc3)3B|!ab9IHYF;o!$CHp^%ajN%4e&Anztp97JE)#Z*wX#te zy8y8+N%rCVuLNrALqU3>xH7fz{P1sg@La`O-}iE8x%=6L8@_d2q+ZWa{X^p5<==d8 z4zY2Pw`}5v58!LE`=g$Z&FoK{XEY&Wmj&~3c85vTU^@9V%%>S#r1wwd|08#fCiWw^_a)Y56niuI&hkVh|3BNeH={lL z(G2H8uaftk7o(?1Vs`I{_HplHtiSY9Syn}@YE;O+unrkj-0MKji9I{F@^|Iq9DzxZ z4`EXDNG*D#_JcbwM&De7-kF5{nTQ^mfKBZGZIC_er;krsZgfhP`fxJd#erp!C(o;G z3I~!C`1|lt=he3InPEI{8$UZ1^&#GDFwe;6Sp__^kY^X6%Z8xK`qUnPKkP?mb=FgZ z&*pwT-R-Yagg-Y={#<;AL;eweuCF$I_DjLqvSER(;{$=cE%>9#1}&*8XIz{6zgGMO zWTB-nkeqhcp!x}%=gsn0^vjA~cz;D=44*lT|C`dUAX?79ThJ52^1{PeF9mAN@9D|Y z!T+nSkEzEq312WcB0j>6agX%M?9a2XGd|#sD}$f6;CCw{wqeR0Rf$KjzgOOIb7D9? z;3?lItnW*FNPF1r*Yk7kdG+i$h#prDdRx!+5ExVU-p{$O<-6L29IW9!RK8N%*uc71 z`6IuNV$7}|8lA_@3r(r#+h^U zoPy{s=3RMLKVtIo6T;1NPsyf6T_CySRPt3gmo41as++SOJIQm41}EAE1d_>;K=N7E z;Tdcg`)=*$%%#Ta=mn+Ig-xb0kIR_H3f{YV{gS!|c&F?N)3!cPH+$3Q_<4^#P!}ZT zM7c1tdk0PZzS+TGx~4roZ5ubHys)#7QR(&O(d-ZMFrKs2)^|nm4zSl_;e7I|@T-tV zw1+#x)VUvtd~xqv`@hZcdKA2Hi1TdMT=$`{L+e*6lDiqpT>OJtv#(>Th;t|HU~l&F z&NI+^zryx5fHBA(IWHKjZRSprjj4&f({rB1zWLmuZ#sIG{k*>WOn643H7775KA!c4 z2N@lQz7dXT-h0;@x(}oi6gw%u$*Qph(RrstqVsa;Il^2|@90=3{XUn!zlNOa8T-%~ zdoD$HT@e0l)2}BvWdZUGy>X}xP&xUQT2JLZw{sUNzE1gQiq~zx?%0eUF|weF8nijs z#e=aOGs#gPMrt$9E=Rtm_RfaiWXCIcPH8YFKFjtYy0Wm|f4|T6ND{phO)_WMga)M&mX*8e6f?KHhoDL0Y z+30NL!xkgclF3`iOI^-6TFm#vOR~9V^o{^HqhV}GG;&HoG%{pxG>iSK2?W+_FZK7$ z?5p;#%*&ZCy9e23t(NedO5Qyi%sJteC3V8d6|7w^zSG3|%{i@*e!LO6^5#D{z9X2((U=WoAJT2+-Ottv|>E?&>eLv|X_QM!+&A(wlQl{2ZM+%`Zl zECb^EZk>i)&mbpkrsCI@MK}jbsV^8|JX!e_8$vusdD{(ddA?89CyVjxvzn{;m(3lI zA>ZSfmvYB1stZ0IIRsH4M7>|nZU++;g$o+* zM27XerR+uLy^fI6Q_7e&y}Y-dIC^Fd`%#MAM?Kvl93#C_rT<69Ut_&%(Dh~L7Tvo`zRld z_W$1J*^*HYi>&qA$GE4xy5iS@6RrU-%mg=F4Su)^41!r?Q6G~F{lKH$;z(<)4dXZ* zX>H0r@(JTeFRjhsNNWRuoZ17djX2WJ_}RmeJ~bR^*@JCTxD$j$@tc`daY1Cq3GCYnnoWF}BJ;7(hk$%L_6W~Z0dTNN}NMN%R zM*^$%vm6PoqW)bO9O*1DtdApID2@c~pMg!=}n0;jK!!r!`fr}dMGwULHp~8K>yluTv8!0F@sT(WI-=X0$CF+0oe6fmJl^H!JBrKc%y%9@ z&oyygMj=COdGIUrgg8Tz9p(d2RpdSdoDRX?_nKe z7n7`L4pqNcp5i<&b(8aqv7BeRpYvREBWpEze-4*9i-HT ?r)RoF}aKqi%2> z^h{=d7$d6spP^DO(Ma-OGtn!$ON1p@tQH?TJ1Jpani9?tXW z;5>!oKAi~X`JWehI8S#u&yq(B=P_remA`wOCqplt2_&lBK08G0(^7XpW-I1ilWMEpXA^Kd6mgYz)QlwZjBfo=*UkH&jC_}x)?4;az- zhDHXGuXf@+72ubMKQGnt9)qb}zfc@3R}s!h`-aAJ!F}LWe(q!aLaGBPpO9>Q)6WU4 z8^s^pfAR;tWc@+HBPKXEL4>Epl#$U)#3C6V(O1-Wq5lbUqm$+%T7ZuS9?}e-*^KYL<@;${vb7_p{6pRz z7+wPwwf>>X&i)}jCm&CLWabF}&^XUO#9rz-e*cjDy!8(mj&uRs1upWDITFvv@DJf1 zHvXYE)EkJJ9OEoU^87<^hfkev=mKooojHc|!orxp)b(r-H?c&)V%0I-p)>*Fl zzx?@LpFH1C=Y9T!`Gzv^2=>Wt`G$Je_3$Lcj(mG{XP%T>dkbqLo-~S|Jv`}C!;`k0 z2v1t~Tn|s`E>EiX_eskal0O;<Ycv6yhF~!iC=k~@HjUB%MKX$J54|U>7DgO|@h9mq#qp|P%{6pub{6oba zS91MBmMdMryE1%4@G990EmtzXmyd}5Px*+z%a$wcnjWC$P^ON|}zN6N!;A4U> z{Kb7oaFs57M_{O(BQJ4Uz;)`G(1P9B#@QUfjlj`b`wVV06W=BM&lNkT8cNN~#rlqR z@_FMwa(U90BX|;6UA-V|{8}fTWIn%x@5+~yOWcu(qdS?tq)R|EIOBjH2v$IhwF!A{oB#?S5R>`US-3_F1sI{A}St3f%B{#d%z z_yRI~N|(B^bW1w(C3vBqFD>`;CHC9zR~q&g_A3Rth^0FQUovO^r12%@D&NwH#?l$z z66e79mI~c`2Ki-VOR${jb;Z$T=&Fhhs)zCc_13%cE%`rddDDmGOgisXpQ8qe@2npy zrtYNqmhc^&jCeZvmf%bod;QnuOb)vL!}^vwW0UT3rssZie9rV`a7%Z5OPz7ZCn@JB z_=hxSa&ce(+S49#Iny6mqaM!m_sN-VTHnK&y33i`erP#U|JtiLGar>R)$+XKai%SN zMx5zoes;W~oTGeVh6{)tE+mGyh*)CsjqWU_Zc?AxBsoXQl~nFL&)wqpgZCF_q7RmG zjUwb7HISDQSxxQ{yr`I7nQrb8HL!m((aH1W9+eGxur)!t%UO-TqoET|8s_q( zwSJz|tb8M|v5lu&7*ssn9bP=0awKLXE+xMFVA#z$QcPVYPjdUi?1m?;CN^C4tMZAd zYdSmaQ(9=_>hM7v!=IEISBfj)yXh*w=(X(Z=reGojpP?8u1>bA?=$WW`I4Ueenwnf z3-fCnJ0!Y`Ia=S*+*698EpG*#*G?H?;_52Ne-=klKVkK-R37vme0Lj&r`tU*Ko5ev z#6DuZVxivM_8SfJa*H~Mshf)(vV)kq^{nN5{+@LTIY&8+2mencIj=UiNcjTFG1BK0 zH(!MhB^_Tkz>Tjfn~;1I9=k`z>xrvVFHb#dgXKXNbk|=r 
zivA_+ z{Su~%N0qQQ)Wl+MR9mA3-2MK8mCpW9W=>H9`QoZ2Y;ua=qTTTuX`jV^I;in#YGU=p zCV{-$e4@gq9;!RUziY@NdT_z*$IK;CjnslvE>S_W2%ExUz9+7rTq3>WB@gPSq9;)Y*^6ZCsa=bdM z7|v;qc+U-~m^%0US?{HDh<*bn)U*6KM871a?%hdx$2E!G)V|q|O<`lGcYOCedNUyJ zlH(Wn4qRrf^%w2q{j!BApJ)%Z7uRo8fc@ucYNkcvOT5@ReaGt1LQ}K4F#c!0tM|D) z$gR!Vo!p{v$}KWy`Wos;(Hq8`Yq!3W@`(=6TceqMRz8p6KFEn1SNCw8&fs|ZQEd0c z)jel&iHf?&C8DPfey;bXe>QpluJ}Z%{7}#bX_<| zErY>cE>Th3<31UC9SIIGc}Dk;OY{@^xCF2jT*P@%A62`@{$v-`1p{0I7MKYpxSBkos~j+b$s_vqgWcji z-Kw?F8B2VUc+Vf-%HTa)0)YXw@31!V75#61_VAuh4ewcdBEF)sU-a;v?(&}XKQO$< zoSh=hjK_O2^wNp=il*~Cqf@fnS_|Y6rD`pZM`UU({D9Ah_uR+N9^TW#dpKh;t=DZIy==}#>0$=K^p5bs(0VR+Al%euvT zx>Y{`jFOR;{7K?H>wlZUd)5X518aZ6+KBhe=4TJ@`PA?ple24bXuDBkF89ni_NXdW!M&ME>OZdH6r#=kW2EI&=J_hC*jwk80UC)R;R@@p1TZW>GVpIj(c4 zbt+%a8TfbP51MHFJ^9h{&sGozMV`ncH$F}|Gs-2>XQ*{#>dfuqcZ!SKjjuxfp2eN& z%LP=Y)K~vVadBmei^Jxh_V@J1-*cz&_rym8=q))jG3C6R`b()Z7Z1BWAH@X8?~~|M zXU@jR#Yf1_t-jIfpX>U8c4I%4FX)^2f+qWFENsUY^aB2s_Me5FmH07i4YXtUg;JO! zm2p`bC0n}fZ zcQ*CaTz}D$(71v@xgI!E|I^+T+?^6 z9o3s}nJs@%VPhG#=JGCT%v~*i&=)2oo2e~VLw$JFSx|pqw_bGtKA?X1cKVr`XX@qp zF8&JZ-%(v-zh6l87Meeo=kp7x7feZPCHt=PV*5Dk7(Lb5g>UGsP+@d7{pxMKh0b;6 zj);xp9Cwb5(_Y&=BK3Jujk#BSJ|Wd1D|qrpb#EEJ(Cvwb7kmr9P|*=J=1S?`^)%RLc3y&Ss**8qf5h*h z*4*`;e<*V1kot>FjX8JC{>(o6eM2@MPX3{E%>{Q1$E&%Z8gp%vE^)q2uZDg2L05zZ z##=9#<~)xNL^@MGp*6nxa*}no)EGOKT6eFfFGr5Ex?aVt%ZKPNBro2_-r@%QHGBC zr1B&G-Tg`ANB;5m@S`s3arf{edfdAFD2dKKDg0<`4?p@#@}th!`Qyfqq;q=sQ4c>- z&y)#p+W@%XgFBDSk5<5q+p4MdZcJ>*WLjFMV-4 zeSj3luRcJ5f&9C0;THpg`B}ox2tVlq6u{T>4)a--xem#5W*R%VrP;$@CQ1bni1+{}h;o1Z5g_0e6_|q0j?rjgzi>Y5cm)fQ3T{|$; z$Ml~V#&d(%=!ORZjlrd*iQIcjpX(nQoEY$ju}&>--xd!_Et}gJMB61`8 zHI@dL-=h-_4tr@~-FD)B>Ft>u$9kJyoY*0RVXH!cb*E$Zh|+hel{u|>EL^*rz6=e) z!HI?-y;y>MYJZjG_A**QUx^?xJdkHJhX*yP-%dqvP-Awe_txeQI@aef=hK5ZjSuj= zTzVDl=6UnZ%dMaHSfF-J?|upX-}HFU=e9GJ2F5_&pdYl-Q|YI{;MQ#)tax=JfAsw| z!H`*p3G{@xnSYbhPp$u~xOxKKzjA~KAB(BPim(EreXWrp6ZM_5QWH$^BJmb{Bu;Hg(kG!^!3osiUHG{Jt+nDUeWBL0z!NHB& z@DWVKKT^oP#rfQGp`1qbxLABfc6~E;u=DBpx0wGcVW0ky{%l~UEva63S5YGgTkYI% zA$?l0vHtwwI(oDxgFH*UtaoA){s-1kI;RhR)TdpxyLN1@9rT`}=fMn(^*U^+yO7}| z_Ubsf0*6LdB-I1wMdHm@qfZ{;&%&@NAPlV_1 zerf{!%p$DKt97HOPu(;Ee-JezvXM>YwaZ5SEBf=O4nZ5b;F(*$<;X^>Jsv|XYSmoO zy`?^Cx+`k_+uws{NW6kMY0WC>lTk?zo~a*fdzF5>rdNjk-iQxyKeE#_g8H+eNL0^m znjT0NZu(JO2V-ADtpNEy3Ko1bkzhWf7?*zz+GF+Pn?=vF$o;IvfJ>dMFm~rjuH88+ zoY$znt!4Nr|3uws+gnF<0VK<^(JHq8C+y*V)-p1)k2yD=<=i|KDr$6lDtmo(@FVDZ zzuVUFP%HZEj&KouUyIE8O7@i}(ad+ML%H!C*sRr?W!5V-b^h_b#axw>-bC$#=8+Z2 zRm|=8`#PST%lxv@uMu=BGHd$NJ*su#JoP`1wdb>{5!OO(%yRmnD2GD5QqJIaIuH9y zA5VVIdhKVOZe`yV^SLZ^L}#1*3f7q3Bs0{H&ObKELssvasLqV!VI1-h;XTXI5pVK5 z$&sF^@3*p6+4xN6jKfbt9!=2@{1qMBUR{h{Gv|-Lzsf%A^AW~Sia%%vxfId^z4+bZ z)GA(lj&H5`jMjQ3YrK&4ZNe8<%K8oG9g#&HiEXUKv~Vu+(Jy{8vbJj@I+uMN5F8L) z5gcITv9&*WL%#E`{1@pU`2mahzI;W$|APM*nw$&0&wk32Z%QF`mJ zR#Q@I^;+Fo*6)G*)dbV>=bp#B*e=eA-o@v1&PDdhS&|a)mm#$ z%h=PyoVQ1EDr(i!c^mqE81ke(x{DWljXf=lcivO2?_ti3`s|d+2jZ*YT&y?DZ>$I5~^#)oqix<|c_ z+v&-paqngh+S^CSr6~#ziMoAx3genzD}CYHknha;0q!}wkG+47K0Riyqw3#WLhtEo z5@GgScbjBOc{GdJbLpRV*tf5-9@{t*x?4v7%I!%kU$Xu`!njN6D?Img^&=e;UkE0t zLf@Et8uUx5FAslLU!F2_Svh|%x4n71|8M0z>;LWe*#7fikAe7c0-^D5n%+8HVEa+R zC%qY4P_Y9ihV3uelEU_>d+ftv`&Q=DgYA2;{a+H>&wkRy_Gcq&9~ImG_Wmxgeg65T z$EL;hgL#jzeII^)WNd!{7+xINV*3*Q-Gl9qkL}4pKMuCv;^%jdbrahQQ@GeZ_h{ID z?eVbv&%gv0+qZ!2f97HP%f^tqJ}IEuLq}ly7IMR5+=+^3>n_e0zFfk++1=hW9}4R) zpyt$5mpUfSjr+TW_$Ad5%fR@o**5TICybw3r%y1(Pt73%#h}5*UBm zpH3L#r^fp+#Q3SPeOxg9cUj+KVf@u#{3gzr!+DlnP<1b4i&=pkMBLNE`l)&OvHp-Q zuzoC!^;6&ZNLb%Hn>C+J%i-S;>zg@!Qm}r6z3&dzUj?R@O`$W^Z~5g%$NJAN>K4}D z(haP?#gFxiuwYR4b)>yC%!!(-r|DVZDsSNkw_{QqV?*XJJFjxRgoWNi7e 
zmo3f0zQo#Q^9OtV)&Mr_pwE_ngKYWOZXXDSYsY}Mc4K?LoS$oRTzmUo?Ct8`9N-Qt z3kIT_7mP`K|DMw4mSLN&{ryIN}zc4SQMSsua#O`OVky8R6V%{Oou|Hta#zLO+8|G!NV8U~Ht=ukS3Nu2XO7 zI`zVCQjj>zT+MSW_v~!TX8$wrE_O_VcbQv1)p(K}8yoby*b!vMzR_>P1{*bvM1HVg z*MLK0!(NT;J1-bme-7)TJy1Wy8f+Pg-!{MNlWEIdC|h=+Ys=35`I5T0Y+Behs(xNm zcXlw+sCf5z*s^688_PatmG!HSV~hAUnCEJK{}yqX=dvb;I4^e+A3Z0`n6FabUSdRF zeyCfv>(cOG`tx0rkgYX>T&iF2;`I;IT}OPbY}wehx2k4LGjgSSMzw{W$1br78+ILk zDvq>cXUYNfbX`04)$Gw#?9~kRY&v^)C3fs9sK4Rau~~e{jZXPv z$3yR9uay0}TJ~>h8XcZ~k)!z^y7Xd4>#n`mx!Fy=gJi39Ci{mSwmCc?7H4f*@MD$+ zbK>zMKCx92BDp_}a*mZ77yj#JUmKW2@zma=cFj*fVz?1h!Y>1&_mpHv_5@qE4s zncKy^7T-7_WgD)>HmtGfESvF6C8wgm*ojl;OW1KgdQ0QPr&%aHw2ytw1vCB-eNcmK z{znUL1BVtIkzcTl9`>@O%m3!zk23aSRPV)6?2}s~$abE5H^|AG)IpN}Ol!25=Waum zG@rSiExnn!Mo!7Xo=$xzY^6uq)4z#5y?{GBFMb2|bm=46(;aN7uhJ9qv*@vr{Qgm7 zM0KcsL0wPz5Ns{2pl472FXq{bJ-mUn(^;8&K)pnAWeZY;ETE zvyFa;#B#BTpR04X;EqJ|%s}!-)QrkPpO^E0qtQ+JT-Ev-`p*}{TiHv`E^g-66;|G0 z?BYWj+vqcI?c!s3*5-3LJIHf}O&puSO!o9JXG(VQH$y{=U0h?4P5f5wsyjJ@dbj4G zdA^QaJc6H2cIx5e57;v&oA`h5|FVfc&N|B`ei8d-_hvL>xAyQqvv0b8RZlMm9BlEi z{?E0AXT>WUJ7%ahmh{2(#vYFEQ+?{uJ4dsJXLquP%b&Yz zjGU6o4~vn@?&-nEe+i8IUn?>(^3RbQ4(LcPj#F#3ONt#oGaq(auE*@;K z?oP0uul=`##jGN*tj5tjUV_MV&h$1VBgq4~2_4v(g!oN!gJl zlfuTUd$92_vGHB(o86nEVdJSs!NyBY5;iWsTbQF4cDY<^JN>co48Ybi5Sz~+>~f#M zF4wD8b(>XdIq&NgP62-AqJO-|$r^RBGi2CA=N@cHqwlLotXecA5r#i(h?d& zYZDdWti}n!l1BL}t43zm|3`mflfqrsKo=iVyy5Rk0_!&LXIQx6g9tIq6(u|yPBJ@K z5^pWed{5Rr_io6(|K1Ik!G-i)z4J@Fa~s@i^ON5*b|Djk$-SIaGjYRyY=h5WCrS`E zxdH5T5L~r;T*zq~5lFU|1d{8xyJUaKs=0T=A$aSZisj?EdUtN2a!#o;LhTXRqQ-7o zQYTycp{apn6b`OCojN4Z<}=}tnlE(^n$HR(_ha8u4d9=JuT7j89uSR$sk;r|FAWcf z?;bbN*$|1TxRzk5`Tty$i^@mt~SQlz5;tIpIlOxwe;Q%<-M0gu~lqU z>!SxAsW?pTss6%l&a7DR7kZ{7Gt z#iC3Yp*Yi6mg#0F%GwcPx<;`)0Bus=^tb~cBy8mBNu)nI!u zHI5SU7Sub`jkVOeM;v|kI@KHhK(ey=bA|P>@Ey*@*vMp)dKpA=WGKBh#f&Mv zE_NL{kFEJE_j~`3nM-kONByOa#^&Z>22dIU#mMGyTfl8uWL8H5*=~{y5!5~l*`dAU&3yD8Fu4d zCjR+&I_I#bbMpV8!pTGD6rgjmukVJ=;atWzd(u0Vp5BpfNP1_AbW&eV!9@VYT z+0#6#^?r1Z-q*svImam-bQU^j8)qwy&Y9}zpsG+#R64xst|D_b+a|gCNA^$YpE<-2 z9zg$W3!#6Wo#3=hsz}a9R$dEz%=Ayr@%7Ih^sCW9=yBH$$>(O~!&7K}h=_6z& zt&hY#+;JJ5|b%Sym^t7(L|bq@~BvN$N*%GIbJm6sHL}>8zJ-K6~J8 zgmBPk;hd4Zould_&a`?Lc1I^&A1rQ^zXyL(!Ud9_-0pFh-XPB2@7@BN%A?T zbyC0B*LZhVSZGQ$_cperlwLyrT1@mjdT95vA*UU^@}a>nSUI;^TtWkJ8kfGy4Ht@wu_7LOzEGcU0|Ya=^u2b z(LWw1ThBfHKDs6W{}LyA9-Jgjwhx`u4qkfNkBc}Xeq1#FD7fexaWml}bdJ?Qo27%m zNmd6PK!1yu?Ln@Di(W-$-bZ#zg^S>22hhL5McF=F^w-isN9ANj*Sh_##L32bIw&6< zWH_005WJ0iWTS`f_2VHhkBf)gzBtm+yTCw+5^{oA*Ei8aZ(1zGn)tC$8$7DP(@WAx zn~$ZFJS>DBGFYf8RNSRLvRG)phlRBM89HfI7di?5cmd7cf{hl9L#=XyFx&r0be z*7q-ig~D5#M?~fyz9kFpM(oDy9JpLRI9)Dwq5jx~!nKy8<+(Ca8L|GG>y(ccDVs=4ULiL4eELmY;@EzEwujsw zLPgOwaPfhiy&TQykBnPBvBO}5gK)q&v1XIW53LG!%_~d|P44$9&RvYPyAC{2WyV}s z|FGc{dbes}N_K}Jf3ED!;dzlE8k=Hg^m}X=r!;)YwH*}25~C}Us<(L%9;8~3Hm~{( zVnb$!^Wryih7{9wfbX?)Zdw^<9`>1H*3_M|;&*tbH|KYGkDar9x5KkEX5k4pht=)n zmKq0Zpm)E=cO?^7!DUqcaw)c_&7u58Jx4llJon6Xi`zEeF`a)PH$yb-2ETJM6UF?C2q6$ep)*Ij#-t5c|`_dMb{n z9s5!eeYB4~m)TWyj3 zPHQF^Sxwy2KE8WRI461tTgZO?%*Hlaj_f=keQVANb}Wqxxj78JFnt%<|0K__@B0nU zUCte(J(3JJV+RqZ76yEZH8On!d7ot7m8Bur>BR91&3fAB{FeBaQn>LNWI}751%KBw ztxR3*$&})!#xS?nIMY`nQ*UalvTK&O@7+22634~y?%3^KM!fa(kI(ivOpnd_<=nN@ zn5r35oVl+d-l>W{^LkEKW6~PF?i<%18JEk2zl1Ko44pm=-98l^Ux|Hgiqkw|^8Aih z!k&%puX+chbn)Ni9Wdta@D4C@?zsaput3ipaQr*K$W6~3@Nu~V4q5;3T6n-eV;coK zM;hP(@bVBn5X1w<8Xk~cf8GF>2Rz-02P|}XK*k*)|L%4;op5niJm5XBg|&anj@_9L zTn+BehYN@kJPjY%=<$Isdwk&4vcTCJ;R8#{0;YzDAM1$=?7AuJ?B5e`_JP4n&4?W8 zS%9_V(>(y5coRIazpro~bvVEVhrkYFMiNht-!Yrd2y;#sKDhth4J+{Tj@$G=iVKu` 
zTtNO`oqzdY#ls`#O-$YwECinjljEm>Q|*jSaNfa&@hp!RffpPN2VUj*K%2oqviqG27pP+FW%y#hmEr~Y;C}UzkpHv5U^#;U zPvO48mao0Zo8p%^dUpYKPUS_aCQ7#I6v{Go>UbIsM_s%t!~r&^Qk$mJoPB^zKiJmMPm z?ss4-eLl{ds=?+_I6(VN6P!cv1^M*ki<56ze(o%|gnV=nJ}>#ucjnPSTxe{^l8>TbNp#igjGs#nDo7*F$eFL7%xdGd1; z{97C+6OR{C@5Pm!=lH$hW7NoBqL>GIC+JyLZm#m=M!a8G-12^5ammoGTPIPsD-usI z_N9B+`;`|tdDu)!G;Z*KWcfyO9(CtoBS7z2{s4bkEB+)o5--!(H=~#CH=jTHn4jTH z=*+E9PC>lG$hw}<)tFm-WB$`g<_{A5@7_ua?k?sx)RpHy6`c*o7~4sg{_ct>Z> zaVMNZdA3L49BIAXt$U;7n4IHsk8_y!`8dZEk8?EPgAnIfgAYP6B5;oM`*m;J5_0z6 z>iQsT4A(*M&~>sggI70(isP?;P8_4SOOA1l$1!A&ET{IQv3GaIiobdX31(ESs}@dZU7Qjm==5uI$Yg zAGSO-(ZmMKGX95R@L@s9AK{M0j}gIw;PNI9lWhl6x)>1-Vtf$7K=>Y58-o)K*GS_; zagAoMqjEgu%WDGb2$#Y`qr1T48{rx)j9nPGnP-@qp{&F2sH14`B5UN13C@sO8@)?D z3U_Vhr10X!CSL2SV6MLSAEYakXKcs$1UT`}yvO<{HhMgx9gOSpjQmE;vklB|@t5CM zA+E8MIaq(i@4;aDe4IInXT;$d%{;$_c^H1dJS@L>mCqS|!DlVMm=C{DT(IRAXM6l& zg^`)%DVb>lTW4@n)+CO6++k{5D#rxbfpetR2Q2O4X?J}jGp+24a$Ej@%&13p6MHgG zyn?Yt;JH)6UCWG(&uzXjzy2`KeGZw?KDoSNu*WOhJQtUr%0H49OH%Vy>pYp~3d?VY zSGn=HV0_69nBL_U@Kfad&+Mo5TWM^q?CtF)-_`B=*3P?y?el|0(dL^bIl}OVcZy#W z8-DR1JSQzL-gvCMu%CW;(O$Ux!j%`*kPtt;346Z!*}W|u;>osOW)z<%KZ4#Zeo=6f1im$KOW-COx8(XO+_)v#;Exiww9IdFa^sfNAM_~x3hr8A zlg@sM3*ZKNhkO-^Te5zNu^wlb^EsC@i~;wn);XLZ^Hx7UqiCHpv{QC>tc%=qn|Ei9H{cL=S;+2F? z6t5)wQ)O(*h4uLchjflva^rvI2=^&w2`qPH%u;@AKN!gE5u6#Xw2@e3#VZK|c7!7F z1H5lOI90hDs$Kr7VipGklEx-$=B}71Vwb%6t5>SEBik`3eiFPI)f`gsOJK>jkq;N= zDXv5IXT>kM`C&7`p5h0JUsCReo@H^KpA!f>Sw0|}tb7dGzsrbIF=J(qGUNn*LmT6W zU{lsJ#R(pPQ`+xozKU6z8y*TTOUs31VFlQ6?50Ny#uNt4@K+Njy7Q@vozrrM8r~z> zx3+27v(s@(1u-iF%B?%hK8Ob_Cf?|Ha-f*8s?cDwj*6k7CY13l$iE{l>Dp#&oRZsT z+dp?>7iaE@SJE0B5wA2toS-B%KFjZZhh2X~xG27h`aCO$DN}rsa`_T(Omc3fkMm6W zh3sc8lJOX>k~kqyr`jch2!)F0Po*TBlij;Ahixuc0o8rmpak?Ch}bLe;krz!^R;=HiY$eu7Y#?E4kUe^|Je9#v+`L>^okJl#6c^YiHx5(z0sD zvVs_-)^>0{F;RXw^^awY^1-~ADAj6mV~;LFA5TLsPenghqNk@2do-EYqf6&^5LfzA zc=nt8?9t<#Rlgj|?|jnr_}IT=J#OaSqsL7Qa*rO*)Z<1rdi1#LO53>eKeQfy<@hnA zsxNUEJHGk_M!xLkRvJ7G-pU(Xk-Udkp;5#TT@Z{kF7F?SPVW>uq#S~VPWjtuEM9z+ z*df>F-x-TP2<9%K|6m@y=5(*Oao@KPp93Bx|ejND2MCxf@ZSf>-)u?wtxdAMJr?6>9Mu&Lqf z=seDNrY!JnSAK};UcA#oa35iv=FgR+@(FA3r(=*hw!rYR~)#Vy;c39gZ$qCem|Pnu))Y+LuhcL{x-uIqbDn3znkcXimMYwngGAusk|YHzW(Um%~C`%ae=%75VI)BD#TGH4=sh6VKp z;c7M)<-IU{zSvjSw_y348{5>zc>m1V);xAG?r(+%!|4Xcr@q7mab6C&g&DF~FBg943oJEaK<|UjDM~F(Mi#@x&WML=o zvGSZTNAQ0QJWG1X&e5HpPrjL#zH8-Y7jwCtcUBXN^-IkS8A%|&rNN?jD?X+M)?D(_ zgwB?4wt@L57p#eMy^j5}`5BUbD`WC`E|$*`jn<~%B$P%0^ zi_fX{>aA9unB&`KuJRGO@}y^KA6;G}Kcn3<%}IV5_Q9K*^#^H>R>7srcg2r5gL;>n zN9@)f&Ug;;VAtl?DXw#J@+^ApNOVfi9och7_S}&vopw_0$cuaK$eug$({V>8;gJX7 zk@BNNs8y)^-H7_&!6V5}r-$W`g!trR9-llHKKTf-VO{xlGO&?XgRnWQd=%vo9L2xm zVGTd097SIGRdB~5IHljW(^U?N>c9BkYx(5C_xd>d`;wE6pDF<_6K@ljycJum{1ykn z9ZShOiQ!YR_^1qgR8B4SX87cI_*)ykm=^flJnCF17oqBI@({pb;+f)4{dlJT-HMeJ z)^C|L(UHw@HgV$W-K!ki%kovfPlDD+LGcc==(2%U1G^&3A|Eh5J=o&+sX5 zuEm~zWURth!pDcf`SJxM8He!w9I*BAFzh`%lBgJmH*wB7Y{Q_oy;j6=gO)Q!8Zj|x*xQKb~ z(YT1g=T}Tj))8}GLmssB&BG}k0={PK?l=p;p~Ba0{(Z)H^Cz0|PVtS`a1b!KpX1rw zeT`Y%Q@lg|iv!pq5+lIo?2+U}`OFP>m((k6Y!tN_)F&QHp30dPCYKDT&Ox@1)1~+* zb8%}2$bKr0P!!w1db*s{<;#BDEI*i=laR;f73*a2H+e;j!Qy7~9p<{7XX>0~e2%%A zydvhI&s&_X?}#tJNy#rNj6cA9uK<6$xmnrqHGl3%%RK8KnLh|$mCsA|3M=bv)Ri=G zmVMngOUb%Ev*sP;^2k>!x9#jJ*JDjd>VCA9;`$KB}>|t6ayxcMIFveCDE>6n> zYn zf8y?qt9tH@o_nL`-mqBW<8*H*j;!7C4f(CW=f-b^t<&$fI{zqst6{F+Dy2Kqbx+(p z&#rUckMvuKW6XtP6nlOv;ZxPU`l|I=o$ketolgBFmt&;fY43_>;YQ*Zs(Ez~zLbD3 zSf0@a?y_7XxBN(+A&f7sVR#1kK|EvXkiZPZZ}~Y!uFEk}?|v4%D9kUOA)F_q`uP<`Om^$g1JVRJOcEDzE+;IG5JH+vnBdd48jodnHHb>TtsWS0r9^X(rzv@B8 zz+RQ)It#~(|7cy5BQliuwmIa?PYHJIL%K%1gSvFzG+2IURNO(lqYB<}FlB$1AHc=H 
z85kbiQ^Pn_>*#)c7EGr+c8h()nf&&3JH|X}Wmc0nKMj1Qy;Y9-LB6|>@g4>T9{^9w zZy=vR12JRA9Q%}s_-e?T0rNF82XT=!Ut%oGM{z-2_(M|r76G46sUC_yq$DO^gZyB` zuMxYX*vHQE$%D_h_62c0JEuZ$qT<|y)wLe-nKTU`zlc~n#fFv2H>SK6lV@K*&A@{C z#o{UK_cQFB>I=SVaR0;&R*ovW@|(dGA9{}Q>v=vlvp0r+_<0I` zBCxl`rRFr=I7L}$mp4oz^qQfvkC+3e#S zu0p2d)6sb`wX8W`2hjzK;X+4?*)yDl^U=(@Phs8PX5Hm$WD zfv>o66MymhC-u(1!TpnZx78beqx)y=U+Mly%~v|^1n(a+#*=#gT=t>wpY%LF+WlkZ z^bzl$wSOn~&)S~*C)2+AcXR)Qdl7$~MIKEy@!2`VY4;;uJD0d^YX9Cj!1>^naP2;i z|CqSF>wR&1@F4M@O`e~H9(;}JyHbR&<-ty|d#d{dMoyn?dk6K^x*sie&$V;<{VZ*K zela}gdvKlzJgABHy+JOGiQ(gSisAdN$A`>kKOZ<-ad?+~KJ)$dKH3L|YadTsTbLRH za2my>D1XTx!>9P2%T8ka-lF5g@6l^(2AFK?JYx6MtJ@#9$KR`*E4bx zN5<~S7G52aE&M?jYYy*s(^`Z3iCd_M=kv&zX=&y*ybL-=3ucMT5%C{a*o$%N9dvSf4R)pl@na{49C zJh+6hJ;1~9?V0m_G=G`N{l9i%QatL8P%p!+>r(v9#_bJ(qY7`Qcy?Nc5a*ReUlO~Y z@4@HYxGx`{W4~Rl?XI=zzRQ=|4-TQ6B^#r+51yge!z5flG7?2rjje#OIiX0jnYD>f z$3pTdyTS41X603WoN@BcdNONc_Nu&?J)8ID`gc@+FC%U*Jy$SYGd2ak{BGwg`FXgN zVat~+o)h<$F05uxbE!L~cs+4F;}2wBP0X=@Ir?Ju*7{@iBuBClEjB#dizDS(V~G8m z%X#cd-o>93N15h;;!v7jns4cOUB~Ldu^6B60kZF!kLB0$B{#Bn=DS)G)>ZH7ZDg}^ ztX?LU-;&~Gmvg^;iF@uc?z?H+dsE3vsw6LI%KQ#^xXDZExflOl?!{$_XYaWeKlS(G zGHd_%7~G3yEXTYT|JMoMi>diZr~VD^#beIxB;AYYx&C$U#btl@_u?e+#ccRu4)VrB!V%lUip5jx(U7C$cNJhG?oT}60As`5176q-&hT@_QSikB;Flr93qA-}91n(& z-)t}cr+zU_rv%O(4@X>cO2BX^Fm!qh1Mfa{A3Wy(T=!Y}OeNn6IJ?L9b!^SHU=Ck5ugpYH`xiWn=;bU=lW|jKjf+ghxP)`rrbMsmHq3Pe^ zW$MSco){q2kyFpjrC@wvE#>B4K);#Qa1C*Q7+gqwhBt}1d01;2`GRkJ?In>+Z&36A1Q;Hu_MZj@hwHJB5|t^gj4@SG;_s{E9%!Y_`N z|8=bwGx*KB3+q=tb%#S7mEm6UPgI$hs*?Jd1~1BwmByonhiw8=@-BVH@G!8kKBJn( zZY>AZJt&N6pZ?6x_rUj>!&&f(ZSV^DCM-8=dEpZ0YSu|>yp6p{BFB5+7V?292S|Rs zRm6y?o?GWQOReE%e0N&6=lT9y_+nhYn|z-A%^2QgTzYqE++g9&jQgvWLm@Nial1J( zKVjUq2F332mpXQ=_+%Tm;aiC@evNlF{-IUW3sxNx&DHLSdeCV6``RWt8dqgF({F2h zw4tG9+?nHb_u7mVoxM3UBq|$#TQ7t2oiSF`Ado-NCI3onC8| zS`3Pv5{^_d(&{9=MMZ1&0a_hFMSWg~KJ$ziZROgI(;xQ%iq<5DA94wB0(kShKWpuE zvJ(OXt8;57fABgXIeYK5*ZQvY{d~SZIgc*mT$%x%pAMd{UeGz&$MgN?R{yzmvUBTy z_McnQCx6zt#T@pZTOZ@O)rY+L!{Duhs*k&Vl9zpcl^5MkPY(3tW8Hdk6gB>mxQdzl zPS_|+4cgF%8%`U6{^^Eyz+CEmzMcA-$IE}{K>xhYKh@w@UygRa7Uv%H(Qfp(dUPna zwI>GK3WiY5MqZ%y)f+wrn_6sRmvV?Fk{vrZCTpv|`?Lekg}hzhjh_?4bQnCQ{=*K3 zC33sRO1w$0;b5%9MZ#gqZwJf8=zFgIm;X%fG1J=S)w?rOvt9rd*LsX6gScsaQrpKkj zLFxghz82EsvXvj@=XT!ZLGotRXZ!$hT3O&Ajb#+Nwg#J7Z#nM{^K7}g$kf`M4Q^l` zGPcsjs{&X@Jv?l02I3ibrm>Iz>%SKXu%%E%#y*U#0IqL=1_; z4mM}kVh17n`1M z8k_Q7wC;zo!)w0f2Z@J^Q|oKSOAJ~$^`NZ3!`6V?306_h6xHu_d*NZfPJ+Wb_W_*(9FYcX*z2jefoN-7x zjo*zbvOR)#6MHPY5gd~{Hx7a~4m}eg4~n`S;EkSG)U1 zx(=*ybY!|$Lu{mQfbfR=@Vn4O`wKbi!5NB+-b75aYFr#ezbf}iHlKJP2eTW-0JoT0 zD_gc>G_8;bnY&gX7)o-hUaw7cy@yuJjxk6nXcO?KH&gd@tR<8Lqp*M1jV(0aMW6giA{ zcz9UM>cvSLgYlh4FEa;q*X1=zw@+e>{|r`g7{cmj`H=%Q2*&5)JcD)!$;J-mK8PoT zK{Qs$ptH9~H@le6wZ6^A;)#;Pf_*k7z^v`INuHbYI6b=@Ry1R#7TYw&oLV=YVaJ+p z>eC$cW!+(jX~a^WbgamX#%i$yJ*OD6!xGAQTooxXSmJL}SmNWU(b#_`SpM-Bb0++z z|4fK72mNOPXYMC)CiEdbLi|GY{^Sc9j4vp%>tfGggz?4?RC-)LP*5KWBOEa{3HgBv zjUOn9}*en`XkP)8+s>R2|pFp;x_pl3SZRCGbk9Fn$wEg}PZzc0AziKlvSIW)u`2Hy5 zBjxj%J;0OCXDT?Q?j-gP`H{X&4w33FDBjNbd}OP7miwv~k!rBjqStQRb}6iPiPx4 zdr6+VGgtnU2-vnX-Yj1b?|+Ohi22;|H+DXMOWxOuyvMQF#;*zB#$PeX^oLCOSCE@7 zOk?xbl&|G5jc~`F3zhSO%pptGA5@rF2DVACPTDK;eSeT-vjZDRlAiNzX+8WuU>Y~p zW5;RN->tWvrMO4=f^7U2`@7pX{W^5)1G}e>86))!h9!0}#&Ib;^YO%X{h4Q1*SXPu zc73|euJ`)Su8;NXVm{q@(0_JyJ6}KA+0_SskbFV%vy_bTeL?s&l&7)7&(j!-Zmq@_ zq#ObD&w8epIGk?ShcnFDc5UylG_ck&*bAMfGdhU3_Vc?YFxTIZkD+JfldoZ(=lg&3 z_ckzw`nM|%NB^HY+RtGp|6F#NhrkBvLD(X_R5xj(aG3Jf4}-r+Yf2S@KQw^b1+rOqy4Vf9L=@Edj<$47Shr?k-B%YvdQw7^TAxqW?^; z1V0;$Hip=Dp1BFkmy;8kmB1bre2(lk#pqSp@=t$trOBT#zNa5V{Q9`IXSUwqUH)k< 
zThPz@!5^~Sw1a=t(?oU~^;BtPK6>*5eZ!A^5q7FMGrw_?Fc9sumkoKeXk2U*!c9rUOnuIeg=A2>!f}t>LXr>Z)Lfkn;V?B zWogIyR20Wuj(y@r6T9O4D#9riGw8l1M*@7L@1<3qwII7uaCr;ZJxa~-eB^c&_-;4f z7q+u`I<{8A+(>pzIS)&bxjLT@9WQRx_5ajbo=47u-et>|?R(UidT6DMFTe>Lb`|D! z_T1py8%%ij@WwCb{qYOh`e63Gbd8lgD;LHN%zUXQ;i|MeQ=eG<9*>SxJwVA}G+q`i zOq`8P?jZKG{i`qW^v<&VnRqhhZfb^A}> zmz=9En&fX6oI`cdY!8-?$5;4Qrc+E`+ZN5+N7pIl9{oC{G|=0(U#IkEKv9QcdZDQ12?A)S(%`@bNal3JU;YMs)Dc-w=%AGrvwG8{j0#v|nDu1@oUIpyll zIN>?pR}jpPg*09Uk6w*F(qMv0@i{xC@}%5v&; zso$Kz6~s@CrQTH;e!9b8%mqV2v&eA9@wS{+9peFI!&vT}cgf2}`M<7*%etBf8v!bnH? zdvFbfTh&M2<#cO4lk+Eg?p?(B+QDGePE zK`i$;@9OZJ!6mGNYO&3s9@zcxmKb#HHz{crP(PqHWU&p| zFufitH73i&HFvHz(B7Swc=Y%%*Gm45^1a0S>LJtB=R;Up{7JRKb|8B@kauD0j9O$J zdDGnf5g&;#v}(yYKe>2ba1Enj>dqjG88?+S-pe(uVD?YhgZqjCSPC;PzG3!33H_xMBO`vHenrFKPo?oAzP+H^JjkwocYb61_jlTw z*XDf`--(}t-t)Tl=mS&eUu0t|y{oLM=4@Nvj`&9c`?`64=>OSjT6h9e=&CvX7k1$p@USp?V z&xk8Yw*JI5j?-#P#x}s-)N|Y(Q_oJziS0XETZih2*)`X7Dc%$*Ol0%Evr@e2m5FR9cS`R`*qx@ zfp6j^WUg9&JRnGVR zf!-mo?g2Pl6ih51fcnLY$L%GSqccoA3pn#l!{de-9@iPpNSwF%fd=*1+zWnMK%K5F zX}QFsggRP@=M|3=zbU5%R_pI0p0K^*fW_lp0^g_}$)U-byL|hs@AE8Ser)x6mhO8H zypxlw_@dkt9~lphE&)f^lFK{Guivy6TlQ+r<05j9z|fBwe?%eJh#u$QDaWa{F+WGZ zX4t(~$bO_AhSR~a@^h(&srYpp*xig79DEQ=8RcEP6LCCh#3p&pujR7>ztw@yZWJ!( z+ASs*yRh+}4Yt*I;CS5Mji<@yct2z8>Tl5wwpJb4Hs-~~wWvpr_#XLo2Gd$@xR&|T z*ndp?a2fH`;(EH5=2!EQ35o%ONU>lMT$**Tj#e>{SeFVEN1;N z^X2M98XWqA$sQaw_MVw%VO(eLmrq7LBdvexHO3_TskmH?OF7N=`8@9f<~0pj_+Zl| zo?_O;^Rm)1O^vq1=2*k7$QOI?ydkDHY&V`a@z~z5j_2LQ-)bX65^LacT|5uoM~>`W zsximknp^1Uj%>9Jva*_acV=T7vLyLGiky6a49jokWc69*UGgg1!x83^ULMUFzwV{) z&!<028-6FpoyGZNZ&069@$PTGQfF)qn%|mQ{I2lIKQf1Z%(c8v@~U}PtuZGf|IFM^ zpytG=2(s9LtrPjFQGXWXvpj5Lz>XmEs@bOABGd5o{Kl+{<$*Rv-ku-k9_Yn(_Nc9` z733d&4d5dWP{Q$CTrFC^eUo^TK$~y6z%3%tZC(r441&%`|Zr{kpDS z*ZpVdx)1wxU0-$GLh_UyM(EddeXob}E*L5mN7xImJ0_kGd=!i&6z@}w3Dtg{+ap&z z#p}Ry$E!2(KKP&~ubT_6<6pjF3ET8Mc%7aj+q<~k4Zg3AK8IsfH)~x*XjV3u?0Y<4 zJ@}RiBh*c@afEQX7vOj8@I&!BQ^OWKB3^etJnH~FYg1ZwVtKi0No1#Rt9ovV$Ccw} zQ7-2y;s$rY4YTmoDP~dcR!4ssaXj(510%tv+~4@*z)p%2+%>tXV-=250T&K)~QnVcyOv}W*uOTv~JqF2TfdIUSgfAyUbj?4!3liLR`u5 zHb3q$c5iB|1p$0&xuWhX&bDLZC7y8R_sw_pOyOw9+iZ<6$J>ky`8abMYowYIb>K|- zIVzd^X6oW<-4pm_BvWlSPxf+IfA?;>Uf5Z+w1P6B+>bfPgyut-STeGloQvunvT=|Y zRq-at$L+p+7|ul8n)vbjS3GxZmy4sGc;HWn*{vnyzuatc;AjEClhZ-t-N<8@|hWHVGb2ulIqBL9@}K#+ zM%+Kew~$56#f*=cZ}sb-ejU`WgPOkT>ZbR$_db<6D5qZs_3NNxYm5D->!2kAwjRAT zo0_3H)D9g?4bfa`iBiY-uA$zC)DBh8Xw|yGuVwmv=0(w|)VpZ`+n1bG)iG=c|0lma z91g9o2vN^9Qj)9~05^Fu;{7ocT7TVrRY}>`H}U%f*i^s2v9ohSDR?Jsd#L^!T%%l} z*V01kL*cHvd{t?^ai(jN*MlYQp&xh6$dbmJ!K#NN@Br@B(MHl!n$HED5cD znVNsm5!au#xFj@d75%j@<1>iWrN=YBfA7Z2;4NO{+T=Htql{l`%M(9rDF1NcgXP4A zssE?A(*ZcoGw>$W#d$VTlsq~p)UoHeu-87Nt47Wdu#fERM+d=kc%JIhuh9=Chh()IA1!$ClM*HkL0M&d(XtIL=ADfsaJ+y&xj zsr&bzG5<+t%-{8& zF(*A^QgifIa>l$W9FxP@Gng|dm$PUHXA&6ZuFrTMat<8=$H-19d#=GU=#3PXLC+2c z%S6F4P3ZL~F*`SaUB-f4q`UIbUE!BQ_2+?A+MXQX-OJyuxuwc$3WYY1WSB4AeFUAc zv


Sqzif|ElBjGBs&$AnrS3=5T{wj-CL&2)i_43wsUxqEF!?VVEkgOKqemkz+AT z>i)s=<(E9?82myUfx$1>q@Dq{9K0>!9Rbtq8K35@N6&6tQfRQ6?7zwBRUO2v$Bv-S z^{jQ^mwADBb=TJ+Io;xyT;`xAtte(}zVytNeb)7K_{G}`y;~!>rUtHhI&30#XV3Ua zo_ac{mQS1*i_-54!;~g0ekna6ez`7yUyd03^7|9wmw5)ily<`}S?IiN=~+I0A!ps; zm)tJ=GM(p)^YM%7B`1Vmh{@ksF~u{nM8p0T#oT49)-7kfv+ zFSmhRRL@xb%w>yiy1TTo1-y8Wd7etVL=Aelw>Sx7>#LcB+)Y(I8-rI&kG6nKSAE6W z!74lXr<~^}mME9P+B@g^I3YgLVi)PREoTll*u~W)l5K1ClG*fRS1f|?f{n9~eezji zCe&|0F+?w{zOxIzfJKF0Hokp_mqXk{l04(#>;;n#{J^E2@Q7liy5>fC^LAd`T)44S z3BzDl)HT-r42D5AG_Ee*Z^tAIQx+*m82l0`Xx2R(e)(wFyH{#<_7h=f_vU zE|+zvR=#YuMZnF&70!ag(X^A~eYmn1X5G#+>+13OFhj7gq)Al(CY zISX8~3*WJ@O(&n?;%9N~F65W9*Tnx64)jlY&rZ~9>yOD zcE}spy`REjber-BRcD3qlFNy&cuke3pRd|n*DyPr-z;uFomy|j=+s?&_XT{*Yrr>k zU(RZ5z`;mX|^XO?; z2hqhPP8bxbmmgI*tjyt-}_eXUVRz z{6qOVb5b@U<7Wq7jSlC;6u+tdQs*$A+nB!!Y(}kMpEhuouvZ)NxP`b?@paQ@0nVPX zTjh~E%l*qg^p>?^v+|x^*5HBlJm%a3K25!^b>_M-e4IUt#yIFdnf^|BJ{#ms=JGP;&VLtXUt#`%NSVLucX5yMhW2W|x zVi_ybixbt%o%xyiI32PxNTz>;?I^Dcdwrd8?FFZ5T+PH4$oHL5%UHoZZOrK$^($-VXW>yST>-_?MlRGX*u$yFt7msf2p z4`*ghOADPR?D9SGST{tjNj^4sICY+Ln#aqhjC_1Prx1*%cx$i{&yambpFz1&jYwVZ zuG4*@_-S=7*=R1}9*1~ecOREO{4;QsWJ_b4d*;AK$>0;@2aIIw+rdmW=2N|6OTbXW z3%xoMD^k>W#9(zj zN9$+j)14UqomXXmVGJ!D~-u{l~kc+QA=mFVPbboy zH~YGiPwCEK=+3NJ7kfr;qAQKw#3ry7U7C$eU@hljN`G=T8U4wp^k)K}fczf`>CiFg zP|nXdy4UGX+4Y_N?Br*sKRxCx10AYQ*_K^jqU)tcD=s#AGpIvnbB28#9a>4uy>w^^ zYnwo>kECUpK6`cNWH$Z>@>a+HFXCLDnbMynv1R1`SB4AGpN)OepVy*4=R}GPM-I-Z z8*7pFIqRXiE=MZ6bZCNeUNsh^*RqTbz1G*EIybEiRnVxI z8nZv8PtjjapK4E7eVU&*+vwA-eJXt_y*E0XpVFsi^SfH&aZ>se{iiws(jlJHr(h(V zP2KfreoCL#{R{U6FL@E=>7b*~*_*=oiMm&oH>ftn?#ZmZ@5}i)>pVCz*yvNuQ*C%~ zVmEtL`m`oI_|)_%dpV^~_47&esrp7~T+=hV^l6RsDf64sr#eSDKcp`gaDG0=&*kC5 z?mCp;wFY(Q?0^oX{;lNW=$QqL%7=MHx{q`1T2M|Zuu zPWOp!oY-ORg{)n~J@yCmXs{NC&N9iEazi}E*@8YjIHjuNUyVL3^v_qtZ*}R@Z{~zn z-a+i0;w-d3oIYLP>(ZB{N4eK{>Cx3wJk?WsBiqIkmNB1-jS@x}jUA+gwbbwC5z}aN z5A&gVfWpJm!rgU_9k;I!(9?=r%%XOp(>sT-zr4hIsOE;^8P%I1SYyEG9p**z)q3VI zlQSdzty~(dPdRgbS3vjd#5UBWf9yQ9T#2oY`H}wl0rd;I=^xh1>Yw59FEB^Jb&~#3 zy*1TAxJCMh@49)@d?;p7^FB`c#FvRyWI}m8L5vZc1A{SmU$=(R#Vz0rt)0g8D05`7 zMxMbMA;vVGb+dC`m{`7FKCQx!j57`+r>*RTIv;1;#B=|^+Dm_G4Rqe?Q)@jwl5fsZ zopbWrW%9k?dj@r9@Ev=5xA%R$@7~YWw#bW1hW{sD0k!9jgQSb9c~Z z*9(?bA}&(x0`y{^Tu>kYHGN00qyI2G1NZG;b$cYnPSAJqQy_gjO182vUaqhIC1LwivThV>O{;d^hUd>mF z8n3fX5Mq}##Z*HIA^su zrhX1*Cfbm*6vqUUxcG!>ao@@MPF~^wdRjJ*H+5ZMRYy5o^QFk_5PM8xWDnfu@~|ddiD$uf6GMIS$jNdzd~!@!Wyhf zi{fXO&0XjGY|hNQ_}R#_;%LqKVe?Thg|@V=xDokX#3^+i)Zm}dGinSMDQ7-`A zxQ29LUYJ;dG17H;iKC3S-MoiA^TkEp;7nYmGtqF%(pY>p^I`lX8lUP9DQ?>8|KOP@ zUa9k5^JDph@>KK97q~v`)m3 zfH@SOBO8DlU(=F1yr$0$YrKTIbT*%X_c+&nPy1Hmd!M=ZY%g-8KBBd01t~cyV6U{_ zc8Rx&bN3L>N^%;%s%1>~N4xdPVv^SjGsV=a5U;F)@QEsfl5KYL`d%jFP zX*jB6N*K`2zuSZD{LabOo~+PH&Eam|{VL8TjqlChNxllY$=5t%yRhDw`l!|!JotWUr3ETCzlhE#T#>m$A;(K@5`k2 zrae2{Jjzzsbah4wOS_o*BNg(~X&%+9qKLhbfxRNb$mf4&4&C)`9W0j9{M&QKUgvt2 z=EYZVjgw)m$5GB0t;4E5;1=-bid2r%@)Ow1@9=R8pW^*F!Yw1fb?CDU^1Ng_&+_rh zFz`!?^Mh*)cHvXlWgRvGV>d_FJ^sLWuMJ(BKu|IcuNVfsw}y31VHo6g7rILN zx%%n*Q#eMPzm30LLoA-wdF_~mp0LbAIhnBuV3`Yu6}-^LGIQ|ThhFVzEAJi4)PiLs z%X1BuDNgA8by!AxL>LDAvh^-7j5vStnW7!6hdZ*;>dV33+f z4rd=Rlf(zKN&hd(ZERgL$g7TEgXg-|HEG_o`MIY4+%)kj?C3|vkTVBP6_y$c2hg}R z54GZf?6u1bX36g}W>L*Qn>STxF-wK;NMS;D;uL1d?ZPaxg<0TQmDF%tC#=HxFRUUP zxauXe-chx&9i5ReI5bu~@Il!_*q7_k*$ahJ#7Fr(w#isp3NI8ITkqq1N9Vlcu$Fq6 z>czW(`%1qEKdJ|zStoGGA#$+hMY?K%8(abxHFnnXB8|FtYnpQUiW{F#=@ZqtE8=|5 zvHIjmYxi!Mm)|Jqg~^(d??2lF866s3m=Cxpv9|?=_?iydsm3DmYT$PPOwo1LbWVdB9d06NS*>g0mF56~M zZxm8%A6>`X+{CzqMci{Q+l!v?ZJZ8=j1J(CZgG`8IGXf?;ws%bY5mf2E$&dhug0&n zboBz^87rJ$OY1Bd?0ggthA=HZ4(*i9bHTWQXC-N*SKTx`*H?ytJB 
zI?wmB-YMJxmXQo-4TL*J^1dycSF&l$2}cvp+q-Ph%Dq zIh+w%nLu_l=5Dfs{o*WSXEy7nGi?=nPjYi)4DZaiBs&8GveVi_cAhb7VaMlWM>dO~ z?7V*r=EzIQ&aWAl7o(*D+0`t9;oPjZfzjeLla1FA*-^UntvTl;Q z8l4A0eBsNU@P&L}!+qIv*h2FqxoY{F{6^L6?ZO7s^kn{nxWdWaQs&FatM;kpy6d_w zOkwt>S^K>BU0mbjBgs2xeKqImFZUR0+c{bGkX*R*5IFTvxbKGjzjh7!X%|>y9eU*FV2H=zo%eF>EUs>F@cw_`pJL7y z6Au<8&au0XU>0jKleM{$HM)Ye`WkkR%RSc4*gaY}KStdgZ5+irwd935X7MiSsV6=r zJIHfOhNWT~RUgaPKX|V2RZb84hw;0E<3=mLeURmG!Uv;$U%^^1N;B6ehhV#=z#GK01#(sW;u_Ytn_#eT#PB&`b9YCflBhf^Hb5Y#$ z1->`6Hp&^E7rz~jrFRk6Sk2j6b91Co{dz6$FNu!@m+t5MkdATOUu)eS32n4K0qLGD z+rWEy=p5!wI_GirZ~^fjs>3D>c5tNP5y+q5x%+v>A-<>HEjjpYWtZ@HzR4-ZRuhzK z`Ey_Qd9dOs#Lo?1hhuo?+8e9GVwh3SkL?`pXQd&$VOBepBusEnp$sSXN3nVgbhm*M~<`Jik;PZx30OwQyq2H zK~T&GHf0mXs&%)%8H?2n@Auce6MNNR=6$}_K5Mckz0q`clzgdclIODT+}gVwc^kXw zYzH=ehPWNp+V=EO&up8UpBE3VbvbjSbM1EgEpy=AN5LZPp0KJAN1wV?xiAZ8nPJ`#@}X~Kf;!wm=MX;Za)V| zax@${sw9Uj!TegA*>L9l#5S{we4Ck(Coq@xk8r>8(6+NLB}aC@SZuAm?#3$pW3lx$ zN!etaT>O#iWZN-gX3c-bm~UsyZr(2Ycf}7BG)vYb`<29u+woeNKaq{^LVvs`)Ay^< z%TC`HjzHgYE@pp?zDel%obxAn@}o-EpX2L#`HQ9Ne~w--`H$%P1bX{j>NvC^tGmDn z>(D>43ryvjCUm~j^PBkD=y~|4c!tsQeCk{Y>iH(Huk?BW{{NPd6OYmH@^2^>Kyh@x zPs?gSqi-_N_K7HJEzITu}{-c-`}=I4q~$J|@_^utr*X~x@m>b{etB9>Y$|w8@6nl-lhC}p#J<#AUB(&F8qVpHo}Y!DH#P(I zTV=X$GtkdHZ3cPWZ3c!^8ab@5b??%R;+d*1qk5LsPw^i3$JMm=28C93 zVE`8*>?&m%mB41BdqZ_O7ov42PA#k;9zD5r)uhQ$wB3*s;j9!P6lwX%;`pA_V z-N3n@dT*Wk(s8BOO|z^$g0oor?oTuIPS+$yRSY!yN9WWOa9(QsoaHG!;NyYsG3EsC zrY(UaP5<~(Jd21TM>V@0?9k!x-3e{wr`w<{Q{a*C}UC`!07 zUq+l>2Kzx{cltoKmN;WqtrzJ7$*SyWl6}Qh24nT45B>*yO4k=~Hk){g^a6`ZjEol0 zgC&4N^SWg8D&Z2@rh1hpy*pUJ$dk4qgjoswa#tR-LuSb0^4*I>_n|wL7GFsS8 zM$NnQBA=@9D-1xR_?3GYx9xW^yjOYs1>@}`mS_ex1mQ5@;UdnAXUE*()pBlX|Gk8~ zsR#Ie#=L>?>}G7c|3mevvg4oi?MqQ9rJo8ax2?|#^L7t?B6JM%F69W#so`b0~i9Y916eu3_LRrz6t-otAIKP z>GkSS9!0Mk&i{CoM?7~&rmz9)9X;z3uL)i!e31P$>n}JPJ--{>Z~O&(3Lmtg;}wrT zp8LEDM_oH+g4cx!N&=Xm#9{($24msc*ND@7^Lbp!}m8)gSqO}AN7cR z=smVx`#^kSsqq(-68k_6Ct@FpSWAs}!@$tipZLCl1ht=D+<&L{!ZQoKHpVJH!AtxT z9<*2>7~gOe^FE9DpUFC0$$DG?&;J^^1om z2Jv?rN5kKIU%`C*1j28v#26btfjDqbuV=AlVR$^)B95JAnrs5I#g~Xl9zW3Y#+5YI z$kq)ft%EDd#-n)AO!mN@kyRb%5bGBla{}z4n7^0iFZ7lhUqRnu{uJL(bPs1L=S^ig zvM;WupVW zIDcO1yn&OxB~B`yV7NW|?;w~#_0v4Cvbo3QJ?;TUb9RIG=s8-3%^-}=S!L zcbFGHbcX%DY8B|bUB-TwoUcL7Yq2pa{!MyQcI89pG>t_%U;T<3eLi0cUb|>>qxEa& z-rf1;Flf7Zm|0=G`7zrQotvslN*TWCGDp*O>uf85~W(CxXaS{7Ol z)oEAF3A+{}r88DvO8(Hb$yKaH61}|}nOPsXI@!j0n7DYp+qEXy@!!GGjJ{({&(Di!x-!q$SIiiv3wv4LyD6QvHJu%&thyFKjYM0;lGZBer}z0XP4e$k%(5%Z$8Tf@9MxpVXScjyf( zgVz7#t5s~2feTP48E6}SM7&WncD_zwRUc2o4Gw0$<fU@<$t z&Ic8o+cx4jrZTrL_A zW-d$OKbQaFB-lmun}~1lTF_gW%5m^ zI`Gb3&Y_dxmvP`1VZFZMm-E0c%5M{X=?LPN$kz>iDFMIC#5SrJhi{;lT)c;3H*3?1 z6C>#PfgGJQH&A^5q(g1ILrFLMqVe7aesQr5iC=+V(AO^^Q<6Pln4O$MA198HUH1?e zqW$tNTSXAdEa>5*I3AX;xLO^l;a? 
z9mF|bVEh_q5bKy8oy1zG_UTJ$nXyCIYozyNvlBl)0N&Zv3)a#8>B6ul!8>4Tt!WC+ zoB@C0{?buy?NWdH)x}^P)$g)?hxb^c6xKl=W#2g#>*S@d&aQ}EBZqa=^Hi}3aeNPP zuud|7b5tMfT7P}!IGiIt_}>&X-iYp~@o~-{QtM)KUW9WLTVQ=Fc1((?o%^|>)Y@o_ zJC{uJ9M;h_ihGqVe3daO&QEL68^&>KBAZkDYGEAe?y@dr*l^@OI|LWqgN(n39B3Wx z2kYcWC;Ri$1ilc~IaW@bZWcbYb0+y%#dvzdI}1Jzyu-Rn-s`B_)*bI?jeT3t557bE zXcyM`Zcly8n&>_4eIluSb1dG$XV?qgar5F)zwK1;&exe2tzW#Cxjh!|*g180hdMZ^ zIn^3#U+zJ!7yLSfc~bf84)dHc-bu}Er11cl`;`FRIS6LxIlm6?+-2uC^?ccZHMcrz z>qcVl3EZa(^VqrViFw@I3iCL5v->kBYv3Ma)!5<8oR-AbGN-L)Y+P{k)-X1LG;9SC zYz70c9i(Hk8A#2iaDC^J@YchTLGkGDN4}aQuHqp6?4|dI-ioHfW5f9Oz%jGQ#W+Nq zz%KkrOW~6l>7k*c7DY>=%A5R3s6M*rd;iqRep$hFL-$NMrPTDRxx1(_{=j#w)G0_J?!~I&LZzt&YYApCzbHK%>}l$!6xSK!@DkJ zPA_3@FJ_J>GuM-_$4vBE&-k}nly7n*QetWdsurR3>xx6#`#;V*xUmPvtDiN-3j8l_ z?8@tw&%Qhdn-z2UM0RORW6$CYQ_e{Xb8me2148wG_+Zi7OU?+@|MTZU_0R4c=fxiw z>&f2Y*3y30a=mAlT$Pk>UGuaq**Qyn#MTUcy`g4NMrkv4gIa88^QecQ@sHqd(-vL- zc6{Xd-nIPfu@17?k3|+5L-{d}wb;)(D2G&QVf*vicYU6*Tb)auyuUUYhiZNu&MixZ z-VNPqeYhpm&^WeU>HF+&tw-nNP`$a1@kHPd(xK0D9=mJZ+-hIF$KPCS$JLRmkb}H< zQzL#t{8eiXbwwy`%rsDcuRZP_`A`)w*ILfFR|p-}j}?pe%eM zvW>XsS$RG7nL0Bhd+uE~Y2UFor`8T@ls#7*SG6hFo5aQx5+71v&abHv>eX_VRfeL8 zb*xnjc1Xn;RVt4aJa{;id{#fN@p&?Nam_`!CC!RWxjqt3Jjyo@CPiC^u;!E<98@G+~B+GDtr)1--!8TLQSmK-)kN@IMuX=Mua}}}R zZE0Cq)tm_%x83ed2W#&foP|F(%2<^Hqk0W?UbW6680&73qoHX{zP(k7{a)gEF zFy}{YC_OO(T{9WoGGg9MM<>?YWX_qYnlp}0Lg&n3?^f`= zw~)2MbNg`4MC>`Un{!6z%VVtbp}$ato%}o++#!9U^G7^d_tt&h^W{|b!=BjV z2KvORu2NsIhnv4B^Cx+?7^Lf28neczXS#9g|L%;_J81l}M=FkS31`%T!KE>cU)V!^ zS(@^yHa5LF?D^oc92SXV5BUu;vyMG9leJWh$BpbA#dvoA{=FN2!X6YiUI$jz7=#yg zv#-ccoz6bU)VYwro>|LT@Ex54yiXOs`yFS1=5A$eT4Q5&eymP-k~Pt}qxno4tnS+u zU%mSIsYg4<{yXQ#+*86IoCQAqFxT21tYGq_4vV;cKD4pynA zR`}X6cY5>4Nm37>vsq91*!IGC-{QPjT=Ne{cfiA?$0zdhh*8uut@+~7UkZ0{K5fE3 zwwW_PIAk6FWCs&ospjkv7FmNW%wr#p`*I)l;bf=p2gaYsJ>PqreW?9-2z|ew{n(q% zck6b_dlDVw_T+u+gKx7ZRnJuNZ{BPdZW~(2((|ZCras4cM+Y6hE`o0&W?`fDD zc$V#n)7!IFv1T!&--WrU?;*X;c`~(Du>|yFHvcbXpXaCad>dzmVlAwm&t&d0<8wG; zrlaRyNJ~C~+doA-7%g;ND-k;I)h^lXv*PSzFzE zK6}HAiTxs8AR9?Er=+=B6SA$m*^6@u zVl^Y-YmDs@e^0tKcJHS92?r>Lx|SU3b=XC;=VygW&7Pl^R%-PB)$Dn1^6cm9j&zO{ z2KW#5u*TLG{l5{s^1d*LzlT|e6=kljIr69X^T0rg1DMQyGGh;5f&H>``m*^R_ie zzv2uSG5^-1r_WztbnFc^x0*BH`kFE349LYNr@X&T?RCz91oGgX@en_c|I*}@M{j)r zE*gfDrol}kaMS_tez^bSaDB$Z|9B;Q{VB)SJ9DeHc1H5Hs!zM{{sH#BV%rJM5Z-?( zWe>n_AEiF@=8Vw#jEX9cdM(Yj6Q|HLy2=CVHCGM|?VAVQRILc{%?t2PE62Qz-_428 zYvIX;hHI&PSKU$jPpUgGpLd>ir1ovaZR$GtEOecGKFrhBQhFq)*TX2?2j0Da??iKc zS5k_c-Nx^=AI{<&ads*jFRl2mVVEKp|BpBRrSqX*bH8)(*}TkqghHRGf0Vfig&(={ zT0UQcU*$|3}Ae%*M+IkwfIV%Ww-FVjD=_6&kpYkF~_Z3TYf{R zeg?eZtc4ZH`TV_>d)y;z$U0vnAm5puJSge z4NJVi`d`O5)hnYQ^!Gp2?{-tG3A@G$*{F%LF6|( zxUb@_|8#lUO8eb=H-6{ZXru0<+K9x$H%FP{xx~mx-ZF_LK8U}`t&jRdwz576?$Lsb zT^7#E+7mfz^t$litaUlfC7Uur-}?FHpOjoqovL!yh&j4qIrH?z-0avc@Kq;qfz?m{ zutB^inyEF)jqgHNBdlhPnwZ1Uxx~S;KAV`^5&XX#oX_9uRj2MCcK*@4Pb;`c&!5ik zqjyyh{}JBzGIKMEwV8U?=wvm&pA%S{3Lk&8!nsuIL*u6hN^(wmRO`6!QRZLgQh7#I zz3NrSzOWt{Rs8FJ`y%_q-#fqIXXWI8NuLVZ!|17LFe&FqJ?BXMhj(4Vd2=!6&ScJ? 
zNt{Cyv5Eb^O|pj#;U4Bp$x!c3#`}ZN2hk@c)VD-J9TWNa^idP)FXuZWc+ZJ^b}ku6 z+*v;NDBxbhxMv~vF5)aJ<}4dne-Qp~fHSM-Ih8*ayn7ZtUHo!I_;mB+)5U*S{0)4% zfjah?FNf>PM})RkhC=(=@JE#oUDhy~acvp=ddZiNg*N$2kX#>%2(PyF0}qN{>ks7ShMTLg=Vcq@3-LBX`M{Y z80R`R-}+4Imn!~iDlu$XoZ&(JsUDS?_4jT};5VHgMz2DW9aCP| zxyY!__0|gCHq2hyKWJ5n>e)ORDInetzY2Lod(j!D9{xz=3%&2`|1QVt5%9th_OqLF z=|f?M-8WTr>|reP@DFOv{sCJ>0-dyjwb{orzskAyOX9tA8H4PR^TXl#R&&F7bV+ta<=_jT>Fky**zS)o}8;`v6ygUlJnc_SRt3jUBj(3zkZ%6aVXHRFcG z=6^OCn-7M&1zA3;vva-9_j&yMI&!Xiv~y5V}w?cFIRn+-?(g$5#A^(S(@jfu0d@>`D`_`NBeVti?oPFD~7}bxT zg5z7iqK$*wv8LMlfU{TmHp)?w|4@3xTYbByx!%k8l^+zxPNVmUF1(eR&{=Wq-%*^k z$oIa@oVCie*LrW@{#swfz$)IN3LQkeMZLG=DtJ;xLVm{5CmI`wov*K3_`T$V3-3yf z!k?PSx?~~8+COvIkF~_MW^!gLC$~R$Yg(AU zxt}Yfhi?@BOEL4U*tkMt=aXy)WqDgTlsMg=H#d&DKYjP@vYMJ?SxtFT@$tG}9f|*`L5)>ls|qMKaXa9EByJbpk<9(Q|MHV3z_ zpWzJnhH8Ma*6Y~MQP!o2wbgoPZEOBzX@mBm`bj+kPdu0BHZ8m@xt=|c8Kw>;|0n0o z)Qw)od8HiY9Nyz<)>tuVv&Ut`l9Bvo*`DLfb9s3_GCV7J9kSm#otj~bhBeBjxDs2U zp09f!0uvQQhBe=X4C{W&S&N=)9VNe~j4^F~Wod(SgmlBZ>>1f8wZ5y3y{OczuhvMu zb*7Oo!~7>WXXdgVWypQZKU;)jbgtCs_sIC`%y%7UeK}`~^zJh7o@7x! zn{y@S;^eabZOS=2AX{6J!kiG zB;kWcp6bbw^6IZS$8w~heD-sszYdNxp4_lg;Yj~|Mn6aDD@Q8lEP*4f2!-IFn!2PqZXa$^IK#VfB)F6~l5CLTNapY2Nc=vv*e%q6*Ie>V5w9)|nCMGg14?qYhU!hQTPsBVbmJ|&6$yn{Q7cjJFFyl1-QJuPc8 z;XNgBah}!_aUPQ^GQ{LJP=ltqT%2ciPtK#dydGb$p<2}f3koU+o?Rby)rTB>Q zP(nR;Pvg%%Ue2=}yNcr)C*nNi{@HR|&chkeXU^lxuK3PGH(x&Q3i2Jr<@Dq`_jAs* zuwO^X!>JKMU>` z&7~*jJ^-Hr3G zKaSx%5yc<%f%9;_^o8@3bGH8(oTnSM=qu-WdUT&S&x$_!g}PyhPm^D0>Cd}y9&lTJ zeQAZ|Jilg+`Z>>kP0n-uw0_RhSI)Dm+;N`#`pelfe=g@)#{KZ&oRITu;XC3yFY?(} z&NKIZ^kh2t3T)bh|2pLt0*9tJ51i&y{6dEFpp$37d2R^!g^VBQ=1|A+cuyyPJ1*}5 zBO2e($WX^?J$O$Q_yzs_)-k-tU~20ZN`U36B3WI&p|gA8KJcm__i=t9)q<2yNVdM2 z=Y`gf;-B z@&1k*^~pa3Kk9(1%0Ki1u}H>8bW;d!H29Rc(Z}W^BFAJ8JfsyqvjyLM+XG#;WM@ZC z`G@>9FuVpV>ik31J^e#`Pd=V=keOrrL*sq_5NoOX1pPzq`_4aPI1;vjPsBfjf7tkk z-d4XLYI9(B=;j}SJN(u8hAzaW-IF5;=LdX4KMruDzXacq&XwN$L*Qq#-;c|cI7=K? 
zntRSC>l@kw_f#Dbai!X{!HIT!oz6F;bF(K;8qk9y$#>>+q|_PaIMOce{gM1b>}&1i znlA_Qy*_=up`Pp9jVH0j$MB@TAm308x<6=x?JG~ZdF07?(m!zTzVM`;_~X;WlfJRG zizhw8`-VaV^;?;nQ23E6m+{%plm2RW(p#tE8ya?LKTql_PkQXk$t&I7NoUW9C-pp+ z;1GR|pR3}2=1l4G4Lud`4Lv2_P%YmPPx=<0ec?$R#EU70uIE{msA3*lwmd_?dn*$W+4GJltkh~KAtMBrt|m3H9=S=xgy zg?iyj(|Y)cgln!he989}^}?5eTnvFYzbtJA$k9>N^5M?HqZjr@rUvo6v^c*~Qr$!;Qevn)`0tXg0n}dZR0LPIZ)8 zc^Bt9+R68g|H$&Br;g!CV0HC_aPezBc#`@44z86iDTlZt6G!)P`jV#jz9e{3O|Nlu zk;HOh=S(bJukmw{KKqeu>|Dh9k?<4qrxGikrq zoJkFmz+OL5Ox?%kTf%qram3U4zNKz!{h!U5JkI`)>RSSvy!BByQ_1K1#FNMg+jyX?_+MnnZCkjKWF-@;Y_A3j)|$LJ9*C0mP!4bsjr;r zsxu5{GJEHFe)c(2w{z)KIMe>m8P4=pAf9eTfHSQSXZkYF5oh{5pPjEN=cs^~;bFuM z7ZO8UL@Y7+Mt7A^H)&vf2RTQ|l~nG0&$DWG+q*BlKWUOx>5Nyfl+@R2(~- z_`2O-cE$QDo^BU0_mg_Y(m9_}#Q<+D{*I;|JZXgGNh#fJ>S`$82yE=)=@y3-Pj{yu zPp2G-S;;BHmmiMUoFm25b>~U8HtimG(pqA}Rlll$n7Wp+T|TA7F0KwA#0mUKsd1&a z626<>@{3*{oEdu-uC$5#BE{7yuK5AR-6>zv6AyHYt83%^n#UE#cJYqRcQo&_MX|Pb z!rtqjEjDp=)#N{mBdNErdRZzDdN01ajl|RKnIEDTL0+vlTS&Rq&Pc=EOF1JYe0?IMc_Y^l@Lrhp#d|j@Mue)JV$0P9Y zomrQ7ruM*O`k8~R)elbbTk;(pB4*#jF*4ucN9)N6+Vk9uR9v0->_3q6sF=EU!G!zH zt?Kvy4w%C;U0mHhaM`=!LxFp2Ag)e5J$0{*jtBMGUsQZ*`9u#)_Ugog-tXxz8j#pO zQhkw2;unBfuO&ZS{p(V(b?^w~6t%2TPSJq)yVu#AqG^$;UUQ0OyPTrGkN7!715!Cf z%B!$F5|mF=YWBb8cq;d@`9!k~7xJj<;!%IHE1yWYr`8#4cz^~ zgVo-FwC*`YP2`KKp0LR&f{XUWZ=`h=|LLU8tEq`K2%7}*?(&HWpM0p{2>-7mkLbaL zcbqVnNOe+&rE-ae#fq>gEaf_J1?3Xy884f-xcl=AFH_;S29q8QE@o_Nm!~WI<;>OU{lzXmY&!%pB@d!yX5#qu7UTYa*5h`zTyVNfA(T~ zv3{dr*nh63W?D3{tSh!|h4am;Hmmy1{E2JzJj;V@ZPvcz7L8YKk=fJNP)CX$F=k)e z`cBFxI!KR=R@PbhJcj!qCpNC`;Rfx&O8Qc455(0yZ*qx>ddVfCzYl(|S;{4X-(O2E z5q)i_E$4EHB)@(x5!`6pqI{D}R7A}+mrHbQBup)Xd_R|{DB*J-GZ&8MM#IG>&*&a< ziGD^umk_psi`g&grRvr=7+0rxb-6@y_#SlwOfJz}&c0sri1d5TwAVRDw{Wii8NBDI z-g(cp=>D&R0j>cH%mx!&O&-xz9vH#o5&dCApLkE7YAt{zp8B-%o?pGwjrTki3KiC0 zdb;I3oy<``@A<3YJ*QM_Ay(PXd-}?I3fU+4ip<{mElW zP;23Izt%!K-x2Tm6Q8HZd%B%fpF-Y~(%FAuyeBoTe%^Be-t%|;yeGhWR`m0pzewKG z4GZ`49$T}bx0(uu_n1B1&wD-%yyvH%?dLtRGsb_@jrXhwg^KE!nK|m`J%1g% zr|=Z(CtP+>Kkw-)@A>U;!+Xr$xrjaE^PUw!d(5fuo;$c7wv<#Z5xgf=KLOrj>L)zQ zcf@;|_&iyR+yVSO=6>ned1a?0k4Uj``P9NRHRka3l;G=${=xcr_#NND$7AZu@ktGZ zp1vN{vhk=fH$m}n_;IeIW;*Y<-lNv3d_Cvj-;qCPlJWNx#72Liia03pL?+w#IOWVJ zmq_2C){&_**UsM*7q&Imxx%Ok79!4_eu7sGv{LD5@*QHt^U#Kqig*@d$6C%7xZm>K~n=Y7PjLH zdJ+H1p#Mw_^~g5%bm zi+{Gj#LjJGPDaZ|gb%1am--9y$5LO-`iqvO4e7H_=)xZL<{rm3orxXuWHslG^#uj$ z%~f3DeK4L_NqUdAjtmhuH`uSWVC!GYAEaKPZl1fv%V7t0wHCg2cWJ8DoO-Z#tFeGT zB3NTicKH|Z1!-PPP7>eWfGzrLKQ8X|a9?UH6r9i>qW!5SwyAlhey;D~uW=cecUrPPHXM*h}7prHRfIm_=Hr4Y}gY&Zg|J|h3-f` zyzo2th3G{`jjmvgxib29J;VNO!{?zobHjb#P!WARZJoJ0lJZxzQ3p)V*=~G8)R<$> zxS9*ci;c_h&m`wFScY$^nlYe168BJR?t0%p6dhCCc!{YoXXorstaH#eP zV8?K>nhUBiw}0}b-apW*p&dWysjL%VBD^r( zQSgj%ed4Vj$nO!)9v{v8o~Xv0dc~U>a}N;zsXo19T+ zk6gKs&whUNUy~oDPw3}IedR~>#gkVqHu-nO_3ivDel)E|ZsV!&qcPlLD%S`6La7`g z_>svW`X=8IKf0dJzVIV+zrPNC^p=a|kYDIi$d6Lv@8?Io)Z^~wNA$Q&@uRms7Jjs# zpC5g~{K)L-zhHi3YAl=rKT4fZ{rt$*;D#S{TjNg)KkA7?`pS<67oLP4J^PPWehrK= z1g@u^!X{tn)nQxr`*HW`Po{pw-EhZ}l4SAu#7NVVX)*m!O0dt)8B}cgIM}|v#8YBd zQEzVfZf@S%`OwdamxZsV=>%GpoD{!cp3e@eH0vP0;4fDcEqQgiRdx z8hY1c4hn4@5x()Kr$=u5X()RAX`v!|zJ+f*Z5OfD6}*S~4yMr?H*M67r%itOhSR1} zlPMbd;D4lrZ~p0xZ`^p=BznY64c+)3ZFjb2RIi5zN|s(0;>w1KAo#0c&i#zuE~DAXKYUY5+c zx9s`BY5B?A-;MLi?jG+ANh?Zbhq7Z|8W`F-gnPPsTpu19D~r-Uh*;EnnTN^I8&8Xj zy6LpPSJ{T})gT3s zC;7eU@u2T*XDm&Ofxbciw40tvKM#kuZu@Z6Yn%9|>(_9;H$w@fCtHVy%hvyvbaSR9e%>||pkK+If z_>Sgp4xgP%(#eS#Lj3Dc;$S~RJZv6uvH9f06wrq!y?*tBt#0#5BcV4p%FKnc0KaiMlQe{`Z~>G zFUU3~yYV=FJ};c#ybT}0bo?WQtXqQbJ)f4J^z;SyHeKa8}vuP zwrsGIvGFda4mGygd67c;v|?j@`r!t8w0DHLmwH+6#3uZC=22(PK>n#uyKHxdu(@{9 
z#@UF)(9ignjOR4Ct6W#cGV=dvSD9z=89AWwV`kX+=y=)3%FlDlIKA@HTz#{QQ$Cm& z-czQqPEkPaC3x-#Jo|?(6T6%B2&4@<+*|xbdwKpxmCq=%VH@-Icw~$WKamwXk-f|E zcZm(E|4IS#(1=;6cB-*5#ze&e;N23Fk+Xy)59A)&E0|sUL}G{98ZYHHczuE{VB548U(|%>kJ= zvptowU0Z8f`fnw;MEp+6 zS)0w|ZinlqI=y6 z*Cg(n=z3D?pZ*cvp~v;_cflO3=VC7Ulv_hGwj`ITeznF+#`hD}>zm3)k+^R=)30@| zOx3SqSEN6Vx%vj2Dp$woU;7$M+`mQsR}%Fvva<1a_M0 zD)in5Zh8vbBF507jgg$asB1m`r-Tt))MTrN8&-WLEg)Ah(1)HeuYBbLzaUF?INAcR?RJTJThyJ{FrV`q(AlDe<>ROh$Yz zk`pR1mxP!KT&>G>=ue61NK8TE15!s_pV!wfx)F&BO+;g&FU$p7P!FWK zk6^7@>|E1S@H#k4YKnoeR8E870_mR?YIsVY$@9&|zBPf_lJU<7&QkR>6wWg1X(*hP z9$Qa?z1qTAs-6aWtYEASg0Y0_g1NTQhp(CTL29x{t%bjVb&U8&4*mvXFH_h`d@%X= zcHj=;+c9+YVpsAM9{ZGMF*JvUrc(B3nMV$4Ofl}WacFKj3V)Fpi?!#7UqUpPqQekv znb_9f_0yJ(G_jS`N|b8|ehPr8Lf8fiSBYM9s>9G1L@*U?6hB1m1qFT!GuGm#5nqh> zD{q768heBIF(miWz*W+|dd|UJ^p*VBJpf#0;i^^avGRFC1J^(BW7=uz3)U46a=qhT zAUML%7ff>)`hp{1+eWT2z>!WI=K*{!1A-g4j_7QT(f*^@)86C-t!Hj515YhV7Ejs6 zR$*xRRmRlX_x^SsW5IY>`c}EF;fpZw^IxvgKVxoN`3%N35L|Ery&uY~bIk zd3M265)ayY(FA?tkh!|xDbXB@K1A*(m`ZAx+-8o^Gz(KPMzwe3`xTbr|LB++s##+) zKB99no28EsIP^q#P>BIcwH1zMcQOxDyf&-nZ4hSW<=(Itlip+T>fV4vYqo-b7AGe zTSD~7p^F`*6T2IuuvFwEKFKgY1Pf;)x8iNR)QfrCzEO<(= zR@?morc&$Pm{ZTwH-f1Gaedn+rsBJTsaBbNDtnKN*>m*Ul0S+6M&YT*^H}}bnLZU? zbe+p%^ecT)dzs50wR_SJ24AuFX_?F;)u$0G6={R&Q`u9*Mz!iw14rqc$!+v$6r)O? zKCAk)XQWSK*QeoklX)sO^dD7U76qP*;HaCyMU%ltRp6wXz)Lq0ySjn1u+7+2Cp0ki z*ng#gxyA55^||~?{deG`=?Qc&7lEA)fQKY6vl;(`Xkzw(pPqx)Kh1t=;iyJ%m*sczqB*;H{?5& z;GE@PFIg|sSa*W0tY_LFZNc_E1wS)#d$YZJ7`OfG4Wbu$m3e&xTPr%2A;h-^@H5{Q z-l0s_mX(l+63_|@u`$K zh-ZQGPEIppi)lR~W0J@7*)%g?8R?r=ni={d6Tj;hs+nP5*>{;>KUasEnJ_p;;#uNT zZbLH@>3iX<;wKaPTKXMq^+lGg&IM0J;{=SY%^xLsCW$mNc?vgupEdt0XH+vIF)c$g zQy7#O&mraz{_S-V&yqNn_}!bhztYTrtqtG2Xl4pyV+PVz_PtUt-=ko~QZ%lDr&?)d zxRwuVYNBVf}; zzFWY4V`vl#I3pw{AoW?-V;?2otCZS)$rZ#4^857dYSf&}rj2ZW6JSZBswBjp+E2a-L?0Ose82VJ~ z`;jd07j2s>`WS4A^w05KSLugo%aOlKlz5BkL(#_^VlTRm@osN?8B=wZ2Q`KoY9%HV zoU2b&Sj^PL(683m+S^((&9JGm$IJMtwnxU6y_qqQ?+F%rgEn_GzSh_ZA6)KWY-Qdu zufzxW>P-*oL(|~^rUu62LIZ0ns9($;^a ztxr;m+nQrmf6gXP&EqJDiBXCUX0eI~88OOX?Zk#8M!6H)u6*3ADGzg7=2f))f_n|W zw#+SAM=~d@_FHSI7k)}(p2TTk%=XWw{c~u))sI&DhY{DRHQR5+wG0i95!bq|owkb> zMl?Jc`{Sw5!gxj)ei=&#lVR7vTzn;U3=K>nH9{n&)th-QF|9SLN9*E~Y3^lcVPt*o z`<+`q1h$Ca3FdtSPw-yh32=z)E5x>1;fV_HgzU|U^e~s0m_qPG1{~)QZ4fQY8~Ezu zbubgqz}Ruc6!b8S3RmQtxPtH4a^9o3g4iQt*Ew84U2zjvOd|)OHKv$i!xZ_dUdSP0 zv3AYOf&LZYpP9Ou_B1no6H^RJUNf^a!ikB4wWXIygfC9FUPje6WgfJpm)Qq?KNDJ+ zJJ?%4hgK%i-@-9YwN_>d_9O|d%+ju^mAR1i$7y9$TP=)X)yM2=TA6U4R$3X=?(U|Q z@wTm%iJ7-iY!t=7yWzfxaZzi%%v$;@3B8Q!LrX8SHU=A=TD^?4yEB@Zd9>fAnQ1*o z(WpqAwj<5VUi$k#JE578c3L{Fj&w6P&=cuqjCq+rH`A;zg@G@Ybbv4Z6u}p#KsR$f zdKuyd68nkC*%5yQz6a6Fs5&TcPrGJD{QD~A11}9`J9U-2gD+w&99ho7vM@$lx*3U| zEE0^ttKDa@I}-P4Uo%q<&hRoPC0Dd9%}jq{6c2Ou>Ai5g8N_|WzcB_}Hj>y(JiZX! z%tUlEgZM0%vBUUJBlvT(v%ktE=FDL*(=DNL^3s~Y8ls)Co=fyIW6;k?ozS-QGqZ^Q z+{{mV`kDVW^)s25DgBJ9_oICL?dWILc0K(}q}?g~jFsaP)fU9-XU1Yfx|@C`hc=t~ znaG}XGWwaJ#Cy=sEbU7Andtr+9W%Rz=AF#M8k$OMn#ANzr-nw_CN`(DI+}Xg9H*m6 zIBt*0e%g+XW)1P4p4hl867RwP(it7iztDzw9nG+4%qP+wD>iWkbTpARoCzIGIsIeT z(I^~Z_HmnWAD5w}x!CO6crDHBsFo)3Y*zoy?1V=9S$qJL;hhrB- zG^OZilE+g60?WzAk-BpRp1L)(nKSz0@s#LlBo-uisy$uJAHY*DB%!PMt4&uk3~coj z^H6FjTG%QXUCnHTv%0pf#{OI>U5%mTOr)zhfi36`x|+9Xr}#qc>+WQ9HP?c(mNDnM zny#ib){52EJkIl5v29CRBRK1e($*-v)d_vgU9{J(uW4)C)(Fl@LSJ(?cq<(nc>77{ zYp$X#2HrCCHJK*XvbV{?0%t;BV{gY7t*<$e5v#9JSZk_*XWPVD#P397v!CxuzuGl6 z!%U5h{h6%3?F`O38I4W4%q5$~#@>d{qp^`b>5R^%UiE2FU`+&HwXd^jI&ahLMzr$e(Roq%C!) 
z5wCZkj{s}=v272rpWp3b4AI(o(zJfW^835z7T@GPIPdKy;#8mToxW)CG-vs{O)kz9 zMnk;ZsrgzoZT0!!p;c%(>Zzr=n)gySW(fFL;#@=cecQuD{%86U@2@HH7Z9V|$Mqy% z`6iddpO%~RYW|8MA zIzwW)k|*;lF623Wq@3K2!TH9yGn*GWcAj|XYWj9OeS8&tJ&r!V63x?Cz3IIFynBml+Pl;^Y?wjc zxf9$&+A8<4oNW+CKz8XD)rEYF^$o zLYMo99`diDnzt8&F9Qp+L+|c=`0cle4L-roMptg=88Bx$&uopW@g1Wsh}?TKdYp}} z$)W2)EmhJVWz+**-Di6p+R6T_(LB|lC7;Q;K=M3;_}|2Nx4$j9f4A{ocU6T-$*mTx z#0cg_59*}_9v`I#9x?P8QWx>7)LRzqz17bfc`nh&%CoC}nmj)IX;yJa`#`(L!H-cR zvMAu>IbHnp;3tirp8R}F`{a9^9qF_B*u^~$^Y`B@tRl};bOlA!RdGh{_cVS+=~vN8 z%6IFZn4`;F^d4(rF3kUs&Cgw2>-JF1&wDr>-uXQ$)}HThcpvRu zv3BsE?);|qr#rss9KH3Mt~p!3`RHTXwS(ttzAN3@!IkLFsm~YO!n|R`%qV*pqkBApahFk*qx_!JgD(2Z|krP5BY) zc(_&+tR(&*Yj-!=kH0Yv)|`^O;-{Z@fVzIh{7AHh_ne#j8Wx)sG;NUFOXgI2_mlNI zi}kD4ExJ0^EqUwtp+(fR^c-E!W(^weYt_eDYHNpj_5Cd(fqh z(2MfE0nRVicq&`^gERE{74e!+-2+eHc9$c#65A#*yEjCKO~2GW;_xrTRzCgP2dL@n z@UBFUYj3yJKl1mL*oN{R+Q9(#U(fizV*Wnls9|HC{Iuq6xvtokGIaBSJBRC)9+c<*%NfOt^9~v3ZG5Mcwmf~&9KG!IAq^GOhnVTg$f#spZDd_# z)TH}gWvwpE%Al@<+xug*Gt$57u_gAlN^a92)~nG5!(K_d%Qb3j9BiqQ{PRGcZFQ1^ zV8m9aQReUjr;?Z9#-@mF?g)0Kx<>3(e&~la6^|!tXEw5jNS$KK)>!?kKT^mZR2V45 z)-6P1Biiz-$aS#nj=lX)6Fd2kYeei0&r?$4_OHM2LFy@K2RC7JUv%9RnnZtlm=o#P zpv9b3yL#b`q3>aPynXt3?|G>BroBAp?3$X#-y#S10CwmT?9f53Yur=(s_6mI9+YA~ zuGI3qgSEb1t%tVr>&`4Mb;-O^)4c4E)YB;nHnY~RLU;T=^Jh8s<4)Go`(L9j-lZP@ zkNc!~d*0#kUpTV2zgJc-Z;jR~_}azeKb6m(#~e$;HvNR3^Bg_BAA=kI&U)Y7W9ypN zvRd1y*M7GA+Gn`Fqot*4xz;l{2>bL;*rl&Jdw5r1m#%j7@NPBj(iZH}3ezra!7km| zL-YLz+f>7P_tM_&%+0_pN!X@w2F3}4aopgW5bZlkJKtEOePYKqPcTo9F;DllV~=+5 z9Xq~J_J}>tvPb_;?v&IOegpr@q&SQtb!{w+<6$2k!+JbwVw@^4&Q@m*7$-k8Ozg{b zG{#$<6#qu7}Q_js0?s&XG`zpNRl#V&0WoA@DE%&qW@ zvWwuE{LpvLDKdO#CZ2Ii4ezNVb-BL6Gt8BZcU_`O&a+?_Ys`%Q8@Av@Y{4#cv77u4 z=)kC3vs`bWZ@Sm}!{VD}VITU?U&GLcjjf)2T_DE+9^n`3|djQ#aE`|GD*{+K-$8~Y@Ctek1nyB!}w;ypHs`JZNg zdz^E0W(wvv_Z-7cN<9kfFniqWX%N+*DU3E0_=IB*RR|f%MD%-&izQb|CPVor@!9E;09l% z?m=`fHpW0|T5MwPm**CKV6@f11h2v&q=wd;2U_!k#c11wA4pw#iz6s*z@Fb3ZtyYq z%i;x*G2&bR;RLT!&qK~pmO5Xt`?}PQ_ozAh0k1Ls2sp*2-x{1?<6r(+f5CNQXcnA6 z{2m_m_ENCn^Fw~1e+y19B&)CYdvJp7;F($U{oU|_OwMb0i+%qO{C;ttzTO*%_gQDh zJPMA|X54XnaP=K(FCSF>mE%tKcmDoDdG%L9kAk(QfwPRx*7fEH zH|XL2k4t*^FUSJ(Pw(OX(Z%WhANA=0H|P=k+r?MI4V;P_oR9zFS-8PC&Ef`q+vNs7 zGP%KKxWR7bt8jxf&EN(Xbb=dbS|2ax7~*>o-jn6U7qjyN+9c031b^0a)`5R=H?eW% zUY(?k+hND?CuQSe?so=l9PKy@Y}`88@05+>dCoc;Cu5Ydab4EN-BFcBbU`F)RoMa54j`>~wsA^WQQe${+Zq$<>L)=EqlKoQ+;9hRO-h~ z*;xNs%ZO$8Y49bl6JPQh;&1ANpJgM@n8~YIy z;-FrU`g>2u@wXi5f>jQV#b+WhByvTFA?11>5E#b%OP=gce14=NmO1bG08qnUA;J8c#*v#ryv6<*vj=!IbsrVshu%0#$Ymjv%`K|^xD-22O z(u&D6OH3xh&C=<&^uRQD)GT5$KZKjzjz&+onVOe8Zxh%(!1IkGSNNH83Yp`v&u%^Q zCe~WH$<4MfrgF^&){|-rT!^;d7cB^FV6A_~J>@>a&0aCTCwxH8U5Q`&(hgbgfv>Qx zi3iWbCKxfA?9fbVPkfuXBv`~)_t~Kh*N@Z(vF0~if4{Ek1kv72#9}0$*Lo(!W4DfI z7)0Er)>)9D;w-FbiL(fAyE8K#j&p11s!Yy9yD;1T4BXU+k44A8dTyhAwVNKorj)z- z2SuYSHl)hYU)d1OgHfF97Ho*l`uC)3BV@hj60<4lM7-t&o>9)?v3OZn^1{V-FhH6u+pP%{C5xrk=qTyEz(Xo9zDA%`%4=0~Ie`eJ^&Qf5a|KqL0@Q$BFM# zse2)7yA)rm_*`X8ZznD+ua(3__F+Hv^K$?j@(J^93pQk>>2uw~tJsR&_+GbQLspu8 z*FC)UCEl$5m*zOW7JE_5J}dRdK4e}=pUvQTd-J?tT(IZ!;DQAA>dv@8VlUn-Lo;IFg4ozgm&65(!KuatZgb5i+RqmgV~NKNQg^V} zsfGIzOGi86vv9#~&gE_kQi=>;4~a_}uG$T^t`!IS=jd zfpw>y55PfB%ev>f9jyCxc3;a$toum5h&#c(PSd)N-18*(fbF~O^MN+kJGBosVj`qI-M%{S?MCdBU;;JVDOr z5S}m@?Uv{cmY|!GzsdJj!w-t`LN6U5wy=U6oHu#i5;K0&L@hFlBmDTBJg{=?b6d}Q zgun0NT9fcwN3j0ud5Y)1xN4*>*ORrp_zp1rdp4{OpSQ4n`knj_4QTp#g8Q>o{6)CC zXrVSty2xj3)a6GJ*j9(lhtO~`MqedtY?Yh{QP!te!LID z&i|$Vy%x?NBK$uN=a0Hq7mjZ4Q`y5UoWB;FZ()7WfC|v;A3vi~I-R zAlj^Zh{xPM@GbOjf8h58Gw)G&fBwvij%N{{;cqW2pLtQJ5By;l*GLBs82*M#{0+pB z@G%HS5G}@F)88-%e}mR@>~Y};8kk@({)V^szHo&>_#6(6l1*wN1eQt4uhxuL2 
z{|{Po(q(W`nbVIowN%~0=dQWF8`v0Z{1<9tBKH%`&=+iDxObPeF|_Thu`xVL%Eo*# zHs)*J9J}oJJuj16;ULGtNv?&9oQodhvyjs=&f)Vs`I8%oK}7IIOIF3s7FYJpqi8?H zr+fsTX9yfa0T%IjMsW;oX6F0KlNJb%PMrV$PMNFw1YF**DuA# zE4sr=sqI*vMtq#VO>=1nD}P&8ciV}vk5`6keqN5ga2B?GB6^8phbJiClkQbsX;CY(C@8{O_Xv*xq~jdky=|P_Tkgn_9HIa&D>Ar_Q4+`TReQ ztZI4UPh4*}wL&#~;+pmi-;?x>=2&s#4ZLpYudP4KGnP?rt@%7HJZRzTm+xNqqss@o zwp_8N*ZeC=GAh*B1cMj;`{jEU{`m3|>K;l>+`$?AKa2m5U81eeS`UFF4NW z`}8J9_%rH$7dVVQYMSDZdhZS5&no3U8;-VYc#(ZT^oK3{Zw}uRf94QkK2ifesLlWR zUaq$fP51SVqKr>mUn@J{=$mmM^Ub19c5qcEvy}E#OK@0b+66L^45B^aLWnpUHK(=&ALkkBmtU&A#PForS?8CgsoxqLIVpWHa<9Ida!!1kiqlzgJH1;rbOIC4TmSB}(2pSVOXe`XYUBQEjX9~{gz zj@|1FAAQgnb{EXQQLZzCvvi6b_;i_HFWy|COK$e>8ULxYGmn1C)&9?~R~)8YHJIj1&X>Uw7n*OfDF|L1OUIn`%tH$8Bh+b`Ey>n0DK zn)Ghj*Qr4)^DUk8H;+=Q$QmE1b=buCNM28;Ul2v>!aT z^{JxkIWwS?F`|!dSVBKtoYmXAmpBLcB8<^b>qNKjPM0yt3hV`&Fj>_Y)zgP%S)57D z_-v+c2k|@gT^snnFOBDxn!sf|PZKd0x&I{o?tW+pbw(Wr(ScW$Fg6n&DhpNe_i47V z5iN)KUYo$z_>Tk9{%MW@p=oJu8OMVw>3@a6JQc)FvV(h=H8CS4L@nJ`NCmtG)y}1gzGY_>oys1;T&SYfl+>-J)>Qq%oKTL`M;NY!3vVB<=MTD4Pv7G3l@3C~ ztD*a%=5lGj#dT%0XkLrh6S`` zw|O4O?h#tmYT{L4@*x}ZJ<5-4x`k zj#&<3a?I~pncM4z%**i4bbvmQ!{&mb&khaINEULtJwIaA&eE{UCz2X)N6Us9@eh48)y-LKeTh>9v#8n3*c{}(pG?uhnY7^m&o|5A@JW4W$%|@a+~hhNIm3F>!uxc& zmVEw2S5;_VhE^3Ehh4!QDm#bGQMN+z5xJJw1vw9ACeOWkWS)1{xo+<)V!C(pj6+*m z*2`R-$$RolHpn#&V>b>=!hT)mdPmNWE!Bt_n)jEQuw~S3okl-?iVX{4Bjz&S#V>M{ zy>va##ePs#kB_Bk67zp4aejr=>6SXaaz4NS73)&mSK_YZwCm_6^$UuaTleYX z858-eA>?T57T1jjllU7Jb$Iy7ch@+8rDRo*-vPp2I!& zo=_8OPu6e=_Lez)a4Yuq3HnLKS?0ggxvO#I1vU1d8PsmmiNnjaq`q+fXZ&>!^S9c$ z4~HI~`%tLFk=>Bam}D@2r!jwLu>NYCo?tpQTQsso%w?H}_o9_u#=2R=_i8u~L^!bA zV+%H>eq?{I``kRQyD-P=VeD!(ZG()Xyx+!{%Ggz#cBpg#V@Mw@<~rm<;fE*+j(vS` zo%C4`K3`A2&EPCG(VG5-@e}=poS8O=ShdV$YwqrHXu;`bUs?CB{pg2p7gtw@imOXQ z!l~t2*~}NIyD$6DW6Z_<%nOX;q5%XhxXg;RFOI-jeJw)b~(=_y58$}Uq=59G5fcA zLv39;>wRKct>k>{EX9spiVah>qJK7CuST=lJj$qdE$difxz?E;FR<>vO%3lR#(Axa zHOsij86PqpGPc$Kwx~|lV7VhNxB?&8aPC___ukNY=6$*&M{=Wl*V2v}#(6Pzs+hlJ zGR`+M#?91Yx@DxtOD;6A?J``PSUp-^sTIcVm^n@ zGW-F3AH=@QU_6SM?_SfU2!;`RQZ0XHes8C*tFh~)*c91!7lZX=9?E-VPcp9xEheYN z{pNEEIl}gN@6@*5zUI^09pd@FO_VxDNQyKPBcz8J*xq*GBje^~g+q zGz|M4!;h#lCw_j5AF2NfKjQBZel!k#1a4moKdNtoAMG~y(GJ-U+Tuqm6hFGVZGHp? 
zOT>@lJmAmJ6Nv_D9ebs4q`mBOYuV$Z_VZyl(tB<2ByFTtwMKZ-MermrN)!069i9|v ze`k4813XDMoGj&op|$&>EwtvACX7CX~B$CK#Kc6m~- zljBKpreSG3Pg3I&$CJ$d7Cf;A9JK$XNqVKplN$f!D*b~Cuhd7(bNkgfsBor!!kIEE zDB*77NnlLLD{5ritm_C*+6dl}>+J!{M7WZ07|yx)++2xkD6WKGKyjr%jMHm{E17L* zf+reWsYmeB3oF8N%$z_udvTw|k>FLrks4R0!;yLfK7a!YNAiHf+T=*zg(Eq0ymGFA z?(ES}DjW$uV{oKAlOqX+9f1G0pIQ%Yt|wYR!TitCCr#jM{eqrxT!}M-;4&7sYMx4; zf>93=W3;%^(0Hyy`-5=Q?HzEXUxN9%I9F0@x+AXCNWVq6664s4E1B~*8LlMPiswq= zGmyGn*dFg;`~r>Gisf*PL-$^*<}%!8M=P#0ts?Afn=3_Z)hXvnw6jgFL_eHTt^^l~ za-~~ZbEQdfT#5U)!N{S!tNG6s9?%2^?`O((vV))SxO&jU^|L7advEs%Dcun!6 zvxXn7>x|aUd1s0rCCiU~I!^H;wRR5j-sDFycBwP`XsBF6Y)X_LEw%BZrNWP9@fqPq z-{CdcSW>*5Qv9fU#*!lYcOrh&4Pr@=_IH*irD915c+!_AmZa)!ov~Pw$&*sCq=dS= z82sE8SLy<>q}E)i{a8}OR-JOL1Rv_Av81JTUfd>E>Uy!Hn7raBS86|&O*L+()?5IlOivmK0;BBC(|5T5Mj@s5bJF zBu~Vamvp)0C6QM%iQ0PD-AG>2Q1Xy2jo00gza+Xl$y<}WB=vvM-SPJr-5naPHRQ;+ zduR#sk}3?{o!YA`-CZp?3C)t@FD|AZ zRBjS|(JD77OAE*6CLKnTBl$@$qjR+9D1Bl1Nf$9@%#&96NwurVN$1fIqPLqm zLFMg4^>$`%(jQcAlB(Hq*5oCXSA^5$Y&Cmc67$xcm-J-U*V;uf^BKxZx?A#+h^I#K zl8CR`^ODfnElm_h>J_K6YmVk5MeLQujrMZ=lgdkC9n0FS{)WqMt=~4eQP++m#f)>C z+^92gB<#xR&PgJU6oWyc+^GFH($*_d+(>FRe0*&@HyY&|@(%_#`hY%4aigQIeA8GnCO2AovNap(xE}tNNX-VgQKV)A+(^}I_%ojo zZq&eQC*nv;?LM4H90|@7U$Y^ZoFv7K;5OGrxRH^QG%OJ}0*4DX%8chm{5^&nfz|g> zOJG79+-N@BXo8t59m9>ly-{wI2{$rxl18?{jo@H*Ze-6#k~4TE7fJGv*29sW;3rYN zhAs9yB+-0Hid)NAlwL>>}+HHbDQAx9z?sd>1`k>EfPjzpid;z$`L zN4h?mmvk6jCAmq8BTX~&lkVi2@UAnSp9H>o|H7;F<}Be+)Sm!TD4fL@CDh%S{Km>n znrX{TYT|hkxh4>zMIv$`(RQGZvy*`_WciiBQd34@|xmJUryfim0MH1 zDOuk1+!)r0%IzEN>&KcA-c-x)C(E0z<$8-G=h4*P!J8uaN$@6>pXB8;!kgytnruu- zUDIZ#x@$~{{dSDW&q1s3F!B>w|-(*V%Jd&RCvO$1$Z&j{X0=jhpu{llD*xXqu^!Z8l{W}pq z>gqM`?CtL?PfFFiOTd%9BsK4dB`rOp`AH^EO4Yn;UGpv;Kexq|&UDSYgj}ip{G`*z zmB=^jruj)RytqxS)b(OXOA~XY_G3wth9$+7lFCm4*DST?OLr%(RDD%UENQ8xUGCdJ z-)Ly?zQ=2dE1ex&X%sn}ovG{a>7*1_N|r1AWT@gwYV9=f-sDO#cBwO5>3puCY)X_X zMd~`hl~i4aJNb-orOCW@B9;_mr@Ct_sl?2i7A;63uGCdyN%nSkmLsKNNeMU-@u4rn z`AK7#?`QPtKPoltkjhV@kJ{o#XF8T7b|+DeQd_YkyPZnKl5&DeI3GEF ztlQ*AT`!gtgF~WPyY^#AuCf$AlDc<)FOTO(SNLYrH^PrD;5EgM&KiEysT`$$n~>s1 z$?~IrC>^_Og{t3hg>NPAg&$Raj`|IMEg8FPhOK@Bv7|`-2KbSx-_W1?2tUf?HQ88_ zx@LU+hHiL%68rZKOAC^SA9eLulD++%bnQUadzC5a`CX5OFCSdz(;Qn93jTuIra zj=0j9jwOlhX~mV=i6z->)hXvn-87a2SBe?yHn~z4izUTlk|_e2Ob2%asm&)#6J3;JcYM^Le?F zkLz{Bm3HtM;YzRZnrtj7-cChgN!{%HB=+xhcCOTwYu(w~8sSB$T6d{hcbs2yV3HQz zd|7g}?uaL;+@v#7>+W>qC7lJe?!e5-4k>H6~yB!#KTG&YLyQgNmsC9=;YF*2r{g{$^ zF3~qzyy?qc>n?t*+vH7MFQ&9EF>h)=rgYw*q$)p%+VG8d-lX}K&^N-H zuI4qxo6Z{E)TvqyfBc6OZ%URo{bRAkn>626-kZEB#x8Y+Hyt0Sc+)!T{G<|FEr-Eo zEr-jvkMO2pymlg{6l15laZD+)e@A#zvHjfH-kdu-D>T@V-Z1g&ZJayHIji<_XFqdg z1T;tayHDd6407gd4rk3C8=3gL*+HB)yV{i(3cuGwZ-lS4ciyaE6D$4|;Y+EQQYxm@ z`IyquGj@K`>BvtyJ7P*vesqTNlRAnix!cH1YU}(Y`MhPXEN=8=k12WL^3dAkMqMwa zv@|g{YCop**8xd!qois#z>Su6S8lYlB!(NU(=>T?Zl)IuMnxY60cjq+LFovGd6 zy)4CzlI2GEtP`}VYVG`x_a--ru}hudMjQGoZnV_KjUu%h;6|!;!(l!n+~^}-I}uZg zu~XeOrexp0BiyKq$CT`Cj&P(@OeqypI?0$)0rotVr!=KYo}UztrQ53MaHeBQ33*dH zF(tbVvv|{&J*E^t)@|~pt`}2^!6(V)DgCrC#haw2!w(*h;Y~|5&FKr#Hxg5Np4Sv_ zI%{}Sr)oM}F*?PYlI2a?3KVZrYiB5HMtIZ2&r#FiF0MDhR?~r)QlzE>yh+t`c$Uuy zZ+eo~j(Ahpl@>Vcas|{iZ8aV8Mo=@Zq(X1BY4O}^uF@NaXyE}v(OdN8|9`qt&)(wj zl@8K&mTTI<(teNE4F&6!X8peCvUwGc@9nRJYv=X%>#kg4INJJBm#3k$x5qpBI~BT< z-zWAi^bY-TMQCrn7VfEOIR)69W3Jr5mFSOJ_?u_9<}2lTP32nnHw*Ja!6C$HT-tgq ztzx#b@An33gYFnOUDK*c2HiezkV~s#@7`hgJcTn=_tJdLHqNx$S>F4ntcWGfK_jmF0v@15Iq~b~B82^Ge(hvEma8;aebI%3byP}uze#Ho_ zs!{5!;SVoyxEnlanlIg>`N|#I!3i4{)n%L*`?$S*$?~JkJ+*_uuW40ckN4C5U0iSb zGe51Xu>iw`{f zb~AdeL*$;lbm$U&|4mwWlPf25^iD0jo;X_QE-f5zrZo(iTYOU~<9993a2?OEoO`a> 
zu(<9q?m5lj30}MNvAStnhXe<$e5}qF5=d``x_n)BPs)3k$!dMz*7Y7JULAfv zW4!)>t03^`Q&hdD4XO9NYBOd=$+PItR?{Vb^-lu;*Abu)y{|&C?9zQQ&RAqHho>=zXRr=y zoStCW-&?B0*5omtW&Yjk%<~qoh8FR?8roELqNPf%zXh98KQiCzKDW^8E-dhR7_&t6 z?ZC&%?x>t3`e`xOspQ_%9C^X5uPv^Ve(S+^4K6gm|I9gV?{Bi^=pWH%gXmxB!!@?~ z%Nh?(H~Y%Ezvts0zFk~h9V)Ia4gHC}kZWb5$E;r{&z>E8j5+w>Unc4GH_g%09BJMo z)H2!ZObZ@jzQ2ktF~sNB_KvlYp4k&Oss7}8hg^99xz;Y8e>>0bq5XDymCrgaPsr$YF@fu*0Zc_HNVul z&Q|m51=jjeXSTPA@m)(lh<%XxA!AX`T&e!IMRl88RiSbRWAbTB)o|`rKlk3yM&^9F zqaZkm->>D~HH`6M>{Ky-%VdnJ7~kee72#V)db~$m1r0j=5TLJ1OARf-ROWBfH5K8@ z=jHf4+_S)yQfp*wq+})G|t?S|(DAK+Z2TFni4LfZdXx&M0AoZOT z2TE}uu`$Ajw!vowYl4l{y6z?%$Yz5&6?AI{|I-mOp2g>8Mquin+#DU^%R2DIyoB?JWh?qftH}53|L`0~FVDY#SdZ@PtztjJi1(~rm>n7hcDPc-fO5TG`)Y;0pLkCbv58*z zzc%xK&lb%mpK+S8pu_yniUF-%m=~(=kvI;t+lT{2_H2m*ZNn#Qj{~hun}BR3`BvVk8^x8KG}-<%bT;B6dVP zVzIMN(lMe^VnjNzA&C<;PnCFyBPh99>G(CL5i82%wXHaj_(H`G8jTI52UZeGbbn`j z_2mZN-ZA z5G&e0iulD8Eu2X^tQe8R_x4DPi1CwaZ6ZeW%0gm9ay>r30YCLH{OrMTw2k@OaeOGZ z-T07PL&b-P->JHF#l%;o4ug4riH(*K+mXIJjQ>15=hHCuLiDE@JORVVYSBION4iz72zOy2Hi;59t6C=ta z=96p2iSh#zpF7B4U}B5l@y_(K6yyMy%)oGgg${aLn1C zSdnoK5_~vjd<~wI9h@E$D;i+Nit=M(MKYgGHdfSxUH=@hqIGtkY?LRppJ()1R#H4E zsTvGmkeJ-&?!=QEOJaD^4oyq*4ee|2qz~z%6i+&9cv7coF#O@@g%i& zZfDJyJSoO5btYEi=X%Pfcq+mgdzjl+gTZ0eVEAu7BRuK%ye7$$Vr*15=1Gw~JH?Yy zJSoMKJ_k=4nc_*OgC{ki8xUVeSLR8LqQ^#0nhZ~p&sx5n6i;fYiXY!@!jqPs3{RTV zJ1L%&R6PbTO-!D1cj8HZUL3=d)@fQ#-{0sPi6{AaP4T3&gC`xs)^(;H!-YdqJSkb8 z^g@Q>Nowunv1Ux36l0e(geN`9YbP{yF?OmuYU=EJ zc2~?RvbQhUc+rg*tjd_6~)@1_BD0(&z=%Z z9qYL}Yw8&19d?e}9W-^N*t*0VsQr4~qdXmRpo_V7U*i6nv;5tbtcsm2uI!yh(PA7U zPx*+;83+-(Ca@bQ;d6Gqes|(P8y3ZIprxAD%eRug5e{@4uPF|6)^MOs)mHfE!W0Ke zmIM8>TX7(@cHZH=aG-d*)EN%cmuo1S66HXV+6r(WRa@bDJ|i4xEU(FOpm;mgO*xR- zqudLKp>@I93Sf#z`%)YziM*mKP8$c>aS|LzeJ90%QXD7&2a3VQU7Q2iY*2?BDDv4; z!hu-NDGv0-)>eqe8d1Gm`yA+KT8aZnJtN;w;yIApcY9BR0~PX`;y`B&2kKN_(KF|# zI8d@2=$+wp^Fn%lRG_a+D0VfW2+h6DYfhvGny+6w69BDEFZK&rMvAMPU@NaHmb zy&U{!huu%pUG;KsjCGcFqdV&5?CnpMBc<|+Qh7yIUeRaF|5KJ%H1;I*a@QGk=u&w_ ziEGgjOHejS>E-CF#pEY-wY(y+L!HpeMQm>`^M^T<+U_Fm7&t>qWKhQ@hp7iD9NmB=>cv7-F>0^h*lRUmk*39SS zNk8U#%BDnPMXh+!zw;U4NiXu6Bu`p*mhhyd-JB;y+MnV{srn0@t-kyM!E+f99&C#ZSe zctM4Jsl(mi4ZRa4z9lsvzy5rZH`}Toa!SsP+ z9d``eOZ|}{JjZNmo;W|y?s1mfF>u_L=>sQlhKF1G=#Qmtzg_aMGd7&*=-tqGp3~>v)62JSoFhD2 zd-TSAclO-Wr=QkW@Xvv9ns5Jm&hUZvo#9Vb<@x$KUB06qJHssp_&MYZA3Wv^mpgg~ zGpWzhk7w%Z?CHzm`37-a2f6BlHLbz1q&SrMXz|8A&YV!ze~;9Q9~q_hbrytrYrVZ+ z@2TzV%QdZQ-0tY-Ep}4}irV*&(huX@cMNou+&S={c-9|{=J_h751hd>KWaW}1<$JW z{;xXi;R63)?pdtyd@II&I%v!My4}=yX&I-5N6{V+ZA*7Le1c`Go!a`L)Ku|?Ykt0u zXIi_`<=fAB4fT$kP`yJ7uXXhFz2(skF5t|D<(xO+V7~O_8cnW#4P|`4!qKlG-I=zt z=|szh46ZwrI$V!&z0897H}21p_*Eol3Wpm9OuAR$z&C^dT zO!KXJe3Txu7F|UlH?`R0>>anp>m*)f9Z{RgJ&0For@7DPmG$7JE?Ye{#@yFlsn=5{ z?L+Ey?WUI3Z>ZlT_0}fzPV;&OSLjXD^{S^H*#xfVVVzH$Qyn^_X&XkH>#K$Tdsy>2 zpSicEw*Gc!#R-Qxb7#6b_hx#}iW4QwrTNU=X6oWhbGU<5tP82ZBlVR=^8OM>PQxzh zlFjO&Rpm2=K|Z(9nb8oS{@0?frTd$x^>k@Jw|5bL>(7{dd*8!vhuO!*P`j!`YDgWq zDqKq~sF|*OuhjFYc>c*cxo->o>fm~EUeEikqQLp|tJt8P{Pf~S>I5C5HdBi;FUWZb zRnqp`sMi!^PKT+{b&NVcpADgwR6b{8EW9~1YlOD`+x*P8o$E8-JlALbaLyQ^-7oT7 zZ#mr!Z_v(doHbEQZMBjOKdtjnH^^@D(y@7R=0y`WuGEq4m9sC(sB5%_I!nSODmepZ zaldr$njvHK8m=*Wm{yfVo2lW{>Rg{PIlt&a&i5I``8`=$IKA(WZY;Ye-QQfyIX(1Y zCH>WWSucI2BdgQ(*yL=(U1>$ZGS=rVM^W%o&XRePzVgt1`FDQ<19Aw2%E@moq!KhWb%U`WW@-?ET~B`c2fDD#fmr zX1-BZGSBUAJWu8#XZ3JSPd&Bh4rEn?54kb}QfEod%Xo`EKfw7spPe^eKg8O9@!kh? 
zsrR&mb9fF6*VG=BVV<9{E#?fK!e9l@C;eB+`5BeeE}L{>*Be93GkD~E1N8-uFfaB~ zC$I6mitr4l+bh?W8i#pXe_1Eb@@7x7E&%n!3aBF$<{3+9oBg?D+{@V?rf#jRbAMNA zSWVD5?}~GmjCypXu59N0StGx77B=jrkLB}!<9n+(r$oND;5RK#?8A=DbrsU?Le=kb9?Ld9U*pUS?%}+Y za>jS+>$P=n5GS^H`FlL`fxE8eJguxi(}fk`)ttQ}_0oQIxaFmpbH5i#$F91usmx_% z%YQ9>bSdM)XKSbv*38*E%Qz=R>NU!GlK;sXIn4N||I=qj=qGFkHPY@4d9VfXXT&V0 zpE>m8@H=J3y=n`8$^BL)9>EAN?);J5Q zC^(pVxEHpBcF_;lx-#kO-a)CM7Tk=TWh}ECS>75)R#5E9KIXmsd8BTd*qQy*4$S5I zdl^6JtL4}yu}4xvZ5#D5?cd2(HPo`r8fw|WLv7Viqp$4e>K>Sfkm2 zHLNeOkH2Fse#-dV=gjg-fBtgr+|Y4q3+|_ep!BtjX%l05oH_ASMuqPvv0it#KZYU@nc%d-;T4ElCyebj_+bU1Wj8b^(p0V#(7_Tf@S=@m@|szgF&jXIjRmDHYRdr5AWs7 zo>FXC8SgoV$~eo%{QExcVfrPPCguS*gA*o$7plMwH-R5+1cTh5gC~4~LvBBw6c324 zFOZlAERFafJIMno78pFhK9^6H2RzC1q6Q|oszdu1~` zFIi3?xN|1^X|iW|T6LN`#S3nw4)bp(=*{pb;X2LZ46GZ&2k5h1;7~gsu=ms1#0Tup z6~hNOZ&UiI^g@^4>Z8-e2j(qL@qyFD2QHv*+v5Xkz<-UbJDoKye#0PkMSL|i_(nS7 z1vv?LLEu#L0&~6Aelv`@B%lV zvGalg{D#Fb@qvOi;{!SKTb`H=U&2Q2uzV1O@WGRb5B!|xxjnP}_<+O)CJ-YKKiv`! zel2_q>HH7}*s0+gci3VC(;tOm(@p98AeocY4OyVoz_ZQ!I5&bN_@%VYy8BF|2sA8F# zH&INy&OJGTiB(SZr+h{*@m^j#!o*MG+g^oVJ7z9gxcEHqulUEs4=(<3eAvoAj_*|A zV!_MGKb~*;$8$p;;v3(MkF}M59IPe&_IUsJW%Q4XfjXZa|9BIhdAl9|_-^r!gO9{N zp8ovex*+}`iAz*JUt2fA;cgIpeFOgSCjLH>@s_&&p3>fa@sHh6ZxX(ANA&CxIm2H3-_sb6Cf1MmzbA5yNwl3caj=@Rq{WA&Gv_?`<2I*x zg3b8h!i`noX|S zy1{%#eCoBFtMNXVc|WlNb&d(=q%;%nR9G6!ug)={PgW9xv3$hR&*GaBJUtsfVI-`5Wo;8Ka1gp3C##ThG9c6*PTX@?L!FhnPo$oPE4< z?d&bfIDfrgFcaVL?$^`w#vjfO?c>b)W3GP0p^E}9zxSZNk+#lo<~M9J_X*Kwl+m|h zOQ#Z3DiPoM&#uv%iG>TvK}CIAwKm2<%j=>cKqLeZF%A#=bIeB7y2MR#TsXR@NUl6ko6$X|LO}5 z=rRXB#gBW~HD2FOOiKh2sH_gOFiyaxkHxIW|$+MPVldV21#WqVn zPQo^eZ(aIQeCKi&__NIQO3o3HvEI+$1W%Up*+%BxUiPx!D`Sc68RvG?IQj&oU9v`H z{q37n5tjKN`uaC>qvv)U!M7`Itl(Z^a}G16nc&M`u-t-)WjGUF6Pl<(?8$DGrG_9;G@rO z!$*G~KKi_fj~@G?eDr$89Q}<8diuZ0IVczM{}uRW-zWA_#W)#w+lV>Zef0Yo8}ZA_ zS`qs(^QfF@-$#7x5&U8K*!MDz_TifkFsI8Mo%gkGb7XoS#a6FiO!nZ9mNhNw^*Pq9 z%)94kui|yjV|<3tcFWKH5%Y5!YfR338X{+TG4I6o$lsW2U@A{=CU!~AY7v`oJL^aK zvU-%KLw`GQmKc9~kb8;0Js#hTWgckkvnL*E-{1bDqh}shKT>n*aP_M^PYPF`dR+a3 z+*n+F75)5qarK8s65#4z#Nz7nxsTxLJYG);t`_`k$JO8#g{v>))sCyNdnsJq0j|yl zS7SGafT@exz||GW;p%l3pN`;a_)d)W1iRZFu3r5$L#NRSSA);n#MR)`FAP_MXOqU& z__!3Vz6|UH50tY9OUFe0U9v~7BnF-cPhS_s(~&+^csi>!o-Q!)^r1HJ^dS>ZuRRTT z`gRjfd%)9zSN5T`h{e;{fg?Qs%YS)L-^BBlIPx0~gB`(d>bY8BYW&m97f#TdF39x@ z=V)?fH+X97c)C#G>G#2mR==&_Og`D)8*hu^>Hh?`8(6v!{lv|xj|ERR*R?#+1RfJC zy&Nq4SOP39Yatmdy{`i-y}XlHdjCrkB3L@envCFSX%BcByr^I2qsc3x|@z(6Kqb ziG9;M?7r#FXxD>}L^Lar`FygPl_1ZP(yXL3E2mSl@||-G&B`~JYbK9~S$m!F&%gb@ zibq81{h(c6>V7A}BUHVgbGVQ2h#tJQEEMf}U$kfa(4d`z7A+f1S`ONE^y}l&d>`Pa zShW!Uy7-{d;Oq|c=$3yTKfChJx5XpClo9=UU%n@P{v7#!fuUmu&wa(NVaJ~vuVEKo zf%4DuKTE$ZzWo5c2DB6F<+Z{Q)w0hAJBe08d~r9$>DNa&vKv}Nzm5+-c;Q6-6;r!D z1MRx#*74c+W<28dm+(Gsu;#l3fAauhMPulz5v~mJHAU=+d&yXEX*RyZ++gYR{<=Zn z$7FQu^7%^q%e$OfxZIVm;%j2_Im_^b_}Sid4)Bh_Uw<3E7||$-o-~L*>$3|h!h69p z%6Crw%v1bW`gRBY6VvYzGjGPCaWVWJlNr-0#`Y%0_(sP126Tqs(iwZDGdx6pZ2%uV z2TpuL{6`UAzoCb>KX+T4M0}+dC$V%e%kg{d8`_2*zL%+eInUI?!%ZT3`2KKqaIVtB zzmldeKGk~oUT9a*(pKWdmZgEO{oi?S_mFj< z{AI~&;2ZG~i~e1%E3vQqt-q*i;Tew5jwPlBJ`bO62p{K#T;KjYGS0F3_iJ>I=-*9E zkO%#{=y^r|{?|Sp?LDxi8xHCT9WK_uQZZo?+9zN7mwhu}=2kGi)S&({!GpedmyMbno}I(!HJ0H~4E?(~ z|16!$Ue=ZP>-j}twXzus% z#pfsR#pef)vmR~wclAt(c*Kj!AD`C{a^*+-@ncxCO8-74zqL>PqNx7;m@`l5-#@YG z-wVhAG4${GL3uty|K1nd=DzN@?Sh;0J<97o*KNrP}yjaU4NuZLiUrvn6(bHXh%m_}1;(_yqhQ(nn_r zKZra}3_nnMc=&s%@<31ga0nZR}P+* zIvaL9d&F1H|HN0`#Q()tz6O8$p*T(Z?cyiLhKrwE>Da{=hMlwP*eBs97k`@Q*xS;u zyV0;qeHziRZ!&W{5svOGanXK!(V-ufe~# zoxjEF+COAp`Rp%O8M=0SP^A za4^1XyI$ux{F|0+QVm0u2 z5#Rb@H1J1E4Sc=m3Gn~9r$u!;A9F9u7ymbO@1nU7-A;I!=-_kF!RG|T&t8L{{aVrP 
zuzu`1c+u`0c{{3s-;W=4_v_>Jcpd!O4zxS}Dt>1Ctq~nOYdA&+Pv2NN_@UH$Yo&vy zT}ns9`V$?z%n8xKXXdrm!IyKdlhMIT9V`2|#A|o5iEjule3w|C)SwdIvZaH6mgkw- zN(-OgffnA-Q;U5P-@Q&B#AxA}E266w4bKwB%dX+s!&phpn*ZThl^&k&T6*|0Qx9)n zHlkfO2b1xN&j@Dhaasl^&F*#|E1EyC$2$b>EQ$B8cO7U-=zHSg$;*n z^=Aw{Jhf*M=;5cBdU&J$Ori3>FPb|?>EZ8HdU(;l=LO|CM62|s(!^&gjr?G#J!7st z!}p#Y(Zq|EXCPx&Z;nq}diY5VQ7moO!!O?6a?14Ze`rn$Q+HTH#x2+=jXmALI;4h7 zXE1e|iK*>#^JFn~uV4yOpN?G7S3R+q`e)3W&x@%q<{E0=csTo;d807(FJdwEJ$yzm z^>kiO38o$whpBCv_7tY>B&NpKDf@4-nA&kVF?E~QJJyTd(>EEZ#LH|C*!qV!QT48BZ`|i2W(7w0E(fKjj_oHCx!zPwi z8u^J2&wf~Y?X}nXthM(RUHYz5y7a~NoLCHecBdHlij|^EXQ4|E_=nV` zdyIpR)2Eq>^_tS2bZPe7{Vck4N5A|d>Czwl_`G!Kj&Wn@())>rm$>%xiemwvKCgKA zFHV=%?fcBSG(Nj8QkQ;^zB`*P?K_Pw{l@30OP}~abm@O3Hr60ER{5P}4^KVxqb~cb zlsk3=d3H)y&I>OFj~ZX#F_R5vvmIMf_AV)AAN5u{YlFdoWItuW)I&oY{u)E}-&X$p z_9XWINNb#8B)JaRi;52#8Loq=Z{{9c;8}SC@sPwG+$h+4CHt1#&b}ox0wc{T_ARM> zev~PBWGz1+dx*|Jym@AO{Tzdlcz)I$?*SDFZB`FYZ0$w>3= z&q$M_ea@XG-|b412YcX?`iyRTvTc#bKr4On72=f-vJZ^cNP%C57rWsXcj_N<-SEpj@EyLZ8-*tX-z~;}ZNfWm zuQ%3ey@QtkofUnyJo-^H(ub@P&^-ao!7 z!!PxiL;4EC#oq7fjUN1vOW6N6gMH->((c35e{rhIr+%XX3B*(z|7w^rF6`{1Z~N3{ z9;9yhkJ!yaoQuHz`iHkt)_&%nmVY&dkE3q3?e1nF;)5ia_hz4HGkeLa*m9Y8*eM>c$D zA4XrN(bsS3zP_HmW)F}XCAMNbeJ%TzErA}@_YDiTAy4G&1go#Bi#+B8%8T!7T`qkc zq_4GXiuLsd@Yhp)?LscOy7jflC9AJR_Kc%!>FWmiT4aya*V4~Y&OcILpGfS*IPcQe z_jc{;Ud{)O6xA<3Rn09hDn}K7d*(aU+^xhpi%!;|r(fg=OoR8gus?`?j|}egK5=Yd zBJbgesl_eIU*%MrgrjCGwV|J+;A8fz86l~%pmj1 zx_}D&oH^u0=A0*&7-kx4S!)=>n}2UN51?0<7-^yB8MB9!%a@-VJo>7G;xe*XMTJz*!aYF1SeUZrGScU>&y3?QrJr2-%N6=8iy-SU98NXzM(Wgw zEB%R!p)R-Pc_(d8M#hPL;T~SRrlgRV=>?u2%e;S0 zZ}`5v^PK;i_XMj^JC00251Mb|j0=2#@UAk-0_zMlvH_kH`LSiO+Nl0i;(v*E^K1_P z9^|_0r(%cB3H<6AKkQKzI61$J^(m$7d#&_(6rGo|G$4CmpPawhJPD0kUXAYEW}Oq^ z=G!ewT~7Ss_QDM17yabq{H^ORw5u$+%D3%yW5>~34DzOq^X=wI@_(7S)TNaM>JrpQ zDJPRUzjO|DzL|38QRh6nT9vQV+)0c}`QQJ6b;=vx56;yI<_v&E!Z*bezkQ;Ze5xYpbi_A(|`u$)U%8=I?#c=&vpH~(|BVKG+q&- z@y4ELTzncDpN2LY8=y@CbdA=lH1MV9SlZYFoz7TpN{mi_vYn!n{1-a?Iri*xqf@=~ zbLTUX(80G`I=DVhg}!!_UgcLt?A#(SNjbNu0Dpo~$^Ku#)0^mv^?k|$;}X@#tI>tw zl_));ymHPoXeLkLm6dTckpBaq0Raq}e=HiX9t>XjX^*_(fL9ve75r2ii{Omx_cNO1CzK(wC3M~(|C;IP_{4$Uc%uV{)f7twA<(d9}d6K`v`oGHI|F-r2 zgwy{`>;Ej5|7QL(=KnYc{uFv_>!$)%eXr%+wm$eNY-*FF`JPg%WdDZz1odJe^DJ>| z{w!m`FB=~Agyp@gACY%;)LYVB4h>dmx@I+AN?z;-7?HC*GMdxh5C7@yPTDwc@=)N8=fdVrW^jt`Nl}8n>v{53;R5) zzBkx0TB(PpzQ%v@KcrHPh*B%2zRJDy_qt?*wPg-pNh0%oU&Y!jaB%7C!f0PRat8R^ zEJ(p*!f#;kf z>^;V5ysc@_Kk;WXwbw^w;Q$ZkuA;XOB99|ePTAmwXWi7>XjdirbGdEtRB2l|dwnv$ z^pi7$mXhA;`*m@B_8X}SbaWOj4wRfU=1$^Wvb}EV2AF^g1PhK?P2jxPDm|{xX7#6?OHRzxOB&d)Z3cJ6m?Ucb>F>- zZ<#+1tBkdO5#DQw>VF;ed5-(8_3==j<_o*m$C~eYDqHsJ>pE8pJ*@t|mwE{QSn>k< z!#9z>sbwFM0@<%CPF@JT9EIaX3;#a}y)^xtrIBLn5hvq$BK#}gtz!&8EB^#750bU4 zkux_Oq{SZnv16dqmk%FWYauK6FKh3#OqilfEeD1r_=i(BtDf;X!u9(+Sr#4b{NK0N zlNGLbg}JlyT;DH2b$ovfc|~Phbo|PhAF_tCKfhnmu3z&z_7w8+Q{ynU;q(?;md}wO zeyfsqHsSZL9=tfP>Ne$0WECsBs$N0&J9GL zU@kk<@YKDd8!X z3LByhxg#>@2hfoJBjaU($cF8EJgbhw6O#68@TuoSdxq5YsUz*%%arnqKKoyL=(l>c z!@?Ddlqb+KBsLDlrTA;e8<~uLOa1GkIJ;LX8#rEeDI6%`cZ zmC^>$9}9Ej{op*sPwYG!=e-qjR$?J`MS+~B=(#OV(vjC}+m=e6V+R)8Mqam(*9`Ki zjLVDnUGh-e>+h-W5u3`YIoX~)ji2yfJnzNpV(aVk!uWwVI$lo|yWr5gUtH;DE_j}J z#OKwMTd%HsYSGmT7`MBjuh7p{P&f5SXgCDkD8Qx>-VnXvcjQslkm~0wJpT;zk{QrQ z#^kF!uNo754sE9{=ADo4t+IBL-pW^=$MDWhbI?}k7nSQ#-m!S*0n*=`z&OQeTjQK>yejj8&60*>NEVTKN^&5kWO9R`I)ySwUb@_iqp86S>0&Bz{V@{r1 zgG|oPP;(ogl>>V&|2{SMb>?-)i%W7vmntgUmg^Q88umaFdFG5r@H^}ttshPXPmn!U zqcO49_*awuucVo8zs7&LGb(>ip!={EIr3+CcGI;Peh2;HA)WM>N>U>o_PPLA=8jtC zoHy_hT#=v2Ib|yH7xd9BWl`Dj5NWrR8Cs6auN;K1{WJKoyg_Ci}#9WC+ClY=GQ^{Yw3fr^u-u_2Ia_Ko3?H1$y@VXbwB@% 
z#HgP8@$P6W$ltlP+oca}d}rtT1oBBFzdrOK{rIZkmoYZL^Z3-8=RNz~TPRQHw(vyz zV=dqgdR{fI@#kEa&zLOud@Fc z8$LpOO;j#Zk0AZiI>-}9roL~08AH2>q-$kgv`IcgR>}-)!#!Y75qWy;!z3+9o;qpm4_C?w^6@yn+fL9iRSI$6hicddrQCVQpLuCQq0@onX z=eoW3jt@-Yyu6wPDT8Y6%bzyMUFx0q)=(b`Tvi3RXDK>nr~ED}b4y;y*88sI9Uc>y z^zoR^`Mo~W*Jdv+o&Y9?e_^T5u={Su9{tjV__hw!j*`7I1aFf28QgDG_lzodsMH+X zpM9gzbw$TpB)m*N7Qhp7zkvIeBv0hBlkJbe&wo5Q|2p{ZTKI4*{5S@_EQg=Vz-prW z+>`$K5oa(9pTdtT9E*@QPIdVYo8 zazBFoZIIKVo3C(Rmf)w~8_~tKZ?awE7ycdRRGJrO8Y(iO&=Zk8MTAGEqi=4=bNFV_ zp8fFYP@^s|3pwIKC$BY9LcwPnR?GX@yhqpBB=(8$wA3wsEBoKu)yqF+j3iQq@X~kT zlX-(ZkxZWdFd3P!=f0imXI6)Wuh=thdQ)Du9%qvFcG5Z?_2_qnF`krkTk$&%uroGE zt4a-j8R=wC`uK9Aa-*|OEA#BKPum~+C20-LDn>3JZbmLcQ+#FD_+>5=-gUFq*uDF{ zoldVejDKiTVSy)7f_;^cQ~HTYP&-nnr<5gSNS^U?_7VC?^tq0H0w<}(SL9-i#1Y1G zEx0u?P}3JdgBi@D1?2G{b3_B@Yb^SWCs1nKJIb)#Gpg?4QgiVmW#)r#omAoX_Bh8gkSu$Sc7zPW&JN@6k~ zuB?AK+jnWP?-oJVj_(pZ!_W3zy#D;jUB6`Z-x83HbnanyF^oZ?Bx%@wGf&ZDeIhV91Si=2#zr^;-9RGcu z$$Y$Ou<~!R+5EMf8yjN0W!p6-rt7tcG{WC9Rk-bQ|2S@pNgcofw5^u?39%hQ z7v!VZZhp$&w$QV()t*`0#=JcN--$~#Or3smq*<}!3Uj(S%3Q^sMiYs9xQVz2c@Nfb z*kF8D@|^=eM=Re|f4OK)zW8$apU7BUU;k~1YuFf%H5I}?A`hZ*4GKT*DcN&k&cA7T z)b8@EIvuZAeSR^tMJ}&ck4_mxc3>y3c;~AL{%zQ~$iBHlkt4FsAcwQgAE!KQk?r%! zO8noWe??|y^ZmW_`F-@c*aXKZQ=Vt>T-F?Hv(>SWklHbizHMjEnu*9&$+w2_COYPy zC~p;WLf!rI!onlR_+I$I`mUC5hC(0tMr5bp9#$PHsl#DiUS{LFTxAZC??msE@??F1 z;2oQzI-cgG4@oC<>uOtDJajsGIe)iA;$=?pPNru^d-;_+nD3^pAK?kK@ooYOZu|rB2wTwA|CB~~%)9?dJi>ntRuNfOaFn&i=$VHLH$Jv{)B}CiEK0VU59KNI zkz^Gqd>~`ZvB9seE-ZLsb$xDvx%}Eh^TB>9@}4cxcd<(Ht=ePYL(6F##=4VP*u&MI zR<6|X3)F7{ZFLaCkYA`Gb>iRSSz*DI{;|mY_4T)Q?twSpqu%mG$~o}GW@PQ>$rtObK9BLm&wJnt z+0QwiFV=AXW%$CHo5XK|O}*V+n4!;x*n;fj{*c>9*Z1qVACJ6q?@rbCb>Qir}Th~ATblD=2bd-Ct2@8x`_B|85EeZM7oPyTZLmov1E zd-6ZzUX-lw>!R-|&!+Dyxu?7p?io)18|=L~;hWk1X0A1tWk=r|OIM5jiS4^WRrc4m zu>OB<-2ZiP|F^~cKNR;rXTYg%FOU2Gjky2I;{LCX`~PO#|8Vqwd$}FV&V_xQ0!Cu@ zcb?Dl!r;fuF$sRrd4qXsldf}4rr$;1DVKU#^d>NZ6@|1(Ft>W1uXAZkc|{>@lX*>^ zFW`A)X6!lbw4VPk{yA;7p3jYcPW!FrHSy1(h4uW7_~+2YdR`I#92!~AzZw4=dRfn> z#XpC3*7M1+=TFMMNzl=HeqH=?Xlgw#<@ve+>T;d_((I1sSMl7#bLbX%ioDA1`%yYdp0?-&0<6JQWcC#QQ@so*HSljHeA; zt?@L2@dS>ZrRfXrVe>`*!*f^jA9`RXru$z(&geAo+!fL1haAWd@hR%(@Z6<5A4lCB z$dZlLb9k;e`h24UnbK%Ihvy_VqFT~J3z2m?A9yY=`ka19^2N(jIe!xP$3*ZuyOulD z3wqf6cTzrefghdzD*nGj-nzXxSvn?XH2G^j<~iRS65T(45%?uCu>IA99j!xBW}ldo z3Z9aNJ(do>l0i9rkwg86D@yQx3^s=>I<@xDJ}{g@T;v4`JmO32KZ!kR1M|3*SmI^I zpyq6T)#$L*wT?AUxEww^v>JqNmrYD#JGO6wS^CLL>}An=aQlor!moJv_g~y3;Va?Dvd8zH#;}-^)X;G@r8D zcI?AGd`@s{Vv<@f@dOG6dhU0q9GAOqLvAZ|3zs89So0kmqSy;GJJg7t&`McF3o_R% zg-&f3tBADO@gnQbold=$bvJ9q#l95m>^<@q{%C-UQbWtb#NyggnXQ7tDs%S@4D7pjtm;d z85xq7%-L765A>0RnQIzsgPQAUtH!L6yK0}B(}FI&C+RwKAMF&(rVqal(825j)hbPo zMc}{C>2b>B86r45`_N8Y(>8d-}py@OUcnpXR;+k{T?ZH4UN%bIQBKcUA2`ygGX z3hGp_b?Iuya?ZJeU!I0v-1b4fhg?pd==OV{ySi}w^^({7)awJ*6I*#Y`_={Kvkz~D zeNaeZ`L{6U+So@lOn+1uIibm{$(K61>DN|hB;_81k0uy79c5oGWiOx|7keUuozXo* zcT<-1^*(!cM_G=!q`8hgiz!d)CN}Brp`M6UzrCdWIW&+uO8ulB(r&3^j?|I*OoGoA zQb(~%g-36W>PH{(6kFPJ`4ZpFOqTgf17^DZ0d?4(cNyF_FYeu#R*@BCfER3~#XHf_iH>DJ7$ z9q!Vc9VNT(`|-XC71=vYu`jLaB*R)iD)qaK9DmWtk_Z@F#DzS|LZ0(^bW{1QXUw(Mg5`7em#d!wfGvGx%d~@H$62svM005Usfsl4Kjm0AC6VXnR(fbAC5{hTdP#$d1O<| zA5`EYY|?pyll&k3_LRJd?o~0c@#CGzV2Z+r{bMpG%8Q3bU2jU9hKwV@E{bXgu4#o1 z!fVOHOTAv?Y{)*eSYmR)0hY?%E3H#h#LGFDt;i1te($iokN*U1c%nSre~jxf>M#JT z=p+8myDi(_S_QAqr?2LyNNWxIoH5?r#=zzR;&Dy(Q!eIzDgOs^&T#ovo|9GoG2=U9 z@S!J;uF8ARaYvq~s4}lE2cJ6exhkhBFPZpU2Q*HmF4)LBCZuj#J%RokYRhV_A+HMN z5c2m){$9qTF|aDn7(GM#=H)8>dC7zLeD*2dwt5`j%GrEH$p3u4ovHf>oNqq!#gh%I zL)86iPcB`39DG`0ZFbYHLfs#O8o$aMEHn&qE@8t|_P2LBe693P>(d!#khVIgkHhHi 
zGr79ChF7Xc@Yzf=@+|#MKQFbh-{A|yKN-2r79Ujf&5eAkKR<)mmPA9J@8?R5;wUl? zw#=b#uJlAo8P~U66!mxR9g-k^kEvf_>=TFXdfrH3{ld`V>y0$m^Qmv=zCizNHq_iy z<5Aj}8cKa}`DzF8&2u=jbVbda@bko-&j(An=c5HXm)BexCWhVh9C<9`+vR%RP;-YF z%i-OWkb^NX?8RlP-9}3Dftho{%a}V3*fM;?RYhh^9zc82FsMOF_C@Xq}WOxbET9-5sm@ zARv?MQ>eRxx-X~QQupQd4BrmqsL0IYo8eJpd>eHOfqB&$ z7ldmQ9bx$FFuv>Q;7@+ep2Xf96xt1TC-a^7!1#8mjG=bEzZ~7c4KKFAKQh+8i9XXx zUoVll6yA}sKA!RIMvjkXe2)XytcFH{smAjcdek4;<>|2&`gS)mpm1Hb|C4veGmiT+ zj&mEoW>c#=WCJ`Q^FZ|6sa!o5)Pg}sn?!GE`-3OoL2q%YEZ2m-OLE(g7f$*K? z7V~RJ+oWBkwA0cj1v3!6W$%Uw=8~!QHBtRV_M#scl_T)F%um_S;B@uOI7>Zm()G-8 zY5j_N78wn>@WouMUr|pPUnRDz&|AzeAKSB<wBf-7{`|Rk=(+snHo-#$pmwEbqcqNJQ9!C}g@f$?zJ0LV1dvBP{(flFuZ>^CQ z+KS#K^Dwcz&9V-n(Qq`6=l@{-kH5+j8NwP7ky*DB`;$1jQp>&>$h<|Y8_}^v$iU&q zEniI6uc2>hkb|?~VKCPn&!;x8ex7v;%gA#xyuNJf^3`h%BP2elV%9;3j8y}XqvWlK zQ_@$;ipoM4valIB>RLlAedsG&mahI6#zYJK5oAtfUv$6YWUa)McAN7z*BiuFUCaDE zmU(;(^LaV*dKvS6Df9kov&0^G@IQ$=C{M&DNn_qm=Y0n0`jWmM-(@oIb1sTK(I2)a z;x9~S+{@hZ6#D!pF+cCN_;sJyDUQqWh1j#h+20srI*Pze(3O)N9#il6HiCI6g=g&5 z_6WAz+w5z!CzCU-k>A@4`gD}ieJ%fmw_E7L-Td~_cTs&js&iT21{Y?p`G{}4GB5H? zPxImk=At)9*Osm#Q-*rX4d`w)sX4y6r5@8|>)VWv*gu=IKh`4eHf-{(p2b<&V(-X2 zwm)BRbB7Nsd&e^7qXy<92mMe@c`spu9CTq{80pQy*PM(4&Q$2Drf=rFmmoZY?}EO` z_609aG#ljHSL~m*XG_hIPSzo^wt3jd9N&==6*)8rT?qa@Gzh;nx@p@G=HdFA!*0Ge z$qSzGw@t%-u_cF|C+-5A|8R)$Ch|k4Lsm4B-vsiTaG}hbPTxdm!MU*7Cn~aIDdp!Q z@0yW|az2KfHH&TL%g4_V9Fbr;wyszmL8gkV2WyG!#}_NU)FUO>JMt}MicT*+df9uw z5!-9Rebr&9ZwT9GCURKnD`{ojl@B>3?Rg76y^sC;gXL2Gz|d@TRGD{Qr+#keE9G6q zoIavhb-A=t_(|-DHd}6o z4Pn_Y<7>0m)MLMl=RDzOxJJ(lL~qmfi|ji#0Q&`d2Kj5{AI+~W@Ga(+c3W;}60`_F zAEC43gZp>35*J?LEeeOGcp}Z|_}^vzW&N7?Xo4F`P2r{S&TQW}^q4jmw#~;2cWRw) zS`zP)eZgBDrtp~HB%x==v)0PV9PSDoMSd2;H>{CumO5l&r<5dSh7ihUJ}3IZ&N#pb zbY*Qn(NBY&DKT1RMrp~s{2aV0?R^gVpTIV%wx#%j@a;#~b`}jJe#7Fa3Oi>Ny|k9I z)N?|0(ENV%ec2bR=)U{Im)|!p>=gYIo*K&7tbi8YO-ojbUoIIQmh}TyF-~R7wkEC3 zl|0(A*XFWkW3h~-RklICcgRoH&pXIZz6&!iTJ?}Rz5zeK&KWZ|8EVve#*bqp=Xqb4 zqtAd(hEB}AFH7H8dAz~133K5y>LqjW4A~1S_FaTF%C|X;y#zbsYtT5e;j5fWLSMD8 zr`aB4nDEA4=;VP$l5dbclCiRZwoClg&KNI5^Q0c+mv1oc3suJ$tYKX0aZkB!#=D#G zUL38f9_u1ktmn*S{`hwy2bO3#;0(E-q1cJ9cUki;n3rHgC+AN`_D^FDn95vmBXhzH z%o$UdGp?V_T%m2nqwg|r)Z3Xil9)Ffq;ZlqnQvTt3!XkdmHh(j#H`sHkKqf?!QSg{ z%?U%WgBJE*Gw|mgvlSV86?3@g;>BDI?7$+{1b@wNG!q-P#sQzt$KDc~K;m9X zMWtSmi>-EdvEV2U!?`qtv86GMeo@-HUHyHnBNEMlGA@T$a=t%u-eyX97A7QP@mNTi zVdkCFVMB6m%;;B#`95JhSJ*lKoN+pdbIs3$4Yh)y`7|yxP~$?v*Meio*m5FM1%qfq zE(;DJ`488)P!tCdd3!bTHWzvO!PDjDICP#WXx>@g!mmkXo#gF;EVWA0a*-!;n=6-n z8nKxc^j{;`$1NHQ68c^cldDntRnl)prq(ddgjOO;8*TEPXJrNTuw<#bw!fC8ZhNlp z4vhuLcma3QvQ%_9k);E*ER{S3XBkZ!Hxut6SdheRoJdEH1Iv_iRf#wFqnq}%LK7+T zlO*P-yT+PJ+0(`=c zfep8oM#_-uV#k~dHk`bqEZ|&J7H~aOrtx9%YskGkmuE7@PlpjtJ|B$OU)C8TrmxNk zBc{!rz4Bu#sIK3Ab@uJ0N#E6G? z#fT}lM~rwA?PA;y#zu(tc@G$|Nt)}Tvf&}>)@zKII>uwf5^o!i5&tpcJTPMV?MxW) zT>7ggjJWIE-&>40{O8_d#M{v?GLVPLR`O0KjJOJn_?9R}d=T8_;O;*Dc$wA-D+Y&Y z2b1X)UR)o;i_>CwG50;;#lJl>UaaMv#)}z4-SA@Moy?;aUYsg;F*b1s40RXJHBMZQ zjSE&NIB^iXumcN1hU~osoVY566aN~V_%0O@A6Cn|F`Rf$pHnz7vZn(lp62QqC;oL0 zI5F+t2HktIhod$_3@4s~9uS>fQ|}E)d*)P~-X|;Ka}{c=5z2PMnJVNB?M?n6{n~C#Ij{apD$uS+x)Ykdqt9 zy?~!We^2O+6Yrjft_e;oYk13JIPpHP#EZE9N5F{}gA>;qQJna}9&qBse{`I93O2&$ zffK_&9XK(4EjY0=BZ?0@vrdB#!&ib22R`b858pDO10TLB9(RQoy5hqNCt8#^!&2!P!x`yn)l~E)v2|OayM+(`=~x^-90DJ{Yod7qn_AWqe25(Fz=!#! 
z10QC7Y!!SMoc=-j#=?jz8Ef$vamL3nj9Aw>4kJePea;xMrh&$Yk%4E!h$oyUMl7*E zE%Zq|M$DYQJwC=o=KS7Z#K(7jW{kLFUy}9bh7oT(BSx&}$J1iO`r8hSSms2d=Q;7b zFye<62ORX@X)$8TJ9mtDVH6`y{a=h&W5m!`bgga}F|_S$tMrT!|8juFh&yDC1DoZk zv@>AD(ET(RvF4>7G2%sK-7#Y97mX2Pzi5n@>sc^joxjG2u@OEKMw|*pj1HqQVtfPL zG2(@1$B3yzuQ1}}o$&yT5sUpK7;)-3W5k*U{{R@VuE%*}#Mp-Cgb~wM|KJ#Lr#a(X zFyh&N{2Vc2=5dV?bM1x^!{=wfh}k2oXN>q4=nWd{)APt#Fk)oR*)ZaX7Dk+YW{fx< z8**eU?7)f7gb{satQdT#6IP6DJ!h;K+IN<_-Lc}E@XKkMB5Q9;?}8O;9kdr%F*3Cq zR*X#5Sh2>0z=|~{bQY`_Id(d%_^qy3F=d_>D@LZ>rtzTDV8x92PFS&SXD_j0>KKm| zFX}B;Y!{z>D!!LAu;g?w@n$gEXe7IB^FG50DhNUU1Ev*f}^Z8288}XEO7<+`}k*9uf`|4!m>Y- zjfcA9#UDSY@!}d{Z)y{XCB6XsWJENUSmNEh_}7=DoB=N;wm|9_#J8ejit+zTobS;O z>}JaghPiJjm4*-cZ)^dV-c`na7VaeFO5^sDI zKmRV`jo&)fKK;$J#T$bI5icFtz`m?H-q;u$XamzqhK^49@cH^%h~>hsgsoLUoY(PD zo{0E(KY~`NtW_9C%$N99L|2ozL;}R25UbVPy3n~sVj$w-4>jFZm8+j?}w*obteqIAtlrdHBH&40;y6sTUc` zZTO2KRbx!?$IbqK@Cx;oIAxQ%OP?Oax7008xv)I)yR@$ObJPcB)m!tGvpn+ii+^b9 zSY_m4l(xhxUlJd$EdHrjtTJ`6>S^K7vZnIx7!EDGFZMTaEvvw;wEf+mSmo)&&`YfH zcpak%$k2EU`tyug)>w2;G0VuX4lMdy zVwT1378&-B9J36Kzi=#i{@G)e6AyNYSr#n1B8ElRFpm&B^~J?3FaF<{<^P|@EN6Y# z1(S}8SsoLQ_0H*zN#7oiNuPVna)(YRx==^Va#u_mUhff;j;m+xS?c+qR136GxMIcE8& z%<0%jLF|*bxaAdR#H95+X<^dVnx2lhW&Q28UdAnNBBrwOTw|Bf4IK1iPq=h7@ypd+ zacTVUhq0d|et81;MLT-`nc|m6$79pjLK45ciFnGcG0b{B9x;cq7D>l2Q-;Ja4^QtL z!>rStPYg4*lEmu8$1oqUVwg)*B!zuGbPO}LS=SimAa=~T#4x{Xzd8Ip){X>AJlK3$ zp~>j3g7ImL8d_Tz^-5w1tvKc(J=#4MP90qn*auzv19bAtY>iWQ$RP(d&tUd66xwve zGSl9d?E}L2o15QQnA|0vxj(kf1y;PV<}nMa9vUCdjJ?qqhxgOg5P7b(yF&8r7&cWK>##avoT*c<@avNgcETq#evR!VYhUB>>wVLR zZ-(|VH$4Y09K)V^fcR#KZMJBkaqAeb#m6?U`)`VEw(24EJFnPgY{_`s`WH|Cz?7Ki zuCdJ$*WBtFZ(f9M8nriLvCYuP1ATOCv+kS!@z~~0>tRme);BRnOlPi`#+)&gx#LFc z${VmNrx1r3wJSHj$^7!F#5OyaU!0^#CasHaQuvm*=J{!?g|++N>nXMwT&N?qc?hx1 z$bp`4>un2JOVSf=ov$J<$-F~+^8<{tC(ser6W=W7HVB5%Mr`vdjDZF__JE#iy5rVI zSgRwvT8%t9!gmtiyoB^U#WyD!(fH<2RxG|*uxqwZUd}q1v&K1p(8p}ncocEY;8DlGCfbIoi0p$GCf@d_#)1Yl3O@Dg`frAH ztRq;CUJtL|Nt&ESDSJ8b(&LO%YvUyb`jdskKo1!cje+h_R)a17sB;`N3vyR|k3O;X zgb_I{827L12ZmdTZT5mg3Ep$MnCJ%VV2O!#Aro{=H1gQWFTPF3Gm-mUW2Lp69=N6o zOiIT}cVJRoG4Py@cxfq5FewWI7fkA9#^AB-s9mbI`-@%7U7)maQH;_);JeDyc*hc;NkZz z=``Qyy)|YpIXfPn(OW#6IO=~iJRF+IdX4WxhYmda$+3~LzCFZLGk!&GofQwCJ3ioK zk6jmg>~`Sc+$Zx~o_+3k_>nUUIvDR9(0y^UKNjp_aZ(!zHVoV&%TuNk$Q^H{@(p@Sa@&av(bk%7TzsByE7iX9yy@# z@Gm$%yC&8@8V^SXo(&KGJma&?F6*&n-tP?_zH-}V!^78~ZV!)M;Njp-+s}xH>p8M# zJly)W0}mHpvC*@ySYou#3lB#haL|vZ#lxA4JH=`D8V`@gYLD(47pwihczDDOt$W17 zd$fCc#>2M~0~9Zd9N0XAGtYvDcZ%IUg@;=_)*~K{z0nO1$JWt!IQWXhZeJ9$BhG?{ z>oK75aBPXsgopPfej6P}$8R5aAR512j@}Ia2Q?nP@Z9lm>eMSd{QI5p35|ziducqp z@2Pn1v*6*H7XJWvxUR=}3(FZgJe5@$l_u#KZMm(+fPDd8XHRc;;E~@Hi~0TP*i!FsaXshl59*77s`M zer7zpM_Jt+55I+&KJe@99e6l$`nJp-@NkVob&vb*hKEBhE5Ebi;aX0Ehl5FJJiG&w zItw0-EbKKNPJK>`ha(qn>noU4_n2_-@a-LVIC7*D9*&-G$%$U!;W2DV_7XWKJe<9G zLPPt^KHL60`@m)|Aht0Ze^VBFvFdn-A&NEY(fwGTAig%4{a6K)mA!goKh|UTlPcH~ zY+|OIw5hqZGpLnf(KxoZNKV4lbvAgU=DeFP?o{q#7 z)Utona`fZz^Q)WOMy5-24iEc|C9_{pXy(i&_kx8h_s{e;?VnlP6q@O7k~4v8UF=7L z|16x2KZc=UuNH+GUSfai7s^fc+IF>Nb4DL~ zm3r?hZgOx2OWR+nUl~`(`KZu_{km4}Dk};t;=j=MuM0gtUdtZJvi3ptB9>>ti#e}{ zJsr2%8ghwg56D>+CRnQbo1Tcg-%B0kJ89Qg_OoiIJ<&a+2eE}%-5;eb(LI~9^j?bv z)KkiM{&0*gH-YIVsU1V0TfxBMJo(=GPUtN0#(nt7K8@1GTFP&S&yrbhAUu|QpnbaV zR}1w#(;hmZq1T?;N9T*=eddm{@;>{DY2N2-swnR-g!k_j-rx9z@;?4#ct7l3ut0b} z4DW~G{qSjde}T2XL=U{rezWlYiGnLDh4)Wr-XC%r-v3EyMD|kq+<5>0NWIRB_fL!~ zdyPWIyul zD6)UO@ICPlmaGpWJ7mAfO!m$?Qe!iZGY^QYm-D@lJCPjrM`>sO*aG6t*0Ls`?xn|8 z%UOpLWBc4$vOZrqLeJBu-up$?4>%?3;dfUJ{LUGhBJWdbyy29Z;;^g69sYmFk*Lik zd-CRJnf@O8uVlS={ISO=>j1uGbd8N2y35wGNUQL$-qWjp<;X{=fowH@0 
z$c~fWxybIEnCy=3S1&q^V|iI@P8rl8yMuAEyX_K@-N~-DnA|4D;>IG{klbA4n=srn zT;#Xt=ha*bxoUb{7nRdNk<-xpNKF~^^hEwyz1^H)Ot-cI6Y3>RyB#{}<{4 zz3-skEdG_YSTZ=ehgpC8@cldFw`VKm^HT+dd7gno^9%>|D01h`a2Dnj4lK-54z8ZU zyxWlH<&MF5RlF;A4#^uba7bR6Jm;BuWk_D(D}(c-?$WnMh-pZre%ST8ZhHUXKv|@K zxuNy`{Au=yZe2gCUOmkXpD@>bSK13NW-&hqAGU>xQ87{TX#+A;Fs1FBh*--#*dv7&$p6(n|e5mof7CnDZMWf6!xOw#fPD7~zce&pJjT)J4V!{nq<2 zLit^Zf?B87InZX#D$r!nfG2&&6R53=X7$ZFAnR=y) zF>>3P$H>QHPK}W%|BYjW`0CTeR*Q^w-KC~KkfC-Ian9_`%F(<_=7c*r`_tj@l`_tY zjk-VsGRM)EeMeo%KGDxB@8FErBkWttPxi0%)p(nh)y(AQZt~R>H$B08pvYVHIWMrQ ztl&$WK|KfF|5>N`9%snzEoXmR_TdcHVkZ&%5}dChAC4IBmN;Xb zwu(Br5`4i5%fFP~r~mVt<#q{HEYvxo*jCdhdtq2XF7&7z|A? zWsh{JPaAp9!=|b#V1N8V<$s224cEiOgM3fr`rgO>_>VU$nwD~I=ex`u4gB6_E)8X; znX`>t-wQW++-vMnfUBp3QZ)ZmDQ$-K7Fv z=-*Poc}3N;nvCkZn_8+kvxxr#_`i^I*jw)MMC{xv?)T3}j&oMBkC;H&XL~<1*iYS; zdGBw!d*=O3yOb*gFHB!+bA{wwx?O5eXbXFquC)yc$vIg|ewW|W&Uv(kOHKcoO$BBe zD)0(4l6_-k?bJwbQBy1Qdi+Zz?pEqCSni?MUb&|pZP0KGw3U2M*fSbiiJc71^F-ER zBS{$+O)EccpZ-shzg-2MOjLoO*m~rD6Z!oV+P9H+8UI`4KlvUY-^twnU*@Y$d1qg5 zN*}cld$%-J_pP)?>VBra^^wogUzE7-rG1;`o@(DEl(G2sXxrLoXEFD!l--(A!qLVm zOXwrtyu#0-iG1@od?WO*zS%4HLJw#od34Z%`qogkv|aL%_P>%CEnnI{MaqK~H+kJn zkAI`YEp!M`kBK}lfZNK62O0ENY796X8MO5sUwXM=aixd~fQSFZ)L;VqY!( zC?&Q%sOw(TbS54tCm;EyjkIoXk8|M_Ij2p&?M*q-)*{lMFK@|yUD6lV&=;R4PsR7k zQDRL$p^SI_m~=%DdE)T?rpax~^%W<2^t|LkMx}V~YdVm5z4<&c?4O;;rIPE+gUF2) z)%Q2`_0DNh%o|Vg-{!4s8p@bYhX0>)sz91|bd$gOYfTy6OPZeIYIqBp9$+sstMBiP z<7quU;ph0V)^_o&?pF9&ctHC9&+zjhblZ)}G3v-4JZ9_4QuCf%HHv-Dvs!*sY9_0? zKoNE6%Um;*bBevxaXGfh^;{nn*~}jMOTe5SG17dy`Y{i~FEUSUV?N9PE4${ zLchN$6_7I(}u^vw01riQT|ce{}hYC#unH~MRuZRUQRtb^j-109m6SFMp; zvxxss@&7&Q@Nl7WbKrVF#pl4;@?7X4^RlFQiZp&h zxr?|8|0L!q_a1pRP`S5p?JLicm0M!>r4I7$VSe&X_Lz`7%uiAZ`MV>6Yp`9ykxPsWpsqwRgw6^o$nP|-)> z+sn|2hTG}_OI)eGM_evnvGN4=z~7T5d78G9=Oz4P3{8Y*W&HS=+ZKUcE@ljQRHjd% z^VXv`)=#ppxt;%sjIX`$VLEl)U!>g9Z%bT#$lKvtdReL4%CnHMpp=cVVbi+UGw98m z9qd`oT=fy-;s|?R+{L#p_{d^~BByV|F%y--QZofyl zWv_k7_YIyk^Gx2$|6%-OY)Co3=UOCqJ?GNMSlVUSH2;)vFY|H<_XY6LrR1}j{jOy! 
zPT;@PuaNu))6N=DxCIvQqa(>Tig{*Z# z?ez)RNGae9so)K1U~K7NY#FQ%>T8OPw4w5pjU+gd+psl%M{Fhh8O;^oI$&^ZD`h{B z`ZCrXZM};L|S76xr5Z zx-H8_%Gj{h}H^$xI;{2S6DslolN%kqd^63&cd>_a}4{o4LSsz)*{MmX7^C&jt?_Wi) zn&OEJm1oTTSMzMLJe%f;T*b4|Lzw%nLN1`UioMoKxhK%!3hW|B2R5#MyQ1ml2Rti> zai)DM^}duj&iQbyd&&a~S3Yb{_YGgTaAn(XCz#jRIaBC`TK8l2RNvQ_f4_aO)*ZHa z0v}SJo!;3^ucAAA_}X~$_xyjG|9?Wp44*l>>5`dqn%WujHJtPC0ptI02{HfF#m_k3 zGs+WLTzy+p30RrP)y=l-kiy<6vG)min1eRkxU!DvO1lc|vZd54LOvDHHu^hYyd9;- ze#&1GqsK+Gy*Wk?(J_P`!+CZq&qP*9z9QpwzU&cnInTE-Htgtt+u$FuW4(O$c3d7) zL z?Xmv*UYp{H3oSR&2t?*X7z0$I_25=rt~w6 zQ5oL_jPEn`y|hjAlymKWYy69D@y~c=cD{@rOtk20PgJJmU zXs?6v#MYCs?Qq(fKfblpT-T4en{mDYpZ+x3Hv~F-nXAF|Wv=_6*+bxhv!1MV%bss7 z=vx&=g4W4O#MYwSB{uGncP~+9yOCB?%Kuf=Yb^KGkC(VbR~$;&lD<;(LDCB%Kazo4B||*onbN#WxO7OZ&JMX zG~La8yS2;qyb)-)l-hzp1tL)p~y| zd@X!hYfI7mUd#EE%#+i-iRc+BQQH)=q0c4IXAeBJmh>%*14*}qYbEbwtjhOdW6a?@ zNwW@qk$1%C1pdT#Ug|A2h2%S&y#By5c`yGTMeGKJV%!4dWu~k_fnqlk<>xzFLp(ITRx4mE5^e!zMAT$iSWxQyW&Rdgd4CI zreHT*kNq$iKjkD7dqVGPCu2zX_wS*kQPt4nY<9-PZ>#C^3!=W5o3Jl7qib)tRZTyN zeer3`AM=L2E>Mks%K`T9O^^Cx>b?c*9ae_GRP~wgmRs)NLO=@RFP*xEr61#190sJ(2me zWfkp|xpwkUHT?*7$i3+X<#GlW?}FG7pNd_9Zlv$ABjilV-Pje?>>qi2h{F_pAgMx` zZ`s)Ua)2_Ep+ktVew(*Ae9fZ#u*{1NXc3%2J$SYTw)m} zYX6Ve6UUkVQ|+F}5#j=V^oS>1j6G4ncpLJ-f|YmTLlT{!KXiTSE6k@07py$|^mz7f zboj20)S5rQmXQ1haerT^)_j*(g5c(HW}^4*aEiAo9OQbC>rP@ezB2Rf@Q|5RVZqln zU{474b~tmQDYnG!t2~iK)iav%iBI#$n6V8ARU(`7Md!zsSc{(6g51frv!{fiYIdXN zrD7kx1|6PBj?!U2bnwx>BIpo=4i`w9p+g?e?vBx6V2lpaprhn1bAjYti9L}i`6>2F zrH`NMqx70(bbd|KbZ9C%`!#Y;d+l-$J-^01`wwPIULNd7@|DpT@o zqki~XI{L}V6Wc`hQ+&RodG=HCmHecg>~m^L|61*``c>MqjC4X5;fZJX#q)%wMT{4O z1`?~L>qRoe3X1s*y;-JbFe3(J_=5J$uG7!5DrT|6D_F**+9c`oV2-;zPhkkxXHu{qz(*r#-aEqwd3MM(!OY-H^}cW3r^C^)oXY8TBy$h*a$0l=Hq@d80%1YSY*1)6E^gldghjfK3P74Ip95e9USVZGg4`o=sGd5!^p+DDzIP?=pr%3e4{cISHW22z2yHed2i)j$`yK0Rz%Ku(ckc2@_wA}ghtp8kzety z5j*TB=%Rwf9hp*UZc54W?H)45Y$TrXV%mh>AGwpO;`$U= zzYcnc9>0$HMD%zW!_QKV=>3cMzgVfrMciLosLWYh#ZH&>Tl9Ac8f!xDj-Yc#JhVmp z3Hm+xq37%N7w})w3?|Ja+KQg9&-QudLS+sn&7^_KtmZ2IglC+}6#Y!$%^+SdcL>iEA2T zAo%VWb3SV$c4nw6YGQmZJiitGy98Z-7Pi3L^fcdW@@WIpzhSv2jBXyO<6iiEBK#=4 zUNb}a7s2aG;dRk>g!e_azh~1m=B#u(c{qHtx7M20w>9wl-?6KOw}r0{AfNZa*YCmC zcg;{c-maO^bZ4pBAw2wtnwd>|xwfmQ4!(=4@L;=&>h-%~SMQ>{@Me_|)9>@cmqMT2 z@QdicTi|g?C%j&1zlZNm>G%2J<2NZ6H1a z`BvH}y1asiJfx56_QVJ9{2uNt-Co{ForQPg`FMU(N8uZ}m$F2+m-6Mg==SoB)sBxX z-CpDlJTkMUIy@8p@G^%h?K3#DZZBuNo|u!0&YyND`+GZ3A=ZA}k819--PZoed^+ZW(7h}Z3jwF(n!#m^lk)+!tyYxO?5z1Hi|nYGWL z-Y{F44_x>QB*seX_2}0w^!lj3Ag0p~Vw_{gqU%Tf219%B8(bIj83JL&IPd zy1v9jqC0=;OE5q9Yoe+B2I%Tf&?bqadTSUlRM-To(`~*(#7i~sTG*-vsmD`H#;a z{C(ucK1Ppgp~p<<(S}YwK1Pqp(BlEV5nM&`tz$gue9`f5 z;(0Y=kB zJaA|lJTNoH12cz(4{08_wAVaf^?}HdPcQaF{#AShjO&jso}_&RI%{76SM4jHU0rQ_|MC8UJww3|dhahVr_pZw1?HEzVkhia5-V5igkJg!BHCZ@XobdjV*Ud23gmIj zU!ZM;Gx!T4VlRlTP=~%0=P!tK_zRF{;Lj2K1=_CZ(O*Cs@a{-|WUFN>)J6RT=*lsF z0ck|%$6sKIz8~`!L|)=K{sJ@RFNkQGAoH}pfOm4Q^QL^E6aIooPyPboQ|&LHf5l%Q z<7q?c0Pz=Sy8(Ye1b;yUe?iP{s0iaPFwf*KXkq>kdq(Sa$UU&m2>yZy{sI$!L8O>5 zCw77GmIwJLw#F8|5uIN2`5Dw-bouY#FBle=b{%P>b_BR_)L)?6h`&JF5r4;DP(XV8 z1*Xs*e*rNMQ9TfUf%$^G<6ghxD(^%Olyc>nE}J^ab17fy*WoK*teT8N8Lx-nYkURa zySZ;PqT_HDS9}HGC)lTNj`#{>jJSI66{O=Uz<$725YfJZKCf(Dl>40@~S2UqQ^C=jatlA*4o9dv6JpE!ThvbAs<+`kz|;zZ>s4wY>Fb%=`|Pw+#33M|nov zdbORn|Giw3xW2~K#kHAhx(Y}PVDJM^ph41vxJnw%pJ$6uewruXBt|KLcI__#$6#;rxkidFG|dy4WjKAaY4?HG@LMH$0{4Igg^*Eq z^ZX9^j}9_}|6Uhs23Wgs{84x~-4pmB@2VBw4H@XJPVmqM6{sdgu64)-?h~vXsZQZp zNYNIqob}w$hwERt5}O%3C~Za8re491RG|C@kNe?jkNfywkK27YvA*|u+|AgHp%?J` zC31a91zhA4+RT-FLa!3bDer&HRo?$r1wuYgq?UCeHHB(=4eLZg^wCiH<~Z-lp;_zMV2%xm)SqtB5f@&UzN< 
z*COIbrC*Cuxzg`J=`Z>{NPkH`|7ej{$~rXd22=P?c%t%P`}8|!8tzA_vv-uC*Z+XC>ADqBX7B}g zk@!W{O$Dk`xKgKW23P8|Es^W5sfX~|Z@I#2niu80@FMT4(CMr?bS=A%b(S(7H0{`% zL1=fjF(Ktw$7oV5^@C=*evAiQKiaS9bDFVGLVcvns;P$io8O4`ldRV%rp|49xgLo2 zneM08Jkfrl->iP>G#>XV_dhWPr4G8Es6$6Tb*+c=o6zaiD4hgL627tO5YH=`H{!~) zc%xexJ@A6=d&a#3-jIGbXs`6Ilp}rIM!(;x`7*h2{7C5CrHw-W`2O$U%@?S*$eowC z_R!|8e4*u1*ZOtk735TWeY^6G#0^W`AE)k}_~KdRo-K8t?z*nfO7m)0-k{E%c$a?C zyh}f6-tF2)J=Cv3co}*MFK@)g5m_L9vx<0GfQ=zztCI0=$%5Sjy32yI$pNdMUL63YYb?*G6sYXw#4MXr&Z(3l(%;Th+D{$MG&hcoov$LV zvsX*&kiKRsHmMssq7_|8bXu`7otzgTXHp7&JKzT8P9I~q@61&01I5Z6xy^7tCv9|< zYMu(S4(3DR#SWndDfG@(bgm3^=!N&@PwSI`&V~;9AwJo6mV27K=$^7aMizSD8?_5p z%Kl0H(XD0e?qVa^_YgW|E4pm9LMQ#Qhw@6@bFTBaUzzD~2P!@8X%~3hBERy{=|bpf ztu71+;(qOQ$A~S1ux#k8q7+;PF4RJ@5 zPamV)yX>x-8T{{)VYq)VP1?wOMEmv+Hr&s%K1bHB$am7#7M)lBkk#f&Vq-3-Hr&5r zuKy^{a8D-vzs9Av(iMPx3oWIrLh2!92|cZHxN9G^lLu@>qhkPgbSTxGKFiNAt)ZvGH)Toj!-%5J9N-Oo$dEyfe!hepL>zhR8IFTpGDSdqtonSkoqS#Is-du5w4HE?P-#=eXaO*FXCMWc#zG;x>Ux3!TPlJ*t#^v|4zn=zwI97-^ti-Mi$f{ z3k0ve@n5cXU;C4ttrmd7jVn*`Ck# ze2)Bq$SeRQj3qhPGDR zX)|%BV9bdaki?vhCnf6j6>fdcI+xx$fm|Q^e+9L!iNu{IO5Ev8ViUHw(;d`bH|3de zC*n^k?nL}b#hs*{RpL%XX55MRP;dGla(?qb^VQT?6OU>*lirDZ*SO&l{XSq@A#ta3 z$X^q88vTPGY#F6tSMYx_-#3n=2MPCll9w#Cvo#X4YN#T<3@t0K)$-STV{HCm>XaHj z#@2!##5b=_k?L=Y01# z*FBd?&M3J;Us}AS@nem-`_<@-)kvJ^0qVXRz*W0G~AF&tK#MzeyVmzOs9~-vv zzsFVk;D*Kd4ZAgBY}~Vt>oUH_emH&)bx|{X`Ip?}%O8^G%inW`FCRmuwrM+XFkaK| z!)sR##sE$9P+otd|pr9Ui{E{dYr9L@e|KH7yhQu`78L! zLhBcDu^l4lEA;nzSU>dlCTX>b-rG%jW1BkBTlhiw2=IgW2#WsLuZsSE;L11>$FkF3 z@rUr4-&eZ=9u%16T-R|o=ujm$J-7;9momP{&C3QH4d`TnqX8U=%qTdK83ktt@^T(<$($?xYi$07oaGn1 zG>WV~PCp0MPVq|Z3nHh?tLVqPLHa?T9YRw}UbpjEWbt=g1qZ;Q=m#u8`bEMc^pt0W zo;)LYaN#Qp7K@kK%`J1u+%lK&fzw=0ydpH&$6TyS_~H<86`{M#C$zWt_pvj;eZ-n0 zdBTy0pD}R&pE%(ll5ZS&`gs#47BB7QvkmS@-V$3v)^IIr5E%7GfgRda!!M5sudoiq z%g|2oGIR^$8DI|MZE(3m=7VmEwt!_;#QWvm>or5D0nPB03L-?o4!zULt|_1Qs#bW zd(yd&JIpWe%e?D}S6Mp3(z`uzdx?sR9j&f||5YCvLEzo&-hW9oGzX62bXwDo#oLw21t*VI|Uzq6z0tS}ySz=e`8 z;c4L)i_aX^TWZp$l>5FONIGvUu*ey#YT{HM4WPAIol&esdGv}bUv0rz{exApb{rnRZ!|GM@F)^${>{#jj z^f7j4H8$*ozP|j|xPIhu*SyQw$w|bphG4&5wY7LlD*x|92lsbl!xHCy3|sbR62rnS z{u=gb+L^w5&)JLf{Ucqql^$RIKNk4%|1#T`e=og@)?puRLQgki4>snx$XRM{eL~!7 zKlRxkb8b{}x{c(PSMh(bHbm9Zf8ZLT>YR(U{{H>so~ei-#?s#}XKyaouxaSiQ^5Zd z`i{-7x?0xCx>>7N^bl+Hip~ZP+ld2-zboU&+SlWM%Qzp&+~7dQ-b%dc8#j&3e-}HU z@1@|3=a%u@7Ta_7`+Y~+W(d9ohDY#W1qKV}kuYpz47oSfJ@&CBo+axOI?9?>^I2e% z&jO!(r(lers|9npdH?VFhxov{zue@5a`<2g_w6U9Gqq>Wis|^Ff_D|Gu;IO)y;g8F z(}r{5=bbjZuR*p~iEU%zT51+8T+6#MC2v=z;$z@ko;{l!3C~;jzIR#(-v@x_Z^X&Q z*~ehNl;>m|iAh*^&m9uN`!f6kfk8b_tc?2AcJd(ithr%qeyaJKMGxy)nL}a-7Vc$T zZxAaGo{@FP@A9mj7IKfkFZWsax9%5sWvv3!5Vv-2xjJ*3UZ-T3eg~rWCP^)##LlQ| z`S>E^T!qBWrlHrSnzd1>oU7=4TAzv|d4>AK0mRPI(<`WZuN9yBtN4SJ?y}lkWOf)a zti!~`(s?hB#Lo`m58g!ID~X@oiY!Y!?NZKG+v8e&()(28;iuKpZ_!QsY@PHYz^*FC z-}e%Wc!skZ#+mDkx|GE!Ym?ARebH6-xHIZ97H6z&p;y1e)NZ|)7;QiBg&vfc+7e)S zOk!&3eJlE;_p7)yt$54L&-(IjN;7(MS@Apj+|2h@i#sV8Q~MsZM~CSpedxxTEr)M> zCKyv&Om8D2rbgcYVrq-2Bb1ohH~YDWsku~4ZT?rZ{P%%FVpI}S8*9eYfg$B ztQqH0=d|HpiBa9aGYVGZU_LN%mZ#Q&_d@VFhqYVhv=72}-sFrrZ?SjnEnm?pW&B)X zYcj6H)?{3fixS2tUZKf8)J+6>}rSL(#ojQE--iTGN7z8}Ev1G#Sy_h%Ac8?54M6?aj`#r`&& zGeK4zmw%OW9aj$DOK)4*V@Gh+=m%}@18uM4qOP*Qu)?oSmAU1DmPi@>_LWeCJfh^=M8Wb zCsmD)u4;U~k8eZ$UBg1IoO6`AiH148zyo=l3&UQhrslJOJ-eE-kB2sL&pghU$o)5S z<^F~h@a$O{u|;wM8+?IVmJ%;tMy*yEwOah2gKoI>TA%*fLbFz@XF=;k-pOFrY6X%x z6B46VE71IXYPXYoff%(~fkxs^$DUNRS^=|GE6|+ROE2XtX9G4CwOWD34SjU8R!hYg z%Q+o^cn{`^!f7eHUT%n!R8!a*F40LL>+R>3(Ek#Fa1E||$EkdWexjN95ng~T# zYGMKn_=k$l#P?#+x%@1b-mn&W%ed=&fw}OMz$&!YOn8l2sTuk)cx=5@2X;90;S6db*J5}qg=-1dbgs;ezoF(X=l2`XIoAC4 
zdFze(EQPOa#KNd!P;-}hcouw=CUJ%DO9cn;eW{6qQh|-RO9eJ~U%|GFtH366o9njt zUcm-^1hzt8p=Lv;J}W3=5=W*!E6~K8)MqJqQ?y}jMH}W0(gqro3w+F9z8l$_!&TsN zlEFw==5ep!>1M8irxhlRfXxf-2br)59)vDJhgi5!I=~K(;0rkHJfn0$Bpl3Z$H{SG zIn-PzdgdO=g?31d=v0U{HTn5 zKCFJrlsO+d!O{w7zmBlW_rjnE5Tn<#PuuT2y=0f!Suh3f_3 zx!)EhNihHf=#-*kaRd|in@G~UgXaT>v4axzzFP7RmRRY7@m zkPqQ0g(u($;>kz<$*oD}a*jgkyJG4JCp{tb2+kQpM<_Z)&M$NmJl$;INoouP2BjNM z!g?L&T)|w!S+iF=?^WU?k@iPB`Jdx#cc`%)_)cVZ6zg!(G1PT+s9%D7-$~u#2u~y* zvUVrkVwZ&~YPuRWa3ArpnJHT@_Hv%S8CpD&y!GOyGcVEIa~sEaTw5<*p1Sqo#JNpl z_Mhzw^q9MU%mnY&i%U||)JTcCl-&KIZn+vWqYAmR+1Mw`NSz+$YA&2j7Ah zrNbCg_9E)K)Lukgm!6geFNj_cS%#-AJr?9I;b{lFMC!3H9(TY&d4Z;1ttXxPTlOG{ zlQj(Xo-1`a8f$f|;ldU(YPkCGZVPI-cxOu^_EWHii<*%}Y&6ODQwyV0!=+O*q*HUG zQ^Tb{A?w*_*fC)>T-c+Ho5E_i^k5BFT0!evY*Mp^EAZOlMWGrlC|s2+=z~|a3eKPGnKE-7~DUk)TQu3orr=<)v1`ZaOz&- zSi)a|f2r@0`vvB-d`*{Us4dYmBY04LqfSdtHoq5`7IciN7zY$ZSX9qP|^z2YIS;&W~$-1kcm6#IzOHCHP_0CW= zS-iW5nk@RHw5!RgcdW_Msmaob5d?@I1$J9CSsB!1WvH4gotiA&J`Xin!8t^xCNGJo z$=XRx)+3#&$ zlx)g9)>^4I61^4ZL`{}D52|Xi;Mw`WNKID2g7<9rXA0|{!1`w_F>A86MXAYxpReb6 zYO(?n3sN;%$io5Pxs>l^O=eA&y2o0V#Qdy#h#Li{$qESHTHjePfdh#fDR`~*Ch?s$ zr}dqMgJ8`S`n?`rcFk)TvlHIc=2cU6DgDN;BgS(rv7T#)`FxGo&(*|){)?Kcq_|>2 zQ|{Mh+#oR_Tg}z^#5uF6xte#MHskniWNFb5$C|5|_l9CblZg?nFKrt0NNFQ~HDlJ7 z>SG=!7OR2sl;IY+0E79;e2Y+M5Gu)Lcn_z=k}|9rK;Uh=Mg&XZr%Hsk0jOgCA|VK=aks zXekvsxf?m_N3N=3FK7K!%~dvg6MK}L@gt9`lS9>|jzQV}&|Cb`>O`*J=SrMfYz1BYwB|1%_|RJr#Ujn%%12qrv__ ztmv(0`P)~fz15p*8FB74-|*>q#En$FReEu2DY2qQfc^fZ#jO%IdX)9}#~SrkQggNc zO3u-fr`qi1okPTpHWQa=UWm*{t<`yxbgS-4)m*uYTN|m7QZ-kMXVzP>24GA8?qytw zRW@!oO;`0+Wt@S>?(j0U#Era+-B2N)`F;cT2{E1CjC1A;pH9t{PR*50&6QrO`2vAC zTA-FO1M{@N`!24;kls(^dY$yG;(9Yz`AmH1R{31aRX&$!fxwf#z*u5PO&hhqJYq@>dl&o9JA(XrcrZ+pF@2R{-V&Cnkz+X^4H2%w$oSW z@AZHK=ugc~K+$`SN$+{i^cJ6|8XX|~P>pY>=zlZM3;l25>Ogs8$v3RH5+-ff7wDF4K4b~ltHxw_R8_HRi z@IfoF4|4r#KB2i;W2I9Ap?KClZzL}`^6b?n4z6>?K_tI8@^B;%tuT4mggcU_%o;15 z8Y{&w`lG-O?Sx+*6JCLKiie?{;$i3(#xuYi#?$bS;%RWHcsi1Y9N<$t4t&DnkAu@# z^;HrRp}s}&`6EVsrqUG_tz+p4gU7Tmy4&}JNFC9L?dXVOaH`}Km@PRiKLh^-9U;Dy z$msC_6ZO^==d0{fjqt#$$U!6c zepP-)PF|JY!Cx|TX)NIi&o_eKx#-CAUop-CI?XTh`i%N2g|}^{j+MD}b8eAUi6iOA ztHhCX_E9xAGOFfAMjN56f^C@ro8-6c@*j`vD;^fP-*}}SkKD_81pefq@Mna)iw|Yl z1>IeX%ptf`^F+}1Yn=a^!d$|4qUS7L3zP3TBD>+d)mfcl)>H-ciOds^E@{Wt@Q~tb zc*seYM(UBMeC{BtS%Q4;qpb3Z)hQP;y`teDYnJP(iQ!FLIBH>sG>T%)c^>8bs~xAfB3kIXy) z9FGal!n;b2;9Vt0A~$y41t(#0CA=)WFTBhcLE1a4*{+|?r>^P}sjJFBM@mnn0n}CX z@&y`lwDWj(qghw=qb)t9u8QC6byX{+u4*myR8m(}Wvi#BN^>$K{s z=rc2kxY7&QsaKU1x2E#{PTr~cD~-CUb+)=HWWubg(tDr1Sf>|6fVwK3x+eN)RPSz|ug7Q>9b0q*H&SaJQQ0W6SU)8zL#hE@V%@T92$l=B|7cAP;9G6dw9&yp#Iu4=JCEM z6-PD{=nb4R+}OLHs+AI7IkHyj=e>=Xma3DY552^+zJ+Y6Iw|@}S+S-Ih-p3DTh&Ps zZ=z0$m{zt~C*^ghle&XCsTH<5se5d7Qs26mxSrq${Wpp~t4`|Yz57-?4Xocv`$j8u zQaW`~0sH*aNd@QjM%GDj7O_)xQa8AWa|P?9^f!USigT$NDB_y7Iw>>GWzZCG-{yZsQ*o|uGKX0w6I=J49|o7O0cbi_jO=T-N;-o+yGc-n}QW zFnIrwQet87`ufs1V_`+Ew_1x`#LC>n%skXZCFmzSuF8Y-$30Acq9fG!wjhItIIr&3 ziuC0p@UBEVQHL!ncd$gN#`9D!B`(qFOUa4=ZD$n9PIx(hg@JLxEIgl^$9))zD zK8*YJ(sSvwrlVC(-+!m^XqCje>p3I0lbXI=)D73i>f17VbC;YY>>Pct>E)h**#D+hy+_ue#n{q|XMYCE(PnmHosCr(1Tn#a`QX?X5$Xx}cp~ zKSBMJ&`#)Ot;xkcu#`1fw6o^OfgX0+T?Or&=<+k@;`F_j@1@ptFMP7%La|A--q|^x zKl>PWC+l~!eh+^ivi=`GZ>^ya`aqZMT1>haYw*B>Zg|iI4{9|_HSL$xU-IcW)Vh0^ z7kvG(9$wqU+V+6M@*jEEHiE}I-o+G+KZmf!dU$ImytND7+6|l*fBhC7iuPOOTOIIM z2Q-k}m=g}0qsYSlz;AY%*?A+9ZeNB!9O*y}ZX_LsMWe$q*IP&DaL%{bjqG@!5qe-@ zqJ9GTISfA?fyOP+Lh?$&TSAM#P@kUR8l-432wL0&Ee!b_2yO>}+y3CTpOS;sf3{?| z{cm?6Bi35We>7o{O80`@{;{qWYZ*fQT$-S)}ej&)n@ z$YYMOGMMjU>9#H7PDIsh%Zg6Kmj5U^JYL;4V)QXb-S#Bk3(V1U+Y=K6W@p`2Gx0=R zy6vt8i;hwBb_a45uWnlc?VRLqS`>OcIpUb3OwDxAZI}MqTEl0c+j!qZG~G6lHAd54 
[GIT binary patch: base85-encoded binary payload omitted]
zPvkvh(cP_s$9PWi=rHnVKl11kc;OH>(YA9#GU*Fs(ox_NX2~1m-P+IZ*^~l}?gWpD{gO;=gx-6#?hxq8^s*|@;7qrccC+edgF79&0-BwrqLLSj7WOv;rRue z|C!3Qc9c`mJAJS-#b?S2lQ>6>2o zNpsd!oDa8wd>{OM3U%JXU!PYA=-bIccOXU!#gkXe>MNxxs1Vz zC-5XC1oW=XSP(zz`S*#-i-t$aNzrbj@iO@7#$?({y)MR12G`L^joL@{cb-m1PT&J% z;KRt@T?3G<$nagrwXx)3eel@uU~~FFy5Ljj#gvKYLsPbbulB%o{l4R4W|nkVaHYj> zzB2Z*LXJP;?h?2G*(fJ zCVr4kqwQf`MjeglpXTcMq9{A4y6pC=?LTzL3V$=Q!sIpT|Ej#mgYQSf|D%utxyXYY zf zBKT0pMx8}84Lzi~*P#PK^77od#7m+6;bJKxMa)SQ}eRn-Li)@?z6OVLkR%}t% z1o>ss;b-2{7|?#!^u00GwEsuu!2XYCYEO$6QNd_)cFe|aLFcf-rGS$naZ^e*XB(mr?_8+)nm81uxuGwP-j-*NU) z^^aigEtUAPW!d;LV`?f3delrwetd0SyT{+jE6k{w(x37`%I>6^%9O|dkkH~$Uy1C1qIyMt$pZVxm*p1dw^ zUh?|9UnFnHdn)O#u zYsa~koJ^aO-!a>kl1w|3Lv8EN^&$M%cL#F+Qm(6=*Kqwh{;O@*bN|O&pT@O2xTdz> z&9xG)ecRUm9JpeKH57MfqW5oU;#>OtPoe?EJ5Jfvq>uiS+Wu`@|Fe0`*1xvymAwM0A%w;r?&dDC%bwBw5QhUmS zNMlJy_}_{AJER*(U-JJv^6!zxkwz%q0zb}Ic)~G`4D*F$yPEbkW!b5wy=~dCvR`Gl z%2o~At=O}+-6~#{?P}YsvSFo5TpB;SOB;=DrHvX}^mgC;-!rl(GcGmG`9oehiM1G2 zvly#t9cVWHCz56)hORx$v(GYiE$7-|(zERib3cRU7|X8V{|?e~=Y_7V=Gj%Mw=MOO zR#EQ;>RqjR`9G1w%(8iH8_#~Cdbzfk^a=Iu<=M|vFaLLtKBHb;Q_MxpeD<1WOp_T4 zO#8DZfa|`$ug8}s9JK+~7n6jWOGv`WP?B&F9^Wty+QT^$k+>+R^5<$@ud&>pn>>T@ zF)+L|<(<5CDH=1plb1poNVyBwG>#CQ&-5MP>`dSJ_B(4$S{DuP=DanNm+)1e4IBl3 z)zcT>r|5qsIQK%gKGID5DKnr&(SIe^XK}5HRLS+Hxn9NfwR#?U^pVzbeKprb*SqyR z*Q!Xnxh|R(y_$3y%^W=IVKZGNe=lb3hv>8&^!N)>mq@zAzZ6NA_Wy8y?}I+^#hNx2 z^eDO%O`bp}Ej)7q{k8DS>D2S@)D{z;%-M)8!kw)>{zW8^~{ z2Tv9k5=SMT{I2$I2>H$yH{&x~Z}||MHoyS*Hqr)g;x|LFOAg(xvaMGNpp~L5V~*RQ zXQWT0%cZwPztTCYSj558)`Q3 zPLVd0lcp1y3yq{{=YN~F?0!l=iLPy0Hg%6h`z<{qSbY=qd=qtiQ*9CM`a1NJO%JEj zM!g%>PqICbbEj*o*tW+gY-8CT>%m`hx~Ldk6mCm|O>hDXBJQ>H&Ix!v&3C@(?+9*F z&^xv*V8h(B9nnKAwcW&(t$(EFrF%|c514u$yG^il@{8i@!{4!Vo^*P1*G}mFDV{w6 zzNdKhOzKtNXAys``knAfbi&K=Jw2am!|F72b$PNoVCr7%qS+UQbgtG9M9xvoZbo-X z&P3{8C#+{{osO;JnCC1)??>ua^((An{|%lMjQ`WxES#?+HeGzQ2f8@ZKA1FintDun zY6^PP9_#PXKC+DU6T!8ur<^pfrx~021pQ>ug$;9CKbid0!o7kPuAl53ljqLjy|ek2 z$%792`c&W6^I`kC1$|=cjbiP$h~B6o_MutXX7|w{*;mhWlm9O82i&p1>5+3{oP`zrpjxjLR2A)l??4A4WU&!?=We zDEVsk;kb(RBx5d1nL3^P9`aAvmtzk1pXB}u^4G~@&a0{1$o&tv-$;IpyjPE!%5$Q< zQ#)Mlo%$W}?~#wpsHwarI%Vp0m#0j_ce00=iWRk|Am!5W##|r?T1$yw(dVZCeiDELO78=S;U&88WL*@zVtlc|FS#j za?(wthe)M;jfC=2;t;J|b!cq>dm1&k&=r~DX~``EpD_FxNfT}-4Pc)MhF)+Q2TuEv zUq~(*7y(X4g3~GFQ^`dG4|6}j{VMVo$VCJ1aKD!Od&u{aa|Wr2vsqUD;$_6i#CWH6 z9u9qxe~B+*jPLIbP&f9P(<3 z^}jE2>O$mFrS<=+%aLWHYARo}{{Qs*HI=tRqc2X#2dk{J{?DRK z%W2bY>%VBY3Yw>$p<7Ar=$fJDlMKF@OrI{Jd?V>W((|M!>$>R4d=ELZmE=aYoKH%- z&`9Xd+|Ol{ZzMfPdY;tXx~~0WYmix~$eH4E@Q2V3@lqsix`lBO6~;v|Jms`UBgk$ral zxBG9?rA?=zPrGkgYpTA9ul2l>c1&7u^u>rVsx*&h;#$|OKBUDM#ai!@DwzU*sLg6? zm}h{Svt4%CZr>{AGCwH82dy)3ESfguD(#wWwQCE{X#PiSvaStf?)6g5+o(O}HQK|P zi(}d&@oZ(1>?ZqnHu8z{)XZ4YVsxRc2W%N5T^Xx=e`GIN`rx0bD>dHRiav;q@;{+( z&W^x?ts89Hz}9PePw>~;b*+_^ep9@*V4nd^+WnG#3-_lPT3iG@Md~`qOW6WfqQ?r! 
zi^y#qukXtCu-m&nqP;fV*ty+)Teq3-Te_{v(rtG8GMcGN?USChU%A| zv+sWsG-cD4^0T3@Wb|JjbYEX|U(vnE3ucju(0j8;(ag)VhQ5x2Yjks1cG$MNWauvV z@-kw*FXXw+JSUxD!!t6s<$)jFaFs)sNal}(Ph}&9X-^4#UEOg)0VdIeJ-S?T{X@~QL` z@$a+eHqF~PW8Ss4CDNyuLEO81MYnx+*QR9ls=jU4-J6Qow|b1xwsz(}LNV|0!@iA8 z`!?qNKYJ~KV&3ifYA5fjY{rWaE28xp->!-HcCE?3I}OO^_-*|Ax9gAoC)RbHO`Pzz z@$cWRtNV9fuXr|e{Y}Kb&x~VC?(juM+9vw{uZVxQW8RG?h)X9irV7QoGmn+c|A{1L zyn8wS7nAH*_osPo4gYtLoU!i4T+5x& zaurGU<=2?T*~0eRe@2REjQJtvB1B`6@#?p+7fZ9_XwenEw{SlkbNGq_v}4u(x8FDM zO3a_|TI|GY3-BSmXEqKMdv4=TG3Pe!bY1=+?L(w^gxL}CeKz0U#y%Wh;eGtQ zh7})R6_fnDMYr3Z6Q_CndkA z_98?#n)-2hXkC&WKNuO`X2-VK@wR6gJO1r^A^6&HVc#@7O?qW7HIrWPW7|6SOz~;b zvn4A;&$caN+b{U3b8WlDwpA4WJqzEqYzx^Fx%Ex<#O$y-JYh;=ugcgXyfba$j~PGoOG>E^^pd2i#%(# zddT}Q{3CSGf68?eUJiNRQoBw5u;jfd>mzijQ)h~P?bt@yG?Kr~>PyA-+4!{M>RO29 z{-1izgb(vlk}rv}Ihtv&p0Q)LBXyyz_ndLsd-^uTX*Z|is;u>lP91l`JPWo*q)z*P z?7a(kRn@ugJ=a>ftXx(S}vW!rcok-u3~+)<9TQKkZfvLKJ%h z7F1EY?IxarTU#@utGYk!Zu_;*CLt(+aEn~E)P##Q$qKi6fn^i({r+>!8I}>$bM`)a zxBDc|GoLZ%Tyu^&#~A|JQWR>Z4d4 zb)NUtHI9yQ^mnYzo_@amIG>Lb+aHe3zQA1U&OZ7NzV3qk5%V6O>yPAxgI4@I=H!{y z(fRkkI=|zq^7FoDzJ7lG_;;iE`TOCgPChCSk?WcVFbbI}{^ndp^ z3A?}b__u-q*`4nEdw2f5JOAGOU0dfnfchqx^Bq8~73}&g!2b-ah$A|A?e6^h`M*DB z#hknI?~$*Qe^=b}nM;iNpV)JSe`3!SK3`sbUQAxzN32YF`Qu+RFMmJzR?FAv&c%PN z@9jBx%GmhOzvo;9%NPGE$;De3ya){1ITyuHWApPNJ3qhX0{Qv-$q9F2i+D+A-^Ij* zy7TeOv$;TjSXhY3&HS(A*y{>zsV=-`e~GOXmNR z^YdTe9(GRB$=j#K^Yve_hfe=s4`cKA zqegvs9$()p?K)RYx!hP=H~j*6dnbqQ@Ny@YcAH)$>+5$o`Fkg(Mc%VJf8YH-3+SC_eA-T#;NumAu3{_o(rd;R=>?E3jmAG}NczU!L$ zM`CjKA$v{zj_%xjXHMteGq>KEe~!sL{Cn1K#UuWTbN9}=`hcCc4_qj3@A%K%>*%fj zX=jaVY|e)NSz!H-#+Vpb>^uhR`hQE_UUM#pp=+J1lUvr>`cT)LP3$;z=j^{aXRm(k zlCyW#*K1yjzTFzT#y()LvELDsv+o*n3Vq#teN29B6zdh8cy-s(iCa5)qYLFPoP6ou zT7LV#diyNd|7*zKYwnN3J?wm(*4)3s|8jTMPVV@MHTSXkfHmYeg;fWa&Nw>x6$g+1 z)qnqjEpYO?F`Ody|IKx^1z)zlzB4Cb&$n~(_m173Zs+btb+rRAx<%j6?i_yR=8w#jauPIzLY9H9Py%v8T2y$e!+q&D+QJ;r~r}d+7%gr_)pS0`6^A^qNc-G6-kU3Os+w#m{nN&GLa3*_0wD=%aZtp5dW z|F51{yWs3Q_|Bd-~M~@%3^X$`O^6Z^`dp;fhUwloxbn`AOFaB?N{!&wcfU(Js%7)%!8*M-jkndoNf$y%){wML@}q~ZH0T93?tZ-Vnjl2 zf5e-ybWoaM^r~3<=x>uAPAP90=8sJAB?VikH|gP?<#=#QlG9>+)cBdz(@nJt_zc{sT|>`AV?HQigcI6XCZ z#*<#zbT#kHyR2be$Q`cCx;b&i>87iVNDBXls=_RYWb>ToX-o4`qr50RDcHQ!WwmJ{ zZ89#dnDM@6K;@r2Ju3}Y{{ANVOFGT>h(BVkDg5mS?mg*FsVkvPwW(+GQjM~=v7eC; z^|d_gD?dTsY~!6z<{6O_v|&biS}>jSGkxhnwP`oMwRlqMS}r#trxSg3#>4)nTEP93 z!A5-nzZcHT36`9Gcu$K__EeF}TX)Rkt$W{NRC;N5hC42J($^zc5ogq|jyKkBG>p^l zd%Trvh(TG8pe zPnJIQ8SRXx-nZh6weNZQ1{(}txUsEc*3Kn8%eU|jgG=vfb9v_tI{YJ}ZE)$ZwvF`T z%W&9@7a6DTFpODgJZk{=-{naR{?eTm{61~%a3_X0Jv!sl<@B|Fza-8(;{(QLAipo~ z=?@hhn*aTYhwl9TAx~QHM+5w!7xo^Vv3c*Y8OwV{|Ci_o8OG`7dBzFawbAVj&*M1< zJ32B|m)^;}@UFHR-s=+Y9fR(;@{U29c(0vSU!;Xyw0{TJw$i^3QLZR7);47t5#j7= zqwGfY;TcbnDLe^7i~q80kMOz7lN8>@x5k!vjF!37(c~FGpBbz6xd+slOH;}#z+p4^ z=?^~sF|Bvd!PE#FPf;ul_yU$1>E9@x`gP(-&yK~^37%1s2$s^$<|WfcVd-VCw3V@I z1xqV==7|ADWc$Md%BO?xRL+m|r3RBY4(YeVl)7{M(!58lwAHA&q_ikjC1t}HrHHh=3bjz zp6N~se#SK$9=fybTj_DZt!I`#)#m9}d6aQZ;CVj>gY%8P!RNr?Mk6I`fWz0oVFGO_ zDE*#=!$-hjLFqT!)`PGLJ0~iq&_k%0d<>I~mX~Dd< z<)!mFvG{1|;I<6rCayO!EG*V>?HNy6rF7zV*_SS@SX=npqr>J1Q_MEzTQv_U4xsm~`BnoScHPwEVa#0UY4oXd zv~*+1vXt^>aFfeeH>00RmPtQ*gL;ROj)JF`B^syyLZ9!Tf17yzGLN^#59TY719WCR zI=W>;FZ3k3E-T5ncPjgi9DRb0{}%7plnBniX)@3IWPm>+OglRNR%FU*C-u$oc*6_v zVVimO98Xe9wk2Er>-xEpR^@s6*EPq;)*x#V>a9!rmoG<0Z(TZ| zQ%5&?`d1$Al&#*MldbLU{&mI3Q!(I&|xqiK!)7B+h}CxII{DiE2-`{vNQeHNtW#R(tN?6yV8Qkk)6<0 zN#zM{U+{6R8HKH$yCBf^FC2fyy=|V}m5sLi6e2%{EkARN0pV=McRKR(0y?vL;e1Pe z3Xq@bg+tn|vgM}(d`pk}co)gfQR-cu<_WGbe8DxmLxbT7|5Mp?OMY~X-ZGNB(3tgQtQM`DjB2O!O}V~9|5Ol68#Zz 
z7YComM_g;RHe6?(xSaaHZOOw)@Q0+z=dhEvrY8qKL0-=!7^iohOJ;wTxzoPIC^;-@4t$7@Nxc9Fr)U& zYM;bU<1LJY3< zd60wt=QzR(7a10=e%^^IZ)JmVjw1+HdZ!}`z6(eAZYQobm43s*)iU^v!x6;6%D`1e zQ5-xv86KU2o=rvH`p~;+=-+gBbcWgPiCcR#%WZxHm;MkgeT1>Yen07T#jR}|0{?9* zdiwB7hPlt?(LVTdF8o>gIE8kXV9T4)0ZHKOvD75Sv{(6u=*;`DkImBm@YLh*{q3Z}7gfdBHgr|!ijMn~OV{OIqzGsUWs|ruQvl$#W6|mJLny2R(YvAz-VIw(QQMGE_f+YQ~=;ZLQ>Q(DD zx{P}R#8C(FevDnv_;~44Kco&XHppCl^Vut@w>jS*$x&&f)OnG+J_Sa7lV?O_6LQh` z(4w{lX{nYx4dC8)i~+$m`sS3!W$~By63H>Mj`lfE?_h(+7cRs1s9Dyl{0?w2c)=Z= zGBS997yB(=t{Bde(%doY7PyGUsA=@I zW)Al990vOZ^E{bB;V1|0kq`HXV6Ornz&)rZ-{u~|1>U#vXH&Wk&T!DT7;<}AxBUxW-aF}7$7XC)f zkJl};@g{%Fi!ROpGkL<>lBDuqg13Xzdjh=GQvT9N0&i(H-f}A|z+2fv3)_B(42ydU zZ_k0Z&%oOW#$S9{GO&~HLtC(vl-ul(zzDip11gNcAMT)cx&W)cMjef z-LBx=G$Z)D;R?=6bAdMlycK+^YaF~C7I($oe}GN##Ne$fS2n=fU^e+~!ds(fU?<*O zyw9WX+p4SD&;K?4^spG@g@Fbfv=ek_y;mg;fuVO~)8@0Ru|Z$=+me3@sJd}BpKM|Sjc!katNk>m77u;NAL?j~@($9w(|0BqSe9EJqzLin^Jg`$g{}!Dp{Vd&_ zB;A}J)y*T&&G;2>zrr(D5U*+Bou;6NS74J;c%Kr<2XZkAef&$w2BnuWo!`gc6Fh{B zJUic5dkM0UL|aO+)6y6CI`voK$DctKvW%qgS-4si^5EfF1Bfq3U%xrnC>Q2Wd$KEq z1&wmvy=e2RZ%;;(H+IisoGU}>c zcyA1^y{t{|Ae^5j&LwUuz1jnOuTAd}9Bd>7A4v!2Mh`e`!Kb?B9L~L!r|H*Y_!L8) zI;U5=;{38lqd0%e#`$TuQHRmj(yN2vWp|th=a=CR=DT`y@(ztz3vz!UeL8|T#B4{O zf|(|88pTYeg_(%EpQTTmz^SD-!RZ=wu&~%+<5U=uE_HOIFzH3FHqxhEG4wutE&DFr zs&|ub+qAS_xyC0K-*F|JB^fOI_T?+hFl}7K{wVZli1PSS;$2`M9em)tug$jc;dP}1 zHO|Ck(3Po{uFP_0f{*0NxvaN!@r+IAv`X-?JU65K*wT#h_dR1O_UKKMU83Bi}ZM;{|Gc$Rnm)YgN8dCslqz`ktxHa@Nf9}n@4)g=#E_!um| zr{t%>RLa`Dj=_-HD6%!fWpL$9Ty-{7K)dzkI$GsSh^$LD|1)A#Jr6yi5<(j$D&v=J^w zT=(fFkI~6d72kcQX|WaKJ)B5KHrzNbtZIZ5qr=}xk#n{b4?2f}#D?K-#RgAZi zXDG&N#dkSY?AGC}JK(xaaD%LijnhZ4z2dNIm={yQy3uL=}GB9H%b?# zTx)UFX7Fa|UHy)&6NZUFoDL9kXc}TfthlYswZmQT9LrfPeMns|^4#~qxMH{BtHQr{ zy6lMyoj806IuRb6LR%HXs{2dFEb&$O4b8uUyTDO*q8D@Ut-RB0b4M^Y|f+&T+@WEKaJ1n@K(u!kG390zaN43Hcj$J#9bv9;#LlK&4n9W z<|gLlN(uWd?wSI34X%3Vm2YRlKJkjMGi#@gqG5N~!Ths%)P!-|1|=fNd0ytd?$HnlUKJoAHx>o)c$tU$E|Q-<-aMKP>%I$E>kSTsIXXW3rb`V51ZJMe^~ZSYLq-|2)=v zkvs86R-nVzGbSIQ!+pol;nct0*5Shb%V543yORV_q*U+dONla%E?UT`h4&$ zyDwcEe*IcgaSmbHl521ngc}EaDZy#H_bV~BzZIOW=Gs@lfoy*(_!S2hzCQuqaqXkJ?m@dN2@$!uAw z5qy!cOfcetld#hr%143A-bRn)Ptp11p8jxxOt@&DPEKHe$@jqIGNV^`19E&CpUF-8 z1}}_aavnN=@WM;l^iIO$5p2d!z~p84SEti@SUfQ z$W6Hx129Nd;|92dhEhX?86M~#B})XH2Ch+0QRFZK5XfD^r)lbE#8ZNCcP%T zYogl~A8tlBTY4DXeuu{$jExho0YerCwtcl&eLo#NU9!w=)u-G-lJp{+a{;#Nf`!M>G~EJ8%U2J3gZBF_7~VY(epolyW^!c85G!kQ~!fM>iIl zH-PbRY>mjUboj^UZ~24Ak&Tn!em^=9&c0UMP8?@Gdd=(V8=QlWy#f8W%+n(rs9C#i z3_5(7CnG$s=E-$gwhnhAk0InGguV?ScSETsv^2@m;qk;@rNe!cWAF(>=@Csr>x+86^EIOAK5yh*!&3lKMK4`mR_bmr@+5u-&TXoXTU=) z*lb{26(dNe4q+$^hMoh1uQ6Vy3)9N)S(b)wz7*a3{kAHwrJU;X=!r3@89^6uE$M|h zHr71ul;nwE?dNcjKY_Cku-(GhN$&Xoz5EtihpUgJm#3zu2Q#pb^N95;m+S&(^A?P0 z)B8v-e+F)rgR?q(!Kui7KKPVgF7-r@^?N=%$idlXp2W&9w(u1Cdj8rAb8&q)?>$ey zV1^w7`;6G_93#%cS?SV2<#nkk=;fsr&gL_}M0)uSm(i{`UMc;x%rmG{FL!w2;p9Fz zc^bMl9i5wj?(KmNz6eg<(`>Jaa`L0_>0BeRu6Bv5ypq^LF=dd_8S7N=`2@DPiE;4~ z$50Hd@oMz5ID5JgX(FC>2(G@L{yKv0nKB05N8jedcV52Km^I3dD{GuvM%#6vlf16< zx-b|x4u4?WPfzFn-!y~2CSm|-54+1V+TFyQ`kwv1FRktb*!4xzl;nV$B#HsFA^On+oPe#hj8nWI4)HAmwJn26@p&*lDN`*_uw!rxA%j@GG0WW`!I9c@VE zc!m1?FCBRn->3duaz>XvoLYX8dQT94%Z2x!aBR`0n_X9l`CFd z1?QDaA4aCP!mH+_lkdSM+;6y&3&^vdAy4>o&usXW(dz^B=+BW)#cVo|HF2jG(h`GL z@&5B{fBCRv6o0wF&J%uQ7-O?sp73PGsff8AFTl03h8kM-jn=MyhD%DGJsJrtEE zn|qU6Y?psK-{?d8qCRdT`dYE<2=aIoe_C@;=EuesV|?l%_|%GHU*#DXe6Q1|c2&;7 zcc{XK9icAaPUG$1u-J)H3)ilsmMS>9 zx8sTZ>e7T?A$zkyOPvG|`v0!1h zl(|H!$xW8n^FlI+BS-U##Pz|hVo$;@e(>A(g59@V$#oUWA2f>x~J{lc!|F2b(j@QdO6!mi>p!tRE&xZt(e zY*$G%4{7n3l5eyv`y6%+3%gaw*#=_X9fo0HSGl0M*n&pR-C^4TkBwb?Kg$*vV0Rh0 
zYvm0c>>AkY_Qf}0M{dNP+<;xV z9{VyAEkbcbo7ZrU_`K|b6<+|)()-F?#bPFmj?^{pF&{%V zr@@mR-nN4|Gai>U$4T~~7JLWLC$;zhvInEVz)t$It4;XVSFbc>6MWc%>#+qn4-YD@ zKyIX0b6kV!>cx$SL3!=-x%8>>XX5R7OTkCXtVehRsD>`XCs=4JF+Dev+!zh6b}yftsKDH|v| zV{vKb-ew_>$FW0GW9^J&68log{YAX%Vdf|tVN7LLqH*yyE@g=sK4)=N7QFVEXV2 z;~Clb2rSEXe2gx4>`SrbJI}U#apAYWWnVs?ia$!ebPl@oQ?PKnF=}6!Pi4*3+Q?W4 z%V2W1|bV@IRK(zN6rG6FfCPjd_RIgXM-Nc_{dgzJE%m zzad{ke5S+I-?9h8@Hf5z$_#K9Q8LeDtB{9MczU{QeU@sJPRw+;ACJ;^*vv@}nPv#k1&KPfUKGD}MVf zkK)(KlYT(1HQycAiC=M)B(Nu(97eCN{!H^z66@RU&crVC#Xj`IPV~oK48ZRgXy#|r zH~0^VQz-r(oMOxx(N?r)#S%}s^l@=&oTY8|5MG#$YjRdbIiEgdIoc04yPxW7lDmh zgE=zrs4QdYpKFa3>ylh<^vtq9dtEEpcd_sPa{~LvkjbN9^Dx*7(+3()^xOSB=QeJpM>oqQ5=JdxUQSBlJo5E=u|+ z`~W3=7Ji(PJ`6ugiM|ZKU_^eveRn+W50t)MV1{QJk=FS}WD(c4-bcx`t;;F7w)JTv zGU;i5;JS2w``9i1z~#>t1o}7l1AS`>0vBca+uKk018w^Y0-yY;Fi=FDlWr?C3sQ>A zk?fCMUTBV{jtusDTwP=yCpH%z#CDqz3D5CI>aq>4 zDTh)nqx4gjQck42iL#jT3Cg*Yiz)A>%Jr0GltIeJC?B9)K{=oDxgKWI z%aj{9E~k8v@^Q)v%KIn}_BQvv!T#&)S5dx1xta1=%38|hl#lf@t2XyHn|{rHBj;YF zJVyBfWfSGolq)Vacl~CNS(Rh7ZyIT|Zy95>?;UHjzk8+8{>Sk~d-XJ$+3=JYLQyOr%Kw!Um<$QQk3c-KCL%-shMk?sw#PHDXnA1-C> zaMM)An0XL6;CD6SBrKIu3LDcYow2`^eP{gB*^kA*DYrioi-R7YeeiZYW4nE%%iM9L z+uY@6Y?m6R2Dz#xyq8fn;X-}Vu*7TD7aNiD^-aT)RI_2C&+OVqHL2cE{gXx{wy$bZ zlS2&?`m6*9*v7K;XKPRJx9j)k;oZcqZ_H%F-@dUwTVJ-r>DQUkmtOj_Hp4LYJm@mF zk9C_n$J3v7-9@b5{9NC9;{#i+PYAqwe`4S*!@agty6G~%IrWMH^U?`^bIi2`W*P6N z_jx3xz?{#%#`?~5coO^0cn@RW8gJyjC;QfTv;UDZ)@lCs+A9sSD%EA~8|XIoT<@{^ zW&8NJ3u3;`?=`D0Fupq`rka~FeC8D5dbw;G=jX7ei`gU-w^AzqF4>TsJp)&ijJ$;n z(ij;ZrjIMWth!>v71b3#=6%(^yD8P@*HfzBFQZi7yD8QGht(FwpmwA|pSsN56W!*f zI1hdN`8GUP#Cxzwes86`icNCdm+}n0v*cNQ-B_DAPIe%dZ57Xu9CgJ6dd=L1TsY(S zmf>1^6&w;#PITVE z@;rWbur7@E;T@f02lrQTz5tp0Mo)ix2HVFMQAa=Hw1<0Tb3WqaK})J$EI?BRKE6C}W)XN5yN8tqZ>Pqjk-r(E-@@1=#phxW@u` zkGPQd$O3Xb+7~A&^}%1*_lk4i`-l0Ka0;>8?|-y(&)wXUfxL#%ck|JA$KinFsCxyk z^_ykiEien``OS%cS6~hUBiZrv(+ve?Z}#0k@(220^Xt5Zxdoe8Ki6$m^~HA3=T#YT zrm(RmM_kY!sm7kXb9;grq#b*2PBcrg0jtRgC@+x9rujL-W2lF*mY98g2VCno{A!~s zKCC$rVSKSgaOJ5y_cHrE-1a-{MivNOn>AkawEg^sC2{6%o~`FLEJ-kRe}g~KRD4eD zONsOUxr{zg{R?@H-{l<_L=I<6sIC}uZFPnF;_8Yzbj5D!-7(&6)(`ZU!e3*&F)qk+ zr?5$8ylizjhPf4-4x`U}jBhR1iBnqq620JX%4~kueIwa?Y|GH2!iW0Q&9;oW508*% za3%TMbLc1K`tgBoOk!-d@{H|cU8e5AUzo)@!BayRE33`kDE@W*TV=-DTKtL6$zm!p z$a!KuYacPZc`aauc{h2talM13RWVqaY-2{btSJ^|l7m@ZFau9bZY$VRdUn~KY5eBn zy;^t=Kkv~(pK48m^mPWf+37dTT@zjA4BmeyW1UNW(aWaym;x?WGsf!~;{ffS!x&6u zY)^PnDqo>Yqi=RdPsf!_(798Lnd+}HjB&a)Su(^m>iezPk5s5VKZheDTY2B)9dx{! zKGN@2-(lC^^3#5}!Akb!H>yv(|BOEX2T!Jrii@OB z7mi_M1nrRw$;VUdFPC-@4>_%ImmRL-{lnxOTPe43z4kxi{iAWU+wqfb!(X};zv&kI zr<;kZ%{K9;te9GdC$T;}qF11KNN@5wefT|->-utiKkn;~e>K4JujE&az^@t}<5$U_ znv8E{`8&hn%&+THS=aZ(pTcKYY5NMw`zZfYifW9nP5IJVe;bcql>;uC z5}CjMbb@*KawDSuu_xc^m}iptRlb$uSGB+s z;qPb3m!cdXUy3qQz7%Cof25Dfbbq9`%0&5AU>1L8Tbx0RGpecK1SRr|INc8m6HAM9rB+dkNL zv~T-h-`2j}ud~_LI|b2a^1<|8KcnAruJD_~lbC~v4>TNo+JX->yay#d&~SXH7W|pv z_)snQQp53~TJXu1c>IyM@PY4-C@@>*8 zro77aGv&(=d*s^i-IQD#UPj5a^4rF8?a(Lufp209M_gWLw&MGYLY(!jK z)2qfG=&`;akjngole3M;yZilt1KSD$d$4sCfA$Cd_1%KNZ}73gzwifIP80;%KPwEl zxxWSuv*ns1^EJwU#woMau@!?m0r{GiRN-V zpW{0zXHu3>K2BLic^~CtlyfLY;xoyo*}(B~$`>i`rmUd6jq4*IZl|10IS`*|J!K=u&r%+v3{o~xE~b>vv>BgiD0UCOX*2d_?-+b0co$r=y>2{y zQ!)H~8h+9B_(ikulWsQJx89CUUW6|NXFPB}as3C4_H9dy_AO81H$8*j^i%w+=kTrG zz_QnXOo6n?$-o(FBe>C{B z%t~~&#j^SciZDA|4Dw6dzjp3c}n=Bt{Y}AL^j}ImJ{uQPCDQ6tAw+}A-VUrKeV zt}ED7pM0iK)T#5a=khon#`#!ZY9;(vym1;fTHGe6622pTv`YJMR_!Z?mWwYXziK7i zM7~rCv8hyisQ|vzkGZE6yM77pb9aH?ycfUeX7J#L-xuC;&Ts04otHl~bPe%I_IJ%? 
zz2-ptdB(4*7q+A?wuJGk_r{skIoOYpin%42Pw?)0rzM&v(fg(3dZw@`=ab7OJ+K=8 zN%Q}dTL{3T*4zHmN%8>lpYGw3F zV50dR{iyc(@TcbDPw9D%KefpAr!tVwOYo<*pU0o7!k;ST`SOdz*Tw7QPt^`}nd0)| z)fe)on7>HwpD|;T&oG*8BmR+eu=-m*)ljyL_*Ku_e$|jJzEvIiOmP(5SHm?{yH~ga z>U+tRn`g*pSrH?XI(B?1`QL{VjJzAT&he!jERnyquv9F}fD2(}vM_@$Rma%G`cm?5 z(y1d&@h_N!G~Hyd*v66V0>eJsL`rtykSaT===%B zO3yJ|Q9o)b?OkpAN$Pj$CHYBelYFH|Y+q?A@0x?J)I#q5EBz(fFr0Tzp+5ObUU+y{ zUug%=bbO^2{G$->{R!_D^^tDF*SQs+=N5dQoAH5W<0E~Cxd-14gh$L`{z24NYP+MHf({6mG!$T63&U9k`;TDjy#RcZ)yPlgSnP>XAbY_HP;d&!z!;k8aXC%tZcFrnBZlWg>nzWfo&_dJ^kjv!gkuf2IBsY+@7sObORC zy+oNp`D;q2{#TE>!oZ$>{`O?*Yw3UD<`|K>-js_eFQ$AM`A+ehHHrK#f9o#3twXz8mr-(m>v|(HnwUy< zL4i54ccJew&i-0vtKqg+n;B;_2+S1vM}_V^>)H}o`{ws8C+ z=ND5dk98O2ZzyL{{+{v=lvR|=D9ML3&8Iv>`7C84pe>4v%2Q3*1!!U$LkGmcqHElQU0cm+*Qe|esj^U3(UJ~3eE3k z`6FvR1?D3ug{JVjoMQ)*-(ugv-3Q=Kxa!3@VQU{_zn^`@s1$3eDk~dbRrUzmqvIPk zc+71>bpJr;~QQ}F{|#3_Qf{Ft;$FY?TE`ToqlOp($n0w z%wRu6?EcA*b_XuUl#+>|Ing;my>6=DPzgHuugN#GKHw8~5jA znXjSaHPH6HNRtw(n}?x6Z$g z{FA>PeQ|-g3ExyXp*?V*N_pgRwo$|)+X}1uqzo6 z#t$Z$)m6#nu4j-BVV3?C#x1#!EZoEKM#}psB{u`pP5C^^87WUB-$wqZ$hZRE-R%!A_}%UAwKT&TXs@8MeIK?;yrJx|Y(50S%`$i2=x=)BIUZe72-tL(kuF?YwvUJicSt}$M7H#n&^ z_*Rx9gSQ(I@?LqGW8IHy-dkjtoBHDOEpeL-OQL=C0KaGO>{`Z6d9UPAh8dq^v}gE? zNCNX!43{f(%V&vsXW*xsrx|9=K=N)G%6ae(z3jda*Pt&Xd+Lklf8Q}{EAL$Wq!HL% z>k90_-{_Rx)UpZczeDXEnbI}NVJ(NRN#EqrX7OS5gVQg{Syds|j~E$w&7)b<+QXX% zkM1Q7kq>uS@!HCD!~gj)D^E32d;>lrA8NAAhvh>}g}*2_rMbaV;4jKKtXBLM-)aOt zMY5fzn#n!tZ*mS#`q4AW;|$_i$=JpL_ZFC!`26P31qJ3^)BWcC-!3r!;qUzBPsv|x z^OB#snfw$n#XYxsOkv{>%1qT!BV!>Y+dtG zA@C^uwUKY8jASiZ3%MobFOuK%n>~HxYm;~f%H8Bc8KZ^ zwvdlHM&GF&$;wCZ{}E1D^`?abc1|i8AL>$aQup9PJ&F&t?R-8|oH5SvofaZ{K{hAG zVD%3;zmN|F>FzZLUon^GrH0U}Np{Y}e)sJ|qwhWkA4=o^RMhc+y}D)SkD> zTx&IV)5^Kp_sK`n^%k#%hlT zJ1=9b*B}SdY3l1Zo*U#?`$@dZRy)Vk%)D@`%{)(Sb}=?tQf!L96o;&^AsIlYBcuQ&XZ_IwYZOC1D=-N!#_vfWv?y&igqw(^7QoS@G{QHze*-Y)WW{yUtxb@{i|r0s6fOJJPK8 zlkYUz_PItV*TwNL+vgf$`&wDbb#WfsY58D1;eR@YBgsco{UhL)A52B}HzuzAFg==Q zIz&HNzSrIGt3>((pP@v&>Sao<5g)RAFF2Crd%>+N->Z(2XUgX)#-_`ED%Sl$O5Ohg zrS5-~l6MhbE2a;{#fo`H@wsB%KL@@w%x^Z|2Y<^f2wak07#IrI9mQ|Gb$&K}H1#z< zf$vma5J;>k3?%Ve^Gil#%YXBmTj6)ji;c)u_+9gQBl7l)!oXg9v8{08<^!ztdbcnz z8yodeq2E0A%>uLeE+f)>5x*^`D`Lq#f*<(beT+IBN zNq+MQxJURV<{VyKXlhR5iN}q|k28zR6VDovosHyY?}q=m*so&$w&*^5u)c(IwPlI* zy(vE?HdZ?~u|84f{E79wC_kEmPR>rO_voCFSWgTF-zA~`c)8{RuwTV~(B9v`e%1`r zI$y^AZ2SD&g!)un>rbf9P??=j?^bCf)b~)?7+>G|GIMl-}<^!AN#L5^~Kf4TlK~1x$U)c`E9*Z7yEbF z``PTzkM7I=Y`qD8&&W92x{H2dJ=|F-?cfVKl=1KPIi9&hg3Fu~&XwJ%;{ZhI!*@>yE;Ad~O``FyeS z6O~(&@25G8@&%pTntVUaRrIAqbxQ-^8eOFIwll#AGI=-nnoBOGbXcKTl2v5+CIvyi zSyWSCPJf{gKiG)O%7T~2$j22NJM!RV-@*N6Fx}F#zbV|evM=ll!$0Dh&5eomPoNWb zRVCIR=KsqbPAZ$w(U0TCg!#nMV`ok(WP};xmYK*UMw0?`PKVto69k=f)i?^=n z=%M@L>QC_7&Z@Y2ID2I0hPe7S8ULMSarN~o=f>4nQ(s-x7<20zW6fs1RiH6y7;~j* zjU{8$&~~{QJ3bAJd)F~)V2sZwfYaH`Xe9avF+}7Lwa0&Jqm6jeJ|Zt z<;@s3Kh~c_#%J38q@}meYnI+ZcU+c5Y=u5|5rgrjM0J<)8Jp21RhQr^GAfQq@G0xRQ(bX5xw>Nc4Sw_KEc{7) zH@)YME8))M`)a0n%&jAtS2dn?h{F~zzmOb&_^~q{bBP(3c>IycY^r}aWeS_}q{_Md z*u@%Qjvaow18%Ijq2eH^*r-a2t7;xe@mJ0xIh;PpOvaDAm-?2GSFPFD!T$_N=KJ!A zl}lM)vxN9c>C1)YWPHopkg+Dd!E;lv(9FW8JPdbJ?o~P1L^k=A@@sN=hgkWOUpRz! 
za%51)$~`&v>K?^Sonxn-VVu`IK}TNYM;+yvl2>OQpmJ2tBDcz2>3-FvJeBU1e<@C_ z^ReghIQ|0PIm@`FB!(a>YAsM ze>4zo(0@XKc?*>K8@xa>~akFQ=TGa&%quay#&~PSU3Ju5`V?|MZ*J z^K9oG7PG(4_Nz8ci~3`G?dLb}{6=)5a&T$*klFal$E~`3;Wf4onHn>%@s11R;pUQu zlP@!cO+HR3o8*5h^AP0A4WXZszr>H!_jHx>X+XB%II~ip_nXsAzuAD_2&b5(w&dEF zu8Z*zecYGCdFPzw7z-oBC&t$1MEh86l#i*{gY<;O<>gcEBEHXjM)Bvt%x7dlVy{@N zQ7rDo99-6x;h!AIGa`r6nQwL_J~4iz+R%JC9Gvf8II`S4 z+K4!^+&qGPJLld!jD5y_5&l*HKg;qNhj0!*D-FN#Zv4j8)YqD2L{c?x@$y7-5qbH0 zh#!PF&;J#Q>_A`i#%J$~Pq~^gZLKj(tDZc!8Bbp|ULBp6=p`;P>T=CX&Z`{ZkBnD2OnxKf5P#$vmGH1(DhK!@c`AD%SCsP4nD032nC7+S z`y;Qlbwt;1^fidd5qss?nm@VKSh;Sby*4e^wVM6t+O%)7{|@qQ5;M339cO9|D($*e z`{Zo3f4lbay|sUv_VK;7|KGKb@2&kw+9#i>{n^^L^O{zEns-cLj^am|d@GdqRQxfO z7{2=ID!xa5E8BByi`m{|`@rCToVZibcd4EEk7ro7vmP$2^_I%@rXUNW_+DJL$KT#K z+i1^QUN!#8<<;Y_TE1!gNcw6zHr&rVoyMD(Yje^cx;?#W!vFMCO(i^!=jL;69_Q*fS3jU2usyFJbi$~b@Zl`hcV+n7kCkm2e{4wEgi{0I zitvx7xBY>RUlasd`uN*FNUNGKW`aL7w0A-10P%x^#1Gyfe(-LqJ9OwHPv|}D=BJCN z1cZ~wOnrgCTuEOGrp$m$K&h#WfQiKO$zP2zHGvKtj*s}-1ps~%+tf}?!?x< z4gacMVnpg!L~~7hDwzLAu4zBHrtKTz`BpdWbCpflQsoQndXo8POUfqf-kxbP7qNYB zEq0JR)K>CPt*l$!Mt)@j<35dkT64AEyeH9bst@+SS=H{d6aA*s=1;n`GdtcNx+JM; zLUupw2=aZ#%Kg{LQ?}PEZ z#OK7NSo?kB0Ji(N{~BKI;$725-y3&_B|rk8{nFT(gF2 z7IRMhyo7TPbME_`(^xClrg+mP`fw|KsBz!?j5~B-wI`(U-$MWGrvG+;gFk>7_1__I z^G9&goam3}JG-~C?F1jn?^K;lANO_M@ynjwjPG(4-(NbwIEr&LzLR*~I=@iIKqK;Z z^wAfv!FkzRODAd0)q###*sEUZk9xKby~JCR>38OO&BvC!h0pYHs=vJ>EqYEoS!1Cw z5O39YA~a9j86Tb3Iq`nUD)Aa?zL)Y1l27qq*?e8^++Hsz-PBJ?@&12 z4*%`MGGpxB4)bjb6<9%YVlo+l%==DaVy~m z%9pIN^Cg;#7Q$A_)~(??4&u97}G!MorL-(rr{;P|Qug{%X)1)f*P`4@ZpLtg%e zLoVACwh)_xyUmX=FM+B3MdI5dCkbcgg?~3W?T)Uqw zo5-B46gYsmV!&SiB0ji{{!}h1jlQ+wyo{ZjDnAAo|d@QaWL+p zjhC=F^SY|NMo2Mx&0|v@Vi&yA`RzF8RevM-1l1AbU3_c?_gBR|GG6)6HMF_W@QyR2 z?|uGIRh$vplR#ZL!E zztL8zKDXD@1ZdAP&1<8-LiATE&-3~`!R^S@$2?zLRmXp3UwMx9#k1g=H^Dh?gnQlq z2fdyg$4usq&48OuhoeploV=RvQ5HqNVPmzI{9Hqv{0n3466#Su{gASwM!eR?J!#yV z4zJCC*Y=>!i>SLN&*;Uodc$k`nD5_Z=SHqJB6U|qIj!c&={sW995J}o2QA_uC#et^APsoX+ikeYG=L~d{%jpyWqFU;rqHclm9k`GV1iP-sF za8|}ev2=0aO(-gFZ&Eb!cft=73D##s4f#>$U0!HtzaX&Q!IQaMCi znGzeSxo1sqW93?!;Ko-_X2Clv(J#N_9b2e#GVd-wNO_LQwk&GC*<{+kS%PvQ5TM(D~l%>v<`BFH}5N{gMR7)i`)Jkp$fMQcKYeu5_)i9r*SXIhx_EIx zXbzmhkNs?%Z?qS~D<;D$ZlF(wbABjXZp7sUp)&TD6O(+L`OrV+_f`D9i*vgt6$IX% zSs2&^Ul9+!e|ACO@8=W-=JI>RrGE33aRuftuPHQNODHk}4{KJiPGWU* zjmp*yaceJip`+lwn;uIEZF|m$?5^PYSC~5mztvg`aoBy^;k$6y1AC439S7k}hm6Ps z`Z}3dxc@f4IgNf`Zn>#;Kb`64Gj-86*TT(|YpPA8oK4+!yA@k(nok@{=jRv^r~PVc z*Y>L&pTl{D$E=6zZmV#Iw!x9AZuXjL<2L$Yo4D@k6mt_?cXMA~U|SA6w^xQ))2l~7 z>q#2gE;5^MBfjxsPjXePJA9FMn%Fz=_Ow34v@*@+`JAgF*EEN6vk}=lt6yOM?fnC4 zGwVxOk2H|=n->%797J3^v2u4@mZ|^0rrbdBv5q$+L+kl2aA*8;`B%q3j~0aPFLvN zMfg1T?!}Sa1_{}Sr)AiVVzqtwQ+(XRWiQ{TM zPJgy^HpPQ}E3T?F3qNA3XZr!06W4X>QXPulI>+bg)jTAl zm*nzow!dd{)_Lz?o%aK5PJH(&>JTm!-&H-T^IL4A*>c$w-_`lpbC+`b1^tYFu_Fez zC*iKuw@0yC2fyvaFJr|wg!1^`T$RFxF!3(e4xMM1Lz0a4Z}^P%VLgoYZ^C2qiSOq3 zHX_F+F?W}H9@^b8R_(20T$13uKe)j#@8q8EanAy-UzkgL*RFG&728d!)V(vfb`$gG zc6*WiRAe76tocMvocFgo634L)+uY0V`}kd4`2fG~h>7#w$#@S=AxxKjwP)LqF;#ejAPP zD%QK2Z8!Qxej)u3srI@;H5u;En|(c@KOv)+F(=6nSDOW=eT4E+<{diogX-;=ZdFFv zguP3cuh*CF7j&t66M2@s)T6ef!f*Xx__+11zVJdg|AjcN?2=cpQf##g%Uh?i>pH~&M=iYv8n-dejroH}gPmtt{X#Vo~xz0{}mAYaLW z&z$4HzopLgyd!*U)(Gkm2e#%E+VSbSoM@0~*~qU$;e@i?tJUlud}Czcb(t}|ErTBt{JXPubs8o04|u=udzwhk{o z&3=>@e+RDmZ8+;Jxa&=D*c;)+H^7UphuhAC<#i{UOA9bctcRJ6=;92}nki{2~JB-GA73)pOi}GS|I`P}dwk$WpYaKq< z43`z3n+jfM+WgI0X90hk2yfFI!7yC*6mzU*q$VTdMNiLz49;$k zS>rq9;=AzEkLW)=N1Qhu&KstGuJWW1k4hn~n*z^GA+DQ3TsMWdZuI*cU*NpFo8r2u z$TZ4z)-}ugp`7%B&=k0Z#ci)P+Wpv7=4rM2v5}4Wl-NerJnX+JoHl 
z1l-~umKOvX^NjW~T{oJtv?$Ox%xGVf=MOzRsW5Q={G!0lOn<13wjE-A&{o|a=MNq6 zR!wN_?~gAoSi6BUFnYybb+YCw?$dN~F5V9pevozCOSqQx z4~L$COaIhp-})S};@_~&`}fqxx`vbd-@aXk_=XU278|P^!5DvMPC;N=USVL$=%T<+ zepe9q5$#_``<;F{^hC5j)CX$2Vq~!~%DRCZyNMOh2N!CC)?6!QcD{C4Yp!Sc%&zS* zdXY{EX;sWw)XrG={QDVretH4yuqLz}N5KAR6u z&4H&5rOo-u>7(0X+pcquunt;$%!wn4L-k@)o8QC!HsJ5n;_tAA&#D9NGpn=hx5oz< z|G>79(Qi#X>ou;Jj2vVq5;N&p5V{U5#y;< z+|~>9>)X>3La}|?oOmuaEWC(g`y128UFiM3>iNyQyJEt1<5ib?t)G~%QC)|?=!tSQ1`jkMzpqAc99(TA1As4d*?>i2Tx+%aW6P#6bA-l?l#}C z*A+MKW4=0dCw6pn{=P2*{I7Ngw%Ya1V(l@+Ci;7s5vnr4NMA5w|Nb+$VQk;$6KmFd z30=^D!hZ`NV1qs3VtaBs=0Wx?gn-W7EA? zu_-^H^Ref~a(oBpWs{EJ-#khj_b}hybTbcLaSYjxV&aOS*H*35`P2)%4J7c+UCCzz><+r79+t^sH!*AEvvE1$GY<;`ZiW74GxW6k1+(~&a z-1G-NfBVm4xUJ?dgxD;s@O`EIw|fGc!FqiT7(ov@{MO;es!Mgma$$X=Xn$YyCS&S+ zmr3_(9kIj7op;tZn%<^A)h5MpH78ziTye$=aoiE~^D2J-ve>7exT)H!b`0Wr4}3lq zKEDwkQ#jCjE7ti4`+9%PzftbNOPK_ZP%NPa-Lf5S@7!a>^69rKyI&+Hx<|3ZT3ePZ zE+u(NMUFVHnDaj+DCQi+g$XW3@c$STbC!>jYx5%cI9bG;UxpKC{<~t%n*XktvtrG4 z@S3?iJ4`>N@XU1h@6PcyXD%Sd#ir+Vo%gPIYAW$m{hx~q#h+E@T;?cP*V3=6=o3Tj zB+jh-xcIKsCpPC*-1pJgwZd>SaV5olb5rAkQ)yc+Ttsbl=C`NN2FY0+{puYYjr~qh z+wIt|#(NI$I+Ay7;a$7NehtQ@eesQO%^TpH*TX$$!a--iYp26&r@>99!cnKdYbV2N zqyK-XW4U6wshs!mdm7v|9qyXJeLcAUBI@Z$eZAnWz0Hpu?h4;LLR>dEKFVLsrO~*q zssFb_yEJdX+oR3bnmfa zx-GH}uz0aJ>}WflON_TGj~(90V^^&k-pONEb@JFX)Jeak1z|zY=J8uzUkGN0EbH}=*(A0jt*b^>Wm|E&?D%AmY1UQ zF7oh^T8IZ8T6|e-yQV&jOzydzcUUFOb?yi zH6`$IQE}iz*5tt2L2u0XGvo2mP5yRlx4CO9--8_KHn;KZ$xYeJ3n2gfHr#j*dVMF{ zcsJa5-<9y*@x+UZiTg}L|HF;ng&Y6jW_=5gb0u(rMNy9207tHW5PrObZy`O&oN_qw z)}LZup5y#$#AM$x*&=XFaall#|+dk>Gf`HR+ zR)1}Y_LtRHnMHxk<yFPdIz!zss1h28%5l}&lbkj zH&yQ>=DiJHmHrxQ^W`nXJ@;e#4`hJ-USPj3*dM6tnM(j~wm3EX;1JjRk!u>brk-p5 zfA-!4yo&nl|DLmCAv=N^l!gRQ!YWkDQnAbl2ndK;d8&`{76Me(06y4?VkHO_^njIu zRfAVaScC~2b_AE!1SqI%Qm9MyUqGk?0s<8!2jS)YeCN!BLree_`}BJM&2`On|1xK0 ze%rl#@B6pmui4Mc0AD|>=fKSo{xL!ucxEHdyyHBxpV+N1z)oVZ?lXVqTW|5)+dTJY z=ed(-&i-i9_VclJsk3+FuPAqPe{~{G-WeMlEkC*^tnKoX_~=UVb^mvH>n83CE4%X6 z$=L_hmAg*PKB$d(PBJJQpl|4VH*)EHtuMWYZ|HvXy>|S67xy)0n4PA*P_HasxZ*bj zitt zUpE9NwSU{*-{*`8e(Lt$fxHRCkF^h`_QUMKrE@Av6+iFD++zCpgSWhSJLxa^>6`A2 z$)kTl`>3z`9ev{oaI5={_~HZl`7iXd-V?swgdTpc#rgQ!E&uNLIBPrKQ~eZ2E^*3# zA$)xu{c|yVoe%zV@wLWBV(=kmqgQHU#kr7P-ZuV zthp5ia&~6ie}RYlfQ#=2AO94bd>8ooPVn^|;OE}p=-a{9y*S@95;s0O(aX6Iex8fr z`B>hIbS%b7p8> z!_|KCca|`4BIA(4+u-r-j~mPO(7vho5O-?bs4zJ(X3Zme8@;Nzx3fLt|EW2&&5+yK z9`m2qd!A;ku~96ZQCxQ`cJZ)%eyg{*8$QZGaN*~iD}E5XctVsm|56m6{s43}nE6W1 z7+ynJPmA_bia|hD(QtIL7_{)E};<oV>R;X5U7zo_t(4U+I|b zPQLQtmGAD3k-e2wuam49T72T{Dt*70eAFMk7#p^ll`fq2#+EMpnuB;Qt!JMq zJw&>e;vG@Gz3TiH`bXn=(wE(l7s;-@ejWKcH?J$QetjtQzP$S;--aIPjy~yzUg?T{ zxfMOs#X{dW`zA~8c)9Q6cR$a?@O&(KCoZCQd{uM~x}s9&pjW0M$Il|aDUQz37&5V^ z-@PrpgU-5xI;J7>!dTKT+S^;Yr4_nGdwXB+c?R8*P^nv{agO96;(>Qbub@AgFy}!S z^hW59Qj&sgI8SyJ?RO%%ZtxZK$$NbJ%jBzqYbb|w%gJPP3+0h+sbJhj{B>g4%z`m1 zeqXQvoze!K(g1zB8C~)veWO@KL(2C20MmMLSil;GT=Yj49(y#RL-tNF^Y)@cnsUF< z@7V7z#SFjCUL)HYd#n$@4<+CS$@Y45%7>$EopNRaI^}O;W3A_rn^G{tLC)ok)+cRj zzt}r{H9Cc}hj;8or{uvKvMJiH{bZC*8R?W~FS4q#G<3?*o^rg+ein?CSME3HmP0zm z-A`9KM&;PuKeFGT>N^enqP#Q*tq|Sv6nZ6>wToYn2V+8K+Ws8<;*ZGsHul5OzVHW3 zV>&TNdroscdPe$AWmCD@K<%=0iRhWB$ok?cdZr!Eyon487pr~ul20S3;)X@Y&L`-b z#tEz&b97E^TjwMc@9JY%@AZxBF}RQCgmFqHdGplX(s_5GYh1l^{76LaFs5MRoZ-?r z_*hfym|zl0f;J z(VmIuCp%_{PTiP!{vPaGXzOP_6bV65j!>#CuF6fHR=#U)t3v_hIC1RJv zt7^tB?Db{q5n{9U9wpFax#)FQuh>||!7#RNLH7v5)I#QkU+zNBd=m`ASqV0VS%5BS zk`fnONjr_XH2={@MBJ`Ub_PI?Kv^MWXN=S5NURRL5w z=L_)jL+F~FD_$%(u%e(6!)zudUlWFT#=$THkPBfL;fQ_Dnt8`u3{ElE5&-=zHN9%>}lKO)G?Mc_t@dJ$6xh?Tf-L z`>SA=Xxt(lQVJep|K&XC58;}@U>DhN;S~LUAPT>1aPZ6jNS8$6mm%PnpYe|RdI~zF 
zm@`E#hF_#xqVY?y&VOM3yL}eKx5O{EJNTuSgI_fFN^2%d7z@;1Jem_E9TtsUrqKq` z*d@sSHDQ;94t5!C>l*(#zX>^T_uf$(Xl&w}V3#B0DSac~U;FN8e$zWfWDZWmfBNpY zrFpcz`JQo$&c9xcE;*iDJGcN{a)9)KWb_KUM0({5=8>#Kmni)uX2~aLdL$p6M=jX$%j_|9R%;$SXEhIDYH{ z?BOC?j{kEC-671-@b_lk&B*h|liFKvPw8O&J^z}DQugpDVGoaWM*Y+C>d)`JEx&%n z2gVQBQ{o4w|7>KOet-W@tiK)ZWaEeGFpThv?Hf55M(bOIE2e^Bq*H`rq*I>yv1v_E z`JA%Jw@|rMp0`i8w|1ZIU~O~Cv-|YTRt)nZN-4|#;awZA7{==Pq^b9gJO{fRsRp|U zQ&hz+!bH*7rT;~+OOSkm7r-v6gD_GjOZQaDxqi2 z>79h)$Ch4+UXQIkzNA+S#~)=tOI*=Wot*ojS%pQE%JKbe+n^pWgK?tht*5d2k2O7P*7+QF6RjL(zm1lN+k z?Y|@u8$o|4HWE&%7kmxQ(1Y<})EFvaZ!J1InW8t9} zX_pSfG{$)A5!0y08jO0(ORHBI(@5uB+yeuCEidN-q^u(4%P*Pl7> zl}wGi_cySL)?=%zElx&a9?dy(d>Z*{K7v~YH{UTS?B7k>mV6Os|F%AD!s11|uUM(< z`V8_{oYcenDu-g>nkVkg6_$OU&GU7+@7~+#lx-C;Qr5@ixqI^2d*zVV44yBI@*fnZ zxDEfIJAI$6bvMj2XinR?@e0KeqvfxvjTe9PCV$1r-B`BD6&;gM`zsc@nZ5{7FL$m% zDeDcSFIs`)UHh*1L}Y!N-=24%_oRC_nh_bwBW-}A!^CtPX_bMT7E1v7{@ zNIu_lZ1*CtzijtOpS%Rm`y^axL2``8gJAp{Ia)c zNuIO#j>@P#e^f`w^d`qIk?hDf&|W|8y}vqZ0=`71OVBx`=qklS-1V2gQCZ1*A~g3R zvR#gB)Fj)of8wdz$@@b4n;eOfXU*Z4OuKXV6=#%eYRto(w=a8pn6l~n?s!Kz@6_U5 z$(6=C6l0WJy-hnv)+Nt@^K9%T$TRP|_ER>sx_wSmR>`!+IAmi_QZ55}W?WZf^HyZE z3$of7nav?45U`M8JFZxrjjc|8qiwA1`_m4t9Df%ZdpmOM+S66oQ~B^GlIjLm(@v{N zf5AMWSI`@F{4nXN;62EE1@<_MJwAm!K8ZaR1|Ng`&P0A!V_)A#o^1?{9Df@dd&IG^ zQ)$Z{$gkw~No?hWltA7B=&RVwIVqg4gRNzpI<(OlH;}Kti7`#q5QkaseMUC--7b-_ z%|TC;bbJJv*EjU8<_>OPuHA=8#L1%Gcjpf3eZ>TY3*7mFvcJ8)jM(2^Ul9L+Uv;Kq zbor{5$F;@z$giH$@7KalD$lPdk86`vF3GUQE>%8_M~iRkK;5!3#&18ro@eYbJHGkK z~cazq5KWnAJ>yjd43Xt8vR{J1x5$8M8bm!?Mc;>;F}MBW+{`^S91Omf!LdS9N5#$dTdUmxz-!R#1!Y z$-dTPmy;={WLV>@l4I8{N1v})ojpE1>UHG${BuHtl@sfHGdo<>25)j~@J3^D!6x)~ z1RqbXw86<>ao66uak)BCak;w0;#MQOl4Ff4{tVfDAKCo?+5HfV_?{!X2k@O;`F(}> zzhu`QL#$$d({1~kj{Qx?{-$Gp(`|h0+F;pV<~BrpTiWldA*L1XAFyPPhatz0fjj#@ zY32=tzkh=qKXFAMZ)1iT`e=w@ec0b)eOzUn;Lpgi8=sdQ-B_P>#qhHG4ZZ&{GUUF$ zyUP1jv}=>yGP*LYvPO>+>K*sls^yl=J#g%tOiv~)nO+yq{x6P9kEae<7m?|nRb<-T z<9aw)PkXt^CQEj=+8F!Xxj$1iPuy8s*OGRb?Z|E_dzC4T4E53ZLEEv((lehiE~PoU zEs;^((>Hf>UuOy(q@2iaTiN8dk-e&Ne7-HdaNNv|(Yv;I*s9qD7qZ2iMSh+w{`%h3 zf41>-jeAmej@z++r(vJ-v)316?P~%y@628n>?bw^++5a(`gnZ7q4>H78~LoJq~pWf zZ{oFvFyFR}SQ@`?E+Yo7vrfz6jIeajeEPSH7|6!5$vRkC?=R6JxEu zNj`fo1I0?q4j5tWXJDuIk!Bu&o!(8F*t(tmh&1u_UV5Gw*}YKBN3Cb%|7Cb4<3{fr z;ZxvAKuKNg_ZiRp-7@aIXFR<+u7fN?*_Pc z*96o0`CkLp6Qn1uHmzBu0c+BgX4uD`N_A&(R*tV;alP3AtIkI`*1~y)wFur?+Be2p z(k0fKduN(1a1pdNbPbg9A6yU3>dbtWdY0OxY@!i4S7|_UC}LGQ{52-Hh-e-5+j*|3IJr{?&jrS@&;duDkB{V{B@-X(cZW zSjLPT>ssph)!jz;t2@jv>srEJO@w0ezZwihpM2FBO22~DfT5zt=dj2ou{z~XP=v?SRXea2qS6VAy zy2{etyM={U+vgcA+`w;3`Ry&}KSsP<5pAu`m+|dZXa#Jm9X`cjZZv8~-m6Bwu z=-m`+)=iR~kL-P>&iv>@=GvocI&e>QDL7(U&pUO=SistC>axf% ztYw)VYaVl>mM6tn3;M)buZ`ur0NT~&hxB->sC_MK<-G~k>y6>B?es@~$`2pde5U>s zpS=lvH`#ji;}rB%YH=}jtlBoVt|tCs>uUB`wsp1sKfwFcNo$+R_L1K0q<531F82M8 z(AE5|J)7sV#;=3#@fC%~Qro)im<4t|uYY4q>$7(EvoDr;CWL+{g>HkY9exW{f2PH; zS2%sSyoSD9PG1(SLNC7&Yb~QM-Mo5`*TGHqS=rZ_*3&o8#_jrAFM}aI!#*6N?Bb`i zSo#3k2C6(KI(d4?b0PDyUS@t))%LO2D>}qp(aU59{Kz{zp?Q>cdv$i&OaC8m+DmD- zy_9y_OKG(iG@bY66Hn@pXiPT`#8tMJ2P~xaI@F{uwjLhjDyQtyndP*3E>z`KJ8A9F zBDGOAW%hgX=HRQjZQ;t=J8wqpiN+M4;=AS8OMP=V7xu|2maI7Me$GC{I{#)b@>+D2 z(GDzCS2(>na=`krztm!`<1oK3sIz67gYQNher#tR7)0y1bY6(Y37Srb)GMB~@0Iut zTYVI{I_rrqFT(%1VQ5}OYxYB^onuD_BVE=2dp3Qy;pv2T5Eq6u$F@P z_xf(N7BEk4SrXsu%b7V%%<%F-erpxy<-9%`UC<}a+KJ9m{d$6jdZhV+wS4(M9T8t^ zmg+?8yIn)-m7sf#6J|AI53=U$QBdfMZ+pZpS8};xVtKS@Z}{w4^g%4`@iKD)mzW;w zmnqMV9?Q+^5YJ@u+fXj`d63KP^ZEFCId#E*)IohEY?zDOx@+|! 
z{mpqb^so9lDY}nUCf<*a!tk17pnTt?k0+3C%b~+67E_+Z*iyGV^2=5C8|b5Bv{lHA zlvnXJ<=2DuSDWV>k+Y(j(at4|X_edUoM?ZyCv{L+7Pl}g_x{sPJ#TaBHy{5!hkmr{ zru{3ZTPbbVfb&vzjSt1L(oKeAAX{ToGL{|4}~F#fe>_`gK2Hp4#?#V+0B zVE+2_*M9n{ydi6krLSqnN$}~o3t;_iVEWZw&-C}WPI3v`zto5^HL!gs6I)AJXM^qc zvZmoT*!v;qs|Q$ncIbc^zU>F>Y0jREhxVD_p6qMh3yg4>er130GqNG?fC;x?Ll2{0 zchoj@?)RC(6{hv4SD3p49$9gfd1gKLb{;aVPw`7je;2T}UWNa#o%7VMVSinItJsL0 zc!uZylw+O9FweL?z=AtttYRb9TFJaU@zla8wXDTu3D)wy5j!Y9ph)LLViy)Zdxf>% z7h62{C4Q4lzUgXf>BmMmCjA!~vFQ(F#HA0;m~R+oisHR_bJKizFB1=6(A_u_lRi3Q z{s_?rGv+@l`ac=-SBZ|vnE!$3LmBf=8)w`$Dkh#Vh4{REmZ%>(3k;7AwXr=I$;S3+ zP;khUy3ozgbSP~$g*}92yVSmAlcDsV&cPXy2%Q0?pUdV#;US%eGlc#ZwjTmN$VV6g zf7s`X(l0jF9}aGh0pE;)(k};1c- zdcPkO+%KO)eXG7y->Hx6?{wpLeP=M=nMk}Y%wAgf1E=Zxx5%fg6SN-me&`RN4;$eH z;K(n zGkz`iPu&B3-VDEn?4Ozp^+UnMuf;=qLeX`nW7-%;1Zs-f(xK**%kp=9J zx+=~(GK2AlF6hVc(C*NF&rDQu!{Zo4V(GDko&Jg=Rp@i7ec#17ehNidtGG}n)nSnNtbYM z6LbS~4fHMOVCWX;{ZRQ2cR=MobcV7AhV4hd-({cQXW4#)cz!?suZNaFCqfTHABM`0 z7zLFdF%T+0;vVQ}_JzkMo`X+37oT_@KJgKJ1Yupns|O$e6^f@PD>w{ zQIuka#bfK9;e1=t;bxbxs=?(!y|mAWy|6wWG$6@ z@6%2PBEE+F5cw98V_SakH{^dvz9lQ0@HO6|ZaS|@^;`R?^j-bp*PQx&e2SQwQ@^re z(<&z&YK^ba*ki3S9ABe$!oon*Iuani^b`lueCu{|6&sOW!we)if!Pp)j6~Y*GVq< z5ij9KWXg|l+C+YY+a}SzL{)tnU4I6_!!Ccj--(vD9)FD3oT0wz{RsEnJ&qrdbwzwT z&o!L80ydBzF&uu6;L7U#2;r9t`4M^eA)isd-QY40eUL?ax55wD?lHsb*t@wEe!#on z@Ab%hE1h=A4w2^z+gZd_3yuNgt4*7~JyuKFcHdXH0r!3*8@-QPj~m zqkI~0uHrk)pVPUFt#1ujndEitAk(TpBw*b{UQcFlzR&=r6N1m(hY$Z#_AF&R!pnU) z+Yi5E=^!t@htFEZT7;Fv5tk2%wbl|xeETB)Mw4p%4bzXmVV5bnycB$@xPtJ1F7DSDBoF>L%(rF>!&6T2N(=gF8fAQfHgowQ z`$|G=63aWLiA59e-sp{#l5)fc8JE>ZjD9ujHq2260>Y=?y%I<$LiN z)Iai7qT6E;dAZNj=(DK)@>x`WXZ*1|%4bo%3aFN#sf2YUSpLVCB27x<w+3~|r+3^+`)DN18uZgc* zOYIV3&O$c!p=>;DK>Nxs$p*8@AIYXKrs%O_2;e|gTM)hdGvUpD8JQmL#91i8VgOi~=r}3WB>rHEG*MRj7 z&mHQ-p3gjYs3(-?4h@F#+@Xm^xMyp|yWq8Z(sHc*HyhzD)MEe3KY(_FehfVd-2*)U-4B&~l|eT{4@1{Lk3nZc zPe3O^E1(0Rr)hh~x5+2g+CM42UH%7gZGVFvlKf0ShfA+COJ#jnYhn@btm3On37OIrMDa5w||H)q4atq+`R>7 zN%jp`g6LI((!N+6ZA@S`}*2OR5oWCX6MtFHTakTbvRs;43&WnoY3tL}9JpU7o z!GA5LpLkwOKRTYTSiZ2fa7c7aUwcfeZq308&FVNY{XvT9*DF>`zdmhHHKxCZ^F&oH z{DeL&W6sH4i1O=xOX~S;WA}<<>l^A%&2pY9n7l)+J$9X@xiR@i1~Mj-5Y+gX#v?R#p}FkJtC{c? z?WVYVGVLb3+dry4_BfPNpJ`5gqGRzLX#Yvf^_<|u;Cn{l*_vYTYHO7_!1shz?Y5$< z&BsMz@9z4Nv+KWQ?A^w$hK*g_c{htp`aWaG|th;40qMO91YBHH_>a&@ZF+Uo8dm9brgGt zqR%@+J2i6fE3tdw*9yhnuZzUq)hC)O(+1pH9gZCsg<%zIZwdCbZ4G;6c^F5G$KE6^ z3Rs7*1qtMT2)&;`c@AL{5-8sx?1&}*9^S`(PzxC-HYXt8e#)dq3m3ih5_jR@w5QAUL zz0vi}GqM%Jxr@NL^Q7Zt6O1(LwZ33o>HE=O-S)z|3D^*Rmn|rP%7$!!%HH_vS#yMQ zWjFS4Pj+KJR5rT|DtjSYEqifqhP4>`EMLh>T)?Mz06z2_;y-hV=h-mg;0@tN3qFf(>U@#%QkWUyR3yddlsPu~hJ#?#ia zvGKIEe1dq|TDCdc3+)F*7Hu4z0G-AEvdJr6c2ZKMV+5B&Ee-^Y0bR4uh^d4wgEvs-D*msQ? z!M-cGH;CV!hl+n6h6SgVT1#jm6eC}_8PJ%9lPJzyVPKAzvJ`24AIuqIn z8jXM7=l^-!Uk_afT?AbW{VjAEbQE+Iv?p{e^k(QA&~j)AbT@PZbSLyJ=$p_j&;`(U zp+V>_=y2!<&~DI=p+}*6pa-D)p&voZpxdE`p|3-aLFYnGKqo^hpo5{O*+UjQ625_T z`e2e5ljGaH`d(by-=V{7{Dp0Bby7kqzB_uS`42gh938V2>?(cY#x&Glb{r2m6)v{p zc*v`bjfdzrVtL4|zAKDe&%wyHJR&EyJi>=r4I>yiJHd>|Q3~`Z=i-bemhNC=8#kB3 zNB26iv6=b^OC!HW(R+`AkslZTBR@~W-%lvL9X$p{E>s#h8v=G52}XX^4@ORlU}X8~ zU&F}9{Fd716&oj~UJ_28lMnxW4&IXQb(D4yH!ID~fpO!^Bc6F*E0~zZQj= z3*%$0R|dt|?V!0bZNSOZ;pTx+nE8YTUEu^FEVFFWeUJQijN9oD}H@m+?u+%F>5{hT`+e&;Z44QEw}TFj>YQ! 
z5$<0Idux7-uy+>oVmc*8)|LtnH$8i9Ud(xYT@Chb30}8xEOoYH(A2pl{W&Kp2E91m z=}%(ND%aK;;?HGHo4fO5UIdS)($2!;uR3_Vrn*jZ>Kcv9HAhCcd@t?P30%0y!R1Zk zjOiDOGv_(jTyhp2V^-PaPq^`At+}z|%NN1uM{i!k9(*45;`6eXwU525zrOdL=E5t^ z+??~Jn}X33h$lZpY&n^^FNtyaKOO#9;;a$-AKe*325ZOW|MXMexE0TMeL=159b7hU z=N-_|$moOVwP$^bPSg2=b@0PxvLBQ^2MCO;Sg&w;Q_Yd*JUzvX6WIg#O8WdHXWM@X z=2q-o@!-`B8GqxfXsrv?nv@V&dNzA_1{kaOVA$Y_l3~X>e&|gNX0canheVxU6&uV+ z9UuA|>q)z?$MJo2SX1Jy9ekj{_|Ryc`H1sL9_(p`#<17%xNFVuO2Y`R?d7)$GxKj* z*_~J~`@+83E7_Wp6dzjotP!5azQ?aIcK>qyq|mYa%}!BulH)edKmbK zy{b#tlX@L{Qh(r$3;M}7wt>=V!L)8>=!!d(P7T(*#|&N7PwCiT`rz^QnQI?V&iW6V zp^WjmUppAzWPB*$29tDRFsYpxO6e2`6|(2;9QM4OSJxYQnRPSEy7)sYdc}lR^@$C= z(l;)&c2IoiwGp*KZ#sBQ1TAJSR^weNqk z&bI_ypfNDDpUV0^W$*82k5uZE3-6smUmLWU+V*DJR&DiKm9~1Fw$k%0;c2%G_Omwb zQ}&N`>wXjUZcDu{R*(0*by=rhH>g$8yS<|4@sz z)cj2LY1-LSaUA%57(9PFdTASSp*@7`r=m#U7P%9=YaeE~R|S*@LK%wNp#8;Z4r@dYQe4IwAjx>6n~PaF-`7 z*f2#jIoSC3sg28*2Co)t+i6QWk*!DeA+gNI`u@SY2Bl~8KO<}%>kSR>%On^ zy);(TlzEX_m#gP;dCrZM*=2Fw(>wE#dByweSjv}?SW3?-u@u*aD?a0nL+ibG#^ah& ze%F3)jnaRunCD3O!b@&C7f;dp;jE#DIx1i7`6HX5@2Q*(oN_Ae;l?%cD5u^jH1cnm zZ&FS!>E%)U2%v)5v#qPh%$h^fI_QiB#aGTSdq#LTe)>#w@oIFjyMNy_o||pdEzbgH z6?x4wYrJtm$>%3vt2O92?TN8JLFwATe~=C*nBh;sS8LEU2Xf5Nabgr}&^`a`ZH7)= z!9J?QDAu44zqro~$u0_4o=hmYrGl8n8uVd!m>K$t|G&D%44=+0&lC|mSexnCbbOP! z=y2N?!8e&NJw8Og5ktT?SzyM8Jk0r1Twz5obow&=_K+Q4Sbc9&XbrmElRnmtGrT$^ zCG@)VJHC`B{V_Y{FdIE_OVX6W;uDqY(y|H%PL zr$x&AKT4+t&pc{|&Nfs!C3vuz87lvw(uu(%KQ=>0y@3#8ouRpDp3wYsZ)kz(3oY;N z53Rg2CbatA*wC8(aiLd-#D`uVT`Tm*u?e9!GZRBrc2a0j>*P?VeM)Glks5j)o>Ra5 z3>nw_QT5%uj-LM^vc8tGsjRP1_L7*iwyG1n-=~ni`;dMsrQbfGpZ3vbesumd==>PY zAc&5ZegI(N!P1uM{ z&^qsb_6~$xTPWGRkZoJ-t;5>XI_MxjP*xk` zQdYY>dbUb=s^-7iu@y6AE12h__uw--k7!)0`^lf4t^A(*o#ym`?8XGnWiP62w43YL z4bAPB{!PGU499j1ckD(UGT9rOA%2%$9(4hm0p_x8hWwcTxc*Mg*{Myvz4Y6i=zkx5 z=cn)PM2E(Zjz!*LkvG>rTMp)T=i|)9rr5HQjjYJ7*!~Q%fTVBHlzJktK91c$>;V&GCdu+vs zo04Yb;WNl4#m40S^Z-7?C~VT5$iRc}{{!`7XI;!sXzAFd=1CDhK^Rtht+}>Ieu8|2 z_q@qL`3YO`C*&vOA~PlI&rr&_+Uq!1`vc;bA9~}0jb@mk>o^npLv-TxoFUhQGvqon zkpGYtyt7*X{~n&O?5+FTag2!ppkE+5W`ZLjUwDeCW!TxKGS*WnZ2jajmuTbOehQ zb5=|VI_m4bMC~YFA}z|7NX3^h@g*XDMg51z<4eeYNWqsd@g;OWDVVh8@leW}<12BU ziwnQ0FQGl@YVsw1gs%IL_LCnYUm`O$;!9+qucG_lZyC%j{+${6M+!QMwvVHowO+I} z?OwHQU#nkp+rHlST-&~hK5mBGxNUbieUOs!xVAbgR(b42M?}{}{N&c9YT2vug?_o(z~QqTD`;0D{L`NP+F_wUu%96-12 z(-bnstZ%B_RGwaxCpV_V-gi`fgyeTIujy;+{xtj5wU1sJ; z>phkA`}B$4`4{;_!?V60pNPZ%Y@aBYa%nzMOP-TYROt9cr(V+UkJvubYuGJM`Z(KPdhKJ~Kj$a) z_&4{Heuv(u$xr$T`Xky;GUFnClH!f>Gm7ZXIrQgT`t#Kc=DdTw|6E{(Het(-fxVCO z+duhD>zG(8f2Qs|fzVa*pRS3_7cZhu=Flf|>62IM<1bw^$o8e?zktt2pOjoP+V-cG z(6xqi_*~Lo#O!BI=1orqU zV@~t1kG9O>qb+Zt`{(?#gv;QcX`ds-|1udblHIaxH~vb%v6lnze+FVRtG2W3W&d=G`+LQF>q2(+a{GM30pHf=JKI_3qWIYV%+LOYG3;{~%YKJ( z>_r+MITt04`Se#C;Y9Ws)SjgF{H;2X`CIP!OFEaJ1%91{|ED=yBk}JP4_7SQ9{&*@ zN=mfH`{Id%gJ1e+j-=x1oPY6`1H_y@b#SJ}$rZP6!@i&g{EYvwuKb7{1F3Dt&36tN zU9pQjM4$VqX}xD81>YY4CFZfTmtn2$%h>9>UP~B!>AlP^=?FGMCk&%6cd{1nulEM5 z_l>k*#RH~wiha62;yaIxGp#)Kb1dasO_?Lv%+EU3{)Y92*6cA(`==zIwR#A9RAk3k zuk?+zUT40_`{)yuTl2~6d1Z_@X`Y$tpgC!pV^%_}UGb`2e0LN1FmN3#5k93n9^_%D zm$@~J+fPU_t)-3GE5c9NS24Ghvg?^c@Ky<90=dL=K7mezx_VP%^_t(R7=0V>_!XJ# z6RdYidB^@P{{J+-Z-1BAx&1xnPHA52Fzz3LPKR3H!B7{E^#b)d&U}L%>_?_O3W8Cw z`$N>x-IL;C`xLk_6=BZkJqjcX;!(}-thztJXXLFIO1biZM-7=N)5k>f*>FK1r@ zcW&gFaov%VZpcel^VG8YxR(K$#gODduUsZYg{9sym};gD);Y6M$Vcgd%n>+ft)nYWwC|q9r3-{!k}|Dg^e-4 zgDt%MyV}BS*s2uPx?(rx;KRLw-D-y&(HQAT?zhLT`0?{yUA`b{jBVw;=jIGDSI1go zkFlMzg&Je~cd>=8eQFvTv4w7Ztf>0PK1t@9{up@!AMZluekGY>F5@a>ZV7vTN#WszhuXRGlnxhp!I80!55kfsI`;l;ZZP$<^aidX->H9 z;cxXcK)B5M;Hn}k;&5csIx-xG<<p2(a^LJ0MyvyGSjJZm$mNV`;tVO~sSFei4FQijjG7nljo(S(= 
zicXD;&!$E8v|$XR3E1UE^jj`5Y3WyY4Y9^C7vPI7amHtt^or~aD&3{=S?R9x^lDxG zk6wML`tjMSdR4qDy(-?-H*KHLDUZgAr6;SFLC+C?xuCAibaZX0p5uGhQ$P1x+J{Kr zIK-YAuI^oYxpgmC;!?+BO$Qg`Qr77Csl!zE?{R+WJK*)N&rdy{H=9TKt<~9k@#O_| zuj8lo{Fb`cOW!ePEgXk_)A^?R;fXl-^B>L{ojUN^15tC@G-jgyt!~U@K?~iFjG0J> zDfT2C_FZ(Z;<(bi1~DXkv&hlCZn@oY9#?l(S3bQbepXp((#8E9UF?baer?(%`n#G( zp>G``p6lvn`Kgy(H^bZCWsD^MA~?7vzW>*ZjYRnW?0SuT#1OlUC59VEEH|E*ZY{<> z60AMVPUbJ1=i#@LMYG0pcH)61I>yDfz_f)+s=Kqo-2h2}yTGie@x z5(jFY1tst+_rOpl!NRF|{i%Nv>DU+TAOjO8=eJy-BtUFK`+yqM%GgXPQ( zcv!z(Wh`I4?)ie}!R9(U{%X#SFJ~RhSp9Z|vHaw^7YbhBoEe?Pn9f=9in~9e-%^a_ z>FZxCc##;o&StEaTrXJ8SjVIKEx}mcVSPbC0d>+@jn^b!6O>={n11sb$M2i_Qo&2K zS^ebt!JD}^YyGT()4sUk$MNZUay8`gaQy@P!Z@$J&qf|}8`x|V=Yot&O|`$H^nFT` z->6-)3pyxGxs*;S@=ISz@od&_rBe!?q8*#^+f&?M1aI`KWh`%6 z8Y~FXhVDB{;FW$J_VX)xrr;UAH#IpWIJIU+?s>mrqnGEJpEEs~RhiPU$9Q}!uEQwpZ?dsFVKe&EIFEhuj?7xSL& z{%x9^YM0qP17^!j6Ba5~eTaTgyjo|6*=3^57CQ0Y;l$CVIAw6pWIjZ@t1LapzX5H~ zwDjo$t>b7;Uh+dn&?YL!=1E3{yN-i-aeWNdX`C6?19`sd`!m#!b?Md=69mHJ^rlzt#xsUJvJ>Ic%5`hoP<^aJ-Uw|;PSgY<-}6P}_SU0om< zjW2zwz(OYkX+u{RxcXrca`g=Dr*n9pK_67r4~t8u)~p}qMd=69mHL5tR_X`RmrFmm zbuFi^wa|0fl-<=6BalOtJ-%qFtsjQhq#r0}rG6l-vVAlC;13qt`X?cHh;jrkq#x|O zzMX!k?(D}z;$G+Dg`Azj11;#=mb66*{b*wVWK0-f8kpI|0MpTJzlAT3($A^r#~IlE z(VjZNPT0GnrOy^T3vZ+$>uJep!O6OhO-x%ixnMHi)H&>NoWs6K_p!eR3c(57lU}%y zdo$L}C^&&`{UtW`OX3_?Lo3jc9k92Lq9ezoTw}kd^a0LY*0&$UZlvix`9DbdC}+O^ z^pVI|BzJo5PuK&Nh=a1kA5B`GJ zIx~^;9l=TE_--eBp5jiVgQP#_JcRhn{u!mDucU8Z;o9S?Q@q!AW$`^7JIx&9y()+t z9nY=AB%F_MM*vK+-j0vPpLqOt8Z*~?eIqkBqlnm#V#SNN4)dKOU^tB(v#x%6hen%|Za}vj^BBQ9lhT5BB&7y-kbaPJBu8=<>~2q5 zxt{%`&0VJ^Qr@G!_~K*Bl(_1Tw_s%%ssG^%12#3r9BQXhVwaXHQ<@? zm9qTF)J`GoHL7#uH^TQ>w4I(++er_p?P96_g2!U*bL<;F`hYFhpBToOjlxMrD(w(! z_xWFG!>Tr1{a;=`(*KK~(xE}-=*4U9D|2|7LyIfM?^K)i^27wEqB}yg!xF|t)fSrb zYOil(z3)cSzDS!K&tv|qczzN1@Br;~f_i;Un|;Cj&@ZXWNy;^rbsvwg4&+hRhvYIA zlZ8IoOSy9y-?7tP(#%0nx>RXmqDq5#&Jat9JmV+LS`+*ESkkOBv7c9(IScmlq_f=T z<4L>ECy;iZSK57^bijR{`OyLQ`4rOb^GdtVleYJR1s@(Tl7qsBThQ&Q<3rT(m(=lB z)G^Bw-|jlvrw?pQw|%Hrk$azd6}k5*f2n() z@|U{zRZivyMeh@Pfcb;=o|5_67c}m?{G;=X&L{fYI;*>+7V~`R>vCjXV?GJ+%0hok zFcDs{^#r`K1l}m4j+!^5G4wR-c5UQ47F*SXwF6nqnNweRJoE!|^z1%_7qU2iLFp{| zJd3jz>}N>tHJo$|X@4YLO55y>kEF9`8|H6Bo+sTDJFa+0JhIvkosxv?BqqlMwck+; zed6weq%sES6MH`+WZuIVPIYC=rJMn`%#(=O7B_Ww@Xxc}$S|6r;ellZ+8?K2hqHOLbmRJn4$O0~;W_1o&J z9NK778qb`HYbX6u9d^(>WEUd}FG`O~hZ8dgFQP}I@Zztii;EXGcepr?u-C#1OQ5TV z!P~=m-?c|Q;IT1`fj0mjW}@?@+k_93(3i>d`C(rz@MJ>qoN4Fu?VM?yLLqo%s)PCT zOeb`0KJ76LejYF^&(^O4hP6D+9xmX%BwM%czd+q(TN7ymx88+Ly{lrV=(mzFpltHP*dTRNaLo;;E96f+pYQ&GdWHyqc>-{Hzpo3iL$O6I>_e3bweAUZWXKNjPe9{{J5`8abvH+ z93SHQ^!YycPHDCK1ky2IJpQ{U;h7zt*kA;2B-nUEbB8A3JIv=A^!rZd;}3An0rbOs zzE?yZv%wRE><2dJvD<C;Nj>pm(lgza8JOUz{F@{+WpWk#FH0cHL>_pG=+RPKxW}bh$HtSSFo1ISLuC-2l? 
zFE+u4ZSdnm$KaR8;+K1Dzq~p1cXjS&?6Umw?D|E$Yt=98{e|PZ6~Pbk&+qw-@$N~l ze|i})viNC%@L3;cF5|Cz;9W15@q>lE_u+dt1~bR-zwl<9?t41D=lb(m^sVN@YtM=` zd~+<BsX-O#>J zo7odEi80TX?C+s@09xDi3$U$?b&=Tz<9)$c_WWoAzt88l2f*Ev*|(?8{zrEz&Ut|S z{ICnhr(J~}GDH9Gux|d6PZWYGHg|r{i zZZPy>&aZ!t`v0D}2+uR0<^}Nbi(u&jZ%lBOdO>IGH1nK9Ih_E5W-G1LpHb~9tB7gX(L#!xS)+RcoK3-$2$*@G=!<>*+% zwT^2amp`tw<8@qJxdwB&<;aXGM`lzxGNa0oNjaK&ypgqr@Ld-B{^fgFzsNT$~BnFFmp4qxbV;OOlYp1_T=S5i|n*FuLQc+PW$rWIcK#eI=dt(t*!Vuh+b)d zd`v?&o?)J0Dts?{n9rViPb3G3o3XbY@A%o%A~u8l5$TV#uIvxVeppJ^g|DxIuibi< z#K3p7soJ|F2EK!;y-Q-?JE+>bBnG~t9o60?Y8R;5yF~2*ReP7HU7%`jjrEq$Hrwzk zG~U~SnAbFL{xh7noXT9ZQLR&F?PZKx`)J$_?<}Fr+qqPafEQkgfmf2@l`G+uAAED( z=%GH*ey`|-3CykKdmW3o)^Y9Qa_e5TkDJnNVDq;5_!UK5>$vuD8T3V#-5=-oS^NZJ zdVl7Nt6#t^QT^h%Xur5+P&=Enaisk_MYX>X)&8E4+SF~&5n$mU?O8+{3P)?dMZ4`h zk+vH^+qvI2qxe|z)Pp+Oz7@PvYDC&5_q;Zlu(%W-NNtdNUfm~*Fxfjwb(KGXe%=`b zlO(}^($l7&_-9feqaP!I8h#@n;JY3+)<5ueKh!>FANkKy=S z-LW~{usL0^Ikz%S&iM<~>}hEF&#!CInbfx2v->#HlYdL#OkBIe$j{k{e&*gJ@vUUO znZmbIv1@7A?Aq2R%#~>X23picHrrqPV5YGHOteG(O4&ri>dC%PY504yPL1C=%vYCn zG4aJ^j5GT^wKh*V9hi~-<>Nc&jJ4L@jeqjgXOHbX*u0kY9QH|Zud+m+wJ_cjj0Z~& z{`}WF_g)=ioj4k6?WyCpzPKaaI?%{#rP7CQVf&A8rC_UX;M&f5yseoYs|8pfk?W9` zGkUl-(&lYa49@nz#$9RjX_Vp#c1ZCC+xxBzw)4dW+xqGRXKPI@e!#ICIX84{?pp0R zpUs|g_BjM8*9LWt*6FWutn8G!L7j0br z*`>q<+o#qIYVJzA)N6wSO=J4snZu&5UIAq780}F|B86 zn^p{KI1k>#UfM&4R2;nD47Y#^>mTfAhBHY|UO#*76F&3RPCj3GKJxdbw_bTBSgR@R zcP-Z`zNL13)0uq+$7S&>7!vrTyQA;;y)7CHj(crT}Gb`Gr(v3zW3@|tz$QWBIf_@Loyj?6n_z z;5|=rxj`&Te4x0(E4({}OXXodiazgU8dg6exjdh=%C?5SRrwh23Tv!q6FliYtKaY8 zw|$g-2fyjvO(P=j=y|O}*8lH7-%s*b3%ECnYoqi325582`!ws>wMM8pWAX1`&stHQ zVO%O}3Go+|&*FNU?=RtfAI~1*<%pJ+~+ z`eYG(GLk+?raw~n)(+~>kG0;CMPhYzN-=5LW-18-9K9@5HhI(67b zTToZ~nSAGdZOWnjym;QyGau<0p8pWK1IlyZ_nfr$nruefY3)=ePf~dy?e>(~jBifi zIWP58z7ZekE^Nu2*pxf4ExoZZw_~q*S=b!gM`~a+C{M-@=?C3P9HSq$riePnFem>_ z?n!rLPTAeds3`26#bxZu>TOiC9yy=qOv;|cZ@QnO-*+|SnbsqX%&ZJ!aaM+&@eu2X z@tgnLN`GXH-JQgWkXMQ)(`aR6^E=vlr@WIs>|bY-B+FVB2%i9)&kb_T|MX4X^*c-hU%vAFvYjay=qWAJO$=SS$PNsJFQOe$U6xNhlU z_3c2eNnG=}a&|QuIdRu@BXdqP>K%yc?}3Gr3N((EX*?0O`yL$i09Zw1KL?Chdo9}j zy5O)HzBg!)f8+zq;WIKDXRJ3S77Xg*AIUh?j>`WVSNY$6{{LNgKl=GDRo?GX<^3*I z-lxtxV&dUd;!NsSwLx{fqcn;)-Kkzx1+~n9C^|@l#6})`~KZ7JARLNIM3nX zOR4J~bfoxfkFSAE?8ga)o)dKJDR7P6Thj6uZIMt6cAr!zj(z4 z6Ybcb@ai70t@6+2T{kV+@|c+!Ht*B6{lvdcc}k%!PiG=)S=nZmupba~V^((wnFQy-7+$i-e{Llw6+{7EBo>Ngyj(>^OBKspQEJX_zV zEq6n+pdNU)6W=hYOBynh7MsMCKC%t+)P{5$(gUyq`?fFXNnJ9%t$bS=rZn2pIA!kG*-3g%x)VNYrv6q; zTROxohxGh_u^Xy+_sOKobzfzWo>y6==WF_I)v~IskI=4F+uGz_q@6P}OtmxDCATwe zUVS^aaoTy=|CQ|=f`_DcG17F#A1isFkWS;In z$vCb1MN#}F`4!K(-^$~=neb_r;SKg5Ya|Uc{K0Sfzw+4Y$m|i7t;+Y@vbj9(>b1J) zwjVkA?RE#B1t`1f>w&sulzwZ7{0-tVw>0h@*m7NO>9zj4Nat>uXX~`R$e-8NX$@_i z*2wgX%-Yo`B}@8EypHW}h+GcjlD#z3Cicruo7-=~ofG>7pem<}H>$Ri^ln3R-9Thh zGF|oE@8dVS-5fl2J92uVJfh2fdE0kB?c4a_FYWxT+BfRmuhsoB{C3gyjmjgs zeGUGy>Sv8K(pqK2!N1qrJj~up*ULs5#|=@*M5fWl2ksn?-ppf;QR6P=8~K^n+vER} zO2D~b!9&PP!}P`(Iq?lMd|Z378)Z!3d-4sRNYBbJ)1K&e9q$!7@6EU0yUuyD-Q!Dll#v0@qu-Dr7QgAAHR*Rko{Nr0+i1v$;!x#&$Rt|VZJ=@wXn2so%GrS$41LmN84z_pUyMs zBO5L>dkx}>{kY-CxQ`o+ydL|}1UsGfapRG-KfZ2c>n(G7x7jkc_w@^B_ZrP*>tSrS zY;-dH(h&SGkjwvZ!&Z&AG)js2xKXR?w#-Rs41WFvm!6lcckx73TP;7d=5IW~Hzp;0 z-@n^sjPgS-mS5F6X>Q)9r@H)PPU;VDIli=Kr#~Kh=1()vo~{2J7wc8)a~@Fr4-$Tq zQE|o0ejh?lCES$Jq4|$9KE;n!pM_YfFMntP_C@}X$-LA7d{gN{)+4Jwg|pO`ZXbTl z2daY2yL{YwK>u@1>h8FT(6CkLVN zo>~drNe5_~AyDRi_IZo7XH)Q@##8=*jM2F5d<%8%?l*24!kWLQ_}!R3zNH6D_nY`e z8CmfEB+>?HBfD{iXF{izeA|9kyrHoz_q}Gk-;?)qrj2izgB~6jpOul*zEOs^w80yl ziI-O111owx#vk<^&7V^_a`~3_{xd&Q&FMginVdxsukzSI$}&xmD7F4jF_CUr(9cDtpDZr 
z_x>4IhXx%+cDT7wM!PtF@3H(psea>;li(3suV(sM<&akrv8~x2#yM!CmQU^Obq81G zgb87jYw5O@y^n5d)%#tpwCyc>FMVouFU{W)FDA)X=AFim>`uCs>&Z-it6Pa*{bI}Y zDf1thouuc~SCU&|na8u36B&T#WQV`cdMGv)Dbu=?b4Pr($q-_~bVD#O5nvzL6N`J_Lx+7liTuLR(cEUuwjTlRix>yqBXwzlg1G-meG1- z+K;cxh#{^zKQpG)rp(w@Pry@iwlqo6cZ>M$er$<&-cQU_*l+*XC+wJ%%V#~{GsR0I zd{%uu@)$Qem+!_4=KH_(oZXJd&DY!M!sWfBc48mGe>E>k*NGPy@4qCSm-P*GUPRwD z%xH_gdl-Euoi`927twnSGoJVt>bn5_W_}BO_f6{X@2u-`{}*&!OGnooaro#FKG+1Lx|Iq}!6m!Rk5ADzqde7zB!=fCn9>8D)$ zBk8sC+ob05>79$UN$LNBPRnz2TFZ;*G-0@N{C4RWPI}D6aqZD-(HQPuiQm4hJ`)bT z)H?h}pPla`Bs=r z$uD(v+C==)|1GRm%9*=ez9Uxqrgi#v)^&UT7wqU(hxexaf9$;rd{p(F_kU(4ftlQ9 zkXt|`0kje^Rn`mGVi~{-feLugi!F#6fmP*%6uz0Vdy3QS=rqhkiv391{_0%zH>#u#_K>hPi6LUNDwH$JP zbIAeDBNsRyTuA{~P+wIXd)TYyG_kY;Bea%%-}lB$9H`(!PcZjZN1k;<>$1;}^!auF zj&D9eoUi2|>$|q1110C=LB8S z-hTBBtlOB=Qn{<<(LpB%e6W~xB?2`cj?r39u=cFPd=uX9m5D8lBfu}P_F^NLfvee< z5UXwUfv4cRq|VqL^6U4q3D#Bcsc~HDpc=`${{+{{w;UgFd(2PX_z5rvMV^68hx(C^ zJ(2ZsMrkbp?~>0y{jV#ZKI!$>7fsTd`@OjG>C3@z6yeXGAfNH0PirkxiNz$m*Ka4b z9Qrd@*2Vp+XR>bK>HnG(`<^fUZXd~eJ*dUvbalR0U*4-f>$MKB-i`bI(drNR-cN_Y zVg%yf$$dV2?~ZowCExfCG?(u@aUHqr^j8z>h)aH_tizSVHVf>~wD%s_)dcO2T*utS zO}W)a2C^PF>ySPUF6n=PL29D!WqrIoZ0~~o3D6hMXK5umv4R z`UCY)M&uOeOT9Vu<0U5=8vX3iFP_{G2hWjCegf0ZmkUr(4}^a<{zk!7oQe8 zRF)Mk`NM~;FJsPUJDz6qdjayXyMb$MK$&+pJR3xlyUTxaT=W1?@6jeWs3eX|Gs z6Ek#8%s1%dNY)oo`Uajmh@KFAvlpEq^RIi5*%R>p1tUL}!TJ(Tw};z3X;#Px|}q1LJPjJ94iNW@g8vpPJA;1L&JWS5J(ML-&l_ z`<-2WaGnjUe?5}AW|za;4d~O)BFl}>^@`=e8*kCi4ZSDDL>^1{)<*O|BlsuvKl-Wx z9@_0?4T6Exr=Nf>5t%`kgkD(r^hj+$eGzT__+Ru$E_2mN9>K33>b>}11J?wqbHMsd z<+-bozct9;5pZ<1zAv8K3s1iL{X1f^#@6$!oteYCfTO7@VQ%a_wl~m$g2mkn-wN(U z)}o$X2AS`>q1;n)tXwNO z0d6Gh$)~qyvJMef4=G~Il_8LFaE7o^2>WX{2$PN{?unm1PkB-eD>Fk?UKy zrbRGy337%Gehoa?p;6RJWlr)C`ur~TmfeA`)7hPWfo@`V-i8GI%MHkb-!q`T zKYca*NNagh#l+a}7Z0v3F?4?kwzveFvWoQ;C&O#^<@@#h@WU+ZPy_hYY)_#sIrR-+ z6T8ws$=)3Rqh>y@7@gq!9Pv>PX7#W@ zwfIU`AP-6X3~yfL8H%ow{vO)Kv(Ok9LQX!$ml}@`HOlaz<|2cK@W(3X6VcU2c!&3RCy_((ZRoyK(< z*J)g*ZM_m5sD;Uc%hI;q;@9fJ@_(MR2lVf8u9)K`oU7*CZ2lL&Y6<6uVMoMI`AtZC zDSy*_#KK4Ln>%>Mdu%^p(|FI`@SwbNko`fvP5$4A57^tEt-sB;?cqD#;X4jMpSJ^A zorh=a&DXa6mS=C{xxahFQ}PBjP;{~f9JH6;Z2BPXO9@THCiTW|lKULvK6{}@iziDT z?&IBiYg=!~)#`=?`8M7~+SIqwelN!}jqeNdob7zqKE7om-zEOzZoX*}cIZ9sRb|t0 zseLYV1U!|I{tEVC5dPE{W8I&R!QP6#+lGGL10RnB zt8DsQ4c_<|uT94dvV5(N(S0r0D$~yr%ux%zmtcR>-UnHY`&se(95dwbW8}1jcR$YW z8=#f=JRe<7?F{tZ5c*L3uAA+C7k;AnU4!ZSTg0yMn;Lvd<$q!KKE&7j7@aS8;2+}C zH{i>~u&v^IiSOBntx|p%*Eam*j@Wu%py^}u!-v?}8hkzFkD;f;9}D7x$^HGFAqhU& zbHuF0Cu`(AuHikJvV(dBvTpig@A8bj=#po7?t|3s@bfMr<8OEdBLg|`dLPCyaT%Y2 ze^;gr)<@&xP&caHhkTDB59@aS0R0Yl`c8N^EV9i0O7X!S(MqmH=0p}n4ux+;4%Z)1An|G5B~-zwp)%3Xz8)=m~iE1a13hYoTpFZC9)7 zG_KRQPUAXl>pSW?jq5b7)3{FKI`O~a&)>uIPZ&C45ASs~GW3BVL-+80a$aPI_u1LP zbsNzK+u8PVZ6*9C>$FVdxs$l&w>;}7>~G-y!)Ozq?Er00v6ueFUmGK9Jnv`C7V+)G z&y#a|5@INZ+OFZ`_KbCR*B-vduzGge|-55d>s>;2gm&_Bcmw0ueIQL4DY<;qr? 
zKGHkrf3X`4$er*P{$2gAuz?ka@bMm;7+XKOtGqU2s3|d0F4l_46)XKTpOz!;m-e^_IY= zUG3+IemG$GdZHf=*!(@w4{}a?KG6?y&h-1lrxTxVAMa-RdwXe1<>&3^n)B`FDcLuC zJ(2xohQDX}dNS@!WvRNy3lm$4{Ut*u`9}`b$l;jC|HJrS*6E3_mD{hz!&uA9 zX3ig&{;|1EF8AJfg4pas-a)!N^IBpD8vgXz*0(*|TE9JpE?{tkCW|3@pA-%bX1Si~BG&n*U9w=$epK`cpI6`mGa zrd4^3eQ529$n|9_BYoDcjEoMgh&*6i_j%g?p7xovKScXHqrKnSO_3YRVv+u9W09Le zn<5L0_Ak)>547J!`?qM%GVWivc2#6-nI0LmR*&2gS{3PKwBJqp!?b^i_Jy=--p5*o ztgVTRD|;?-#oFg0w}xsW+VIC(zCzn#+M={o(zZ6VI`Vwknn+b>P2>gsH^1xO`Tu>| zchP=~|IP1wGPF7JQdw=}sZed?W&St6`z`+ekoG^*euDqa@BTsP@yIXAz90Fg(Dx&C z{6Bl|l9vCU{VCf1i?)yW|A(QUMz)mwEb`BxpGCIv|6kZX=6E;9t^6N?UW%rnI89C3 zeV%qjQ`&{5CXM<*FGbT(oTes?zCgR8DeXd2lh%dMOVKnGr>W5HZrT(*X;busc0-_- zqGu>h&#!Q-Xvwil%eXwawA89xzU;V+6K`)mat)ZQH0w*aIv_4zu0GYOT-)T@Ch@sk zJD=WXlC@~(y+ti8L!Y|()777@Zyz5QxxRgT%-U1*r_-K0$Cxr^bdE2beo#lpl=peG zex`0zdk74cU)f+amV$k>_5f!CAQNXU%~z}Y*Dt!tj$(SE^&fBcEkZp-^GkWI{jHQ4(YUMW*pKP zlaUyoGae}MQD^*0Y>(4lI9nW0eD9^KiJ^Iyv}n{Wl34UL-m8aBWm`6ZG4 zRpZ<~etSTkOWOv{KgRiu{C|M+@A2D%@?4G&a_)DWdq@5^&QYtldDtDTtQV%$j1RC* z*DUHSdKSOGgM86=OnriisZRxKOCL|G;C@yguT6}lJ7eXAKUfhNJ8NZR&<|EdZYf+5 z8S;Zok#VzPkt==>i`-hcNn-5ker8-aMXXz$clsOV^M6pdI`WHIYa;(txF%A^|Fb7&YKHk?!S0_E4w7d~rQiLBi9eSQ`Ug+q%3Kl^0ULW+=jclUt#E;-`Mt9b?VC@H@`g zWnFZP=qDw|*{nx=i^y^1`#V@C+|>X5_~x6~3fXR98^U%gn>BaFT-to_xHfv^g}~my zi$Ughg4o+2b7n#45oG=**gq5tGA9;<9zkR!IH+)C5IGM*k05dqyrM7`M9zcm{apHW zgkQuyA8u+b7r*03qI`AHlxU96xoa-o6!x%xo6~eefix?e%F`Z@qfba zI7T+3$YvDTRKHt2Osl$+`sB;0p*aOTly~^!a>+@FVpFF-!5YM3Q@^08yp)Yu$+;-= zi`wjMy789X-b#IQ8CM~Psql27Jkai< zw>(F3J{`6@rH}nn{9!e2W}Iywugkc(J^x#Nw_V1}jC};Q`r39`F9n;JE{5Wymvj8*lymp`#GJFv(ns;{F~jF1@42J&Rd$PYV>02(_IS@e zhMPYkK89N(HNY9mN(|2&!`b<2KD9Y)@0*d2>|Wpgn5?7pX@`F@>W?)1)8R3RIkvF# zjKq6CU`xw08TxJ(P2B5KjukpepR_!aA$?Qw%sKeV;`1&Kd3ELIGA_;kwBg@^8F%Jd z*AgExb&=`c+W9J9);M;|4JiMX+7lH#$Bf;Y^8s-?w~4u&O)4Kd?%!^T%;Z>dhUd7y z>k)KOyxMH|cM_91$9|FfT$`z##1DHGKg>Db@~q7d+f1CuoV%EQRooAgT2;cM68p9D zW-04Hq~cA*pU91!Kb`*9oJ;(?>R;B-6aRg__&;a)UUhwj{b=`TN(+D zNAsyCWqWo?+j*&KL((j!e@r1Tw3{~#m!r?Ep8{V&n;Y3ZNldsEUsjU5(S3tsWx zJ6cag)@n-$yfPZQ1Anz+V21dsE#_RYfxoIu7>^|5uOx3{=;#G~%2Hsk1S>T3h6OV@ zmxRM2=jG+0H!bL2mI8~#xf4UjEVzqvNq8*54N>1}P+1C07UvcgeR{!{IG2FS0yDI= z=+*^SlsT|jw6_+GUl66;!W4nQssf9ZgwLXV7wt*-EHGG4fyGL~XVLy=+LQ2E;IRG) zJXQiei?;uw&BAAa#rkJ3Sr$HvrJ<)u=LCEf?Iw*A z@LAB&q_2g~qRpbKiO+(5CQVfy7soCw9auM)mXW`A`7&~<12UcwKFiesRVptTy1M$* zwN0*V0*CF|`IJ7B`1DEmELVTJ`g4MTZ*_fp*S9A|-4pbu!WG!~QRT|o#+Vj9%Qn7r z`auex)d7FAA{9Q1^#QCokW}lgpg-MlzdP=C$Ng>$<@$@zm%0#o_JZQKY<*tcrKfN zV&()W{1I5Le*)8$fae0s_0M3sOgvX<3M`_UueI@O0=~;O#x-Ny3d6%UncrjKyZELn zjPFtSuC9K6c-O*rb@Tf={Xi#u6~4JW`pG&_q{G#U1&xZHv z1pla5UUJTO%KQi;H@9cwy})`Qn{7FWV7-vdHoO;DFJ!X~?*-Ni*-Vf3a_{HTFAcw> zl}&ptOegp!rEI3jol&yMx$eY!t!`+;d+jjrUeoVNkM}y#LA+N!HA++EAf}C*3I5CG zo2dL)j?HnF(^paWFWZ>Qy>AA%Czrm0@$VF$DLs)X{;L!8mgn>Y{MWqmf&a46$tmYf zxpwxSdwn9F_H6V~{ChV1S3CWo>{y2R(B}>R)sD|>5w8K9c(KikM zba;$n=69bn{;M5d&Z0?%zMp&jiYUHY+Ob+%p2?8DDS74`eCG>}|6(4!Pniw>#qmsz z&x-$Yf0r6h*9~-0yqYQgtDWb|eJ&dOm(AW=m>08-jKg2P*w3r}JsbYZMh|s;hW<{M z;J<9XiH+7NZR^E;zMZ~E`MqiSBBk$O`Uf4xf7$GqMc*`b>S8~imi}qJHzobk*x`!_ z|J7~vw?vPuB^RvBQGbheRey_i6Qg9#N2~f<_yi{ANY&qxIM-U@JG5K#x@)Nw6*B5? 
zl{xBf(XQ%m(JuK3s{WSb8LS24Rc5QdMVqR>MVmDT%slW5Wsdq=l5@v)jCOMl+nTpz ze*5JzNBu3yzhgT=yO}p(<>8TgS661Mza_bNY#-64>Te0Yi*0L}QGZMDU2Lr!TVq@3 zCp0x_XT=I>e~5OIMpkS9`rQQWlJH%$3r$U0t2K9prfjzuv{P&D3O(5tN}Cn>krEaMsDyIdU*`2~7Y)iZSUscV~D+Z16fSl7-=%~G4s zBtCmee3z>~UHvJwbzR@y_3c9jzUu<(PYZWoj5VEO%mjRwF}`$;D=mCihGWWes=tNq zbjS6!I)<(v>grK9MiQ?Biw?cee&~hJqZb^%W%#p6_$|YxnQ=&GtS|w;#kCR-Ou%m$ zvA_iUmJtiI@LOr(fZ}_*_$?cLt26btY&O>!D>rdls{Yn2qyCoQR(=3pYgU^2Tlg4N zY(}g*5wB&$w9Qzzg(s5wTeEESw**tfwsn@H{+7I}!H-G!EQ24-7`27D5!@N?o`BEd z8|3{{#O6EyzVK!OKC2tw*XhSO>1pA!3|gMG{#G~gLPy`VOYm3*?Va-#PTsKaSO#BQ zvUsczc&sv`{#MASzg1?`-x53)8?q9wza@AqslPQVP5mwIg)F-FbLp4FFKRwJ6Fk;A z@{fwWU21qN@*|MVc>OKGV@dt3GNb;M;IY_{&3OGS!DC7NtyyX6Z%M9&dq0Jjy~#IG`Z5$8C`>6=*iD`U)LuL% zPSHW>g^u8_&XL~ooSuNcIxqFNpphx}PC0kVwKE6Hy*>^8%HTUEeH8y@g1>6#_wzE$ zM?SCkt292cjeinzdfn@T_{~x?+N_6}5q;X>pNxN>f`3xxP#gS{a*YTxzq(WSt2Dlw zMUxDDJ@w=OGVB&)m7K?LM{VmQ}^SEolW0ip$GV5=V zk8R?O?&jRXoU`h0an8bJmD%cVfgO_EgRiWui7ckws=r0MngcE~));l-vuIcKx19K_ zGNb;M6Q4!9s=wvLXO$WCV(fSV+Eo256Q5OP)Za4kSsbhSTP8jWx~lqHHkvA5i}sm9 zbJ|Ussj)HiRQ0!@rAaRpTclmp-?GtPVTz!ms=sB>P~o#^Q#DSc&BSLxKUIGV+9`Y% z$EyC8OUpKVmP<>e2VB0)T*foPXSq5cUO&dwr;+zv+vM7&5cmSu&bMLOZ9bFu^hx+E zSAV+t)2zSc>Q7gH_7MH4a0NDgRJpRYF{XvjvW+hlKFj6@DSTE3#+2t&e+&I-)!$;g z>5lugs<%R$BOluO#iWU|f20pP+DB?Gz_nSf&2nv4OIO6BE`*-Fp!hAD?l9*Uov}ev zUpZrm7JkbXFSPJmw)mEX-?GID6@IIoIHCCFE`G~~-|9^LEr(4`6=zpCE~&d^*55L* zT(fNTx6N!8!7Atm$szy{NsV>Tj8NFKQ{N`dcP`huT}J{#H7? zmwP{#eo}uc1;3<~O?xg(C-^6&Y^KSbIUjhh9$A0O=9@UjSx#TYnWJIlV5h==x%Z9N zopR~WX*wu9ktzPG6ZDqn^aT9Zd8xl;qmxt4opSAr^}5$v_?>B?Wm*z{JL#kN_iXsD zcKSovu?+K}&l~=$9iQ38KW#b1?)BCft)uj5hkr7nZyNqdnZw9 z;J@0TXIh@gkiIE-<{W(I3y%LHmsr)`Voo^j-x~F|IBpmJ<^C>p{jF}Gi{jNx@n7vc zU+#0!;Jztbi7FPm>->x-1O^PUi)}mqKSbN(5O3_e(|oas#ah*J%~MsI zt?m33&zeSDeP3(qzy_bU<{m9Hrjd8(%ew^n`_%Juy;}JLSw4MA^P*i9yl)=wo9_v? 
[GIT binary patch data omitted: base85-encoded payload, not human-readable]
z`ZcVt%YSFzXPkJ7`~RG8zG!VXcpPBWU9o1h6=yaOx2HB$G$+xltxhtpjtY;1;sMLG zPbA|)(KA<>S~8k4&;2q605u z4`0>aYKYC6()y)Il8>~0=^4p?Z~f9n$*HYhIwbjM>zB@14PF_o#-1<-`@FN4Xbkds zFg$&zgYCgc4z|xgfN6vft@xR_Pp7+eeUKe65E%?AW>V6yFsqT$z zfgFjv7WtSJS_O`L_Z2w9i|kJ_)Cv`~=K5sp2QRWG&uYIZevSP~`><9V*Xzd;`|^ox z>VM-mr)$|`>Na>S*Z(mB`K%rK6T1J8*~l0qxcE;A$U#W@x_>;6%t00++aaeSbCJW4 ze*njAj58-!u|Dd?cyn?Q{)fKwkJFL;k)x4k!KKUK4IAvxTjC8XXhY*HXTMVi&rRd^ zoK*AfN_fL2JG553VI9|IanDj@N)z*H=c~->752vmM$h9#I+YGyK>DXS#jHztLx$>N3ha2%q>leMa3)wmC_=I^p&eAO0WP?4faE)!`P5V_Te{cOw{avfoe{%D8 zfi&NKe;X2-pRQ;YtI`w$T`lT-2`VCUQ z>xhr=zLz9U(+=B&%Q4o%G5!Xg(YN+A`c|p?t)ocsi?vz4ZuF z+RZn@1NMNwOVRlowcjO}VJ~{W1-OySwCwn8{Dg{qK!}bk576- zXI&rHdU;Q)LEp({U&S)w=d>?l*B|*!4&Q1y&Ni(k_)Ryy^_$l0FZ45=Cl&tgemML+ ztXax@gqMf2w;#M?-MA>YN3>bbT!anS5nr7UXWqh&xcxi$M*B(y)#+c=vxVKEk^lK2m9g+I$-3BNMR^HRdB1wUtZokpS(Zo_8%3h9{kBE1jvQ z1*Gvw%FJsI!5FtK;PcS^*}jyzdGO56=(O6RDcNET``Vm)&ERQvO%e&{YoJIK;XHouL|6^SQXHmYzv-9CBFE!#VLGmg4_h^@c z`56zreCm2Odw+_94Fore(^AwURAagITh<9qKO0_<0>tev0E~7>~xsnU-*u0NrboxrEvGNb#a37SIq{CO{U(?1 zsm^lwzU)A`q^J56>p$6kQ(@(sZNlD+?c0RC>sZJ78SrX!%hg{m1CKKHSL^yQ#`~Y; zn}=`gW9k_57kr#=3c;OqjP-x!ny{(qnZUGw6tjXcc6r|nvofcNGcFZbndY4_&CJ(~ z^~cXJrmyFnU5-TbMT=zs6Lb;=GD$b;#q$V@TmOjpOx5jpLnLlRsn* zTobN*S)T?taBWVuQMm+(S;%F4sGd$4P`=_NxnMT&kzDFR{2`Y*6{c~@j66GKMwxhJ zwoSZdG-V}TGYu(T^DI*MXdP0#rW7e&L)kcGOW8YROW9ZJ`94TJKN6|uA4BT-MMyor z2C3&aAyqeC9Z*MKe~yphVK7({*GEt%X~?O_tB_A4;f>;16RttNgQQ-BzlirTFI)!} z8wQrsy_1o;w-Bj&S0Huon^vgkc5GC?@*Bf_^79z(V;!cD;l7&LNbZxrr{sFu?7YKo z%DAt#H)}p~U+o|y_tlO^a$oICD>SGp{w~_uh>U!5{5C7pmpmQshooH{A8Un1^;Z|9-IH3 zMfOELhU||VfjkZ7#WyCbxLUkr9oNS3-y$S!wR$RY5^@+4zWl1kmkYR7%C%X@HOM*0 zr;+oKlabFLhawjvdm~4|Yt&Yk@p~_FC2|w82)PQm7Fmc~kDQ3yh#Z7`3wayzU1S}y z7+Hzjg4~VVhI|LP1GxgZ2RR$L4>=xr2-y#L1bGTsg*<^gj+A~?Bg>KX$TyLvkxP+Z zAZH@aBF7@nQT8skDZth`CM6-S;81+`U(*jse`e5!t6%Au&it^h*dl1Nvah~kS?F#I zzKsO)N`EV~x@!t!QmUhKC+UlF+R_*8MTe5@aR0;e>YFNZxV{zLkPb*kqywi|dvpxf zb?u-P){8nTM2v5^&6m@r)A;Y06;dCy1YP)}+BLedy1Q+b>wLEr+Ws$a37wZ(q5hrO zOLCOoEbA-W;WK|?%~^2-qx~aQPdR*NE3zuatgNS9(N6`}<3pni*T{#ay6DS22a)8@ zDNDHU@^_QWE2H>LJ9K!4%6=TbRfiLhY73K)s)H%W-N;9grN~j~rU%^O{Q~BIa7=|m zEavd|L_XKVC#JzCsyMaR0iUKCYHyVw4@4&k^L<#W9p?o{e`cd(geoGum{by~(cc zO9PK*T6wS0)`Z6uf8*itj@Wn~#J1lF+y2FN{TAP{>klMuV>GtJRP6fND_}Wowo3_i z`BXb}Js5c_cKDmY$5UxTYPai{7r&l;{$9zoLa%0FN4quNWV1$aAi|z6Y<&&({7>YA z|CUWZ`68Qsq&;7@d|_?jkVu=p)|gh_I)N2BHg#?K<7Crsv066$R+K?wn|>Ahi7H+2 zgyA>Dexr9m(y!|`kk5bCb}u`&-l6(btXq(B(eIOLZ@$ah=zBDeE}X?$5B|T0v2Y={ zwlNkC+veSRx&#(pK-*P4&2aOzmTw2U`kXv1@NDw`F&3XpQlb1~@<+&bp;&f(tE2E1 zDOjvva+&jL3;k+%3Al>aPZJ!iN!_(2hP_Hs6Om9^6B=Y7JePFa!G zPSe7+cW-{l`O<&V_U>R;%fYT*+|3%By3js;!mn*3@N4@B{K~%W4t`A!jHHK_KYP^YQVO|u@ z)wof`HH{m`ks7nBks2>FR%^VtueDjr_$;mzg&+}Vj@&@EpNXGQstC7@MF7@GX56Vy6!@POdWmlxQh)&SYd4N7*fwHLh6|{NIkO&Dc(Xo zPGmg%m9W4tu!GV!6{+++ja2$pA(g&&z`$S#VeEp3{bnxr3jfaKUd0;C<=&b?B=;(| z=ne4iTH4)e?i2o9&3(eZtGTasB9i-RXIr5sVz9^E>Nk(M_;;L(f5!>`j;9@ty#)Sc z-t!vp?;BIVC^KmXw7cr*$act)$j4*Nx^7@!`TUpi+vfW7$iB#F$o|L?$m&G1Y(3cb zO*@QzH*jqn|2>OT`+W>4j516z&B4^`Z^af|&hG-QZ$-{Ru0zg2E<(;nPDMV09Ex0w z?2U}XzX$oflIx|&BIFw6TIAEn^~lM{jmSaBw~)6X-$m9Ti;C%S>$-+Io6N`kA%7~ zPajP3Txvqz+Xv#i|CTGUt`NlqWL6YQ$~#ItFrz8pIqdMaG( z*zwR;2OCe&f7tTSTfJ8pxrK|79eqSk9DSr6wz)2hk#m#mupXr$Pq8n~6m01(Ms{#> z9qs5oS2xPZkFYfQdy0PVS77A-RQpGN=Fz^N)cJ1uF);Enoug+Hz>W`tk>8F1BWHv$ zvbg%UF!Jdbqw;yf!O7|03n#x=K>Pa~yd~~+igF%D`c%)9!*^k1#qQh$F1{2_9vgv? 
zKLmrQEZZ~|Bz{Np6nio#y{7`D_4n3;Xq z{}h3lmnFoRH^#*~<)B!Zo50DJ!p&nNF!O(G31epA9Hq}om(mg_4fo3(qF5sgj9kYt ziSq>-cH(R@22e0;@Yu0od3p-J&{MG8_KFNzJ89F8Kucj`;mQA7+d!1i|p3q&9hnS-v0^aZXvwMI~dEI z??u{Tb^RpQuY|o7A0zDDhPasC$>F)B!owZTe-jsTQC(jGd*1+FcW^9uc5Kk(`3CCq z#Rwbp+61>gu|X?cJ1=8@u6E1Zi<5Z{Jf2QD3y;6;;_=Jnb%C4LNL;QM8R7Dylv8hT z;Tjj0cZjzZUdhfJaIv}cEYilTw2M!8_GQhvaqP?AfzeOh_9koa`B;lDinXkxS@VE`jGlx&`B7}ksl>h{#~0i)@rmT;CmsLQ-XJ>IB(C6|zeP`b?Ww5f zLe1?RUp;N_P~;SJ^pUJ4&;N}+P5Tcvg@-L>Jt!vz2#hRSuW)(?#mKXto@~a+tbyE& zIzPkS_FsXyWqX%B`1R}Xzp+=e=7nlbN)Rl)lr=p4_$m%PH2!SyL#KOw7?obwhP7gQ zBy0bwxWfGO>A}OyC+*7`$M-j7PDxag!k=eP4^H8pkJz8&kwJEFDr+52YiWlzSXSt* z!7*l8PC>5?{jv43F6`TbQ_YJh3Be7|SfQ1yd;BMS_b<0f39g^OJ&ez<_D>6Lc)Xz3 zJA<>C4?{a*t?FXdq~6S$)Q6(t3uE|hT(-_L3N!lI!K;SqJiRb;gdMzkw9exSv&K(% z_FOweI$J$v2U}0q^(KW0?WYHmy4aj27pCOd!L;7~;4;>{eUUY9S7t^9UuNFS`o8~{ zy>kJNs=5~cnVCE$&lx0WPz(tVAtF>-1!=LF1VrSORB5Hw+Yqomz(ia7Z+$|7sK9_F zqOAq*<)Kt2FsM}v*m6zK;%yNt7pu4T`V|7mE1)1K34;88YtK28Gbb~7KtQl3-?zUx z+2@>n_Fj9v_g;I&1%E#(KKRPmgy0`;P7JQT#T|U@&ZOXFWpdeS92eq%x)waPKJmS_5VR z+wTBdfz@jkSp5-L$@6`|X#)m3h>hFH`O(JMuVCB*7C1D2Qx$m2=m#_a&e&B~1T1BLCg(J59w~kt@O~0FKN`OD4s;=BG|5>fa!z{) zkDZmd$fXhZYPi2?i^wMMSMv8QrvKl&%l$fRm*%iv+iqZ;y&%RbE)=-*46L9~e2 zsTX>Ko8v6gC-;m!MP5iGP`yAZLyVNfEwWqx(zzI$svQLo)jo#owalFFUkv zv4C?i4WFFIOT%v>dlcn+5~DN<{YLygl9yLw%`e(HgnPOVfd?1Bi}N`T^)Gi;yNcg$ z3vW&#KF8qA_r&1MWA;C|`7ZdfuD{QMrjw}PX>kMD`Wi$`?-gD58|U(1@HNEj4ycfJkC+YN3jZi_}NXqpvXnhD~}{sUtN#S zVio+bVUjQS1^<6>p|9asmhbptd@D*&^5`ONEc$F8A;uV4FPea|a#l z`4hjw??=JYUzB@4SN?@p$E5~W!P^~~_bNZbn%mQZe-!?XE#=63Q287d!YB40IU#3k z1fM-GKPY4U=t5s`SAkq7_z#Zo1rJ>>*Bw;Y~dlWULv`2D`%iEeV8?%#8% zFIe|YxlZx#|F$pqi8DXQT4(TCuOqlL(;0l;=L)_wJTCZhQGD>#aS6dy;}e5xZg&U& zcz058?Y+st*RxZCf!?XX<(H=ggIA>oU(h_k-+*&6Z$E&>W&fzm-8h}ke-m0?P2Z%i zf1vNx@!kP4CUAf3BIfQN%v%lfwvBn(&YZ=;^Dl(w$CHB~fw`#G_ZqAL-qo_#0eCL~ z-qrYU76I?)fcI;_yBgokBH%4;i%eLATwH=&d=r_l9+@C<-k*-i4;r#iXtz1p_NucZ zv8f&5L2;}Xhh)O|y@>-ZWIXZ>(Q&2!AL#uz>$t1vtI~1ltLl$DYw3^m`>*P<;z^Me z?DLWLz%%s?vtBFpX**AZ+cVnf!yJg*s30%<;ttxtXLY$Dd;5j|CL=Q@BReMRa-$TQ z9D~ddd>6iaS2HpLolD6Kv1jtp>x;;<(}8h2nYSYNzl*txWA2LJq48WNKyL}qn_-{5 zgwAj5$9Wc+qG+Wzv?6jv*)z}z_Kfg!v1jr*dtAQZ;#?eQKOTISwiBUUE3V2qz^TfY za$UX%ynT$%4DaUTOwHktoJm2ZiVR5Nd~VrKZ^#+5?XWb8oH6|Nf=gmMh~2Q#!Cp3# z?Xc3V!!EVacE~}NbVL3KEF|BgA%A2XA`fI7Vt<*(A@#B35c8_`&Tp{DA;U&6_Mw>N zP)jz#NaT^oC&NZK9r9?TNgj!fBU-RJp&J;{mv4|w7wH#eVSmi>}Sbe(?COr|u@| z{tupcPomfk>HY`!U-ZUBoHwzUym3p&8@IZP*6?xM|Z&frqcg%UgBcjQBUo_xq_ zZsy*dTEmM&6O}!&dQn{Ec`kV6Gt}cVeEDXczw@HN%f~{xXcf6)s^L*hZHW#hTf%Fy zB|O*?K5U7QUD4&2Q?Vt)en`ib@L@|xeX2io)s$fR>r=yeouL=D)s~Pm>0;RuSHkQ5 z0sO>{5nCcVA!JMBz^}}6@V{B?Eq=lm{3snB1?&@nv&4&D4&1G6vl z6W(P0?gxC=`n^@aN__*lWPHPD`!ZcFIoJo0k6ii>T`r}twrt3y6k@OTxgPxeTB(_la`-rej<7u>M&>NIZ^B+xd_n%Q@4obJOX%?}@ENu$hq(7tRPf2{W zI(Jm;2%+B<^dp!_Kgu5-EYD&mG;9A<>+-2qo}7_7MK9;8pCG#Zg6w$~+4CH-XB9NN8on4$+yt@IK`-)WHL|4x`M%DB z4|aqPX2J*8z(21*@4oU!`N1{t(}CpwGVGzNbUF1J^67`<1G|YfUW3>Fh@4;|tNw^R z@gVuU&^?YX0VkhDMm>j&65gQJRndii_lz_6B63REL+I|mM^?RztXh4uJcCVy{{HH? zMC_$sDtl=aGOON+J@t&-zfajruOYV_nGYy?>9r4~zR^w^ahBUjPrx@~*-77pf0*qg zUt-8k5`UxE8H<_ECCulu%;%ac_PnEe|9QSI_!hG40J`@cX`N8wW ze!4KUUwkohvV=K#mN{9|1$*hjTa+!e^l5B9=A`<MB9U84c4M#P>+*5}_RmCQrWMX2H_O?-v4|M5XIm*dHCg-M8#yA6 z^>2v$G~4Skhb_s_5`BMGyV-ikLy13FXR`HzaRO`NR&-nMujnPQY(3ElM2?D1Ahw>! 
zSF^oWMO)3u+4i>iL=QMwoA1OB$&2FR{IfXDH;m_;!vxMdOyn$5cPKAPBKzqt&>B)W zXHd>0Rr|L(hW2k6`IjUw!DZNW0qj57vsH$DC;o8pg{$=+(T7q~)Ow#AKREi!SlJ^f zes%I+{P!+=PCIoyQ`X7FZ{MGDL3hQm{zF{(e&qw{p#0{WZ@;_#JUv+`eJ$1h z!32KsdAu-63%q(WYpd@%1EPb!Fpm8tgVD|436q%1&BOx!eO!Lv1I_ENzttBw$~oQt zM>`Kb;0u&;o?{Jd<+4Ze(m3K+$7_K zMEWiJ$<)3w)|+IXnT$d9q{$w$YJBbDuX>Mm-{LzgT>Gmf zF82Q%+E?x3J6G+ocS`oPPNIH4zm@z3&-St`#Y`Y6MW3K#fMTS-vLL&m%oU$SBZI-x=YCkY|xvGPtFV- ziJvBXP67UE)js!Ry*Y9X7M#P^U#a{0N9sP7q-M@1F#Gtcv=ANd75dW8B{23z9)Dms zbaE~9G7P%82KpJwxu8P=&{M1P4Q+>R`jDR^Ojpp(rL4Ugxj;riQ(}+(Md%7T6n#+a zvS>85A01R^Y9upSdin+Pv4{G|b6`<^w8YjKe)Anyr1=IFHe2K0Cf*M>h-_Rs8sCWRio(4&uY z1?3%)d&alk;d>>}ocTM*ng)4}12o6V-Wkbb_Ace8LYvrmFG8D-GS&kwU&DdK5N(b$ z`8)SAeuFm6{!TgLWGQ@9*4FUJZJH`;kI;0)UJ2yi9Oz|Q3oOt>9(^b&(UTD1^zEP2rms8hB)1iGaA}9Nviky5G zd58byedNvu(AxhXgEE;LSzmsc_2pM?jtj{8ve49OSziVZWqr9-az!4$x=mqwwdy@cqf`1FVHd{rbl~VnMXP;&F~Z6)_pYhYa~BG$}D4 zRymnZyV9l_GiQAma^RF{j(O>(hTWD~r&kNCWqLt{-l8tnza zTcJG}kL=5mwc@5R0wpnH1ms&IQXS#*d(xn@OVb=}iW4aCI@(1Ra(2XNlZAdgH-*wM zPeB%rK2=%x4zenpSXbo65^T6XAh!l0M`Vrk2=!MXSK_eq4PO4dX^rjWagBQh*{c&+ zrPkOQWudIGoh`D^kWaY@Az5gQCtw+;2l~=*RCtbH5OpV=vK;L>-1C~`-l7)P0p|dRSbA!E z%DHHJ1zVDHhYMdNcYRJvg@F@Go0bfeMpDuH~4x1nW+z}oBP3)fp@aNB5Z^r&H z`|Zp;-@32jWz)Wj6X?Lg^Bu|T59J$gkRwFe|1)QeOWSYxqVKDaF-R<%j6rbod~mY^ z@0a64kT!*POTRBcU+IR7GWhE6OuSp36MU6#cV|C`VXt^~eEo#|eOvJz$a8}0(w@o_ox09eZA@a)JWXyW3pSL z$$P=eW_hn}r;a>H-s@!U*t6D<2!E6OraQoiMDX(?Jw~S^xOS^)Pn)cn$oxiIGkN|p zsSmB02oDqAlkl)p;l1L=72d1iLy|TZ>%7E_} zeoOde&7@C0zxaXrWB&Zx#J@LmMrb1d)7TU|H;fyo~I0Q!`F59HT*xhccJ#ulk1+E|5OJ1 zwIyFnS|@)Udjozc_s-W|dUf4z=KluWTyn=>K<@ZD;$ZHTd*^8{9a;DE{HMt?Be@te z$t5p-_xt2ty7p4$8^4|ZTYTt}o3V3RXMY`Q9rw$kn1P8{wMw+f!#WM z%9E+PCp`t7&Pm)Pz9M|K&yXAPb60xRLgKViIg@Uy%Uku4t3%bVp{w2amS?cHcNhNj z|4MWZ{5R%L<}3yO`7Gv2>i2;kyUD#LIN}G6lgZ2H#|N60mgHXsE)=I}FWp`$EWn z8871$+ie-TW)H?FwTZGc*gwM z+|Q+6#)rOGc^UmpqpB|Vd-xV0-*36nOaU6a4me`?p0u$-ShUr?p5y!#4xUm{>8pj_P0li-b z-4BQUuZ0f`V-4pTa=YnkIIZLlGVj78&J=$zc)~r<)syfF$@wO{z~B$U6NFzp1)nf@ zfx#byCkVgz4Sd4j1qOc*o*?|g_{_vcMKX4u94{w_I1J_~xz;&2Ea2@6kTsPql z)U`K%FnELT34e=q&;H^(2;w@!{fM9ck{g5N7rUntlt`oLw(ZC_xK&O9m|02&h=U=BL7 zp#xOH+kORJe8N0?;E(f={dYS$`ty)?pVZ8qKNsBaLhIf%um3TrM<#mLJvRR_+LS!( ziR58_Me32iyB47*P$zuhThz^4H*fx7c`BLo(z^d?^PxyJ#V8hz!|)ymaXDLe@$|UR($K zhwD7rz=P8zFJ!HMu|-zM_YGMw;uK^>w7h|_=08#%v~B)n&hKKr=7I;tT!-hM`o|cf zG1tcYKTrLW;D8|qjQN*&H*i4o#+dUD4TR^P>+t-)X_|koPiFpQ?gbtr&j3G&KL3rp zV8ki#f@t&q`HgGQ9~{^V&cN}9$?u4sREO<$*yX6ooc6W2c_k7OCSV7EBDb@J@C{@*+LF#Y|+<*s_sf!&M$XRi9yg6~wopE_s<^Xc!$ z+(+lC%f`n38|wov-f>s`+&jKe{|lJIBWsaBYb9@;4 z&6wvaE%Th#ih1sR3iF&7bDkZhd3K!QJUgP!bKXhLv$@Za^W1Wu+dj{Zx>)lp{Ws=W z)*^(4mt)(>+)n0K&8fcUrfaHpaL;cRUidoq_Fk~?!dypkax(Mg_i2?YSQ`s6@7fOS zE}=(kkIKo&d$IMqIvjNxbbE(GyFzkpe!^autrdB}PvmR>XlXHc_6haqMs+^y9w+aM z|36*xY+2F4R7eGr6`(|zDYcx(YFe|DfZCe!jNAi);N*vV@5L8JNd>OepfQ)7q~vaw`2{? 
zZH$|71sQvPe0bh&`uO2T*SWMS65u~?X`27q)I|TV)C7M=_|LplX3!RB|>n^|H zsEe`e1RlFs!#M=3COkehOv~G5=LO}gQOP5}9=y*1cJi#iPWX_(E`jks|6qd3W8dxm zTNPbz)3oF3MNiT^z#&1+`QL$|Rffy_*L4Za|8jnXhx*y0=a#*%?BVIdZ&m%&%>vU= zj(GoUct;R8tYlqOU?F>6ReU4yzU#SmgM`U^UKi>cLA@%jO!3E`;7gepEH&t z^y^;YKJFt9iS&%|*}tciL) zfotMS)bn!9o(1(h*Ez=XZmx~zlespYmuusBuJeuO*&m&6JfF_B@w{9c&vUKLgGE2w zrKR~rKYSbBE@S*TWBh-N@fVCS$Kf8>6Zni}kJ3}r|AF!wWVFCVU{r&@H5MEy4EQkK z3IjfjYq3$!xE34r^uNZar~frZz4Vj)LFRK}cd>s^ohex^=Yl3ieBb;{^LNZ`ORnzf zB=-3-*LBdmtobB^E6d{I{VCvz;uGM?N^oO0W0ZYEvWD(OZg+sb6OdKCh#kmb&z#Ja z!@)eTM^DWmxR68s1-Z^)&U47Upq}BnR@1NJxsD56*8rPZcj!6?*sy;i^gP$O$Z_$9 zxS`czcuFd?ladzim-CL|nG@q2Bb+`y$RR8O+G5#-L{?Vy6P3L|d@R<$& zy2au4OTS9K@Tfjn+m^X107lcjJaaU0pzxPyx`XUPHgrVM7ln@t565SYz6c*R>5IQ$ zEQY@L#l}|Xi}1cA(Iw#3lfdoCyl=>(5#ZQ8tbun$Kg@>b3vUzsFqL=GnDc$EB=pJT zswHz8`R$T9dBGsKGF#XARIR*qfp?BvGoh&lE=)7V9qa5GNX-!0>ROLj~Z*09tKr?ZfQec661?e6-%^r7exVKM3#Se<=(SZabC_V} zIO|}R7XxM|-f(Q%)7?>e^fo6l!G&yy!-kH>E>FNNcPP8O592p@?gr$t*yX*uEFP28 zWzm?=blYt)_#pQAFMp;TIO2?}+>MXSJtx26go~WZ*y|2(*GbVXUN&Ysws#M7=6L=W zeKS$&9eM8?_IwU=EBoQ)%!*aCc`xNY+8yMItL*KfUB|Vm&FqTk6&6!fYcRcij`yp+?EOhSP zyQiq@nR2~vs=A)Rwa5)Q7it4%0;aO&*_ZP@WFLUUcKrn1R_VIX>`&b;e*$NI^atOU za_?63?#DQ{r{j+MH;bQh7w7pQ7Y@!jA3jKXeb}e<8S8*@&eUOGQ;u(88fOLl2Dv1; zVT|(;j_|(`-zwLKf8Yy#{^R_hTob<)JX#jIKK2V=@XINo>)n_5f_wXguJ;Y_1wa07 zeo(95T9!julyFth$8U+c9uzF+cd@$ucCeb?T6O)Mpmvn!DT@*Y2YvjOsOzhP<@_#I z*WV3R^INN~^McwJJWpAapap&WmZ)n-u$e?Bs=C@W|yMka+Sq^1ULR^q6-eo1~ zIzCv=?_zbG5Ul36mLg|vElNlX%6Ujz(4kk5zy2x4|7-Rl{D%EBPoppY7F~M2Gv2>I zYzFfA^#SJJ*ErLk=ROcvQ98R){NJYhh$1l0=e)o2gqjsg(rZ?{d4A1`omn+2yw}#O z_-0|vim^A=toTK7%?blb4x!4FQ%-cY^TH})(q}R8AiE{V)P^1)Q@aaKeA2z z$fh5;4reIV5WLIb+`n=s@!OPjl%Nn)D^+;-d;A#)<8jWM?I^kcI4 zX&8s#gsivcIK2ZGJ3}^?4;@C}tT{B@<{FO7xfYo-44HEc>*VBLh;pW(FD`Pdi;sH- zP^acN+fjaX{_(_t`=Fl_567|hCY83*XfvI*J;*gLGP^@y8+&ECq601OFETr>>ZjS- zCUl}rVz2CeSPP8gTqrO0-h!i3H&1e95EtXF+RZw1oFi$&jAQxp%0Hj7dC9$j)i+?D z%>4Ml&3pPJ1)f4aiQj8?iYu_p?eM$NC2u?Q(B|3;;sb|2NeFE17#H}g&>h&--5KyO zhyR7_-%m+LR)2%?H{$U&W;+6xp$nu?_BzSYLs<{Z`=@K<_CUsU(#CdAclZaTJN;L= zI{62>68!^Q9sLU>rWQNkz_-X7dT+^UIdi@@XU?fS1nC#~C6CszFA4&^(=+^%6YC&n z@t;Q@HgInDo1dL4Z{-AH3_aDpL4p!9_IRPt)t)3 zt-RqgV%4f1_0@mE{+GT^w|_Y>xFX&cc)Wuz5Kj!}o*Ow!`;Ob|_x#A$a2da%>+dP{ zHDq)B*c%I1PjmU!7$&T^w%OMeIr*8_1IVBMY3OI%uf5=!z=3a}MRXh**v7t&UF^enj&rN-)W}`rAZCn` zOPQ*5uB+H{V(i=OgL;3wR{2-A*6=;PA^U4&zmMb?m+Rqa+Q1k2PA)j?hhC(9*}ZF4 z_XSs1APmhKjx@O_(4;|p1Bdt!umnC>0euY2q?hK0b zhw~`LzMriHinX-5a;~LstC(BqAM0HWvexq!IB7g9_iyChcKW`Fd-CpEcZS}P=OqqV z{+ILGKS*^1o~LdBWxf9YoBZ~nzq5#Eml&Zwti}HodDf5qOrl6%tMR``{{oc1(Edu^ zck%2#a{4}w{X9$BgTDKp2XA1G+)3I1nID;#%Y=uR=0x_S$($@_PRf{*H0C3nwl*<_ zVq(387V!x-00-6PL1_I>`~fltA_pcj2O#mJh)j5(e?`L9zayeoUgmQh;$qA@uX z?Y*2aTK(l^OL@*m-*dPp^#yYOy>2{nd6|}-lclZ5$&zOr_&Va_e4lP)K638elFGB` z-Lf71a(Z{e&)4{ztX;@Cz}Y;*x6&QiT0gBf_i01UHZH&h%f5GAlY7i-xUZ%3HHT-S zeWShVWlXiul#EGY0b{i*&lzLd>gtsxW0UnT@q;ag-UQB*`OS{=_0_7gvvMdhUO96D z{yg?N#(Sy5;U7V)PX5fPedT_EJO|vev$M0ZM`dS?z@H@h`UJ+(zRYFcj-k2Se_rSt zIn3=Y*5D2JVW8uZRORZVaBco=8%JiZ3_S2!}5Pz-S&zLvGyTMibOfs(m zgJ`%U<5YOS_`UL*K9fw;p)~)bJ!LzNduIkqf zE_DTuM?!ak&**|<1%KsN?pbl$jMIJa-^9Y}+w*74$G^&X@!}gyQNBUZSGS_u%J+No zu5m52f~D`$QzlzIq;%r zZJGNcun``=i#_#jct;8HqBpu66%C6h zv~^JYybkF%G%*2Q|LncK)S=nVey+E>rFVb3NBXn(E=-l@gm;2xm&$yLPg{71(GTJC z6YhO8%Da!IwpYFMLHNA%Rrq|YcCCFCSnmg}R#^L}3&A-%%O`NAv>ML9JUX2F>u`SY zUlPtia7g%$tX&8!^Z7OCZYwyRjjcUCu3~Twza{)Cx&g10J_256Pxl=gKV3T}^@~mT zCiE*fXS7vHyV>Arj^^}_zgJ70sKxo)`oDamR;Sth(l<+cM&AsaH~3lxyzNSzzm3-Q zvwZq4<8|RwQ}MKJil=q=Im&Y0 z>zGM8ZT<(3gxT5Ic;(UIkpN76+g_H2zZt%K^(5BF|_1)8P zPr*&s$3{b^&3(h%_x6SFQsCR;x!PoR|ld-qMy%ynX`i2?48h<%ma>_Js 
zzpO;?_Y#G}oV|33$Y|}LCci=x+1gkadgoO5W+{7&dJOgbsXY4+jJ@56!psqp7?eKgsjEq%{A zCfS>}^XSm;F7bTx>Vq$g%v*cl@&A1C#EC9XQHWRVLO!4_|C4-WR{eQT7XO3aqsdof z4eImltex1gGG{?z^~D~lK)#4Q|Zwhz2A+whTmKHf9ZRdjCoVpK_S10*g!ImLFRD+bLg6j{F=L@c;Zz(vwCnH&-L`) zJ+mI>`xEuKly{7|H0!4O>08TfOvf&2={xQBjKWoYUgs#=spW6WJJvCp?NaG~!G}5h z3eYPj=sLLg2F2z!!$syS^lWy-b0wDN#>Z)WMUR&@%(}ne#T@2E<~;{}b_>6?=++K& zlsxW@XFTJf+a;7m(B%Y*_5U1shxu~AL;GT*jHmC5DT|oP5{lfH{#**KMYC-u;m2Q2 zsltalnX(k$ne?^2>Q(Z(tbHB9t;JWa0t+`e-44i~d&0!4YcjTV&ooy~(gPa|)yO&nF z_ZyDVeFL4PALqGB4_q5p`YHR)4s(X~=bR@|f15jS^sc19G1gv%jyA$;1r~GoHE5s? zKef=WneN3OB{0f|j^ymnz3i9TPx%;Mme9qz`+TWG;0aFVq#JT@2sZ2YV>7F$(F>cJg{6D=*kFx3Dh~lf+u6_l4D;3|?LI>*{z^L!cEu#u4*%cKHKFSO4 z^d0lbJN?GIOY#1#@0b^6E*vHMw*(hc#a8B>9`|iYy@v8=c3i(}@L&Dp?Mu>^-nTGS zo|Cx}y2Y1yFo!*n`QV($;nO(|@r{N0w9e9}X{qg1FJlwmSR@RZe$NVneBvH<_ws=! z`QS+bcrpY$848|U1D*^6PuNp&+i>vYI`Bl{6+YjP5AbC1Y(IOA1=r;|VTJmBuSeY4;Fs-9DDgl?L`srml43Qn9uc&2etc%9%P>;0|bc{y$5c_Dt+Eo%V$?w9a8 z;dv9`aUs6fEo<5t zZM5*SkbTg&jw*UvBW~He%;se$XOoud_$G3+Ih#~?+2T{s%?y4fcB#SB9>y;Hs&us) za_$a2MP03}V>+98UF}yOM>p!YH%G@kLwAeD>+-|6*H&I<+o-X4qt(`K?Q>_Vje6n@ z7jdBO(qs7Cj^38c-rp4V0H?ASIE}cHbYekWm7&&0$Z^SYKh}qmfn0@+gx2} zr&G9{&kExLa;C<}&iDtN85?$UZew!8)Xh~7cdkGGvniY_kyQ2hK&{~jXU~eyH==(v z1r2+;6TiUOi+hL}=*4d?zS=!5;wc!H(2!Zdp{tSbxNlNH*jv?vHu$qc+3^*+ZWQWyR<+EuGW5aq+gvl zul4+J-y+(T@qWigEJjjjoV4@#C&QX;OUAehn9Deie1pC0%vT-fh|B&?IfpBmGLG1x zQJ*}xxenOx{|0LpSEel3e*x!#a}Md_#3lWP7^FJpUe3qcO8J=BpCsnu#W*c@gyiJ{ zcCznY&fmy%$+?Ibb$=xvO84Y+e}yx7L!Ru321YMD{KNX^l05$BNBeTC;o5tnmF1Xp3`^?z<+;6zz{kukMxkrEK;5y6y#lLB}$G$Ju z#JN-d#lOk5*MIbfTJCh>3nt3hg2c72=R6vD?%LG+)k~@W=Mb&(hB)7vsakTK3!iv* z-noP}E~m|&d_!P)gn0Uoi9?a`Nk2Y?X7d9Z3B3cIaN_T-Gj* zKQIQqF&h3c3O+Itelmg>g6oKR8&0gXOV9Ba2fs;z-z0NCh38UvK8^R_H4k_K$9c}+ zH;07ZKpWdUjr?XS{3oFEnt-cweQ(YeQTzs++X(1O~%~pChnEP=z)QR@&79v`JAXr0>N2<%K9a&(wW3GjSNA4P z`F+x*NehyR^&81|XG4E;puhdZ(FI*Utgi(pKmOIQfSj@QB4=kN(--1sDsx#I`-Jii zJWyhBYr$KIdy(@|lG%6oQ|%&uHu132;A2Cf$C2nDKXG^Q-^x1WJ~lNbWy4Z{oWX3v$1G z+mPJ8QvZEt?y63%+%)32|FguI``S-jx%)XU>E-Whx&K+>;5x~_>Z$MMN-WT8ALZrJ zCTA-p`vtG%?9GX^aSQkUOuL78FOhaW6EO)@`SaRoC;3jhCBXBXqmoHF! 
zc{lXncAUSV6LU3s|B116lM4d>HRZwu*Xi_~i!9DXrp)4e#o^%E#5A{mJNPgTIkcPj z)p$pyU-s1RzD?vxrzY}l2Qg~K_p;#$*71m}>_r=D3^Cf+4lJb|qwm67@=W|FI~qSq z92#iKiwfZ>W8f{L;W4A&H6!6gBj82Xm7d@{30eDWv+e^wui^V`O$9T?i4SA z26sZI!i$vd1C6>pox{3M1YDHv(>c2CLmP)^D?<1AK=@DdZ8gz-{uZxvpOlNWu>)Kg z8}=f9hr@rCq5BN~$@vT1j*JbGm-kBMd$_@qcDVxM;7Pr|m`V(A#)jTWZhtQFJ2x$3 zLv9XdR_7!w5MAkF=pkY}qC0?_Jsnx_Dw*%BMw|r>v2b=fGTN*wDZaOBNZ?gqdJ{UO z#C&ccHg+#(V$48i+70h>!{_|JpD}-s>--IQ=uB6%p$~i~qF)=)m9`WF z{xanPr7P8a5T|&m(ZA26Z#N2FpWg(p<=m1T^zRGs@+f2bNXPSIt}I1Y5#M3_*SNa) zkGd|ZQh44%o1zOUngLdyLFJDgdFZ$SI=0te)e){kUeG)np z{YL2x4{5o=qeJKaWcno@S@wxmf4Vaqz6+3EC_1@LtiA zYSB#+w0M6t``_Mj`_|N?`qsY8xqbJ+(2rJ$gQpM$#tsKC;1=aUN--ib8i9n#&a!tRXO)_ zkRzg}yy+EP%3U`RUwC)k8OAq0p*&B~=w~80D1G+uyOXiW^-^@eM0dRZBgVFsaqMCo zJAlteN!l6*->6N~*8Z7qSM%Ms9(3fsgA5d&>>v)>$vewF3+Yk<6Ol=Y=uOhjA=;@0 z9u1B-e>WF>Pt?|aD@9wA<6&&{Me5X8sh`0AH|XQ@@tyUIYa3%(%D6;-+`^cKAcsDo ztx6M*i>`CQBfycSeC{;pT<{#T&Q%K^+KL`#=v{j*LSI2XbVi@LQ9t+ROJZ+@-&Mn( zw}QvLiB&dquHDX%jn}Aabq?!VU!vzXAgc^LOJa^1(7hz~H)cD~YDmutwR1?P!!M!J z2Ks)O_jdy;(RseOh};?Qy)Mk5=v@QMdKY@4=v@~w_k%>P@lF*wrP9BUd!M6gehJT) zc;IKy>35^c1(2Q zjxG^8*>Zf=qLb~RAD7aPx_FO&GPG{!V;}R4T6oD4zIzwBJKXd|X#5?=h0s7UxZVLf zCPd>S(C>=03;q4kamX9B!3BNyVIS5t?(_Y_!0BtjU7yf0zdb|;dr-^m1>WJPUAU^=QN(v*6vczX*{R#oW^q+&x!sO`uq?(0;JxE{uNrs@or}D1bCyp%wy4_yP9QU8B3t)@*P`ov;7am;jtpq{k;tQHeuaya ztTc3_UGRUA8@r)9!7uc?4X+~uC+|hayQ?5DW9mf3L1s`U>>C#y)_vXOW)Q*e~&kojyIU@jHL{*VFUj;BU9U-$d8zA=mIX zL*E;!-xHnh7X6;+ecAA9(fPW;pACJlf;!RlM9+Je`kd3G=WW$>JmJgLeDhTFJSS~* z({;UWa&6N0L)AP3T%$e5n6z%J}p3wd@ zUEecwJ+XUZ=y~scC3>Fn;kBEd*T|occ_Q=AUOlh*Gi?v4GgK2;uZ*vfGuwh&3Omla-76e#R8H&A$`cUfVX zUjGB?e?k3?)c=zDIKBPMxw8tdD)JY0p6f3h87hdhHD%83?((q&IrchT*-Bjx4dS@5DSTv`w(mSW{C9aKe{WsU2Qoot{ zLtGoVn=T#v6U zZ}?w+k5c!4)P2GAGv1Yje=2&R@ZY^J6t3m^pOi28e+&Pg;Mxnk6imG#m>RIVo_YmS z>IJ3-j52_if~hwIQv*gnpkBe0dV#3{>rCLKVCoIQRA6@#bqb!;DR=_AF2GB{(;I^4 zkNIE0lK*X3hUmeDrB-Rva=5J+lk)+lp87a z{NI`1Z&Q9mImZ7N^ZQ*&F=aSsvjy2p9Knw*egH#vG5nBLeb(?pT4kEyhqU@+#K&j# z2a12x>c0}%W7QX0@&k(QUBQ_cnzOt?BX^PbqAzv!${IuLJECA|9__oq|$9QPmL{!+Q-evo&6$G!FZ zzmt1!ac>vbztiunmA(xgm-t!clm!O_uk^(F1gn4k*oM%&ae}l7p7I^C2>yIIPxtzrhK}Q z{h}eCdcN|hk0jQXIUY5c_QG>KH`14G^_6EnJ)`idaWe}$KRvT>Q09!nE>ABj96T;i zc=6ML!mBfviH}{iXZVGq_`217tG;1;|LM%xg{#NSDg15boWeC+k9SlwJd?Si@K57{ zh5w!zEL_XA_^rrMGuDca#<#8bX!IqM{;IxbhWf5KD;m1cXVw4AQ2$R$f0>UJ;O3d= zuNAIVcv@k31-zsyde~@*!92sb(AK_6EP(L64)CulD8lcqq6nWGOcDMzT!XI>)7E4x zmh}uPj#zQSiWdei+3!lNS_GV|ykrry7i1kPCa>V0dJj4^^MZ)?xM!wy;W5IW6dlKN z9`PWd_EXL_J>5Ag6nC!UKl10LwyW4C9+uPyi@@_Ap~iLs)0 z?2n|+cELyGdt={-g-%nUQx7uVgUt6J^F7d8s!pe=$aD`f-GfZ`Ak#h2o4N;`rXsUF z(5VNR?LlTozQ_O2sRxZ5BhD#n5JPm^SV9Z1}YTUt-c`JK;}M+N>C#25lBY zo5j#(F|=8{s3JQ}(I)qx&0=V?7}_j`HjAIr?=6z|I`ZBPyw{QUxQ=*_|DnxdXtNmF zRPW8s(JHScKlyZWXpVpnrSAK3tn5iCMy8H_f-{Ikrv5-v`%;$1HQkF++O<*M#cSji(LgBM&GYu(M7Cwl2tdc+Dvx4p}izF98NDB6ki}$tjO%X>;pYr@Rsk$o==PH zj;dp?L?2dmGj_JwUKhK$IsIGSTaVsuwbj>)j!oWev6=U6HQCGqLN@bg@<>{BZ8NR+ zB{ngZ55)>EtNmxCbNl(oJ!hv1ABDe%be)rZ&uxXTl3Sb`(;B>N4)@G9Txs`^4L3(} zfMd&w56`gS%y?BwZVpp_Tcso0&o^(AwG}?i;7_aOBL@Do*(Nb;3p37$zxOjrOq^+@ z?iPlL{d`ngp{?+Vi8HN)Z&aK)30+xq-sxVaU+KBnrD@;M^;=@bt$VG5_=gN$WazhM zyh@WZjxB2gO1~xd#ALo>_->8$fRLP9##+uYwI4gA-!3b>f&XRC@a^_{X9Zjot{&9& zJMqb!6)NoYfXJ z@Jg>d9r0HiFKA`_)t`*DVmrfOW21{avvc6Sz1NWN5VX^1s z)vPPaIu%8U#p2$Ptbt`Wa<54|mc$K_->P#_l$b2;J<#<#Wk2FxL|hgzLu`-h{gJMVzR>VS^U3+|HJWFz}0|vM0^(Y2CR*>Z)HmZPXo>o@mbUxFph}N z0*(fJ!|_?vh2d(%X8}J0rfMG-|J$&%#JbtAEc}^`mxV{$K;x~7&$4+yrP`MaTy1`8 z%O+bk5r=Kd`KUUR==4qEvuyrp^Ur)ezSY+4ZQULp_1VBbRa}7yAJtx2lZ_dU&obGU zRy|0?XSG4!oE$Aai}L}(Yar3iU4ehvcE4@++jhV0L%IAM)TO=(KKqr%Z<*%ESg*DE 
z1|#CPc;4_OhU2$P{=#tlmdU>rj^8r*3RV18Gk!wR&F%QDP;8fq-x}Ju1{(aKz88&s zsG)T=J${QlM9Ai*@mm&|tYWyBGqayv#c@d-l=#qz=L*Mi5eKDw|jL8^J zIKGQ9U968s#do!L|H0jGeAj92uT>AU!dJz2nP7XB9^bVwR(#hP#Fe)8{gmUqOmku7 zaaLSW@m?l8In_OXvLEoQi}&&p?^R@q_wo|&Rb-0yBGwDqX^i(G)(g6c9q(ngXTvWB zzNq!{mg2om0e_S)uj!ugsQV-Iy}4(1yce-v&}QQvL}I<5&Bk~yV!fcv#&|Dcy`at5 z@m_X&HvD4XOHA4{?}a%9{E14NG4{?V+T`BpjQ5(oyD{EtqaN=y`o`GtUi;e^@0CW5 z(r9}SWBSb`{>!ABsQt10Z`fH@T}8!znQSh*-B!ds+3=MZ|5L&<#V1-D|8)xRmhYSm z@n3hIiTE!QoUC+irEBZ_+0RGD)3yX3g}*Jue>F27N{+R1KlB-k|7u2OHsMd>o?`p? zu#MJM_%wq*t%7e1{Asg|V(jle>G-c^bh$80TB-Zl&(A1EcZ+GO#l)Fb!Z#|;oP_TD zmB)Xvj^3fj6#vElH}HRp@n80P(c|e(16&lYwl@B&neR(G=S=*UN#2KJUW`047Jd0# zzpv)ErT8xsJk;~8)OXq?{>!ABm|z`Mww~+vo0*HK=mbX0k;=a% zey+Lrcc>4q>&_)tlvmHcRb!w#=ovcCOlktP3@?7yQNq2Aax5#EQ#-n%tLru`&G187eT5P6TRbcNz=-%Q-4_@Uzoq*E!|_`&{D7i++wohb_^ngPzh#oSR$sXh zx25uLjnngQN!-fQ#A}Ukh{gN&c;IrulTm2{s?F@lz&U&u_XUik)D4`;;|^u zW+?xb#A8YRt#L8(Z^>Q@yFD9zQSc=uZCdxHoFx84rOg<7S)B%OY|yT7qQw& zR^22Vf2G?@;XTmS^N_rHyp7#%DDKFH!zsZ*@rAa;U!5epKFn6Z zry2Zd_4`rqC+Z$*9e<*p5n=4FKBf4p7`j^+Cau)T7ae&2MYsuk`V%=Ub`ov|Id@uA7+VBC2dX*Y7to7g5I>V=kiV4u*bkQt?;1 z91FuYhMYRr@5h9HjPXW=e+)T%F2!G+w)|T~rgONF-Qhg|b7vI(jC zs8jj3jQFe~J^z*wpT++w|CSM-1zc7BEfY+Yu0{O~0(0sOm?_&Bc&hwcz|w%1@-0%Y z@^6{ouVRXTqsqUf!%)R%QKxd8NSzU%1^iV0EnuhOv-n@--?CxZ7@uXsQt<&BFI!LJ zt%}dGc|a(CjLlCAKec6(Et|Z=7ua&XF{a(5Gl@>$BtFaLpEmzA@^9Jv)8?ON3I9}a z1txq{du2^FW;i~}WM8WIER!Cj;#mv8D!qD8a=e#mF04GxiYqGK%Y-Li z%XqIMJ^xmb9`9A8=ie&QlE-u`SQL_@m@vxneIls7rD1o{w*Wki(E=7|CSNIL+&k=e=AnJm))KXKgqup1z%#) zrg<;SDd103+KjPx=1jzUot62wOuC8H&a&z%);${GJ=oFWzwCBHd8ceRoH85~pJ;9T z*D1hTzH>Ijf1Q#1TP8SJ>D)@!R$s6Ed^mn*ly{oe1i!8DQTW?Z{8uycq2yRA_d}nt z_^)PkW)uE2?kTpP58G&Mg-$7q z@QsQyC!srkjsI%q`_j%i6aQtB z_u-fpbNtuW=)RiYmg2um<5kbMQr~Hp_%D-gVw#Jnvh`fQ-^^S@9dC@eh^jlBbo`e| zj)mbHLr$IR_hZ68#(1N`KZYDWmH4m2g|7$lvbBTDKR6sIcS4ZOe6C8tv9k z-Q2r_%YSJSV+AhHCTL?*4o}%E_fub4IDc|d>IT0%#b3()mlU2Yi+A~FH%#69jw5wL zzoc~kJA7|4*L{*w{FAv}sHNBK>$UL0Vn!0Arrcn5PVKgCxc+^~t~^H$Hho?q^dr=Q7q= zT9QA>k>p>fCDl3mk2pGnc}itGvlz=vE!m&!NcN{N7B_vK!<>BVO0Cl3rv{yA>sBm^ z&kHU|o3;Y@9W3G73-$gc)89SxcP}_lM}Kdpza!~upGUNMXk=4Y`Z}BEHLd*m!-X}= zs$IqJ3qJe7=icCR(y{z`jHy!adNMGa%(q>%DfCs!SoVQqUESJ2!KwYgq&(gioR*d! 
zeB6=Zf3wrn;QEVw!SXmQI4#u?oSxwfKH9|f%inwl{Zjp-pT0O>a1?NymS*}cZ4>~%$&7a@-`mIc z#%tODdA2;RVnqq~EHrW+Z71c2+V<0SDs4}aF*A1XzfyBlU+uVK?_hby&G-zO7zO;K zJ-N@l!44;LP5;=BIX0O#Zs+|Gyx#{Jy9^rhUk;u)+c9$b?^fqUpzStG`{KmMz;llX9@dpwDw}V4@UhUxSM6F@DTX;`_>TBAuf_amn z?aAQGoqQLbP?-*X$(#uv8l9+(?aF)bsIdoJX;q&9(}SISft?rU2M#eGKmUO*@QWYk z2kvnu`|rKg7r4JHbp7D{zQFBc@&lDcTHxus9f3#2Is?DG*A;klTwGv&c6{Kmn-c<` zfEx=&B?e@Ey$Rq}LdY+809YM_4nLy(t-#ttU#`*8{V!22cc%NL4?c%`!0yg9%bp)q zv+Rl=)GRyy$2H4lbkPD&j&s1{oPnoqcF`{L2oJ4pNF1;mSiJ!M?C2viUYTfeJnd00_%>MZYdGUI@ZIGjCpaNETOsopUo*Ib zk{w^MqJp^`0WM6YB=@aYHXeSV`T#y$MjvMRkd1s_c#hzO?~%OV2ot{%*#gf}xRLEB zzq*9>u49Zzju7uK`1px8oSSy{(n|M!!%@0#ptJPjJXh&~YvW2kg@+uzF(L4IablqU zHh19YT}gpsH)svzT6x2GN(b$dtZ!1*Gk%eEdtHvI5yuPW6+7Vh84h@UeAOIBf`29D zZ0sdz~Hq^Y%Wld9tIU|NC0ThGkz4 znb$Q5`g}+m8}>f_j_AqN}{1)Q9(*%?-t& zcb-cuy0>Wzy2=Ymy*d6|q zD0|M0{hZ-hb{TFR0g)7gS;dsQZFCl^0av&b{OXCBM}_CC8P-Q1Sl> zuET2*-VjU;*oD_HslNx98ZZj`)~0n+-X(aX%KvHeO%aqQ;lox>Dh4n}3GR=dg8nTZfm}rLWi?vf7kZTQVZPT(={w_G379xs|r# zN#*}6vh8=8s$Q?GZn|EZO&I5FdQkY}emo6~q5%_#3VM0K?yC z_3;_}%IXu0h#%Meei8BGx}Pc{eq8tYh2zI#`1?fnmiTcaZgX0Gd4u-EtitT;{e_Mv z{DnE^%_=OPsg06-D+Sa~pneGT6RDr3*Dv9n>D+scdyjB$Dfgb^UXXXzQ@@k?x2WGm z{qOYpb@H8geCGzfGmr0doK;xOy?5k0)K8#(2=x=GU#Hh^lkafvIqp5ey`|jS!97|3 zT2K8>>ffS%7xg>!`X{@we(2;|-O81pUDo1qu01hU{N;H`zEAu9ko6SDlut)z={|Ga zS1x|@kiR@%`OBAkmA_oYY=`G{Zls^wh!;2f;6|*t%K4e8=lqmd*{8{wnHeMJr^Hw{ z`mrNp#C3nQ@%?ZNv*i5DH0Au1IA+OHnHkFYnP$OPD?T=f57%`p!xtTn`IY>ij3b=? zQ^t}R8cP%({Hgbyxr&GnZ?p$h+gAEHmHt}wI4f+!@!>jLTg?Ay(!bgpFWTDI*2IG^ zbHdkHi-UJX;yAuvwHBnq-U@RoZiM5(b-Xb6%6{210WUD~l||5~?B9;cJE*tvhw$?j z9w59%_|uc;X_bnu<9(l&ep~2z>ZfTJ%X)jStb>${g6ym-DVdZ(lrEI3DQd0k;|HU! zm8rG22)IPnQ>R}b{+K@aet2Y8^rRGnTu$a)X5-h-_7K!2(pdi5aB zJ;-wp^1ShR>XF$VWVQ#Ht)7>?mro{Zm3auFS%&GXn7(I}r;&$iO2b#J1Ve4t)E zAE?BKOFmGqo)1*w!zCYRrk)Q};=?5$Xl9IjptASVZqJ5a6nu$EqwR!`F=(_H8ZCxK zi=oltMHSg;I*k@Xqs7o@F*I8IoL&!&7DJ=O&}cC<+W0*6&}cCEB8NL7a}V>| ziQfT~8x0w3>{me+N9 z1o7b_%lopA?{vXizGICKkEVmKM0d8@_F{9#(#_?0t6jfdba3_nioGp1_Ux^VHg@U2 zkc~a(yhc7?=;DU%E%Yw?)U0xSe`+J$8~zq6ysUI@rE~lF=54sP!bjmR@!?7rW$)z4 zyf1stPu4bUV(azLZM~N8OSkdD`yfSc1iwuEZKR?5RPr!p79M$%1D}0*4r&i5} zydMRhT5Y>T*ditz5*_6;N(7IJW_y@%sFk{27)JKtHh5h{ZD*3Cpn+ej>K-XC`Y5{cS|h#+S~Y+j5U!tmxT1S zjqo+`opvznGaVnJ>1jdYS;G1m@t$VBbsPgsee!rRdi3*Px^V3AXFzXSbR~qXM z5pxnve=yHU40#ovldtLhm~#?iyfNmaiM&3Q7;`Pg-|2|&n$xNE@m>4%_^zk0wT*bL z$y1=~nUUvwh4q5Dk?YjqeFoG^j!`?F%al)~m3#86GR=@P+NPKFy`wMdd&Hx~eU%I|t#RscUw!qs zucwF|S@vZp)@$TB9P7n7g3-<&YWH|4{2z9F*WA!qZgzZEudFi>-=%2%JZN3tCn|em z%rRg4^_Z{Gt&I74iuepR2_z^qC%Z_Pl_ZU94ru{XG@lrIK z49)6$QNr?BkNs*jtrCY4l~!Zy4}eySp;bH9%Z_b3EwOwtX|l(E+}S`1itPYW_R?SiIj zud)Vfti2g)ZH9lyYAf6EUm^X}hQrwf2gN(Srtx1*>~TGo(})Y(x24er-`_%9SjZ+1 z#dinUPigGGu+qPk?(O)m$bEqA4IhQSUvd1G)fN=}vWY#Y$AyhH#q^lt4kPS8@XLf( zt+M~@=f8e&U`^~51FsBw#e_@d_^_wU_K6vnTB#3)amjxEYZDLFM4vWr30yJ9iOp#t zPAsicQ{7o&3e384khrk0d_2i`qLYn7vfpdpxH2VQ&yx7CCiT7UabivM3}c>jJ!5}LBhOd7-mG7k=O%`}5T2W_>HU~<6JxwF=B9~1 ztv%w!PDgy$8PESkEUwD`Rbdu5mH&%6b@nK+H|pHfBFmZA)T_80>J5LP z+Ka;8GPS>~$ddnydbLlNdLvFwo!?Gen95^QWXk_Voyz}3oyz}3t}m7Ut4PoP#s4b* z7ym08TwqG+Z-Slj%?V5?_fT)ZNck`Xrm`Or*hS?35|~ncX@a%d6D2UE4ANnz@_z|D zDGx}Uk^f8JNx2%>sr+98OUhImmW}zpY*-fl%*M;Yqivw^i2Pr`&ZhOp3s>8Gv+z?} z2H7&myR7gETb>uLjiMWgZr&t5%;uk!w%(~#s(s`(|Frq%S;9Z9_-M5y!*LBJJJL$W zD*sm-^vaXU|0Vo$ZphX&?AYt)a^4H|w*9W3_tJ(ruFUAqh|0QFQJWq@ziVyzO`wnh~ z^M9S@{#x}oD{NK%FB4o_%>Q*7@uIDLJ>~pgCfHkfmK8Tt{x1_=w0Jh{xt#yYtDk{h zr04(g>ak%(dj2nRf2sUmG{9N{iX7Mjf;{0%Wltx zpTyKf!IzjcYTlD_3iuO)Mw{$mY0sFjb1wgvNmsG%t+47YD*uI~^oA9Y|AF%y=*mi3xe44?hR>4=^kAhFFwq4A5JJMprcg@pd zccSJVjur3KjII}kQ7d&r`}rBgmp22?m^jo*_{PAYR_oShP5v*|$vaSFk8wypTgKjE 
z@_+GvvvFGXd(rc$od&olTy1T9S2N$2cFvjnUnYGZ9GhZ}kNO(jSM%Fae3xmw>iMQ| zLrwG{tKQSD`M*s1i)ntM%HMPSelznEb-Xd=r-^=H)+5>_|CdSLg<&2;o}KIWW6nv8 z@y3{wCi1#H;<>D8z)nYe*J;oHCH&_9v-j@tRaIyH|2{bZPEPJ}3rHbctOOE;Y6UHp z!$n01myY>Thkj8lOG#SU_U5@q`B5MKkf*m5GV*y?x842}V-ntbdDdDR^9R$} z8`1uF@|!qUpS4u~_Bs1678g7F zqgae_EXEjnVk})YCT{#Pu%D;D3yI_ISjANxhbCB)*p z*2HwD*#0cDV|(3PKKllJ5#wkqzN`IyPSU+E=lHI6`%_-b^S@kc=$C4&SLFP!w(C=^ z=YPfGyJo-0-fpq@u0U?uLsn_D9V6OKaVf=jMel9eZol`7asHRfE>7E|f>9!VZnRD6 z_*jfrET+wUUfg-`i?L~6W-(sv_PKTDXJ-v73bIjh0UX;vrx0my1TvKot3 z>LgYPf6`d{x%_L+m0voQ)eFRrX3e~PJZ3xpmJ1o{Wn}fse*TwB?~V4wh}Lal@n2zE zRjeI8-*zx#$7LG-)eet4XPykRO0!stxM=5px#T`t&LgoCo%kEap8xX2e??=#i+283 z7y7)5uR8P_hOgS-#n|(l7uH34nsKq?xWaC=uhE&>`_Z@a)9!2!3;A!i!z<4C<=~Z0 z;={Ui{#O_JW`sAosXxY^zx3n5+Ue5{FMW~ce{~U06pIhLG~(vWICzP~hqcq^oO6n7 zeeQyt|K-x(BC#x;=xbm0|4sj1r1-FQdXdv#7wY^k*Zl43hYsiGFZ=&p^izl5Yo70h z{?VbH;q=q_od4z0FCuNEwGm>6>=zX%`}1>_Wtco$&Um(YbH+UQhY- zGn1dp_gGCieLZ=N-X7&^*fa4tesA*oJ-_2#Z~0NLRi1Tb%99g$uCCI`GiSyj^P4~A z{Okm)sbG1V^No$|FS5VU+Eug1%(3Ic(xT4$;=b80ocm6kgRy%1@Sne)E5 zXU_YYW1shxX8Y}sHs^hjHg;9DDf8M`SKFC2Nwq2WzsG&2jm#Q^+7$cz*J-;p@nLFH z?*FZ;t<8B~YST2%`?A~4ocE>nvG`5VK|K-A!(RkAAHF6Gm=4Xlz)A+eIj6EG(YtH}Tehkms z`@Nh)AH?vy>-?|SoD`dr&T~%sBF51#G(OCw&pPX_(Q{g4oMv6ny%rNW|I4)w7&-sT zwQdzT|I5`k=KQZN*8ik?U-t1~S6F*~`R@1krp5IR{?hh8x8jPMtzpH@#a3~%{L)t3 z+LEU=^oa?94`=?Fl{e9|{KK5V?sfD@;dS&hm+!e_pIqB^{XBYo+?)e8*FFal~_Bmh~u5-ZV+UI~}gwFx1>tW>$$@KoFgSo6V zKFoztowaEb1El)v{4eSsIsZ%Fp!2`@h7Q)`&%e&}OXU17m;AIjuj~TVH(HO2Zc}sq zm#aO`dj6N{yEd$UU1)wd*Senf`CqQKj~=t4dBdFl<>HIa^Zc)9+qdGwj7;|kd{|+} zhUIiO-o&w;VNYA^jyG|~`pmJ<|4OsZ|C(dRhNao(f6cYe{~}I|c1fKu*FOJ?Yy7vu z@fWfDQn8Pv!dI!RkA-9FV(r(CUyS|wY_VbI!H;GQu6?{ow|yt&Q)xZ9IyLi-V5;ZI zVCvA`t5Y{VY9$$8OYU#t{v+IfhWneiAMt_E+>BqGdv2cS34D0oa_aU2m}8#-mS&#; zHph+&BSwtzD}Vf4`wTFyk<)zSG<*ga*AY35<(H3~<|C*1$Z0sPF4lhS_@z@hy+Hia ziJZ3cW3hdl^EdrTF#nM!g89!}u{wX#ip*qNPVDf+Ntu4Q~yo{4Yo zr&2M*$~*48dT;+|ZXD&kXZ>-7#!i`JK8`NrD!eE7_a5#5dda_QdZ%6BT~^FIH}j1_(t-nM<- zg|WBnZ^hUZV?D>rNj9HF@Sc4pSWY+i zOJ_B>c%;?0w+nqb_B^p%7qET0;Hhr)M<+bhZTu3!BO1FLJYw^d!6Pm{l8*8Re(iJ< zHy?FV-;3~3?D-Dy#ksamr+n1S_U(j^x~*SF^kwmpqc7Whv<^LXWm;U&=*VE)H4|*z z*lMpE&+OY?H#Yp{+6N^$we7qZnIHcSTOE44vp(EbkG0PSLstZZLFq`Qp&niG5P z^J-7S!`+Sl>Y`ti&-tDYX69M#3F6pK>^K$ab4jd6IX07=uZHzG>1x7s>5d-f;@364 zFuz7(PaOM66mGlR|2O@6w)iiX{$!r-hQ1Whi_~9^US#)|qZb{^P(Ojo?S2yWjviMQ z(@(BG=y3kM-2d;Qk2-9RPWq^w{t?kL)JKk6gp&|4#d* zlke@MU)s&*7b5P0bO-vK{~Rq&;RoY1_OtmOJra4a?eKo>)8>dttss z4A{pmzdgqvBies2a$Z=>f2=gx7VYSov9^rGDeA1f|4DI*iU-RXW#vV$M>=t{(d)O4 ze@*oISS=Te;%%HYxR~A<+tWek z7exDTID3&?EWPuKh+Bxoe{nuoY(JOSelBUTy>Wg={QRev!9GJ@#5fv@|7ySglXUOP zIsPj;&TG#p)~jrat|;!j;k}kGRy&yS;xdi@l3#GHE6ywD zEOB1ay|qt+bnjN&&7S4x-pR?`lmBRbkLK^_evCceb}!O%Z6CwGUvT_a1nb#xUdPf~ z=Nkv>$$vJ&UpB^b&o8mKFLVC)1&!l!@lv<2V;8*C?e`1Y`^LBPyS8=BFi()scE!~y2B*ZBpB--_^0H+APu`R5$o?|^^0ty{}) zJUXsxW*2c?b?hY+z2|`$ryO5xI~Kq_PSvHfod;uaTAb7MC5~S+JbQ`6cS&C>am8^h z?j(*&YcHMYX?1p7S0w&$#k^#9{9&}d6^T)~Oyj>y|6Zi{u87_v_Cre-cI_X-F$sM z@mv=xeybv*d+}R!|LWd%)IPtf=q&MCY3#`}cP(<@5R0U=H4Hj^S#nI-wR)x$e9(S_mOUK&iA5So%VLx)U35}-)U>7&CHqv?di0$ z(_Uunjr&d;JMC}$7imYQeYFR$Ip2$Wk#=>?_oDrrHZ|vak&d-xD=){|(khMdrM>6> zImmdI=X=FuKYTt7?HL7vB{dKV#!3nA!M&$8`FbP7l7k z6voeJel&50E^HY&-^+z7&Hf-RJ;l zGlutLct3{sV{0cG3%+dYP+!D2`-R4Tx%$ZQ(~DjcjI7zRM*CMh@2nw4&i`_)6GqPe za;*_Y&i`_)5t{SAx>z5SZhkq(f60#iVt3B}n%Wu{R`>02T$s)$FnjQsvw-aLzwi@b zUUtsz3Ga8^!F=4&+PXOhOlN3m-JJ8Zbe7hjA8npJfitwsx;W=1=`1bhd@E<|*qp6} zj_g_wZ#`SfrJFc@peA-HgY&>R6RgsG9#{tFfpLb_%l3I-GdqY4lm21swJv^j>J!bc zPW?;_5Z~o|Q{;RwzUwRYH(iu(fOjM3gI(nMM(csmZEVg5bG7ey3!P4S9A~b5(fo3* 
zbwBU(!d!h3%`?%wV$KV5@yTU^G{ID+cYZp(o`W?rfkKnm;ZJ#c9s$1>b2~Tw!&qU7k zxESY$b)o-7+NqoRV(fY1)w*cUPWh;t?b`_-bz9%QuaxZ}$%qyJ6+o-Mx2^}Xi#ZsqUd+}$EmU4_hAdPmA_paqw@nz15lT&6!v(zd^^} zBies3a=zF(_z*^8?pQopEVeHeCuzr@UM6vpZO#{S>31<%kI8yW)-NwvkIm;{o5F?A zjikfJ;?H9HAZ%^xL)&`3SStqb5=n-R0r6$_53l{9xAc;vep>BSbUl5JTlkXXSDC-r4(Nl*{k*QIFHO_Tc=G@ z!7>qFx>#)4=2T*AW3hCr10DJvoGZT=oA_lGTXxaUBXjv!(BApTXDlYFlb9%QPB^~4 z6Q7J&`>o0+R=Cp`#7^b&0=1`EPj4Ul+0G~CLdKLu$Bxw=ZH*nPTNjQUOYY)4vNi1G z;OvwBS_1ajRS1iu!QjGgEnx_s>URjG`@o$kBR1uu20 zeT^>E-nYJ;-?ps_eLm-pb%8Y^{LoEZF!ubVA1`)Eoj=w^JW(t@?9zyvGh^N*5+C-t zoj>N%&mu7_vG_1^R*>O`_H$UfczVT$Mf9EvdH$Gdes=Xuhx6~3{eRQe9mno<_`RL< zO*{SKf}TI-(jOx2-HE>NW&gj^{^;a;JL!*hYhULR57y$1+jIJ!xX>s6#~V78A0PVo zPkV$;%fHzZn3!#!5s zkzRr3K%y^r#M`f2aS8`oTJnx~`QO{Gp~|xIXtz?U*eZqYQGK@X|F;td1j|mhyl}wV zzx+k=1E|T z7ndB|;HRFHGcnz2+5!E5?>X-Em1pp*uCIdr+@Bac?)8^%@&|I)zB6pe^Y08_@(bvb z&^~WHKkKjG`r54Bq?<^82z?FOkNf}k*I%3Uf`3D98f`I@aq_q~seGcx-(b-$X+5pd zCU0u_;U3#3Ji{;9Q-ri(813@S3+MOfA!#%Bd;3#Zn~B8I&ETE#eZ_fm)&^NdqPd*9~gyU$wd^Td}XoSw4xXfID_|CRBfy~mgBotR`bHC$;mzt_uZ zo_W2Mmvwsb-g?Ul*7vrW>%ECJ^+T-YBoF+T_RYb`wC^gpFKC`lN8vNk%SgUp3`?LX*zkjs9)x0Rf z$~)OR&^#p5YARrz`H0s)!?!H(q@JrEZ8iJ0kE%IJIa!ob{_xyWw^L5YJK)q)DZb!Q z_%n&R92pX5t{+F;mify<-ak8)mEa3Mlj^T&+;>~(=(XXxEdGT2uM0HCdHUDn$6HND zueF*B=+{*VQ<~rX+0;-b<*wknj($?KchoYg>{;@pvXEl%HyN`U*GA;>OJ^HV= zg>on(gv^dwmQr@$=eLFyQO0QcW(nzyl$q*Fp-f-dEXq^+egHk5m|T;vY~J2HA+ zX~8$*tvw~k=swG8dKbA#{iC-z%b(cnoqMW(k0kO}yC#`-9Z9efGb@6TX(ndWf_q}+l z^g(a$V2hR9u>X%Oc@0Z5$_kM6R~Hp*o4T-ITbytB;H!Ki22UP;)!=yOaME{=95HxD z@`~&oi$-sovT*daee_oi{4oTX{|+)gka`Xw@5G{BW$P0YgAH+s!60;>cVL758}{&w zPY%(yQyE9T%ddfPVZUc!u;(g`2VZ48xO&Mv@4(;{q|=c5Id5&9wT^o~<-Z@i_0?JT zz4g;sX{)TpWJeOC=$N&WBg zCI+v{2;^)UKVt9*%X(S=-@u~WAC7ve#pxkaf6JkjDlxE z$VxeKawEL89$9hoebDClKaijJ{n4WSWy4dE+2uB$Cx`ibzj)m6`oQvcuMRYe-<#Bq z)XUBH$0&DIFKf@VD^88`)Ao%Mt>&P3d|6^yBV)s(4UQ_`C@jI_gcYISQ{Ih1av-^%X@4VV>kU zeXDr$$P}x275yUK9}e#yhWCHZ7%bke@Wcm85>kSj;r;3G{wwf4vQb`lq9w1$=KY@3 z{bv#0_t$Jj7LUUJBZo4s*kugxc!CS!{bfg|&{vfGDf884WI!^%dFRNWg$ztfObGT% zw@QC=QZg{m$iVBi4D7QK8#W>XJCT7bWWc*{;kMTnE$mbV9zq7<7G`aGby3!~S8N&B z*PD4b-pWfT>RFZ^mk=!Vrw5NhA7$L#;7xBB_VDeW4EORs-{bIq6MDo(PeK&`C!Cf4 zv%mpk?YZzMeZSA!w_zjvA4i)i|FO)`8$IdmYyd@5A)%_`3=l?gqpZ)MTG_2?j39{=iz%pX4Hk9zoU5&B4Y&ioeh-yG(_ z+02Ktm=|XF+7FnL2kQB^i{x5*W(O=h4_d3r& zBhR~#m0GU1Q06DdbE9`)O(XLBKJvT^d0tzTQnm$oTg7u*khN7Vd2U3Wi;?F8$a9l7 z&d75Ca;ULUWr~OLk>`Wz7j#*#_}-S|9z7C_95*7ztA-|&IeGW=qy>F}gtA8D`0pum z9P{-wu8QVif$>r@aS@@tQ`KF z!dzx_+huKKsUsP3&ykuFjef}YIQroc&1=Yz^utxQet4{Zpt<4MTSJ;VrRz5F4WpPB zXOP}X`I5B?y*khb`+Kb5G%KlWB>Lc|o}`*bk)bSP zsG7MZU}T7K2O0V;bD<+c&)72b3Nn<*I9iSjO*b;MXxoT|i#nB|9~&7OwQcyqQQLy9 zxzNj8=r8I~w$1Mk)?5ByIdq6;P{YB8rkiy=8LBXpA+WKG|`XK*D z%g>0O*2~gKo*J&U_eb=>mWSiOd0uc{Jo8x(=Cz*8Z@rl3eBitUa9&*Lp$(qUAHjMb zGafXvM&LU=<;f$7R`XzR$?v@JrSBpezX4n1@e{^7;I*3TuC(&TgYPu&X%6y92W0$y z3;iJk{UaFtk6g1p&{Q|j%KNBbQs^}Dbdq^u;&7{J5%bC$-avB#bKEB%O``n%<)fIR zr^AEOJp)$fGe3`Hex89|dmVG`BFYFZ_m_Pe{rtgHfAA^V+pIg#mK!LefjQjd2M)84 ze-HH$&N^u8pNG()4zt$qd&++o`N%{Mb-vH3hcL)Q^tc-MDUtq2B9F&3|A1ZU{K>&x zV8dC7DZ$y`t~t!ZWBK)>UIn!Ku7uR!Sg?H>dr238^*4eOH+Zc3a?q{OE&k&!-r13y z7!1S*a_&m>1c#-eyYrsLE(=~BVkI@~=Ucr?`7SYq!7VZs`jj zn0~NICwh7}RMGzMkZJ!k@)}F~&tu%RX#e{X(}S6eCwfoquQiz=U?R1@XL;YUVQT+H z-`N(w;5%Kn|Ds{rybFeHQ+aCtgRC(fVmw4|*t?SU|1Nl8Q&juU_r?cT`aHo$?Dk(t z`|FwL_FqZ+OHY_g`xjZgBkixY@uAO3cRnEbvGv+4k0+wjw(7IDBj?kQ_uG*BTao`; z&}XNj&rU&~4X*$f}z+e2f{I|5*1kvZ}GO0lutZUMr;B!qlW-4*l)$NKfih!d&O@ zN4_Va!2&;tFTTZ?TMYIPUzCD3#1|SzTlr#OLndQPclqKwl1rN}R8A{jBt-b)kq-D` z<+<_29`Qws$J=n1EtiHbXczUtAK#o5YUPUo5xz*ohL=HqtG_g6uUei#ANhh$(NDe6 
zM}7455c=s9eO-Yp5AlX&+1+P8$us@6-k_2SYlhKe#|D^C&pMwiCw|N(3Zc{nxpZ(yc1AI>$ayuEh9nSjH{3yA7 z$Qu`&>}!ih^-Q$f3XcwDeZbPV>G4GR=X};%k1#IA)?1_2Ta!QgdaK$qw%&Sy)?2YV zIP0wsk01UyuD6ag>#Z+nt(7&`AF$T?nbupWe@E-BT5FYTYd!XoS#O2s&u6{W&G)hO z)(g1a8YKe{F~?l|_14b$|1+((@(mrWw?@lBY`rzS-s;TB7iGN_IghQk{@JO|eZ4hG zjwd6>I~ILj>#ZHj^Et1#@(mrWw?@nH+16X(@7>ZlzjW)Z$j#fLJch_5;@B31$w|2nC=e*uZ z9-XbXM)T7JTW_U5&SAaP@Y^N1-r7<7pX+)nd33hk8r}Z?Bi36TS?%6>YlJs0srA+< zzR-H!Ij^^pM`!D;o$$pMx85pw?*4kK>9a3%y|sh>Ip_6O^5|^6HM)N~U2pYzv3vHw z?%5N&M=$IjKI|X~*hBo-JrnVZjw}5LJFe`We=v5>UN!rN<0l~7XR0Uk7Pd}jzM950 zKF`g+wtZX=VE3pP6!vc&$NKf(ep2*H*-+$1Dm&i`Y z-4wf1Q!@6`Bg64o{R(yz{DsT- zPs;y=_3_fXGqKZQSFB?FeSWG3n^;`%;j}=G+ukW3&Un`096!#7lB^t+K4CZCoI^|mz zq)z{dt%lY|{<&*>q20#6BH7-jX)~n~M5PHkEL{4+`5zQwIef#ik-#=xOYu zIoMQ=^8N;Ig7Noq`mwEzbP#s1ZuTL4Cwp}PeJ2}OBL0leVgpMm8dR2xeO5NGCG_2F z`fUt9W2?pvCVS^DPoLli*ji+xnn%B-^ZvYKzp;Y_=r7s84q(qzf61;rRQ(k7u{11~!@gQvT|%gV;q6(FTW!L%4~}qTsI}8v8F(Fc6etd-+c|g5Ae&7%rj<{evr^RD4Sae zyi)=1OvLskJL(Wy=4DT9!p5G7t!^wlq<3Un6Ay{!EO}kWh;2|@& z!$T@ZJahn`&V$JNA^36xJajk#TY8k8?iQPerouz|&uyQ(#pa=@@Q|^s!$(7neSKgX znOB<_J1TPT@Q{4a+3BG+liH*qWvUJ*Kj@9rVy?~m~NR1DN; zt5?lP=JbSRJZapYfgefJU+}zqpyZR} zM@Oh<9QY|dv+tl297Jw6KHUG*zdf9BYV~mD=QOJ+ahR2tqxcu^;F@~;_UgTTYZTk! z_)97OXYr3Mz%Q{8c|SG<-#PRG`Rwc5dif%~k6+=N$YdFF?fdx4R&#w4zqIc&t}nta zzG1MQx0;{DAGQ(S!(*d;p$}#-KBZWB55y(b{9SLW^bY)DPvGzLG5jbydoA<(TlmnN zOi2u`gCCQVIq#nNX$JmSetR4lO4-wjtg=@b+olzH%I4!E5cZ|Yuzh^{m;V$$!T>tO zzu{-=ZZ|)&GlTaN@KKS^V;cQ+@9CCtTlm%zk5!XG8_J(*abj{%x8R z#aQ%X`1;2hi)gb%#v=JNzKh=Y9(;XxT~Ql<*I(KE{2KiH`?aqB&})BX^Yd%)v;3>x zqaLqYgZPFX#%J%h=pAZ9^_Q2reuR(q3lIP4soQNopNXD6^_h$xf3|GWlOfvl0ep^+ z;)hy4L;mK(8Q~vs6dq8_K^A?lcn6I~S&U27j8SjFKPS;iv(QU4?ldzeSFktVL~kGC z*W>VlqmyV{i{^v<=qCpmSHf`}VP24a8O;l6@OTP+bPfHlx{mUs)gu`W;((er;i0B~off+0rnIu-scAJ$HV?>Wy8vBgKQg)t zKkOD{^aEu3*eSk+?-CDK^wEL88*;~_okN3}@W26lL=K`iiU*{-PS$*64KO@#D8&li zjbGN?UGRYBqwl~2Ey$^Oz)Svz;k(~6A8m@_0k=Ga%{xAfbVykz=k2>xD*4?4@j?W6*H%6@zFwLSp4RUMeqN!H&l^o<$a&=XCcq*l&>>9xX_bP)0_C7@?{BS z)#6jVhp%-A-#+;$A7zYO2yZA};)FN3W+8H~xHpY$NB?|sC=1_7jd2OrC6pN+zs_p@ zV`@fCGy2I%du*!#vz$c#y%9a>E#&kVSScS~Pz;0kS?iRCe=;rPBQEa{W1Ge_`TZPZ zehTyQ05h(2#LMzi)tLMdynOuUw;5fjfwDgM<*gz4CCdM>G0|U>!{-X@Tl}0@o1~wjXx=TG3+dQ_jmBE7go9( zU;4?6FH`VED!`v!{Q4~KYfO~xq;YV57ySC&F8I}qo$#vgTZ<>o@T>en)jm#NsP2Z} ze*U+iM#d)b+DOJP#bgd){3?b&Zesj;5FS|okGz5Yp;$zRM>fMFZzy(#J~n&;|A4fnPp`PZq!@N8lOx>iggk-*vt+A7h~W;!nUI>W4Rq2ErFU_+nsNf5!J( z@-`zA#K{;QFS6%Bw_HdcJF2l2UH=gL9$t%)J|n&f$GmJ}Z2LW9qUJ>DWf}*+2cH}p zE?yg0c7(DUuf7faY@qpuqlp7+M!_cy#1N={6$$-<`nJd5lLGX!4UA=y$C>bn_*Y|@ z^t8v}lb`Rr*63=F!zb@ zW}?>kcEC&Lv^K;0&)7QBCUm63==|<=y=QD4X%l1E7rr(_AGX?4&bl_UR@mC~A#PheG!__JOt;o%OkaMXYco-OHq4&$yS+BU`|i14YcuXXbI<*s!`h75 zBjbPU+KfGpe1^4|NV{D&Yco-N{bTt0$K6?*;k{o)t<8Mbch%sj*qCGs`qh_ZZN}C~ zKEv8fgcrW_Yco+ip!w)K-C3LAJ-05SwHY@LxOJH?cx@)qSC{?Tj6JrU5O0hhpf#+`rQZLTo!9HED#Myen5NVmB;d zUx`NSVl%NdZqYUNvRTPiX|1kVo1V=Y-3MIH>1&mKYXEEUU%}RhpNt3F+W%=>y0^D4 z@WeHw3*TS5w~_VjzhU2r^hMU%%%9$R2D)`<&#If_(8d3Bsjp0Stwz?TU-u;iwU&F$K|;PcoU#?r2Tt!r%Gx?Zm9r114pU9&fV?mwvO zKH=-{>pCNR{dc+^6uy3dYqeDk`@c1@uB`Twf5hEZT=2!XxZv-In|+GkgZ#eD??!$Z z{9ds>U;Iw`hK;8_GCOrY;hs`xU#q9nJsbf4BxS(C)Dr> zly9i*1vNYZRsEGV`~juzg-RRVfKu;Vr43&|sdKi{h9{uZH%V#uVS8Hf&9#YK&#@|N zQ=xg}t@K=|(sz=#((|B7&nIuC7eJLRByXkfg(`hNc`N+@ROzKw<-Vo9P}MZ+Oi6Fj zKO*1$(5J{x>5WjOH<6#xo1sc?AwQ+JLY01j{FHtPs`NJUJ)IQVH_vLWDzwfNUdg@v zg-w$fvu zN?-4(Oj3G0ROtyG-t$-N+e#f?u+C(U=H9(LdoA>So>BS%sM1UIY*NLJ1;sa4y=|RI zx{iA<@!vS;HczE^VTUiJV#fo;H%H6HzEk9X+B!21UfF`}?Tg@(BKAam7~g@~}M4wN){UePTP}wb!j)r8i@1(OmEisP^x?5vn;_ zd-ATZ(#9Rc9Fde#BMbNf0qJmdMWt3D9_pW#O&t1_ZM2ntDd>_ObYY%_8wI?zt7x|%iQoh 
zbLSq*s(hZgb5FcgS*o-Ts&tZ7d7sj0P^J4=m1C98fGRzR_Y$C)&}8Tc-cx!cROxKq zQ+fFaq<>G4pdC-7c6Gyv@joyvPkPlGC*%X>=CfhwKHdrHrRDt#yK4S>#reg!(8 z_mo}$Rl1P(l)e|L^!>c2^aD_(ms*wS9`+7muZjxx9Vy{gz>j?@nG@r*uUe`0{T-rt z5&qie>Cq50b8BjOAwCBh7tXc4w)={2K3kjYTN)p#8fl%0{?>h`dxrLnL2f2kXS5gb ze=t6&AJxyP@qrHm@UQAP3SY+O=%-!u>vQzeF8cK`rRmqll%`*oC{4dEQJQ|eMrr!> z8l~yi*NgEh#$H{*Pj+kkS4!ElrD+nj>u1nOhNk&$i00E0_%tYfgx|!EkBT3C_(#)r zNv9|6oyok5UA43kUMz+eli1;za=L-`(VkKtVAtPb z_hEE7+vAIG*8j(7U)61+&$>bH`SD#Sq)xTev4A?&P{+}vF>F_6k={uiUk_NNH}WmX zl=XU_)S%jG`^Tx{zRtZhw1fU1%k?qZU?XiH9y0nBc^LhbJWLsUXCZk?2gz2Md}p@O zd}j-Lbl;6W?6jYFs*tvj-cdkX)Lc)!NY{*qDm}rftYPeFNoKz@#^0i4jPdZ-Xk@8o zE_FwicHXHz_mrx>lbTZ58xo&)>NwA?qxsGf>m~D@eD^cxtN4!JFob>E8sNJ}(d&gh zvgrR{ia)po9asJq;*qT9gXcyhm7IN*#0UDW6`!Qua0l zT(#({Uo5VQU?_Om$OF6_!CLUMxd%UAY+NNQ<=`q|taHIt4hFdtaa9sn$c#5_+V||Z z%E5|V;i~U2j|x{6T8u@=c40ge*}l*iE6E!k>kOtc{6QMt5a!yKX*COHZSiw&8Z-r( zYgN_?Bh7&dBjv$IN;CGAD9zYcsPsIj((@UQlwJT;x)9u=^u17}@24!KAAl;oG>o}w zg_qV^m9@f4>&RQ_A3>FVioBKH2vvF$c`Lmcs`M7}R(dN`=@-ab>6f5NZ)?S0CBjl~ zldrJUJLISIeyGxi$xrDcP^BBmPwC@OrB9Hb(x;$GpSCKiEMG;Ta8{@7`QB3X>eJ#jmxU6FfPj-&HV+ue=W4o#$;eLgUP^X2D5?D3}yqPRar?D zRbVuO&$in5>;?Y6j`zW9+2f$#wJIy6q6(}gY?l2E?wtx_xM=K_J(2%TdnyMru9v*^ z#qy%~a09sNP#Ss}eEE6vq;Q?e&&01>n5$O&bP@Se7;gg@?Y^kG2neR1~i!PPTo5U1~i!OUK{h>AI5xl@y_-KtjfEC z6i)mVW1(=O`reHb{oTNc1{bE;xRAa#xRJg$xRJWMaiTDyu%a6$9_#=oHp06d;lvsn zCoXny;%CH#!hvIG!`;+#3~jobdUk~ibHH`cxUefMy3c1d3y%sTzDwH*BNiu|8%7L( zfs$Pq@gNv+0{3RZW5S5zxqc4b5{9op&g$Z=N(U#dQJV5|_`i<6$x)iVS*$eQyjW?z zSvc`k#$xf=8rq`{9xI_e%sMq`vrervGHLMRc&O47EcCZ9UYrD8+yGw8M&_hH8LXIR zRW2mGb1pOrJO221DrbgReLBR+QTTC(H;f;H;K!rj7Y9E!V7H3Kk2T=6Brs%>+KB!Y zUoP1`c!lVH1jA$7A5d#bC#e>92Faj0eGtb9m=nVkd)MJxd#F%ZavB@oB%wEEp)4kZ? z%sp&#i?GMtg+0#L>ae}N&-^euDKQuaX1p7`m=1321!ipIx-k*EA7lIfBJaOJhWa1> z*`EkM)-SYnPx`~FH!GfQGI)NxZKtbeo_XEpkJ#zzjGZpALAJJXo@pjFq?|TaT|Ut@ zm|xd_)HOCLU7yx9?V{^Xb&ZWm*B|K`8|%A2{z4di(=FKb5U%X zcZy;Y{R`+FV96;7eS^~zGE6%(fR~Qbr)rCTgtz|_d9Ml-PpAs4;P=pks-)tZ4c1_O zG+5&uUshGs$_Z6fkMR4!gndi7FTJi9Dx9#AdMmvOs`NYQSyh2Y$!qn5s%-L-K2~Mx z{;%erF(Mt)ZO9ChR-SJJR_rbaLtZ@is>A*|wV`u!H_jvoS;; zPi2YH8Bjex$W!^B;EUR|=x=AN5OEdF8wQZ4@It`G15?RM>1j58$R#hO=Rk!g^2kf+ zxlpC=BtNC+L6x3Q8DNjvtvyQXcK2YS3!j@O2}X7wNRzk zk-ySEf-3zKc`CgTs`MtF`xY63RO!P!cN6po^qbH|=y>RH4|`g8N((0hD!&bVkl$1Me#fu&1U4`1 zR^N;Bl->=W5-&?UW7xLKI(Ab9=W$5Zjxon*Pl#-n#8wq6f5Uyrfv`Z-JM zMsGjv_2a*OJkyV7`|(ac-t8Bh?&%kl4Sw|7>?>ilm$eqwtAE10JSs6zSskz{N2NkL zkU2f~H1mse#I3d-{~UBwZ=Nsaz5Y;Sak|pT;&i2>WU&Rk{9$yDQCIR`74HmzB9qgV zMkc2#jZ7|58kt`v`<*6Kn>{Z8mDyJ*$gDRcmsa&LV z8dT{%p32oqBde>Gwq>;%S76-olBld z&w(nPN1jR}v#XUxW~-H+2UU80SZ1q{*;-^)@@mRUr@TQ{({^-&QOI6(G36n9)hj7S z=~YmrODIR_wNRzkQI68c?rNox-D;&bLY3acbI5M>W+<{-y(KKW!eiew?boApf}uT2 zZ-Q#RmEQcYY;?qdy*ki0jx#g+yh6Nk#nvErEV*nExJ>`;V$TlQ;0PErbDd%N)XT8*+s|=yvYHT~*2i`#Q2Rd(r_tKfO-(P5Lt+MCq?abHjGdJWiHw;DR z*$&SRWzO0jZ&glG8abV$G;`t#rIFDUN;4lmtu*raw9bNW3;DjjOM{sHx#{w^A(U|5B54wu!3JZoMzkV zO0d-(0RI%A_X(rz3s|T21w5zrRY9ZM1Re_bZ{g8r}D+O~aUOPZ8! 
zEJ1y%c$6VwQbh(tJnL(Hn^?E_DWt(`Pr1Gb^2VF4X!}#4vxwPM;)X5 zBEFM(#n|8)%y(L6x@~_$7ZG+k1UCHjS+G;IU*qUWZS8F{sF&8Ybhd3SV{1^?V5gak ztvNq&5%WGA9s<)hcEYuk_S6J6VWe1Ft6->+v*>)Q6?`&8GqAKwXG+kSk9 zxE4l|zodNl9E?)+#6}%%jP-!Dowd#?0eTk@xL_p#@qK8J008HE4I(0Fw!0{QUiXU76-&3IH*99K~_sQBi@-lU6pu0M1o|0}%;s&ol$r}SE= z((Cjd7^-;ngnf5X2Krb{74M>t)x5?#O1}YBx|Vm8-VIgy*R+e$Z$p)ShrFy_6^ZCu z*yJk{(YI>iJ(Z)CMn4;^H2T>LrP0r3D2;wrpfvhffzs$_L8UXHN{{g1^Vd7HZyj|* zx2g${FS=FDRPs|A-E6ed=w>sNMmL+GG`d-V(&%OdN~47=4W+;t5R-iQcSb@^$V?m`iLY3Y`-b!zVD!qmK zUxRLi-T-}pGT-65zQJ`BWln^?Mp;U~0adz|vXtHpRr=SIrS#iSrQabhrT0UXK1^9k zAAu^}$o-q3$D!YZp5Q&OR?R6-Wd^uPc*~8o3cy}9r#+RzTZXOmfBsYA?%-IZNE?_jU>MpyTg z{s?+LGA$hSDp>47$hI4k{gtVoxAb1sPcmy=c$pV2yBl1VC|M0$T-g^Un}n?_Q8HWn z`Hlg?XoF3A_9z`>XwTAtP=nD}%XBc>@GE`ehT1l>3T$P>9fevim&xseC7&M?aO_%-DbeP;_nC&5T`a8dT|A z-a{9c?_{acdAz6eT&U7_a{n%TxS}xGT{b4W%f@75<9PO7o+T~+Nn>k!0IKxTR{u%I zW+op>`BFw>vNbj)E42M5g~_7ZZ{Jc+XkWb5e71Jkx6~W9e|7WQ_hn;eYU?(q3-7CVqGD+7yG={0+|<*-9+Sb>TJP2I=Rr?+LG=uN&XbW5`bg zuhG68;WJ}%qfL$fXHt87c8`tE?&$!Z$>&q`D*+QY{+??(z-MkuC|-M$c6Drc4mO+F z0XAD`W3z?yx3HOT8G2}*@r$I7gyEFmPVy606E>rd3^pq;*vub%)?l;5;0%M!!uwP? z{*jJ-OZj$W%Tk?W!^#JTX%8at#$xPWvi*T2D`gY=5Z>*UZOg%9_<3q?-IiD7|JZ{) z&3dx8SugfD^Rd?%=g~gs4@Kj#0`N~H&WwE=bLnsG_orAht#dW9Z?kZjxyII&3obL) z!fW8Nnb@{W8qAi9ZEFtrOtvhoiyZ^IIq_zT@qs)BCi@5O{|&gzv1Q>yx^{()^&bN3 zXMm|XiaDD(E{xNvnHRJ1O+9us^8@Q{+W%^uC|Km5M8P6YiGoGSM8P8Kq5p`DW`%8| zskU+0b+(VB_GeBWjxR9$c8vX;AM^ahOS02GHVI1qKehr2A3wGo3a>oo$$!bL-Asb^ z(f?59fycH(nFq|;P9{`i$p|QXz6&{i3_jnL4MjG0F%LY3Z0@=q%DAdt4v*MXzPyb2y5cPXq25`jh?N- z{;y|WgX-BgpnA3z+8eqX+8-KaA6Tq+(K#3E-FT?p^+EM+5>)S|LG^AQ=#|h6=n&|j zu+6RxJHRX00bXHk{uSDKrt(D3oT)sgLY3zT{8rZph2QFq%U6*-z~Q-QeyTeWw%NJ)>g%2GR1v;>vQ3r1 zTeSgfZ17fX2fU@{9DKCR=*gbae}UH0p0X3y(w?#t*V3M{6W7w7(RSh@eC%YKD$zUe zUWwj;_e%5*yjP-k;Jp(1K=$Gi`XJg~tUMgsR2Zju4Nfamo)-2@aNTnRZ7NrJ2e7#+@2ODbJq@b7bD_$64pe#PL6tYWY3$$d zX6|s(^PpEl=XY*TR@~Qi=JjlqSB%Y9<*kI8HifFZ5~#{s3srgRpehd@&4x#1SI$;> z8=)$16Ld6mGxS>MmR8%;bkmPLN~amxv-DP|=H0{Ko=x%W?yK%TMrGT{BnfI>fbXQi-hTm)LW*_5e=CXHfaR&m8=7-b=$~ zK4A*)al2K08K2$!AA4lzeuKfs)S-=Fwr~vx9kb82RjA9*yJ?;GXiB3RND{pvof` zsyyaEl}8>_dCY|>k2_s?%;TQ&m=9GR3!uuQ5UM=xg({Exp~~X{sPb4E&SM1ax!XdA zkFr_ah2AF{-d*Sl+pF5zWQOWL>ONHeQFmkSqwdDuN8M$ES)pgBo3Rm6H)HdoZpP+E z-HZ*7x)~cDb(76%Eb$^be_%D^lKNWwkOV%m=sWde7`qVfj~=n9mv7t;sCzQ9ku{lp zHQ}2a^q)GuB?tYdE*^^hQ|E)C|8(TXX6#NG{4X3cNH_!@^{^Md^c(HHs{G+YR z8*JN^!DpHFy4MSB+i3%s=m40==LQC zr0<9c`@kuR)GxGFH2>jc|>zL($I0zLf;7qVG-o9N(+>xoA8k>``SE z-yHp%>{lP)|Mvgc{lC#;-Jo)3Fc;0_=U^byj+AR+&nVZlBjq}HXoM?XY@y5Fub=~e zKgEmH(k`-9`KXuMHZ`3#1817}ur-vQPhEtG;D?A!&9Q04nQyYN3Hr*9Q~xkNnhw6X z9h@@_ymK44=T`P|z9j?>GW$47pH|;a1t0C@ovwWS+TbI6{hpoZ!bjkr4{U6dYxlkO z;mCFHkuZ>hkH9{;jFHl@wZ}#-IB6}{!bsl&BPmAg?@70b5yRh4*hu!OaSjeLK7Flw zZ_NBgnEyCV{jBw9C*to~wA8?PsKj0pNf3SCj@lRUtN1WsGa6)+h4F?0|*%(MM zlrynMZDGuB^7biT0|t^jt^or{#@B#>B;#wqK$7t_8UxbwKNQ|uqcH#(U!ySq8D9f$ z%GPXrTSq|cwTv|y1F}hL{bCFh3{-bL6byvFV&xhzP~C)945amUVW2AdO&I8}g z)zOWC6gMRdlmrHbVlA(WjtB zpVE7+cDX{mTZQkU-hB>cvLE|;r3Ec8F+5f4=! z=%%^K1Kl)Nd7zu-Di3tiT;+joYV=2R)7;^tGoe>QN3`1IvXyTDy-xW~g_`e&n(v1y zU-Z&!<%?dLt$fi-jh={Jnyq~2L6z@(=xFEy=(W(ouw5=mWfT)br7~7RRmLi)%0L%Q zGHnMnZ3k5u=%PusE}En=Hj-8uo1iKKT{KB$po=D52i*!C2Yn%Imou?`Rp{p6C;2=U zf}iB`SO|VHK9ArhosWp9Mos~!Ryo0ZPYV>hW z=_Y82X@7Xrv_I6eKh(58)U-d;w0~RPRZNrYPld|kJb74oIk;=Nsh_uWDKuAkTkxjp z7Y|kad{EWzJa}92Q?fs0oA!ezmH$+z@}CA({<+W#z~hRulKm;kv^BhF+8S!w8fw}a zdJ%YC_NP&%J$sanG_+^wSD~8Y>}9`f5h z0!lvHM?%R*euW(<5!blzg?kSI{P~|ZVsyuR`%3}^xdE`Nr z$6ToLxYL!#Jnkuv`B3Gt0IEC+p~~Z4sPecUsyrTmDvzb%JVw!`__~MoC0WhJp2v3? 
zd)^2rI*H=69Q&NHL!q}Qc5N&=wQO+4c169%qPNHfXKZxTd#r6&F?Kk;Lp_Zhka`-s z9`!VKJ?d%fbkx(>>8NM4%`FW))&w4Vn6XWKAihWjj}_C0>dSKZi}gu1cx<@OH|`<6 zNq8&)JQjq1veCsfS7)P()y6~7#cF*}bg|kbDD|sNgYr#g{|UakHiI{zB=rsBt+VXUVdJe=KeX*pQ)G|wg3IE;XFb4aJ;7_e zut)hq??uH{1#b%DE&sA!WjY6}0bPF)IH?{y)rhWt3>|;9(qO6&(D4tT+aE@!m%XWx zaeG!$uV5nB>Nx3p6iea9adHgxg{EqXneono#$KpG>re3=KZ(UPAUH_M^sh_TYt8411>$i1H zeRchxTpPbY=Gw#FUgf2B>{Jq1Z1oT;@6CsGPGov{Jy@(7S*!<(RU?b_U@`DpWj$D| z8u_kA{;QGidgQ;lkN$@u-}T6UHS%3ge^(=`^~h>9vRaR;0aI0@E9@X{ zWIAn9jlM9QHmOEmm`9k2Ty76?{q4PzN%eJ5+N2uYcsgxT{S7E>Qauq$o5%+CJ8z%hL@R7pddk}uds1KQ zNqw;=^<^K%zQL1meOW8@1nX$8D)t3Z9o28DqxwyCRKKZ?>NnL<{iZsq-&9BSo9d{3 zQytZBs-yZ%byUBpj_NnnQT?Vms^7M=H{oiPF~_Q0tupeUDq}8GW!wo>8S|hjV?I=6 zEP$$vLa55P7pgMuhpLPRpekdj#(AIFUv81|{gEBh@)T70ZiFh|O;F{#8LE7@K$Y)S zsPcUQs(fF9D&KA7d%AUR!s*IuzZH&kISf@^N1)295vsh7LzUMFsPZ}mRbHnp_6_uz zedI=&_Os($vY~o!j2(w^Jyh?Fhw8lvZTBYhn|AP2`b|4P_3Q&s(++L-Cak}S|6cM` z)_)VaP5*`WC5+~IXJ5ieUl4yx1ot?55?*XRc@vv~%F~{NEyN#Jv7hCI=9xDwZ|Ov^ zlJ+E287&FnJqgd3f8Jz|3gxRk2@fWO_ar=5UV4*#DJI%`61JpU%_blABs?2my~$n_ z6YV_-4?b#D9P|{MJqe?EOjst*U>Hy7pFrOQm&}1`PSn{@PlIKeSo1xIZaI`aD8e?S zUJaqGf4|<(I(Ng?N7n2AAXui5F?L|&@r3ac~~`<2$*74Kp8 zkb@5kz)`zcZ{ElJ!uq?3h1kOUQqF#}k26=kX;ByEO0Bt{Yu!^RtAw)FlF#l;t8y** z>>dH74R()&(gwS;p?u$NzHcqxx0~-<%XjSN`_}RuyZOFI9s%wtkEu}QF%7Cba-qs& z4pe#ML6rw>Ve+6YB6-Z?p7NLvRUQkV%A*jfJnn@mkNcs@;{mAZu{4~Abk(0TMyXHL z=fWmc=*C6phSCuoY;u>^H*PI`UXF}EItl)PH%6nI)X<)z(M@V-&(Y{6HSoe{bdwr* z!C-?lDD|u91Eqd78Bpq1GswXv{;)5My;tFZ6k&ic?+OFd!@Hx{qwaRwMpIyHG`;X= z`}_3;@TUCNrsKaR3@`%>u!eisB6iCbA)CYsvu9zi@-&-Yub~d|_qm2T$lqr>`v*RT z{=195&GMuWJCqXfO#?ISc?CW?##)bb_*&^H)bkqpUp_?F@LlpDiq3PE{^z@A>3_a^ zmdfP2XQ@oSTQOyaY4b+fJkTqQ4;uL{=|H1+SNh;6-jzO>L%kjOle`7hhSWc(Hl+SR z`nZO&g7mT4I7lC>jf3>D+E|!Q`wu#@EBTwDcgEnuu6M47>Yee-sk|fIf>_j2;k%)X zwfP#)*?({p&q%k(;+dUys_(*fm#4u2e(G3Fn+Irf)o~Pclr9u)Q}VdtpR!VVMdF`E znf(ZRnSNB?JMmA>cPH3=H-mo4qOV`iI6DI}M(R~v?1=i8EiK~i=hgxlLPis8X7O=(i;p{2T z8rVVjestVlgvXA_P>mfAK{a;lfNJcBV+_&QF&V0{;~^+~J$eTez8)P1&t^hv-Xg2f;C_Fy89~2%RJrD|yj~)z#$4%T5I{aiC_xv+``@9|ZbiEz-^i4bNX@V>6 z=_a!uV;Xzix5hoyxZ<9Y;Z?<%MC*g{t?S6Plx$<5LipCt7!U^r3i55WtcM3Z$T8m+ z>;dKbf(cN*FPIGF`-16EzAxAp%J&5aK>5DlSD?e7L!n=VUIiTm&4OM7&4GRmIu?2Z z^y|=@pf^Il3BB2>oXi-oj`qp7F_7Z&6`!v-eZ}i5mP9cn^1s7pBv?!Psot6oRA=?6 z>a0FhozQmKOeX2UEPgQ62sp_mgRh`wRs3!vbSnnEbppypmE z*r4WqDA=Ip0Vvp@W~p=)Unr03Zu@0&-egst+PeE12MySfz1XUTEp(`;_XJe=o`Ndh z)3TxYDrP8;Ok%N=#|WtM7ztG#;HVkO0~|F&d4Qv4C=YPd4CMiia`qA&ZQ6|(FVk*N zy>~BE@7)j8dtj>3dT(j#UV@3{+wItt7od6$e3hu@z*p|Q1f4jqj^xXU^ZM!+%!lH^ z7R5-}`v`vC{HT2dRlc*2;Kk-m?IWldD)&Bu7n)DCkD$tM_7OZ^p4C2rW`7LVK7!}U zzuHGo@m2191kc9H!f{`*aom*#$EBAKfNFj}#JXhICY4-%7)*DF{pF7{muS6PdkD%m zFdP?^T>cz5PWuTuzJWEmwsG7}>B+QJkhS>|+h1juW0NA^$I%nkqKC*9F_QfEvENv< zt>ixRL$eP&`k~na5ZzC)O!uMtDVC|DJ?%`q&RlFKdge}zSz)ZUNAEshRqoOKrT8s? 
z+oEk?yX+Vw*}$AV-0`6>I=9vaO)Qp$Z3UfJHZWs@@j=mnW&1KVm^7&B+Xt#LGoUJS zP}pWx%^WQonCxv&qhoK+WX}rnlMT%5oiP$hezJj?y)ec=$ERsin zXOzcOC_1lfU?z`TC~YVkn8_m#syt{Pvp)vy6Uk#9_mszcsPb3Ty3* zdC->59vjL-dj?i8<{8@({hMOrH{q2qeoHa7rMtmzchcXB7?(oew+m@o0&g2%Kk3Pi z4JTxHHo@?$+lKS3ZNm{ym%xYep^}f?XR+Zdwrx1VWzjaAXxok2qaHjPZMzZv^-2!d zCr~z{d+7u1MR+fLpuGt9+ihEH+i|kN8qxToGatG`c1+Q;)bn=q_wK|L$rd9WQW(>{ zXI?(_PkDaL`m6*ns&K?m+7A4)LF*j0Ek}Fi&Eoy;+H$r~hU|b_;6K>`xA5(+u|1FK(@twV9QteHjN>UjnM2#hzu3#U1X?G?_LkpyW`P4 zc-QD@=$HGzn3KTPMPN+FPB@z9rKgSNd9&X=&+kND9K1RFSn9ZP`oysr9y*bF!+g@t zKJmjFQ^)<7bXPddoW;c$sk68W`U|5?-s@i$Z}$5OIyNbM{~iMmm4bs(Pfvc5wFI*l zMhK9SvOPEveu6qrahtwj1!SbB(6^kKENdi@N>z9ClN z!&^N;;#<=VuH^iMrkTAh_DzT@zYRajdVKyqVxPZyVx3NiVu$%y6g$j&&=;{$_4W4+ z{xkJFNjd%5d*LPOnNj2`%kXCeE2-~|To3RMU@wvQhQI#BKdfK(i~qZR?=OC|{=KdL zxL$KZ1K8{&_MDHg&&hZBrN0b>{$p5$Dy!GXBCSYjdra ze|4wz@_#4w3}vuJw_|*-kpJZJy)VZ3iB<4C6RoB$1$KSjchyDf4A>e%tl>0kO=Cw=&z#o>^~!P94qxk&nB&YrszVjN z(fi;FeRIx&GyZYeRKO}=erNu)YUitwVcD8{LB%Kf)?feH3cba*>b*le=l*8Z;b+HMuGcuG@^0gry#_;7KI_bD zeY`ok_g(666dfbfOY3X#4Ue*}rnTW`x2{~T@!6~e!^4U%Fl)i+kfRkl&bnG7^GXSG z=VJb6O=hdT7Q8#W793xmLb|Yc*YuNy)8aQeVZe+G7%9kQtL0gUpzuv4gSbJzx3-8IMMz>z$;ZB%7mY z^U6hm(7*oGq|iShE5$u0&ySm_(VL&%dh1jw~SH$&KUI$V-#}UEKL9QQmay9 zMR&%k3VW=27Q2EoRykvU>UmRb#`O@x;%ouJJu`UhUH}XZO_b*tH|g z&p6gPb}826zwB`<<)iSpRZrO)?Q!dIyNxt z@z!xn^;f-}x=MZ@pdQ)px8%LZe?R2+xBO1=jP!sh$g{p%bx^-(ZhsotP=8&A?5LmO z=-b|AOiM2R&0H(=pYvz~dmM~z3)SbB4XtC^9b9Wn+mRMW`-I1||3!V87@MB&86MMC zGo~eQu8lLMX&jTT*!DUS`}1dP0g}Gx`)`-ffIwN%WI+s$*9(hk9D|s#iR=Ud0$C9OCFz zzeHd9$k#jg0%P_%#=k%K`UGEKywvr_x<+5p^+~Stq<;+$H2=ttzJ(t4kD};d>qQv@ z%b=Pk?njsURLgnaIH8%x9dv2H%`Yk%utLRk!g-*47yf?HXz}PjFacUalSFZJP z_0%4r|DMn@G(fu4^}Rx)(4`zbN;=hB{G>~rM3>6w#TjV5h*^MdTF`MC(WRQ1Ukl*P z)jc_@fcgZfgX*#v`W(OA|37N`+I+%``Ks%G@YCE9=M@C&CqogSH!x#ZzV=~H8-v5 z@`%7%a8qg0yt98H+sd~Ax6*gl!0WeCE(0CzE2qP+3a+kP^y1*kMU~N&i{86Dh<{c6 zoq=_3S3tVHg?bzKY2=mD!IUu^WgKfcgg))ZRkU?v8ZiyBNnyeTv+rQ@aDJlJ^hZg_U1&lHnHd6 z(%;OP0RFsS;ZF+fxy9hl5tpk}{HYsh`7L@=7@g@YLMMI|?>qi;uJ2CH^p5J7<(tR=bxvC~6 zy$vR<0+Se<@CM_f-*~@=QUAB>hywAZ4Z|Y~TAA3?iZ)&=+IX$l%a^rYT4?93wz!k9 z$>cF1X%0=VomQpseK>%|D_6L~yl-_1r@cqoz@@j7qTo=`UX3yS7eQLhNlf;q!eqRuy)Xq~J6F7FjdzKNtpKS!*xz!O! zq1@|e<2}p020lc7ln-&CO7utdp*j#BQl4x+^r4j}{ZDPX(7Xv_4urJrp5YvwCBF{x z>-27lrL6-uyN5hyT5Y>C;f^q;jxuT5wrk))=zc03Nc<&JTnPSB5#}g36A!YnCYJhC zo6f`=lS-y!qDYRBko`pBvO>T5%ypetm zrOX*CUr&3nzPWbX8p=HsXG+OK((E7w$QD z=Hwk0?}jztN*#J{`s*O}e{ zb_|36C@&iuxap@nm1w2ZMcO3*vp==V>$i z2_IUUVZO3eos6;%206-Xd;7Q5&a%&tr46hDsrj5;4hOkQ3~S!ZB#oO5_my@!?R3qi|6e*e|6ru~Y*nYC zCGt1EX|-cEkUrCw!~d1eE0H_pPn@K=bV_%B#sAy!UUSbkG?R~Z4){nDjwnthUNVWe zl)gs^Z9I^#M`Njk3q$Zj-=sbwFkmrlA@LOPlQGycdn}c3VJvcJ`Fl>`A(Fu}VS(yc zT%wBlmX4=8Dy|#Iu_D&M1%$r{T@2+Q!i!y`Ss0XS@emsa#6Jp%BbEQl#}!~)m&re@ zb(K%%A7}ROEav~>AHo{pLUVX}F2d zFfiaDFyJR(z)z9epNV_$|Gw~!*T4YzjQh)H%y)$cR|yZm0O7&a;DKc0nLj-5#$(5^Y2wv^O@$pA z;@C9tT6_zQacm-vzpWlnrnb|2$ffd$94ucX{i<-RTVrvHsspSqM*4GU=lq!T+25Q^ z`u~>Ac;c`y(irZY00wVhe;DcJX4*M=SI;v@r}&8QBj`g3=3f{h&S0lqoI&MabB2u; zXVCvLkX3QEN;um}INRHBHt`~T>zTccK9s(kuQ1?sVQ@G%+%6U#mjI7T>$a6Jsnt@Q@Hc@XeIn2n#2AUFGxWd=2^TO&cW_ zeP>*-=L7t?YIoSznuAq=e^{7NOM3M!wO#bzp_D}rZ!n%R;$f}7`qe!Qk9@+y4xQ`& zYRiaAWS*m+MR~HZKy8rbutVP!c4#c3@Wt+rYY8VlyZ3dl!vTgk;SR0_?%;2RJJ>#y zC16+~7$#pz5o26!F3}V-qVGwVQHS224PMx71sgArB@-`ZP?n>SmGN+f zUpXtTQ+xlH$pgJFb6!sS<@t^Sy$8P!rx;|E{mtqRdYk^B^~{s`M_2l!l488lpAvD{eM-VR^(kqO2w}tSLqXr{u7ix@ zv+$s4T0_$&JW$-yQ3EcaclG=dEJ!4@)4iT_3lG$W z2u;7fad2NF4$S7fp?@eT=t~uED6;fEVEQeh=E|QM$`@?Cmw&Y-zF^x{`iW-cnR5j! 
zzlG^@XzbfL6MvBX6@O5>zZ*v=e~{kqC%xhdb~~bgJWHn~=hHbCSEzs=sEQ~o z{XT9(*Lk~DZDQU2A!V~58A0oY~rvror zb$0NUf#`b{dY=hrc+2DqXVYh0O}~zO6?K-MH3|O;^-_ER$@EP;4u<>*40#T};nKkay$3ko0-n^}Gtg5xqqpaS*ZX+> zz3)4of7SH$yyt^+OufRpV%C-3Rlo1&{hROi*RGs>wfBR!uJNw-{nxcCd?`oOrl_h& zc`b=@4gH24|ITLO-P9!BO%IcBMM8vkCvB?<)SLR% zg&~D|8+nUxN*tzs00XP*mCS!3}O;ZdJVpQqPVnI>0>CLejU zWpeS8u(p8b{r(P5rCWPH#+C|uLhlRnHt|e-O54eYc#GOP;w?6Q30q9;G77JYqOPNz zg}2@G(zH3Nj3~b_EB_pQA2Gyd<4|{SNWA2J#n%M8Y|c_f{KOX3+1FZY5eI$3;d6(&WBkkqDb{!=al5IcM}0Ok+2{5tBfRQqu;dozP>+ub z^Rv$A{=Jp6=H$4KzB@dWJ_O`k!UKf;i3XtR0-f9Do?+Wgm7qN{>*NN#Mg(9KXQF_c_Nz&ea#a zghOdrQ2FGXy-`m8f|4iaBt$#?o1%=Dbgz%jMJ&mNt${z1~+JKZQ| zp4&?m%pvH5pHk^~kMnzZwl$Y)(lVdx9fh2emQv0i;@L9`xF$V?T+d#?Iq8|s`A>MZ zd<*_0%0utY-e_v6r=o5wz1@6Ch3r`zaT2k9}Na(ljiB@h0;-4GJVcW@;!!UV=8CN ziRE3%@i@{cpQ+wiN?OJsqf1Ha5W=@j;w&@-iFDrK^teWp|fi@_MBBbs#h ziC_IDDp#dPV-$o^<3w+p6lG!bDcNUbKN_Wp3Am}Vx9E7kTJ}mm?wSj z#=TJNlg zs(jkS0$0h?4OpOiHWujoOjsZc=xbp>m4yM){}}SHi}degjFULSXwquq!Ax}C%umTo z69Zi(*FZZ^k3zuG)VvJNXHBlYg*R-crfUz=)aB8W?dm?K73j> zR__w-AZO9s!#10kxSKo+uXIlsaCF2Qv}GLlE}RX%i=p&Ms^25j-rG27+Iwq^MjU*- zC42u=*xA{8(|&;MJK$Ugz{EuC^c5UG)z~C(<5lo+FKc7I3Kl0YURHXedwq0{zUVyP z-QK_cRwMA?9l@~%vKh=VU3Y?f%5G>cyeY`WgV-0_?iON?ZM!?2^L(BOhm@XDuHWH5 zX-(uy`$;rsV}>AW7sAF!zN(wZm+Hi}FGG>FuWDZo-jYTAXVR8kX|`jVlZP>uJeYFL z_l5t>$%EID2bFbm_}>$m7*9C|@!w_Pe`C2@ZVySlz!=xWGKaD z?0*`2&_C(ut22m89Q0P6*%rPmvi18ABp%b zeGBC~d}+z=gfZ5GIUlPyUqyd_=6fvU{B8OK{>@yEshq#niE*;2MtMHxD?2kDgnrd5 z&R5XS@K5G=q;bwXsW%M5e=x@bzoQ`g0vA8^3J425W?^Bkptl|HKH- zigWqj#Bk5cXY>BVFwY-O_ufR8NA0sy(mT=VQGas7{fQ2bU)K{2&*OwMOdA$!+A#cO zFKyltv|*CBot$gWm<-u4WUYehF7Ro|+I!ZszVo?8&gOG{c?IXlSt;jZ`QNkEM)7rg z<1k$C*YLzc_>bDPa`$@)J)MP4>Us%nn`!9gH2R=v?D^wNo&%q78v8w!)91{JIr{2! zbW~$@r_-+VW53jX^uvX0{vpmKeXCeEU|osYP3+$qZM>v=V{{%LJfFdNYvk6Cf9D-!)s%g7WhJ^|%04n% ziLRKkkFH4e)yJQ5k@3pv^4_v!OC->u}m73fq2`lPlg9pUTt(dK;p}O<` zgm}s{-q`P^?e+|^eenMA{UioZqL%@*Z$~l^dd|p)VnjOmmz$6CUvEC z4A)Rrb@&!FcGE0-%2?|UDW3_HpSax&>v<*5lP*5` z>R8&}v&gH$;>=(;B`+90Q4|cHDG7$ND}$jhg!v7oJsqRA`Z?OuP>$Esp4wb5)SlX$ zFVvpuo^4NcZrf9x+xArF4fgaa*s~VxCGGm>sB6<+UO;=T`K=B16j@)$e8z?QoW)Op zJ@fB-xc^%0YK3K2HzU6l=!^J5h0ZOzJzwWwoA^SZ&Mm%>tMkr5zTne&k04)2)j76Y zdRM{sWvi;WCtXzBl{&ZLF44IacahGmxbt*w#hs~h#f>dqV#T+Gdn>H?R_NS{Z-LIO z_$KSzif^RO6(9EbcUBx*xhK3;npW!EisLz*TX9U$x#GZHPqY5Fje8nnq5r+5bL)T4 z>s9NZqGd*GMGp4<2;9&6h1ZH;Yq@mbmP z>ADxfYh}|@buWb9nzo(&fkSz&?0c^6h45Y3`Don>;l1rhN2skojIIAOSSMRAo*{c5 zuxz}r`;Qi8z6WO7_P#ehq0(sV0MCVsi#a#_s+mUd63#>I&SHFnA@=@*y{%oR*!z)| zy~l6dVDDR3K4seb)+J9h*n8cx?Y+)zdmqz`?OuVMmkh>{H`({G=zB|?K<}neXL{dI zcp7`(hxpE9?@b+|{>3{?9YQ|z4O55o4RmM+^`P{aI$^zQ>V)1!mNf5G&rKQEb7V>T zK!^j#M`80VvtF?OoAK+|ysHYkEgn!RpO7U>t0iC9ecclec*WuYLoFU44iF6oSWJD( z_IIPcZ=!B(nH5*uBs*C$!{h<-ZJ&t;#F;*>IDZ`fv3bDa5IZmXok)4g{_nExoAp3` zZGI4|=N1>pvABSq$-apTm}P3eFKnXxn(_hl!8GLqGfY0f7#s2&_G>(W)&WR^3+cT$xRUZdlV>sT0>xpE zK|KD*>G?oQV~o19PDo3BI@;MEtW9&7b3Rnh>Ypp4AMY9RYukJE&rO0;%QxQ_?jGtJ z3H6CI^@aE?Kh06{x0U?0clyem5xzIBBupdx32o$D+Q^D~8GL9jeRj*>fC+HnNyww_ z_0c(eUFZ3XQ+mka>wl+Q@+n(k_ea!k7U#nMjZJh@ylj%i*YzL!efwSM@P{G#y90mV z0rt;1f$WQ~{{`Kd0AD8!CSU&@HuInNPw0PDOh~pd%7SN|AU2N(rwe@iV`gk_J(zDUQMEI_B zFtJ3w)1VIOo~?sAw{=i9v_XE+zfgUW4VBKxhU%TO(bLoE;2US5gSHJ77T7jaSkTml zDm}Ih)w{M0-DufRJ-2P>N0tqhUdo1s;-2)f5M2z#Kj~*T?lr~1XztD6|0ONhQ2An| zm;X1~&`@2K4ZR8dv~6f;AH~;YLz~mpGx@D+Xde$yTe_7I@S~627~?XA@pkuxx%^S} zlh)U=K0^U>o9}nH{2s2|tfNuW#SwUyu|4DQA3VtVoqJrJOYeglM23g4++pEuZbw+v zeLTA{*6nw(Ce|{z(;~)G8jSaiM1SP-UYkb$l+R$im%EfXrTf=J8^dp8Jq8o!@g22g z9m(&z6Z~5L;s?l2E$!$#2=AnywU%*pcX9qh`e16A*Zd>S4={hQ7JtMNcci~J-?#<) z`y%pw3wuSq$l5?py9m*Vry~iOi}ir6(V+))&Dub>=o(vji>_H4sF$u&33c7sC~u|f 
zu7tW~eW1Ng!rp`tgsczrzOMTd>iSx2HhKJg2J?|P?&7$LBZ=ckI^&Ny7BU`fIpZ1O zB*in*9K{oyiT*d8MqMg?<`QRET@3bl0PC}@hG)!wIPF*=;TP=xT}>b1C$6Z{_g9X1 ztrz3y-X~qXrW&u&#$wIRIRB<3=HpU+Y8Tfq4)Gv;-m~##9>AwLJj&g#h_canblWy& zS*xEjin5Mz72Y-`J!Wof>o(^8Zb({M9;NxlJs}u1%;Y154dnfBMEuicf|5CnyvR|NU%3i+A1(f|{T~qdI z^DLn3N9vlgcOx%l;@ceRXJ1$tEGPOji}#i=ev|!I2RMwnTrfD@!TcZmEF+jloeig) z|8V*-gK!ITJGQd6*eA%u4HY9^yO*-M0hzehDyuA`wV$zUW?5-1WAVcp)|9&(9E+k1 zB!_;+d`D9@b;wu@?TB5JM*(F}MtPJWhdnu8i>!}(EPif$T)clRvi<<&u~f4Dhr!49 zk@kxykJjW5A98tX^7j(u!MLFEmne@1bxnCFzh*vtCV$qtSj8HrveuE{A3;8S?B46$Lmxv#!z*?lYEUKFJs8pVsxqp=QGIH*vDe$#>UZxC0_;P>yM-pKlQB5=&|fz zEAk;7Yehb!W6H-u%)bueZDnjB2?0uzOK zW5K)`TzkO0+Ls-Hzk+%9PmG#d%UERL-d@V>e#)u~<>n#uP-c35Q*4yKuZ4R?LeFFy z^Y%7iUQ`hCXp7v`1oL)*ci(r%`gehMKTrtP-9aea+l3t7#kp|rM{c)Yc(4l@|D%O@ zw<6z5sgGO1yrtB~EU;lIayARhT8f;F*EM=KUf1Z|t-40Qpeq_2G{xrAG zTN3N?XH=vn58}v8?VOyEl%9OIJ0p2&SMIqxCx<0wM`7#FZy+2(5qN5b;f0}bG@w-{XH4GDtQ@?oFqhq zRV~G~_2Ik^cv}2e>$!d6TKoHer$y*lIyxk|5te442hUNb^a+(eN1d+JHFdgD*VO5A zx~5K_(=~NEMc35n6kSuN8VkFY!^Tr#DZYE{k6oOwfbpkJr+<>L6fDTMuym4zrT0(= zmqkVRE2)D&oaZ`VjInnT_#2BqLi>e(0$%SxmM2gL>#2hY)WLe{fHtf4GxeXauzIzH z)wN)S>R|_aB6p*Fh1WwFhp%$)L0RvntjAHlYthX=y5s%5INytYKK)p$xxM0A`S+rq zvncD<*esQ`a5{~$E1|6MSCp4f*7CtIN5I^_x`eV;S;{6Y1gBMo^nVqv%@aPgHp|dQ z8RpqIO?rgW>6BqQV{J1j!v(R9^D4)gl;HpayGI#TQ+66VzZF|>8D%(_GQ5m3984Le z843P;%MNJ1kL-ZT?;Us04&W;b+BwQ>EafzVGFt-9csVbl%w|3oH`g1-_%_PyDa!19 z%0Oi%8_=6F66P@9&BPp)k!*m8|CG;+Ov;HpCrmp=`P^#RfE4^9vH`6b!`z3y%Cu;o z_odi`^IAS>(S~={=O4AHt7jy8Uq zLwmuU8{H8>3?eVgt@@kz^X>5C?_+1gp~a=|usHOcoZp2l`6Kab?AaISp!l@fYrU!4 zrW{&*eB#i(sB3ZPTd)t}&?aXFdwS^_>@hhr*waeasTODMY;k7n$zJS*IJC)`u_y2A z8hi30bs`S^qOL7_B6(_#KQFg9^lbRSzazz!9K}13yLlFeF2bI4hc}rVn*F_~U$ZaV zMVTC9{PgGO%F}S@2Ue!M_B82y;0fb3bfozA;?OQvT^i*l-mSLLdvJ2?M=n0CF{Cep z{dHhZ8~8N-vwp>tmDcf!Y8!07RN+S!ZxpBQ4yWElU6xVC@#s%4^vC9h39VWGmhx_m z{0r+H7XCOa{7Iucgg)pDYU6 z0qJ1{<&udWn&l&`hdZk3WVH zI%53qTe7xR_sF}+Ex}#oN8C7tb){3u$5W1IuqCW+IzDrJ=le_H$eOE$pL~Bd=Nbn| z+{>GOW4?nmc$8*~6SoQ3+e4h#OQ*>y zObDk{J3h7bCy>MqNcw@Hd`#J*Rw4adQ9As7a9*Q-qQkjPkf;DkqT4;M+SR>AQ zyEv=OVd1Xgu)-U0*E=~E-hdh4O+0uLQJQUG%_-cKvNE|V^)K#fa#dvC);8vNdHZe?o#}xd~RzpzXXjFNl}(!Ql@=@REMf{nNo8twWZAU!e!*TECw7 zmpA=R(X>h4^I1mK(*}4o7TLyM$>V7Dl9IjXj{Ot{Pp3@8K}*Am~9^Xw!4v+aoB+eC@1-z1~T{ZNyZh*?^?rL%>(HD zCy`-Qtfx?=zQbC^9Gm5YUt$b)bvwSH1GEd^3;Q*XX*Bx)sXnv@j_~~_yC<&SXtb@G z#(K*~Tv1g=;nj+x3voS&4EAy0(?SOOgcC*+MiIsn#t~jb*qX2%VOzqEgdGTz2`?c` zCrl%}jPO#zZiF7fo`hb)?-2GOypr$=!mA0dBK$7lb+paEWUbjIKFHb&K^_S2`<>HW zr}4rA7;n2vK2^pK59RuE{H>ER*jo%c{pX}af2ZWcx$|5d>uTW?bMA7@{jmPJp?7x=$e%Eoa|6`X?-iJJ|i!{pb;=A(qrtp3>WtRH$ ztBwue8{NQxzfRnQy#xRt#MdZQm6;!(K`X~64PlUIJ=h=otOO@1r9eh+yNzZG7O4Z-WC z@o%>Ht(&!KlxEtwCbmi!gcn1|i)q(Mr_%d0X_a4BZA-Pegav<1kSzJ4ad8ePtdAj+SHs-}#)QEWn;GOJb zDCWr?oDT2SHiLI-z|k|~UHtzDysJXz&KB?f3Xih!?)4D7b8`P&@vaZp_zP!ie`bu& z+XtW6`Q!`WqgPlu1P)t?EPWX)HSm+kf7ldDx1!7J+u1L(z0vQ}5G-Y%I#Z|j@O%w8 zx)vPWi>`cNVX5To!r>@m*3rSe7LNMxZ3;&hQ@3Y>qpDXMM_&%XQPu0&;^+wrN4LT! 
zo-yFcv`sRc=hqi6t}Kim%^Y3LLlH(FI!BDQ>$NFHt6uZ=H)3=hywb#I`8C7cRWIRR zyBRqhNB-o~+(+F?ZdI=d$UzoZmJX(6f!XO`S{9g{4rXV8+36veE&rzWiRd#@d^5IA zeADS{#O)FIuH}~y&mMuTkY55{$~yW;mpA=J$$93TY14x|+qMIl7RQdEEKJ@=SFT|^f#*C>yMO$?1N;zH}`CQZQBL)>B&wdWGTNfW`1qkupiqn0N<|sAG#-g{XJ}h zxZ(FGPjN%>>+{{7aF9=ZZwdTD?TWLqH7*zHDo3z>Q6zIe-OK@vVh(UL>ng_tEL#(Z zVIK*bU(cis`6Tw{Cd#av!(BCvuqUDR%DIVokF~53{~+ae864y$xYl%QPuVWKvlSb) zf%kVm;|$0)-9(>6ZHE3A>94)U5zyF9n~!T;L_Yo&*Y@Q>zJQWw;8OM#D3-DSInOClq*T=k?en~a`qgBm4vNaaDF)7 zI9F2DM;s?E338>>pZ7YJiteQu?ft6XDd=hYR!RONpV8;ro%Qk^?O98*ga0XmzI@8- zt%*r`S09P)?_qr?#sSc-jIG!|Zrkn>PmRtMgpto>pb$3C=^(eHSOEn95avN&YQfi25H z7TcGNH=XV4Qjx`Hk=@yR>-&yG|3|cqU!(oeKE^2DNc*^5 zE2CUvfy}lG`BL3#{S1|HW1Az~^s5gIv1emwqsyL!)+zlURZml=_$aIPQKt!fQ+=Fj z`(;w6>1Lfe{aFsDe=Bu5l{zh?PPbF1+xXsK=5487)gL;HI%N)D`7rAA8R~Qxb@~i- zI*dAfhB}?nxXVptJI`Ewt*akUD#Yx=N&8CSel}t(-ka*F&k7Wx7WjW*6s$ z5BEJbpYW$&ygYpaZLxV)ef>H-*1r+m{K(PH-=BH!%gIYW#-!AvzgM~1`Ukj@%(^dQrasm&){ zoaby;=cB$Di@k0F-@6^3_bb%bZ;Uqn!?cB2!@c|vZQ%)&DZYl6!uZdBQ&%cG+s4*{ z@6yG^)>>C%LfTxrsFNA!xVN$0^?}-58dKl24Q1Qg&~}(%+116!h}{lb&AHlIjr$1< z#f}ZZp5269yAk_#18e#Y4xDhg%=Nwr&uI5>2%OpO^>YSf_qKI$1w!qf_(G`7YtG&& zzh|;{vUR&zL)5l)e)4JCxjopqwb-^j*uTTqWHL6V(a!C`&h>PF%hvFJl zJ+XB;*t(wBx*Tj>Pi$Qdwyr0(E(crJ6I+*qt?P-c%fZ(5#Mb3t>w2Drtn>Nih8Z&re>@z~L|*p(x2 zlq1|PXFmTCY$_OAH5Hwl3NQKyd-^%{R5<&Hi+LBWpgpa}p7wMxUldLxJGu>fs{hP0 z;!Vu$X|(lMVo$@+;nUeu`VUMUPNDDD$J_zip5BT*b)dtYv8P4gR{}ab3;b$oPyfFb z>}g$VcR%Kt_8a7A^t1WlU`?_2T)&FllT2HkWZ7!g<0d_Knzp*bNwzu(+uc7Z z$=}|z)g2ma^>S>ru>T{D)7fep`!~r}YyGHmwAIhIV5`UMX>5DSPv2Bt&ec{+UM`58 zMjl$S)3VXA;I90x;x3io%51pH`ILnsWI_C3HL@UH@Y8P#4?q^qg$HQ8i{^L${y5Y3 zs`13lwYhi03Ch6j=GxtA7dCA_+q|G@JG>B{VYb7mFS{MCF;*9%t*tqk!8Z1pxQ6zJ zbizSgaFH-LNjTgjf;M(!!&+OKdtlEUGS~WL98>}07D}-vTK6~J+Gj*%B+SNFy;Pj( z0mdt6?xWfcS+vE}&+36Q%%rW8f={VC<(&XO));W@Pb=ScpULo3!&$7c>uOtUq75C# zd-IHhI_-ttW%Ssied$MfD90DL!Wrj(<$wDE%UMsC^%su6@PQF{g*B?@CmP3J{*fy% zzpHV4c@M5xM_V?h8<^RJ|6Tt5%)piSe?A)N3;g4*%)r&e_rnRk!2ceT8R$lwnQ^|r zP)}yyg&!G#zp#(u%el_L>#?rDvb)0q|J@@zu>6jQz@NKD20o;WmHrzT>${V&6@%eg z%7@mq9maWu;qIq>d-@XRWqhLx-^_I}SB|u#k(ZsUA66CS3*_JC3n=}iON; z-0BLLY399dJ$-@dSYKena9`l3v;YF2YrS0r9F(mQubLbCXcnqzen?j zuu1BlFvs7p7XNNIbSZTnL!Hx)xir|18LUTlW^jyvhyD%xB<6aNTH|&e<8ewzPwf4p zk18EYSSx=%>(#37bu_vVdxb9$PapXESNQ@?9Ha{>>?fd?hwja<0Evh&@lSR z4;anZ#|qZDRUDPXJ(M-tW{{pjWKiE&%s2E*W02fDyNP2_XCttX^2$nx^9RRUGMC(1 z$7~Ggbn~q!+DiC{%za-?IZmZ-o4g{!>joglUg~C^;jYUhUVSHpZ|o!wLms~GD1CnQ zjeP$GbZRiVbv-&Z2wfWp-_L^YJ1xF{b~;Gkx;dBjY;;hzs3raLrUdnGC>-f*^v}zC z(mx07*)CZ=ujKd;addB3|$a-VLg$qCVYk`uVz;qINBkd-j>ShjXoGf8gH2q;T)c=-Mdadz@o_>Sbn}`HWc~#Ta&tQ=dxQp<~z;=hUQ( zHp2(~|S~PZn`IxlUu;yT-pOPK}Kp z$hDrQ@jP^Fe2Kf2*VfhZiD6UE0>f?~Z6{$EV`P+;G;leCJyZ@O3)SvS??<=#0uO)R zcb?1A@V@e~5qOPy4=vBHI>(^I9qwH={HtTEZ+j49^T5S4xK}97p5OEuUfBm5vqx9A zjM?}+Ow67ZZ(+7Ihs+ITYs|q;3$uk0!cy7Y=I~l;-3zZP>63D`_IcyLDt>_!@OYlQo}vMWyV zt#tZQrN2Ra4Wk_MyE5+uot**>NVhfbI4vnP`BL89g@AXu3z6*U( z-qYKsRt@$EyAvEEKpV`qPpU)JUlz7WMzX8s9aTC1W%-)p3Yw@mb5y5nqvQL0`m+p(ld|8gU z+2WWD97ypo2B0NAImg1LTK3dFuY8g{SX3WNtb9pt)wkJH_s_&9<4xU<^A}otvWPPC z!vS+GE~$8DehoZw_XXsULsQRQuTIA!<5){oI`w6FWWSBRKo|6=FM2hqW2X0~IhldW z(XA=$n=+gG;*%GGN4|>Ao^KAh9Q{APHW3b)9T8XtmW1N=`QnjrwE4bn9yzcnE~!l^ zT!LSSM_vv#iASE_ax}d3bDmU&U!6O;OfLRaaI?bVjk3++jiGh?)zgP;g%o4*7{Of&67EB|Kt5ETdOU7H;7Lh!?uZO*rw8{YgS zxZ%ND-V6-DrYvTzr1nm?`Jd@y#OF8$pW+yNOX{O7sCmTpH=1i6XpCutzp;?9&X&LN zX2#~3+z*^->hG|9jeW2+w!e{aM5ezZ=x=-ge`Ab4&$;PZ*{PAvit8ps2f2pL{Yuya zP`<_mNrl(F+=Tm~+hy{RY)w!YHE&oAGh5yIi#fp3R$Pw9FYoJ8MR$#?8{`>?N!?F%eC5^Sf2^0YPh&-EX@ zuQj7f!h@X5PrJcvU(jA0O_{39d*Jt)#yq=Jc#!7UtIe^HGFG3u$HCq^_{8R^KNv2l 
zHb)8R@mfB3Gp~5=C%w`5?ci*)#M#VrrUu(ZS?qB&gmjK*!FRU?&NY;>T})f1 zaUZ=(bI$==E@Lh^RW8R!E|JHv$YTLzB_FuSn_7^^-5o97M4wa3@)+A7k7cxjZ2w!I zA@aYq@*g0bJACrL#rZ!mDz4j>nB~13`I>@!K7o7|MGy4$#rHOxBRBnW)2G#^<9Cw# z;&+>X|LwNuzR4FQeJ8n3^mmePv;1$jb?lQojB7pb!}HcjeavUy;oeI(8s3-TP3PCA z7w-K7x+A}v_*g}17c-qx@y|VuA8i$WF!}N1uagf>>&Bb&i15w*fonaV%5(YV^z4`1 zdpXYv{PK%%lV6+( zb8>5fm~$aHj(2+_hFwNkJK`)1!{^l;hDk3^hhN$+#`Kwplj8I0371)e->32d_`HOR z=eJ(qr52wF#aPWLwK29rW1cL&l8R63LhzN-;dQ3v!=lf~tREY%Z9kUn!wSXfufvZe zu3}@q?Z-NJ%gNZk7#~toP7=fX+g+}Se;U&}W^O3{0`>!+4=;JS2iGBepe^E8#iR!Aa5_%X&cd%k zi<6|OZ^KH{iZI#eQ_~@if{*aD9L2CWN~PW-zRN=L^at{!INoF2kkVfQcUX*ESD%5i z+^_j`@PO0l{7Ia}@*|buk2d{d=!*I!L-i??v&^?Ri_+YU|Bm5%;vFgj`f2N>3!~4G zyV!A@3wL?%Y`6>lnx^u5&fMkVuf|<^Q?GrgSJ}@`I%RsdBQN4G>Ob5O>#G-sxe(lC zDSGiTGNgV(aTp`LkI6Mw(Z~1!93lbEqrOP>IjRp)d)J6-?4U1kAJ=-mis$NEH21aP z-XA)Jdl#Z(=ht6xVfzkS)ct1KBh7J_@7lU=aToOgUfR*;?QH9RY^Jx8Iz7MsHN*Qn z^(S0Y|AorCF!j>pK_Pvxqu_p_JTafX)qCiN5l76Te%05iKAC~s(;8#Xy`GW0AU2$} z%L=bM8*JLV!)iZS{d^hZSNPPKv|Mkcr!(KM`-HD=k{^E`7KFL)(>b3wQb*mohgjr#LKu{f057@9*1-m1S& z+(vkOA^6RiaQ)lLanx7W635Y8r7`$P1`u{96kiPBe~IQ8!x4M%v0u$(yq7a9EGL6Q zbMp2wj(#8Q_Lr=7uKFG|=2+{kr8pdQo52Qg92fqS4E9(W#WC0s?jOt=Sl7G5{Wmag zw*YlZ=SE1-)^b~sF222;^{yhA%lOulzU;?8P9OKe#~I^RZFnJPDd$r; zkKEWf=M(PaIzA3p?p$?q@125S7}R{pCwDTT}@b%9sm#HTo2hO25J^DDIPd<63j z-kLqhJJqc=W?J&p{)`_XZQCkRUVnR5$o;ODCT}y0H^fWxDVt2bA(=Cn!%^WHv9YCc z>t>Z(k8>_J!z?%Xz`mJsbDUCc85gMBs?bx(o9%C!M!l3GOY|odk0so#{pFb(zn8fi zD`rmeW>Hu9)KymhjN~nqDX*_&E@s2MuH0kqqrKqSTIOJt;p54puFU>$uE#L1PBNKC ze{5*oojDDT^PC3blr((%^{z|KEOhR9bT4w_^XFIZ(63OvGj)-Ae!hR_{|of)T*~bn z_3kT_+nHs)1-+AwS$b#cA@ww}fBM&}cV9XUr|O;d5fJ`V(LQtH7cx@eV4QbNWo!nVK#SSZv4*o#Iq(L>A?qe>!mT~ zx^B*RpLY;tIm+S!`J89T)*x?T%zvQYpm+rJ@gDWDkF{L2UMcf4OPxiW@ptB(&~LET>FWu!gT*mqqc$|*7|L(guasY8>74WH_{Q@4 z&FbdKxc#McvuhVqFHQYKCcZh{Je$0nK{wALFJGH(qD#_CQ+MEj=cJp`LrXV9dB#b) z$ve``&~JRBy4hlW&q6mZKz^CCu&%DVqo6j!St1O6l7r0S7Dg`yqYuzl zysy)9IX87$kaHMY*sW7ZPE4oyIfvl*ZtSgtw!n>PsmbaqdIkLc!Fc0?BhIkm8s?(7 z%d_fFxQzPEuC}EPaNfz0dPyM1amk{bt*o)%nYNhPco7_t9EM}SM$IK>{`YsZL{c4hs)sAHMWFow5yw8k)ymN7*}vD&BDrTx3H zx4S48jfsq*EM)%+!+m9w<1)*hqJRCZwv)M4ft0qVao^FzLTVZO3#`q(GOwVY1HnPo)#?KweXDNB6iSL(lw=_p!~$JmEj z?5pZ7Dl_=aD)?G#$e0Ai926Uj32-o$AoRcPknsqy%>OSWe!Zu63d4%7&C@&|^01ir zqKWT5>O=ND)!{py_VfH>yw{U5?#a7jJq~xnm;pO3y{C7GmnA%cet;8GBA7>)$lRW` z%<*Z*T%Y#L`AK3PU59|l!ERx_1J;b014rr0yf*Q~>fdN>8>4=U%MsWXY1A+Njxjuu z@y=a&r-pg+#rP_o3=1>Y2mZd%rs}U>WIi*U^4>Yds4r#jg&}Yp$y06ju)zJSN!t&u z^IOIsxEYI3lW){NNxo`E81;7^d2IbPhaX%2dwArocie#keVLQb-23``7xR#ufkf3S zeb2=lE{-l7@5CB`b*YZP{-M75-q#wQ_{$B?j2?z($ROjjT{kd?ighPeAqzEQeD%wt z-R4>y(v6utjNywOe)+YMc(*^*=-}^cTzSkD9 z|7Oh(jrv8=t^GAWFzV~r%l+6Rg93}9JNgej7Zvc5uJxS^&)Pu_&xe;gJs;fX@(gfY zT$kF(7{2n_Fi(vuvwUM}xaaSQ5uTb8zVdhb8|5{TndR?ZOV~cM{5}`q#e|WBDVgO@ zIteofBM7@>mfy&_0yW(USy!N@Z)SOoo?k=A^V&9qPx$ITyVqC0pLH5a$YUyNPOd_J zJF`|hYd{wNK9V(nkj-@FHqK++N^hDmyaso2qJ!QSpe0z(K7a@10u_Y?Mf`!SDq;Uv%Q6i0b& z2FHlt{VSF~=IO=#yI*VQQZ+>hEaFGuFtgJwaYsAG0>nQEs+j7`uI7-o>6h?{xAUdi4^|zPe<}+)-Bn zhV|*~^K?x0d1ejwd2a6SGwZ^~IIVTP4bMMQ9i9({J3VXqyF8P4-(ZdFGuF{2UwK#L zsAel6>zABTXFGi5^ud(Z)Dh}=(>mPiD}PeY4-o2k(>gukE5A|CKPS}lCUras`O-Jv z)jP<^yVf_}Weoz!$-AsUpg7-UT^z~DyR3_&INoJl9LdSMtcxRg%w>Ge+2qxfNAhaQ zBY89Bk-VAmNM1~NBrm2sl6F%bN&An$;HG74raP5%DBYb2DVuk@5-Qz22$k;Mgi812 zgi5#cL+O@&G%fq4W!sVF;s!92|5lBb`nA8~b8_(Q7Yv zcvkmtdRB}#ir+&&klcvQ^8KF{w9e4Y`mHg$}B8D90U;rV#1!^3(8b*XT!zu)2X{NqP1 zPvRiM^OI!5Gxbu#Gq9`SNhU8Fxc63PhxuQvIb^@9_wgZNG-PqU?@6Ff6c?|J(0qyh9Xn zGX(w;*HHho>-p}Jj8TfmZm&%>0&&>s>DL>tsXl6ur&{W$HZCY{lB=D+VIBD|B2Af) zZ)6Scuf}iko^TJjnC8H?``{J)Z}~;oB;;mDlyPM#HoXo`Ci}jKa}RPd067^7*YAOR 
zXm7Ft&c(o_R(qbMK;+ZilD(dc&i57e%u^Au=!qd9cTgdhtz^VKZNR&+GSP z`aD{O0sjxD)Po2ORJRaKZ0`Gq=ODh6mt?=6vrIBc(15*&jiu zeHBN@=R^L(Sx4bb?&Ttvsj-`~jiZ}$(m1j>@;QtRc{y1d@^g%XBXf+S;~!hd|0>9H z8t;a_H&y@JFfu29!`(S)2lI(z{9{IHUb3+yFIoR7f^&z5`958PO;4K`{BPRdPktv8 z-mLzuRGupxhMx0Z?FXF&uS}h|;-vcpq}fjYZsakI=Sov(ykF&>(k;K3(jBKfHxs|! zvD3ZTb?<*E{RVp>V-NN(7A6p?-I5ySD=;c@lhZhiu6fCJTi_=0)y)y))vPaHpyye7 zuj|O<)YRPM)a=}3@iaH>D2G)(s>_1JU|o6@@{Kgsx7b13*Dfd8r|j7hRvKE4Gg%`! z{MbOVzP-qR*3wIltz6PMv2sc8L6u9slU=#Qu6NbvLXL#bHs|!=xQ$~12eL43%7#(n z!ZtoQ?)r@r#${30QxA^H$vSvziidb+xyz1%3v*o@P~c_E&e zvQ8aI)^E0C{Y-h<%sPjX_sy;pGjt6c8(ryx03sAo-eAXN6d)23}n{_o3t zat>{G))kEMYi-#|umpW5-i%$)_q+06`&>FG{m}SLVO@chmP|q$D@^?A(K3Ds^9(T0 zmfaHhUce%i&2BLGB>X~`&9bv}Q*~^Ar=Te&@$N|&au(N$yV}Me!lwTZ{kEBJFn)*j zZ~!~hpVXDm;GPkdG%l-C`natAdC8far}-S?=1De@yG?|ZV5tLJFn}`KOu7@0_k@U~ zaedH(K7=D@Y|7roF=+MpoV!==&$)i}gE^x(e}2Zi>>qRgAjj~v<8vNbyFcglwGZa} zg!5%Q%QcD^BU)6?TJvT;9F*ZqY`{n`{<(?1>_I-~IM?ctIT@?(&UtR)yllOfMp+_L z-Bh}W%>O~h?innQ6RsJ)ZDgVu2ZLsWflYJ_C zjfqF}f6yxXea)2pgI3w^`+u_RyBg`XA zCmczbL6}cy>YB|{#D}c+b-yG;CZ*$&OFM4qc`CA;#`?6m6B!$1g!#|)I#kbvF-`w( z>$$BL!a0X8H#w2x3J%Gh&Fd;jyYgQY!jI;Y#&mRaJckdx%|w4Yb1vO2T>aymqSX^j zyvxO2=kjbB*Lkb=nCmhezzo{lc(a|{A2d#d*yOVdKW9dnE)2}x&*~PKBYkqPO z@u_cQa#CKhFJZ-qOZne?>wgQ)|BbT#H|nJSm015p=7{)s^T%BVm+rE9R8GRj zcaLlR@u+dVS8vJbvwCaJWlQE|kLNho_l3*Vmd%W#{?g$(`5a+tRz7LPGxk^*JFTYL8aZDzTso59$xtT`BYKy}ZU>DBxy{37RAN7;Z27PxytM6R5 z|HYvL-q`olf6qQ~qSLb+zvlQ2$KRu`NIrVe>=FMU{4DzN zitIKT>mA-loNBMBZ%6g6x@KH|1BW%J>xOn&NZVl0$K%J{^YMe@rf&Eo=X%0>xL&y7 zo}9O=w%PE1^Zefrj+?#llbqWL@8Np+#(Q!$nC&ymKV!8qnIB4f$CvGzh5h^OiV^w! zM2XT}LHPAzk@a9jhH~*KsHx*0_Q*$1#=m+L^v{{@zjj#W@p_TrukMqW5!CgmMykGvx%#gN%%0+ba-PEVV$D@SPixxG>nMG#C3HKYqHp9pleq6AbP&EvpNYmpOqyr}@(p)Gy$9|rByz; ze9k+wzQZVv$3M;;H`N!${Fl6O?`+tX^A`U7Ck7=kE`Q{>g}(4{3&H-!H*C+Dx?xn# z%Fr%H!*{9l8u;Y``()Q&KAaXomlBy zSJTBP*xAcbu&bZ5V9x+o!M+>93O>DswXJ^;5jZ@8{iN=72R?fsDsXHVYXg(_GB97d zT|j8-R2@Ei;YBFU$XBN_N~5h^i~ZQiUiG^<_TcA}Uaff4mpu>+a#APh*yDk;!*8b! 
zzt3@1%2ng8x+XulU!*(dVXmik>O5{LGGgMm&owTKZ^h#Go977Ph{~cei;C)B zTw9Q{ckQ^G4>%IuFUWau=Dh4aJR8R0}j+puTSlQ8_v;;q{zPBwjYlD|CUFB|!juQ@1xr_WyjHm~vft4{mFbtN;iP2MdzN_FD640OmqDx?nS3Q!`7^Lmc$p7A1~GGF z^5ky`R!UZVUji%7C4b*$oXq*xr(V(Za!^hW5+3b3z*n)t0vLa0+edZzD&nh9eF7 ziX8D=^ZRYa$?9*wKCiLla;7DhHfL-OCmZea>D!&n^Tq$Pt54VdZ;4+v^%I=!d*9Y} z^@)|Pb+sApf@Am?Kf5=Yao{nG2ajc3cpP&A;{(U1xQY+qH$3c$C_RSna9alR0H1aS zx-u^|ig~e`J9~t_(Q@Wv)SQ@dTUX{jxYs=NEBu=EpRkYp3Gg!&zs}Jv%wuiS-tRs> zptudsyE4C3eW~su?*E@b%x`U9s(Gzjm{+mYm1NG9)qK{Y#BqSQG;i(j4;+Ebv`wmi zz}&lwP9N76S100W+Yry8TfzeKn9EVufi{*iv8sl>eWMwpUpl43(RR!?tYQAf;eJN_ zkv2xXeB39c@A#nl+6-U)rOc73Wlz~IgdTj;wJxK+mVDJRCvvkZk@P2)o;>Hb1AAd7 zmR`oY%FDrw>jQ4)6C7dB;S@*vy6J6=;nT8={k6<-jB*y%<{Wjpt5OCU`x9TD zHs|8q6W6EEjyS@Y)z7an>WAV#Iw@U_>+5%ZlohCbtnJ)kt)u+M{xvZ0ge#b~&y}tR zj6kqlo2IJ`>1xZ~>+KrSc2c~`FLOKIW)7g4PvSYeckpTBQXY3u=E~<0=7gqDUv;d3 z>1GUX6o=NmxMT0c_1SfgtgoZ&cQf~B@f9(1cekg{r*(|~DfZ|74Rb&1sQdk{4yBtp z_AsX>in^G^K7!2aDAqh=4~LsIl9F7ptc{Ua_ui)`hG)db`-_~>RRc6vmNI&A%AH3S zvMI* z1U|dd!FjU(^Z#W8j@_>F2=o5u|4Ziy{?G1YPrgFd*ai34VC2)<{j-}j*IfAbZkS1K5?}!)xIvF*9o!l z{uk*Zn_pOQ?LyX_d?MQjJk5H%h3tvb%4yUalMAm6k2mVe;@bJOC#4bF-akN`1z+2( z?Ah4u@QG#N zy=;X=h815m`z0zZiM10b_4MziSsq1Y(hojqIIS6FD#-9o4+=TCky(`B3IL7*>N^ZE`y5qSr>JgGbE9`YvVJJvoq3WY%K2-UUwOC`3W*!9aebV2k7oNXY+sO zIRk$lrs)2lMn1cq8z$c82Ml8)S-DkL>!vy zdJ-S{NceO5xK80i?}wXwNc%)w=}N5HiA}x{{8KWO*)R7EUslJmNWtMVi zNoKcVw?p}piF-Q+29{H%cfm6cf_a;q9qM+rV_qpdss`+HgLD2rJvrwpSG%eK@TkjQ zesa#g(V0zPc6V^+q;y5WpEhL${`y#ZlRwpc6lP+ooxVd#+Ycny?M{(vt>?0h^nH$8 zen#H5TXKHPm2B$ON#8+luXlCwf9ATl)Rglx#3_Dg>L_J(2wj)F*P$n0z^?|ttGZjf z>V9-^Cmd}Ibt0_ZO&a!+Ch4I#kI5|_H3GtDjo~Om&-Q}Ln_USeHt&QxB^XJ5?eV=b zF4n~75f(m&fmt_37$$cRcdCZ3M8Fd&7_<0}n|%gieaByB9nznJX(JRj-gLeY zUKkl0?%z({Hj|GXK$SJblL=|k;d`9F-Cn_EAmEKbkEEy_eXL)%*vmS@2nzU zTgb~o@+JPYi98L&4(%l_?P+~_&ILvwBd6lr1>js@PAKQ928TAo!))GFdog?k`_KVC z)z4ZJ=?mJ7!n+Fac{6g{wM`@Es&NL}eT`hJLj%|P0`6art+IJm9^-Xb?4>?|x@<8X zbT!Dcg7F-%^zaMxw4SsdyLT9_`30CSp8orR@G!Vs0NX0=CGJ^^tulES@7BC9I55u@Rrdw>a2PvV3fD9F z7&s+9)(Q@$_}z|9CvmdH?6)IMR!cg%k&e3XR{k;Qy3NP-@QrG)WC7oOpiL{ko3u#B z*E%|)1JTHOYuYwJ9UlU}yVdCE?*qqSAA~9w`rVzeXxGQK^AAR*Z$Nf^(q+QVaIni5 zch&`+lP*dRCAZSUXV3?&1+*AnsB|D7KE@d2Vzs5V)0VmcT{wUoZ%*_bKa@oJI)M|& z@Dc89;a)xWwsNnF`5v;9Xz-rFd&cn{=6lE&_v*Q~m3v*_S_iD}Z|3_)ER5Jpy1Jl4 zhb$e+BYk=<-61{e>UnP=c(972ns+B5e?7p6EWSICci!e(FA(-6{xt51vu)$vvxKT| z_*%at%5y90NQv8t=joZ=)BhBQc-=vG-OngLA9>qCSVG8p74@m)VS&@AFCzRIp~_EK zI5IkjX^hi7o3w~GiCN^-)PBwQdDtE;ty|mM>w0Auf{~F`} zKlqHuTQz!qD*k<|dEUu+pd1@9FvRb5c0BKtxV{0N*O&&Y&c7u*uQ6TnlU+ET*Z7_A z>;IqQd12t&-Qb(JUT2+yZ#Lf>WIYq-yW4su-j@nqi}R&}&o#5!Qt>Ss65AOo-JZ#>#o98*`TX3)juoIu33*TQD zKEQDL6(Z={bQB-3`ZkZyuW$%|uHCPB6z+czKi<{w^1JX2&V#e((vB@}gn4#fi@9rE$LfOLEXT#c%jwz zwQT|jN(8|uz5YuOgj|Ra0U;R7@B5sYGnvfpCYxjffu4Lm^U3ba?97?xJkN8!=Xt*8 znFY393f=QM#116j>5Gd-<2gs+c}L>8GZE`Vp0Gda{i5zMt#iTuD?=_=heR@AvbpWw zTrRa&w7%qbm>rqra*6dt!e49Gapg&Qbvf9&N)P zr#jn_MXQmA`YpB(v3-W^6nqqT2So#M-UytR@DA0cj3^p|b)BhH3o{RE30J$vQ?6Y4 znR~nwIbmo1d!8F6z3)lRf8R6I`JTt7@9*~fd!C!7yzlA${QI7pUGI73=)e0u_Wv0B z@4^1Zu>V1Q|J}IGBe>3FTxS8U6QN(nmA}Vx+oV!Yul!Qa?an=(D1HCE*uN0_{~P-k zVgEe+I(?qs;~6%k)bow!OFhF~dpweDW!(?4ZwdBgV_z=q>$kEl-}!-O>!iJ&0_R@O zYxvBc>mz*r0{hFb|1>`H=UnUh!1Lymy`JY>dp+CmnLp=2eEt&q|APJJ@R>j7^Ui&q zwksjr$rRs7rb3>YT?lzwyUS z*YrvFQ%w$Hd=lf681F;JPpHdUd*TzklP*YoCE;I%e--#2^DBY>JN10ZI__KNqcp#? 
z&PDw&6Xu)NIcQ7s&$+VHEtv0Q`(@PU9wg!dA}%1}0u&Duah5APZgsi*Y%;iDRI?~K zBPZoQv313E6SnTyZo$?Q+ilo-VfzlYYp@N&_6=-zU`xd|2DS1Mz(Y3sYaZ9+aZD@T z<#9}F+>pmHt#KS0XRJC-F+*$I%Nj#$DSk*e{RPwpl5BZ(Qop=9iet~Q&HRmmZCN_L zBfcwj+2cvOyVN6hDfRTl=e*U@_*IBSFT=46e4mD6nK(8bpO@*!Uc_%V;`j>e+lb>U zaeNbf#&H$DeHq7g;rlTh+l^z#@%d%_*mnH3nC{yZ_nnOUcEx@1xfsWZzq_68i|^BL zEEC737<9a|r4!?79hdW!RdyUzQc_h&FY zu=c;TKdt>IuB(lALJl1(2R47NY{(+tHUv5Ej*>Ushw|Ip=hWcc<{QZN;SbC{X9H|h zMXoHUPr)(vJJ_??XEga8j+uP|`6BXTBY>sDV&hNdBsj5-557Jdxufi4=lj{n&1J*J zvymIh?(TX&8#%db*myQ_KiQc3Wh1wi4I9r!jwkyYuF`De&a(YvEv~EOK7|~@Du)(r zJWTQqXyYq$5@6%mu<>l*ifrJD>k5LpE?j_D22K2KrrB{BAOS zhgU@c7c~7HzQe||G45w$+-JXAl`a+Bd30gf3Mrc2r(Ao&-jGFk`D~2w36G%eERFFa zwcPxs2>*Qi1k?E61!Mh<*fUmO4d{2f2=`m&8@p)ymt}g7d2Re*6YlS?;eM*u zW5xYVa6W3WTkX+m-&T7T=Qrimv{yb%Pfu$&wFX?y*2Gng-) z#THa&LL6U>XAd{b{eQuJKmIv*=Dz~pyb1pt`~HD_P59^Fn*R?t z=LY{A-w)xt!9Rzrxa^zo&#~VsKZc_rcitaO_~+Qq`=SZ|95UsyHu&e*$7O8r&ml)H zTgE@fcP>-LKNs>WV0jxM&jN;$TsTQ~I+&;PTv0;ay={6N1xJFKr; zH}#pZZoQd)WAQ{Wu8DC?jBBiJMY!Txm&*@dQhvJ0Z*YHQoo8@=WsUO~{B%>S(cq_> z;$n=SZkod}eseSNMZ)odpRRG&89zOvG5^+K{B)BZSo_}^=d|`8#{*nXc>dWsE{W@E zITay?w#k9b??dIMw?m)lK363_9o%%}mKymq;HD#Y)W}ZBs@K%ug5B z)%gEH4(*mhi#Be~;kQHH0c|`;&i|6|)8)&|PdE8XE6%fySB#%-!hC%E=i@%dk=F5F zTvr?SgdEx?hhV-T+S4(@pkZ9Ul$8s-ABM6<>(o1s-SHDi@}s zq4Lw4=})>&r{kxa+G~}M)!$#~$5}tN#7{TLgPq^e{B)CVG~^q^_g(47gUUb1^9GcE z5Z~VE`03%Ue@=Rt55G3aQvaOt_RoWtKE+c1oaU+6{}}cgd}{FAAHj84{c~KWN&R!m zTjYa_KFL!5obnggzXYwAg;pZS5lCL2zr!Ft-Utp4#F)x6ON$!69bL=O%b9otg1jv%)+ayni zeI(l^{By{WYZNfj-=c0adP&5Br zpMx6w^C14SUG>l5Q$<`r#09oOXW4t^M4Ux?&xDAxcqVt!xWMJ|vzMG-uH#*vx3i9O zJdSCN<22!y>#@Wp{Bk{xXzYq<(q5e7LhQ)O?{*aJEyX4TKjazf@ZIpLF8xNARza;$f%U%E6H2zz0 zo^`x3_~kn0)^9|wFyMVsoYT%a#!9!O2wc4%K{>AxP z9nN6$C8npL@XLer2mQX&@XLdYuZ9kUI3A1hb$)pec?Q*)5akgE-SxWw+HL^ zXz)pc@yo^USpB(BaaUXA$#k?e{PH0ELf7e({Bpg$R{2=R!z=wb>&H;|<+?oB`ImxU zuKPwqzCnE7m3};^{DVAiK=}vp?VXNa9`5?*lQ4JS_0Pd|=0306^Qa9s#Z>|^!M z&HVEzdi`_GKgV}g|D5yBAuCq@9C9*b=+tD$>YtnB$L0-?JF9;VnRD40?@z-qKmIvn z%Icq+^PYvj-@by{#0di#Z&pH1b-&y^0&OaCO)asuLd202~p|?UmTW9Y9`RBs^ zwK|5vpKA4a#rVWUxj})*FWzB{+00} zO?t`r=cf6n!9O?6MH&CxjF~w9+%yMe{PSk!pY5uD4!L(sMVfD{VeZ}J@ zetpL4pPTy3ShwCxzp?nK7}vzOCdRd%I|O@4#>D{EYl`zw|=#Q8kV zWAM{4X0mrJA_i&j(@k+P#!ol(MN9ET!tsKiZsMn3iu&g!J+Ssa&sqI*&QC|}b5{SH^V3oL zoYg;XiJva6tJVD$a%i_4TC{O<4!<4p4rt>+a{iZupMJUPpPT%p73W#UD{Fq4kNv39?bm9z;`{*h&zq4a)6$5}tN#7{TLgPq^e{B)CVG~^q^_g(47gUUb1 z^9GcE5Z~S&e)_EQxn(1qQf;Y2${JXoQ8wq);{hbnKT=Az6J_;B-?yp9@!V^nJ3s!RU0q;{RMzV{bQ;$^ z16?^QIhDh@4o#M#lpooml;^Nf8`7ae<&e>|#F8y5dX#K=89I~}DXIMuWcAwaHuXQg zVOIyrq(kuv>F8AGQ90x|75cLb+e_FaoNvSVqysykN9E8XdS23_L6F%v$Z8eZm;l+3 z%&Kr-H}ok3`s9%lm7~z7W28@!cc{w}MyM;gOX^r$3$i=#m7JjD>iRSe`b2g) z4mwTOAw41;FQLAME=_|j@%9hIHI`DpLyx9nlOmUHDTXdl+e)EJ*P(6CN-XDn5yfPBx^c##*^;*@}BLll5REFZ3pTfviUB| zwENCyM__*>bS=tvaZ$AI;tsjstUbEuv|WZSNJVO#yUvl8lad}KWevc88UI~U-bZiO z<(575b;g>)DEG6Z(|z7uzwYVd3(G3|%f81>K2i3IbGgsH|Ha}mhuovI;#-pMg1vhY z>76TDDp&v=7?L1mmD_t4t?z>8MPCfabro(%bSQ^$zwC!^T1&?&lH7IcjzTwTo_MSy!aZUWD=Tl#& zuc7wFpuL06<~}>_?2@v0Ikwam;h5MTn+jh`?K}%#O6{zHef$=FZhb7;iTjMey|?54 zNj%RfyQ9d3bLsP7{Qq5I9DHAF(Z)pg`22rPoA=s3r_XyG|JUGuw}Z74A3gZU#QpeQ zi|=3H|4#hxf&IVy_>qZkCaxdv#69|AtUqOsFB&1oRzn}MoKaE%bkGYO^yWKqQeJaR zP1%J1EAZd>orhCa47fk#U3{;__v84#i;my+aLTR$GgD3_OWx0V$=;Kw_loNrNX7GP zx!90(6YPYp`6SMN#dN)0x6NeNJBHsCLssOAE-tzgx-l9$G77pf5;~LVtA8lN2OTOP zU6S;_h8?y8=x1-$^|JTGtv2uHBkbN&cSd->@IbfjiSm7!9ql_m)8V`DP>ipBvQ!6O zSC@k=S$g0%w_w`^+pD%m6jed*#$L==lPycN2|?CZoq>LyvfGO)p`QbB?g%>fj~EBj z?4JbN=-O+JEi9WWcU7FWuF8K&-AZ5mI&)1b`g8D7Da)86>+>9EKGwk#4$DWp3~PXR zT$QiIV(R;GU8w^yzc|@^;>U+{f}WkmzR+@cWUxA-uD5SvwG@~ZIks@F1+spGGy=jAerJj 
z$&|hCL&&pny_Jxs&`;R-g;v@-joCGWgZpUV541S9@UNO24E~=Ouf%x8>(kTv05Sdt z#8a66)Sd`s{guF5p0_Y|vG*$p{3_-HVm`p?D|MFnm37>=&QF`HztZQXewfL;{z{*d z8tbou%tzbx{twLWMVvsy3ATzjfru06>%a^iMJJ78T`oTxj`dZq#=5^qKpw&z>-CG@ zB6i5)w>sW6;RgL!F?vWB+!2|EF{x9NUOvD{*WSjvd7@$|vl?{$tp`8~cxA|1o|4+Ahe$*l@3Yc`U|H zIqJTqcYM^`fgFmQ`^AJ*33-m0zp2I6Eh8u8W^68Ow`1#rZ8$a-v#(lIM)7(cyEo$Y zi1~YaG{)_DKk~S?HJ)U}H`e&DwcoftiR)_d7$Jv_kpr8zV_g;bv?2D6(`UL5t*hdG zrv^DjtKZoG8>aOZ0euVhTm6S|zQsq7FCzc7HdZPaA-hAjbv-|~27HAkIl5rBZsqJa z@%|5|{{9bIUq$c#aOv;=p!HSs{tu`A{tsGTMeqM`webEA;zo(z7q?}x2t11n?$D`W#JG@g(BzysO9 z1KGd>?0n+Af#Xv!8C;bWQdeaxe9d@ULl))ovm@PKcz-NqWn;`w7%gR;utyeciilDI zt9Xf;g=2>h_bSMzsH0w}^otO{-_qk_R@@_gr|}ZQB^Rd4Sii;Q`QY`oRlal`TEEqj zUko-ro5$M|RgL5AgkIWsyBdCv$K{&nY@l%#YgMezVI4QG_T#Kyv7U?i)l1`?#ZImA zVCRR>f9CPa8>~+Pja%kE3F0S>HeRL2gZ4>~=MB;)e&a-Y)^~LTKYmCn`0?sH&5ti+ z>(2p)b56XH7XY*63r_r(f)g(|@p_KennMz648)p^)~tacPW%L&6K}-{Vm*vlU-Sj| zL}I;`Sg++2{P((+_?ryF3;w&YPSvP;FV=5*1TRw(yiCE%^t{u|dThae7wZnG4uP1% ziaBg+=CD`lyjAeuwK^J{*K5sl3I4lO&=&qX@ldvPkq*3}bJHl_8;BFX)WnJRj?i;& zA#&oM1Fx3F>4WiNc?}!O8VD=r#Tvg99C)o(jF7{nl>?i*U!@#)<_}`w5A^&w`32?^ zbZ-3WP;DOk+GcG&NNy80pN;iSf(tL?5Rj9FF(CW4%dj@0MVk+kKGS^ytb6gx?T3>K z&ukv`_TJF>8HUYUIP%w-IP!ZR)j0A)THwg-(+rJ<}imiConj= z;(S*3)9U|PD<7t}mzf9ehe5f1@%5P|T{83IpEF}lvo3`=HXE`M=Lf`It(8wuT?$dY z^!xVm;r*};*CoO>7M}c&7I^YC-2(IE&DdpiqQ+Y^IIDuk-lYEMl_D3WyPbsp?l<1* z>oWK{v#>(n=`kE56|ny^Zy;ekH75o}U1kvESf-f^+`}uEXlb<2p_1$D`&Ds~L1S^aqI=k?J0Jio^? z3^j>Z{rD+bp7)2?w*>oG{dn5P>&K(!E~_6u$x=Ta`xze*`}w?!)#n4p=s9fLrdaC7 zV;`#@kNrH)#A@}SCLgQ6ImuK%9{X6{k@oTW@u=0u>c>yf>&N3es~?Z=%zu)6Y5f}H zV#v#>%M1G#nB>K9Jjor~&5&=C`tc-pY>#0-mlw;8kbJS-4mlcmagr^zMYIp{Vz~m6 zE7f~~3|aknk}0-ad}sCJg*-L>k&tHr%d?U_3;k@By@ymk9x@gFV4vqL;a>|Fze9{y zo`qt(YOHT8#{Zyriedhi`18WQ76|-m;>imCD*US+@9He`E9%!hMN8{D=+I*GIpTGb7d!8R-zlalvIKi7D?jqtY zy8mT-6!Ht$E=3&ca{1XyT0h=|cMZSnLR~z_n8y*hU$(||4F0^SPmKEUrZ^a@A8(qY zwG>YzoG$9eU!wZ)rvA6$8*6;n+HX9!&-H`H%dK&1aa|4f2syM(4s6~YDtEpe`b_r; zz@zo!#RaSXO8Yx?{dm+iXLa5u>Gk7LJD$~#pQ6`~N9}l4KYo&4KOVK?S^fAaE!2+} z*DZk0%f3O#q1|$5(bmoR{C3DYh^_nO|HH|dzoP5MoBXO32U^E9D^BM)f#U;?Gp#s5 zTvr<(g&f)@hhV-V+sw3mVfxw< zZ@!uSpx+1JI{WEiFnu%E-wkX1_-4jsLx(~f$Hn>0+UmsSqJ`~~M2a87}Be&1Ss4&pET_*voPzgy=F9bP})Wban{HaN-V zI=JF|W4(B5<-_#$GV|d5FetD29gh0(&0tMKRw0h%;{49fhxfxaT$e6q{dkjqvif_2 z%PRQqS{-7@it6fKYPm2!*h%>Be&a2#N8Uv$px9j4>c^Y((kgE&PPx*Lv;OTM9=zZ9 z#rvy0_2W(BmLdBfX`F63FF64(QCn>{t)Lmvs!T2&uYP8zmbc? zbsoWWSj~7`r%BEDNqWtAr=?~*_OqJt*xw`JofYFWPI*Pdd+z7xQWBS_}tj{@9?f_ zt|u(#DCFr`NV@6-Zv(n&a5_m(QFDJdbduHJCmqH2TzqFW_=SFIHTcQ?A-mg2cCcqw zf9xuiryoC_d@C^3Q24|LbzU38`{XOJe}TzYvbYBMT-0y78NRg%4~Bd$_CJRG+*h(3 z;J=Y<8U7UV)apTtu}NFICB~+!c5Gspz9oLV@CU*l=;Nxus#iC*x>Cn~t6o~?q6R;zu>_+&yeW=gjnA+;pF*{;ak7O!o=Ell9}h1*^A7 z`&(Nd-tYzBkT`8!l{a*5{0P|{K6^LkI49{f;!)2W^2x?HtMTIT8TOui+a$e4JU+wT zvxiO5YsBNTpS_Fgj;ESWl8{4K<|3gM=dHhWg{<-^vv_@>u1vubFY#bx>z~@^z zHUMAb)vq(xq!KRx*ub3U=l0F<0mp+@oM6R)RzJ&l;U-KduB+wQgd93n4#E6Jxb-=R z-|)i;Vduxk!fyZ{Fu$R5PKm2GzPxd+pkAxZ6^3+a^atDzu(^TB{;8&_RVS^sZnbT3 zzRquQYO&1r%7^J}OWgQoFfIMQy*1+fxP`iI5e7Bu(HZ;9-<-wPtR96pHXC{*&d&yy z!w-wMRz5-XC`9=N(W8*D5b+=lJ>uAg^Wx!iCRZ$C_=aH`(yQxrEF<}2nIYXZ-<1r^ z!m$j~Iq>T_cBx5hid_iD7#!6=Ja>}~S@Dbbo#x9fw_KPGhsuv{ra$RA?X3^bbeYz# z^6^&ZThn-ZU3KGlTjSEkTNPvJDfq@(?jKFM>UCh8U73?G7dFDNh;^L3+K;ooZHXIi z>Qi>UKmXbfV^NKGqpx@jMei#bziQ$_?3W;Z(&(3~^mx#I3G%!_`o(XY2q!PTqxtb_ zi2V4P5cu(H^!OcNbka*UUV$q=cxeEv4UW6u$_uW%;C5?y18ctNYUg%aF@WH6qYhs? 
z*2WN5zGkrhynx3zg}ept=rysn>)&SWjIj6c+B?Jkf!1;|3mOPHNCU) z=XmVH%2(!jMj=nJ*7H)VN3`mtRUZX^-c(y-IAQC44`_?ypbYn8>CdQDzd z-UFAHnAd9Kfi=ItYdTnSsA8Qm>h+t}-CU_S_Hy}I!Jogn`SVt;e7cD%uil~aUqa-{ zuhDZxJZ^8q>;-q;1MaU;kA(9P_#B60d8==-`f}^oC)N;XbxSUl9N3)wD&@{Iep0A* zuIJ^}=-iYR?L0{S6Z$PU^8xruV(q~D*{HE|+160BXy;+lXSz>-H8Otr|8R2Vt=#x6 zCT_g!gui`zVLNex@q@qZIM zAb9ZwR-b0|VOF0N?(>^d}IWH0w`@T|E|>zc53x7uN1j3pVvwF?=+rnG4b4o^wMikSa|M+ zZU!1h1@GO%`m}?1?=(Kq8b0&71EW6zjZ5bK2r|wXwsMso5859=o|m2PzYf7~z8em{ zdxzD3Z_vh^)u>O=Z2k8?1?Nr@hmx(q!uoX1X;*@A+TnMHrVO2zjP)E;ri! z#lC-F-&e$oMa@1|qtAsJeaH{~k=N+M_e1#JfbR^4Lsne&h78dUBtxtG7_Nuhd4F)( z8Si$}`-01j#q%L=E^C9MjeT6kMm=-Lk;|6VGskx>Q^t80^3>{?3wai>n3>Q|&xKan zyI*~HVf$MBV#rbW(hodeh_T7DP>fCEoy(*GG1l+%ywePJVpzW=e!K9m!oQm9!wdf^ z{Hq>s?JWM4@h(kz$@uN2xv0T!H_b!YniVr<;`QN8c(kRt=K@*k#@AW~{Ev9m1gy_A zavkP(#=)Q=&pNW`(h{1>$jA?xaVqHAG*wHbq z%jJhJDgWJshq=G9j)UA^Sz|m#{diNKP(0BZV`TN?O>;QLooyzzNI3rAzJ7d0V~wf~ ztsigde`~*6`_bBO90zdy;5;O23|m}R%cBT6v`r3dULPt4z8(5Z_X)s__2a$i+*r@X zyGuPQpDp!l>axePakVs__ILXF@$}Ac)Q~r7R=Q9>9yR0*?z0p1<55E%c{r_pybJZ? zQ9~a2wdU)`i|Z2SF8c-{hjz=MMSC~r`r9GzAolLZ4G1R}o_MO;La84wb+Y>LCckRM zf!1-&ntSFrf#U;?Gp#s5Tvr<(g&f)@hhTmq+PciQ^#&5X^49)&oTi}RbumO=F>MEM5MqmZ#sr>`Gx zvLWmEYH(%)@$SX%jGE`6;kkgH>kMapOuq&ib|`9=%B( z?0kRTwjajokoxhaam$c<5I=gQ9}n6uL7q2Azxa(4;o#Hry7ByfNAc^`5c%~rA@J*Y z9hxTVX=GLbj8=F|AR%*t=z%289er-~RPpkzAy0#{y`tl|W zBG%rBwM>G4zen&xI~hNe$HF=ro?>$Y!M``w%L@D|@T*@vbg@>n)2|h^`c>=vRPgV` zn*FcEntf3N0q>&PD&jI77MHnPepvADuWtUmU;TL#*Zz{$pEt#tth^b)z1Qjs2{~L| zIj}kXRm#1;WcBAw`DEC6_DzCwpDpCjxz=E`Xy-xr0Ilsux=(;LHh%npaB}Yb>d%|F z^_`~vya@|f^LtioAo%s#m?-3MY2^^iM{pho=WSTIYpvDiAU?v6`xQ=p{UxbCZ?bW# zP4k=%m!(yP;`}R*S0DEJ^UdH$Lw`aXr^Wf5pHJVZ>d%{em(`ye99Y4hZ&IW8Qp<&n z&z*!n-=X#AO}c26v31TBF}vllxGtG?^58=DZLRN;Ox zxsJlMi7`rLPj?;pg!2CGx`h9fvYvVL>9@y3N(UUW^h_$=&o-u4#=^>;Qr#Svly$DF zyDrr!)y}=xkX2!i9p_l=UPI?q4VLO0C4)*Uai4*>Pto!z=kLOOeD+@Ff8E8QRK~gM z;?b6hRCis~t!Ufg*g~KE2j>SSIJ9%R#+IHqbf>TKdaW(q3%LG`?mC;?vot$Gs;#_U zs+((*vYr|>s_xh;BYkPO?@~Nh<%Qg`L5ror&A8U|gji+ZVwpYTNZeaKDm^59t3Mf)mmmFfoIxhpO-+!` zclw6l9=&@UBtN_7=@cZ4HdI$%RP?em{j2B1GH<-BLFl^vy@*&U_F?8!wA$mygU zsq9N|D(^)|2UbN&1&1W5_L$vKl<;Lk)}JG#hvOO=vZB$(PtNB)`>K7)`JR#SxIW1> zp2>9pWd^!>+ECl5lKD)$al(jwj7UnIpUd-_QQF?G4megzpt z;=actq=H5Eu1bTHP+k62Lss<@uEM#si_3P*nOi?{_S|}#V|vOr9WzogZo5Av0{{Et z`}6@bQnn{7eQ^7n!S$K52iG4$f0aT%Qn9vTnj~fQMmtk+?YP|N!gWb;O0_LcQSkqe zy>~So>$iN|g(K+Okr9rG_hYNJcP}cJdn-|^+~f2A`M|u_{yB4Aj=i_i1>c)##D@$Em1C;&>Kd}foqVFKT#i#B zy1Bh0(dPRd_ou8t`={FDlyAD+-i^1-NV!&$F4FN)I5q?4Y{WC|r2P@n#e1W1-E``M z-qj^|u0J$1^dvhNCgt57gXcO2oh92jF~VI}3*9FBsYgGKu*Fsv9c#!c_$s$-y=Da)jfrNA>Ho}-9HN5|ID7mbYGPtlvN3d z%B#@*vC#cLL-%1DMSIUQWaa9*ABDERV(5Ns>8r5CO6dQ9zVIvhJ$lKqGAkmsurd>U zg?oQ3<2={^+5D?}2PhJ3U`$es(xb0b@X1$X1HG9I?9y%EkQ7(F0XFbHY+xX4z&?9+ z{mwbFgWA9{*nn;J!1^6?2G;*sw}C@(Qf>bTDJv#7sxa9WqZA}2E0y^FH2m&*dvbNZ z<#%1^Z^v;w57+-%dvwtTImV*@F)ixuyfQM&%nnkULO z$jN?oFwLH|I=zAFdCSe@(U_6|NadCpL{-N={%KqcL7%vNo z?b!#fSu9Vkh=%UUu}THTIp#kxzVR`w0^^Kh@im2&&{-NoDlwipa<9R7=1}Nfvl`OZ zDfEP$_icrZTy7`A#2_a)n|h`Xk= z0`~DGjT2<&_V^;$Qa+8z?0K1;_b$q|JF2UCNLlNcoyS&{!~c+-_rmiJz<(MO2kUme z8Fqd{LY(rP#m)!oc24$f?-HZDzociAacnDWi){R`{TkiQyBl`C75<-|`GjuU?_vC+ z{@$}BzOWkdp)uQ78zrKyO@0EKqd)^0I`2e4{ z;GP#?&nN7?OHaU_KZiZxAn4qN9n20emrJ9a! 
z&zsm}Jo_B)#C%9jQI27a5FgJx_7vp{d=ECBokGpBe_=e^r;le}+j|u?*m_mxz@GQP zZ$5;3AMM_BJR5IMR95!V#7P6D2TIFa%Rs`9A{yh%gpN9Td*kg1I82d~maHm6#35Wqn-)BL`Du5%IUBUlX z022~Et-x4Xg)!8M3Ac5LEj@>JR1C&3;8W6}33v}F;kH0yWF@dA;lj!#QehGFGZSMO z!?uf?+EO4JqUYw=kj6xY5Ai%4A6C$~1{)%L_>_(hYkIotsyC1J(b!2?w-(PZ2;<^7 zeBX-uldY++`ymMl%3b(B59bgrr17&67sfWhh0@~WLirKkLf9IOjs0LtM_@}w^|5iE zY{%H?u2W%igbUZhmX6xvifYa_WJN;vp44qAIshK*DND*2DZX$3@Zi^SeCgA$p@FcW zJs4};%!c51U_*bySjcT?lWs$Qh7F~`j~2m(#xfh4Q-AI3IYDjcMP@^T>if?gRIixE zLOaI7*xbm%cM@ZjDk)Ye!v9pcPxYtE#+MlR`{DoTy-D0|CfQ?^E)>fO zh68^ljUS6~@Yfi32?tieZVuUdRTt~yAYK1HjD!2-K1xK7ruOqadmrVSy>&cj10KY2 ztNmOXC0!&;^6N~JeXM~88|>+W?*~%fd!;yu)^>Ztpcx)@_*I(YQxrkb^KF z{P#iN2Os`_W$#(^6+XK#*6r;rWu2az?yHAAox?aWqN`Lp2jj|nc6VJ2#<&Zo({cZv zMT0O#kA)77m3uvtjq!N^#^-UswKrnSor8NQOJWP}1wLQYHCFjGwDs`Wya>v4~2 zjN$Bhn1@NQe;MtedDf>o{yYKIE{Ht%}L;3 zTd%4dJTvc;81x74i*kF{qLBLoGK+=$S3&;D;;x0`(C@1t|GkOv%7gG_6CwNSb@|iS zO?crX^+SwQFhcHHT@3j{hnW1GxYkg}KL>tSg8Uy&N>}nzzr3V- zVLy`poN4tDv!*pK|2h5Y?X&vT)BQ;PpV||Pj=&#|#P3%?{`X_Pu+bv_8TJTeMZB!6 z)aAbd@~3mG@?Qb@6Hds0{BxzQhWtra9l*1Moez`!=(u*EEE|}%5zpQQJ0AmkzY}&p z8umX5cy=W4Y$oumjp5neur0FTSX}R^CB0$44&_z+Km3r}wi~(NFhJ9TiT!d%L zw#%%4M0`R=Nj$bQ-o@58FEbP`Zz7;+Z8eg^Mtc|25`>3ox%BKj4j%vPf6_ zY!!BT820%o^n>P#M{pg@R{I3BRR??qjen`St&;DohF+FpTze4reXwi1G6en2btDSy zS%tBV>qoX6Q!QcsM0)W(_}o0qJxDJKFyA1(AV1ou7rm>~;A2A7i)mz+x?a$I8ucQ^ z(2JD;^kPNp^x^>NMT2awo~qj=(+kLj`rxY%(|wJ4(aX?_IK=r~=x^#T^4U)%8 zKgBAsXv28e?+|R%KLarrP$BnKu-{bKRh@?ZNyP7x&_6fGNs1KZ_Ws7?_Lo z&p!M=By0V1U~%`tv@aHy9hsd|A2BB42#_^v)h!6_@?twI>&0aG#~AY_<%%yQdjPX{B4J^YL)A8}v;QHUDcq^VEP`uS@2O{38#asE9+#&H+*tv+e{^0y2 zkGERvI0JUPea@wdw+6Q7R>xcM41waURy%Gf-pcj+ii@|xcG?qfHII=Z-r9zEtHp+1 zWH!{1@z%h0<7{iZ73~icZ?)P_N5osd#=PW;i?_nwLXEfb=er`}tpW77)$vwbBUrrE zs!w5yx1v8njklsd_LIJ`{y?myQ^i{Y$-lMnR$L=kywxiIe~Wl4x785ht!CZmRPk1e zUQj%*)$vwbBUrpOh+bU2cq`d+i1Ak5XO}hJ8lZn#9dE@og2h{{{S!3aYPTcr8Hv1S z6!IR?$bUGH2Z=#GBo=wkIN!w`wt~~h<5J%9EX#XFmzMVjPk?ftNwV)Ca-CeBWAM4+ zTT+@e@*Zj*jo*4J;@6K|$bExyD8wVBJm~Fq@P59f@lCfO2l_tcLy;d@guEGH zQ{oj<{=^k@F;OX+myXp^98*Vy>3!djbqsVi9 zX15m|L%#Z_kLN!7Nu5(jfc0F+@H|C|*AGO5lH%ab`>=^QI#H-tc^fKi-iCd?j zO@BeIlgsG>I9fb6U5!2)E5{l6Y2{Qy)-dE$sNX23qV;qOgFIM>eTcrJeDz%P9p%8{z-Qcy99VpApThCT&r%L-9{SFM ze!Br1%T*%}M)}TtayR89axIjj%0a&+@f0~)L)d>?o0h;>r*X3 z_S3p1D;e>UlG#juQ49>{ciuGIT$1^UaH6J4qI z7v)P=ps$b{Wj;C8X0Kjq>MtWV3O-O{4lGb^bR2xaSonmy;2XxkN8E|r=xF3dM|m6U zb|W`B0CJlJ`>r4k5p4W4xDbpR1lx2h?p?SI`O^20FV*bZq5Qz!OF4{toAait(LX$I zYPNY5_C6-JSK%MPK^g%4sQ`C=GTFZ=kE-cYEb??Q$fd>_c~q`DOh-BA!5G)M{!kv3 zd_ZF!mHPnV;n8ntY>!1Qwfe?PANiWOlxK(TOu%!08{0|nGRWrPvkFcobX6$lwhFqV zvOIflmPbw1ZJzR}waBrjAy+pPIz+#r+#2Z+>6`=|qC7j<{L-#Yr7!Yn{hQGtHnu~D zMj*#WI&>JE&QD?QN1&J2LWhnfAeU~*(~Z(~Xe4xqj+ygwqjVh_2_0g&b?8xFmS6AP z#O6sRERPDi=Q>0jG&65B4tC!YcF+5b`tdFFBlREofeNM{J+=I5*PcoRcoGfBeV;>a zb;#%7is5<+g~sN;!B|N$4}mt)8;SvOc4}c4knHwI{Rtg06|m z(8MIAf0y8Xx*y@OM9E9nr#`C%2do@<;=}O${TAjv`(lDk8ENUWZ|Z$^E&7a(nfvUU zdY@g3KI@DA!kBYF8*e1T?i+T{T9$Z>^$D=!MA&l@?Ai(Y#yXZoU3?e8-93Ww`!sl; zXP`fWrRdTD7}H}GM-~1dK1OjYwiT|52lpHEA<7FnmZIO`TL*ztHx|4K@JMQZi1Ud9 zMVzEqV1z38ftSIV{U=d#}j^cMiCKIQv7=q%7hheGXpXhp@>)jJ2PGFS`ey&w-b= z5Pp3Q^5W|g>3pefGx)G4z&)%P?C_l&2mh2PWj$_-EB#4-sbDgAv1h<{`V#s`dG-%5 zz8?gK=3JLJWi9kEAwkMY#rQN1e5_dAkMza8$K*KbDr2E_)w?!Pc~^76g?l^kG@{^0e{*h9W31LOS)_@Zp$(Zd&g*2Si5 z0w?OFW_;0$(CZh;7eQum&}-r}9s_PX4!u6QHn++8jdyi@-U)sFEZ=ktxb|ILpLar^ ziC=Xb?bs#t!81fM&fedEJ4lAqUv{+hG&tICF2CpeT{_Qagxswv4gTW?i__QmAk)Xe zIj#f`wQ3yk&9Tl1{D?~E0IfL~h`y)w4&;vp!Y}QCk2(nbIR{KS5V(Z=P94T%wHmn1 z-i`5kxGr!^LVnGv2j##gpTe(b>pC=DNNdyuww8%($O#=!L?3+{{ZDNjBs)vv!QFbA zjWcKi+B68C_o7YDVBHd1!-W5v!9zb`k1wr>>s9(8bg1_K#`wPd9cSUGuFle0T?dG> 
zJr`J}95%WSJnRP8=tBpo2^nnB}{ z)QjoBkuH)l9lWgR&FBD)N7JAK4X{(v0XwdL6ngg=#-oiE9WY~=O%@$6V;LHUHlZCW zpaaC`p>;f@186(eBis!i_C5HrN$_bN__m3_C=-BD#(UxO7)Ggtf2mADf5X2Vens+C zf}1-qxoc@9<`lQ#8su*hNsqpp=_5Tl_q$O(!Y4Hte_*#pU!A}+kWV^~G4{{ITl!l= zRs#Ab6?`vRA5@94aNuHB;m^PorZsc)ne=EjY?$Qr1HFpV>I)ed0Pp z2WQK!(ymzFQ?xjya1ZGz{Vr3g^MTt(9OX**$l1^hS}$?No=`d)c2Dcx$hTGgcZP2u zxRd1LVs4BnWIBGMRQFX^S7{ya$vNG(m153v4*2(W;G~1F(;Cc`vY`vKhJo~%;*>{U z8sl?dUEUG+Hu7o2`}q{(lcvkPm|qK|%fwS9pL`m+eCl_1GOSdMd!2k^w2yd+#6LU{ z7h5_6KKJt*fvX^MXClU0?5|}ydX285$whlD=ldknHtiZibeCtl&v)=3(D&u;W4uYOkV+{edPfh5d(6jvv zdNLRIY(0D#+2aK03F$BSGQw#;ho1ay@AV9;{TzDYkJEZHJsBDTPP6GaZN2Wxm>vSF z5l)M+=!O}eMOt*jjL(?9LN^HKua}bzUp5!EPq>Es&XKPhdSZSRj@Zmrid}c=*vtfL zZiHj`fUz0&JTxcczNXX0W-Kwj?a-xV7#Bm1&EWT&bd0nS80jc5zd5eANykVV;lnO_ zYzBSUm``ajHq+RL7)!~=cZ%4IrN5S;zgiWW!SBs@W}~IQ%y>q`W-Rep?gK)L%`pAx zl(89$4iOga*P7T2em}|*n=#w`s4F2hL%x9JVnT||u)e&ku^CIB_0z}vkYh9Wy?N}X z*o?W)%wzv0h|T2xTgPT}KhlQSj3KwHCN^Wy>ldNdFNPAE!SCL+#Afby+@CTMIVQ>l zy?dp^W^_!_hS-du3s-z>#-al>9!(1+HiO@pu?)p#%sODkGM70vWAxQkADhvATPtHT zhJ5&Zsnf+~Ogc)j8CoCH>e!4$Unw>dOke*^Vlzg+JD@jLMQp~TC!`}b9jCPzo5AnQ z_>5vRX5BF3vruC*7iPYLShI|nvkkFlyKffqH7uWnIx3IZMiy?%gq*@bV;QgfbKy7+PbzTe2bMp8Lfv>8I;< zOENyY+2>78wjR57Zx<=+PG_WYIzCo86CbC1i8^M-ktg2vgrl%~Vt3^axc50+t7l?Q zCO_+h&GxUG2jw8V7pPv)enUrEKokM&9S9_^Olt&cAr zzJHRW{`EmwJ&;el#UQ{o8=N;X9{D7vH^4x>$B!lA=oXXy~~KMK~oo=3j?LF5+l z(Uynx_IzP#OR-y04~&u3Lr+Bbio4|vx3;AuB0?=TwWTCNYfFg~t^WUr)6%o6XByt5 z`^~|9(`=4gwvH~@^6bQtEkDxlcUHgO{xOpJaTcC=eA8!Munc*tJgI=5eHZ#O?ThGJ zw&J%1*dD=l8o%E^Qc^z}FRQ!I$4}$F0a7RTmetHtp@qkfDXH5*9YBOC;^txF^XK3d>)E*g& z+BXBS(KAzy9r@e?e@1-Oi|5RhBdf=uUq46xZo&V)a`Euu2nqdY^A*SE4L^xC()BmU z(k*mu5uVeHbNG3byWN6)Y1kKo_Vh)2N+e0$g?`usJ1DVBx1^$;iGsG@1^Xd8rF`I6 z*xqK?&RFEs==le}_RP9i*bsjvvPrr}iIg{-I0FN5-Y&#%!H1=F@CUNd{uBv#-e3j~C1@j|wd;0s4)fPWOwP_OJOURE% zt@Iz>ZdU)15H|l&ZSo(3Ah$Q}cB|X&O;>*jon9K7?tT7&baf#<&*+`*o!%qEyC|`E zcs+Ezc1*hWxI4pJmy_u$yH8U8K3`TpcIFK)86aKUew}o2C;Z7S_>%+hCm+G4KFNtt zcT9pW$%21@PuUNja&ShJ`j6Z4hW{-m$w$7WHGZWOc?9lP-az}->g|2h)YdPp?S#Mk zWQyizN*s~iovtYFficnEVhL^Q7VF*Zj`J4hMXAL%|T{a)wv`;=u#>cRU0%V9123(11}8IlS4netQ7xA68;+o{bc#KPK1HkXTb z#DinE4aa_gZ2`73IOh=jMkZ{K`iT05`r{D%%?r?L>Zco_!_+_IbGqX5CsQPq#-_4K zcz)gIG&~{sxEyE`StOSz6ZZ~9DbAh<398^VU~T;&(@X5 zI%biVuH%@+3(WEc&b=|QtMVrN=33yBo00o{6ZfRgx6o(Yn?B!)&sp%#&x7;uE98&Y zflpPK7|ne1kAPL)c>P!FK6!oJx-VZZG%(9~|r-+oD|E6&RsR-CsK+p=MYj@o^HF9v4GTZ!*a4m&hV zs@op#@b2suDdXh(7jh9m+zJx zw>+6rvSmJS%j>``AAi4O3&t$xFpRN%v)g6kL&l7g3)&0t5=2s$=BG{6KT_Tk@ z;20}*;m;Ndb|GIwe&z$TpL|X={*$kv&)nBsTy!V=$Y}VJQSd7x;a@Vrv&rzm-!P6% z!wwmKN5*~|j@xlw1kR6y--&AUJJFJFZh$!wVVj@B_MN)l;n+qdKCF&yYTw+DK7qfXxsir#WHvXt4sBTjTUiQhzN|}z zcloqT-;eP9{)}|*11TApA7%Q^PI0TXV=}y7&Cm4xVVYZgJGOZEpY8y@q45~wdPy4m z&9(3~z&QJXaSrBaK4>R=&@T9(-S9yNfN?$o#`)WIdBb<)#QXj>D*-+r(f6-m8pcUt zJ}C)4DGBqYB={xL)24H$3D6mu%dFBd4*4W&+s*CfZDwu7*j+54t??1wcioZRVhL?@ zI=s93!~pBWddVk||1o15x*yG%_&vB>&Q2-W@`q_9TY^15VH(0SBnPtfoZ4u14f;LJ zw|dnA0bPof&pfBh-2)6+DXHmaUUr-G+dM3gyN%#y*!k&qKcLe8|pN;PO zyPAfZh*wDV?!o8nP6^K|<9Tg(Ub~O>&%iSiS7T{g{1EsH+z*jop!ed^_&yZ)g<%i) z5W*A(ZnMFMgpOlQfWJgIhWrq}o` z1bj}U;d7K`a1Qw!@-HVa4lKo7k;iA|VNOd}hxqu(@WtdqCIjoxvwW=kpIy@0bss|y zwAjpA!~Y}z=MZ*z4nE}<@F~B9A9^1C=S^JWN5D02!zWP;<|l~3{4adcz4*K^vA6Pn z@JWmCc~Rmu%>TUk`da39z68#3y`kZp&CwDtg|mpp`gpXZH^%>anf>>M-S-9_>8-d> zYn@|~+kr_&0Fxv{X_!QTTvcEL!bka|(-GHkt8ackUEQAO_U`PF?mY->kp2k(ni|B89m1?bt|fr-k|Pyaj5?c>*oN_DG!7G?MjSg}oo-S>~3 zfw9d29oy{c6Q%z1@kAe8n{dthxHdiOkh2=D8H^g@FygEnhHaWYgSowHQO}p!Ot$PM ze4{@vKZophEw0Tm4RE1>Y3Mm;T7Q%dL2jVvrTk)j3{jd$;o+`AP{8)~TZ{mO_R^u9Jn3s_si@aGusj=zb>&ey0k!c!4bzG22-99`NO{ 
zQM{JsLF9jEF62clh;Rq_ovDd#AAL@M{x0~)tv-tR*wR?HcLib%6jy!;^?F~y*!|kr z4DVYwR`r0}cVQm-V_}BxWJad1WYrD6vb=&}ALl(g?327T!%FhH)o+)&*YC{hUcXD~ zQGYP+7sLLN_sd~B@?7=rO1j53$_cY%x(7lRr zuVUP*nC|t|P2Qc)-|Q{MJ&NfbYj5-JF23D+;Jxp757mFyx98|E-``IR_x2HFsnudagAxBF|6C0Jw%~5a2yrvs z-d|2jS2^}6aYP{26zQ$PJZJ&NnZM|Gsr>V3<9Gbr{&?mEz)0llwD^uDbDF~uM&fcH zj6|}xVkG)qf7ldZ)$PElJAqYq0jm-YB%kG}Zp2C_s~T=X?D^um5x}kvVA^=#S10gm zH|ZkBO%oxD!2Yrt<&EqD(|5ZW8xO98SD6ni9{#~~h{fZYgemDWU5ER?*2atISvi&>Ulzy*(tQa} z?Sb8qpFD>)YW{8veAu1vWuxKKM!~m@gul!5!PhY?6)YZI+5(2M;-=b}(*86(uSFQj z94De&gqGV{vyO5W8h%=1__siDqUL(3#k_4B@Y7$A zr{seiBg&xfx?N%Sr zb7B+&S(%dQ{rwN!>RXuKZS0-tt()Ukzj`DcF%&Jo^RX!(_AX+!yAi8BcwdD2S3L){ z3$g3J&5BYfUi1&-!zflfe;4inKgh37*DXf;CQx3BU(=k|qH9uKi^YhL*V=_^(sScI zEG)0}!D-Z2hp#DzuL<-#!P>q6*oxXuwx5HX7XK|jpRmW(h?A?kePQ2EJZQrU|0&6V}Fwv5&Q*V8lICf`<*~B zW-p$Da23T?24S<}D)J$x&{lJN$P4+9e;`|bhWQY}QA@XwPfEl6E72yF+loNk5SYe4 zw-r)+h&>mJKV$uiHJ`KUkQ}kY7Ks>#=FTb6@FR` zUq)|W5VlL(cCb}Wfsk{KJ)EE2vCd$f7z)I8cxqqTvj}N(&N8w+74BdZB_b;ar zAL@qu`ORB@vF2ei5L=@C&%hT^ z{DX2f%MpY9>F7)^#ke>2&hTy>o9X=$KIuGsQq=<*e%k-eP~X2!{ip9A6*mG)tp%>R z*}G%yE#BQbZ}onh_dIabuZHb>{yX0N?|s*MFz?sEQ~xvUuX(>2M)NUrG_X_SefT{B zUHee|O+NFri*fB@)3u9n?P6TJ7}q9Q>^VBZ_YQQ4Ue1j@KP*fAR*SY zK*viw&&cJ$anN5VmxvtQj&4x~ZrVLE+Dm>c?W8|$B0shO?RfNFx4Pi&be31=`7wTN z@?|eDU#8{9Naoz1QGV=0T!Zpsd{`vj{)zWy36kU zFWmPSI2e=h|2VKxw2qa6`Y|0Vnf+LESc&F>pR>4jqS72zBA?ZFl-qa1@N|Zk2Emu* zAWk(MajKG$l5hVo8TmHEC0)o3K}X1!n2(Wa`S(Hk+!y%|HGe&a2Urw!QxIe z@bBbjD1JusvJ?1E7>Vc40%4?J^DV*u5&R$G-w6JX;Qx?MiSJzepRI!bBltf{F&DTB z_&-fBixHdAIm<2O%vS5Mo$(m!J0bsvJzFT)h2r4kXK21h*ri&>F7%n>7s3A#{2#&p z5&WO4l>f6;@P7pVhcL}m#Q(|X@fgAYEn%4}g#W{yN$`IJ|3~nD1pi0ye>yGyCtvV? z1pntM;r|#oiq^FgU$FS{^MACp?d{|Lu;&u|AHn|-{2#&p3B;8a{GY%)=`)@`8*4z~ zP^X)?i?pu88(~*Aqh>o_Kd<8b6ZC$Nhbk78?Z+B3;`+?TyD*5yxe1()jQDtE1+VFY z_uQ~Grr>f>ooQNsaSV0y$v+c6WE0j@48{72<9Js@18SJ8#(C?dS-6Zuj)VX=lWm8|DVpm`46QRC=wO||F zQR0WNT;}&XzjJ8h&fAQ0J zx4HstwzikfAa{$LLJFb@6t9`z~K4OZy=hYR*Cxm&qnn+*x9bI)R{v%$+C(*1W59pigWtRH zd#00aygLNXg<3xcMxgCOAp7-_qdFbe+#ux*PsM$!(B|~VD~7#*?>skj##5rLnF_J? 
z#N2+3HdBQ)RC8cA<4krl!DKg*b6!d?n;DX(*-WW3M4Q2RWHU=)YrEZU?;hyl4`#c4 zqk5&|bCUAk54(N;^@DU@W^#tN1p0c%0JrbfoOIuflQMjVi*r(J+jCOV(x#>Kb!2); zFOR$Cr_?%TrVJgFlj4khIK_$Y#~t%ioEcM7C`RS%Ju@Xac0N0P-0^Tqv}-0ib_(ab z;+DK`&ys!rH{a%a1=rq>tvqdh%AW?Ewhd_yq|kMzyJn=^k9Wpx z!tYk#`Vsg(9ko4I=+|B0nx3)}-|6^t9Jk{;9iy7IFF9kqZ%m5w^~d!_V#`UJ!LHfY zH7%vTV=CT3my?o#YuoU>uVY$DhJMWq*VL3ue5d1maa_iCI@S;SUT`FN$qt|SqR}5w zzl`eTR_{ztSE*mVcb8k8{5|@dp#0Yl-0D43TlPnJCVx}~4z1NEjkEZqa&T;^9WP+5 z#}2&TWDeG!O$Fci!h>$C<4^bfdZF8gHCSPmr{pb~A^ZOKL7VTL$L&qz+8;8U0a>kp z%&7LNxt~a$qaZu#qrT`PvplE1Gc{$|J0#ae9cZr~%{i+1(WUT#@WtLaQ6v+Rhdaz| zhCCu<-?k|>=&s#I?VeK}+&|ajJqahV2F{bH`RG!t@8LfB6xQa)ey;gx@*&On=)K_C zlaDUJm}*aUdlTW0Ke*ZLD~Ulpfw0<-z2x`D;=N9R#*&?uv4rG8;|Z+` zj7@gpoj=-m(wBJjc)!`Wp>CfI&rCM%P9s~^Y&QZjr1z%K`zDveCy<>_hYk02<)nNQ zw!aC-B3VjS{rzCrc{AD^6Te7ZhT8=tDNE{Bg#)OUn|GB7r6 zvW!nE+Dzk93C6Q@=%v{&wcmc|{cMmc-bCupj04=$h<$*w>NRuO)FR??gU#8)264`vT95pw{ z7rcP+pRnJataOI`!aepEW!aj@u>E5c#(uQBJw7q#GkuPXF_f@fxW`b|zE6Yu*jV^j z!f;7p#&GdBr~3%Ubqt2twOLzH?3v`xfd< zej1;ve#;hz_qXS&$ACu->=v(Kw~ab>ONBom?AA9H^_RP(<2OkP#m$bp>2sn|J0@2> zJ;}|kb#_XwdM*cXCq3S^39-Fb5Knt;3}SneB2~l?RKyTiY>(PK8u2J_!``_Zo`Y4HU0)B3_sCWi-1M$7gg6^=#v_@ZY`) zpCRkGtQvCwHdoN&a$~WMb1v}1b%DiNHbZlW z7l56Hz+b!|9~#CnS}JVH3(P~ja60x&w#sTzZ^r%UI?m% z>@(umCLJI0jYd4C>3CuQ9gjD3dzrU|39(!ec zc<>n69N{r;w*$~Gk%+}bBNjIR{gRAWTvx>6s86~h7T1$vaV_`{ip6RE!*2MGJ9Ph% z2kb@nB)|JB zZg=|*cR=h+MLo>$;!`_|@euAZd_3i0bbY4S89jS3)gRFD6?}Pz;H&1wib;B03^^w@ zRt!Y{&{)A^V>~Wq9w#nEY%DOoYQA14jXQCOiJ9?L9L5{u%L?KNV?{sl`hz#c7q$jp ztv1ER$j=Z)IfhzmW_(51iZGTRzIp~Y39&F5e~xz3Fw!xKIZ^HuIl>M3O>?JYUz9tA z?Y4lg;+3<1U80^VZ^R6(!B@R>|5&c$E6SDj)!Vug?XCK)F%FCQZ1Da-$q~ zQ7nwkrS?-@Bw(+t>A+6-uG0QFt;b$lTZ_H+v;%t$?i(KLRbz^UwFP^92Xz*Qq0ZuM zs1u~k7t+0FvNC*M-k0gS5jHa&v9OsFGkrAXn`2?&!e0OTpp5)i(>&-~Gs2SxeKRWx zSSkE@&{xKXKMzVaM|n_ga~*}fMsw_ioF}t?U9#_$@iw&8&e}TXaAUu7UmIn?Z2N)Pe$fG#Z3yZ)5pL@k%ohH9C;4{D zcN*hE0L&&KuSw@oUXw7}pdslUlGiM{PWCl7)^t|v7C5ijJa%i6*9@KawpH-nt~%b^ zRtY$(IZnkT;Js~aC+}^m8_ytkZ-VzmTs^^i6TG+Mh|Qb%!-Dq~F5al%y*-0igyhG| z?!39v!1%C3cyH_-$iXH4lRK*4DKRg7?PPms!^?2;N(G*DZ`d3~}`phTQ_^Wt+!tZQ#A-mm_{6 zcyHj%c0S%)ej56wS>8tYd2b9;5hkN}xVQDWSBB{X?@jRDf~`Fjytme_*Ok12_a=C6 ze64lk+F-$ZYulP?ZB1xkeARsJw6(mq-(hWEOZZCg-hegQ!1L%x-kT5o))Mv#H}9A|3mQJv{)F~u;9J5uhxg)y=i>-X6L+?wF-jw*7kZH|E|2Z zaabcm>q5ukz0+}vBMZl&##ubpVoX3?kO<Zf95_Su(pdDic_thJuC)~>bdA5H4~@mju-vuAJeXBu^^2b{$9&Hg^d zO6%;|seawaVlAk_T7#pk7j0thSaa>XegvU}@g_;@KXz5GKjH6d{Kuy1^_l5?je{Qb z1GzUzuKls=lC-|YPW_?WI_?ccrRH?cs~9ZXg~sC=ty z-X!)>ll0bbUBa`HovKdC)4=~<@J&tNH2|xr=WOV)hIG=PNiB5?FLT$I`~Ai& z_JS+)rx|NlmzhVI8#b+3UkgpjcviNnX1z_j1=ifSyz@QkQ07lJzDK=g5zE=fvL~6n zW^)@p_lavLr3}(025HaYocP(9NrsfAjJzeyS-Xt&KFU={I|a#0?$wgdEXqOpQzU(R zOkK{~QoLvHYv%NOEVJ5Q+FtVOsr@Hd?Ju;wo3>v|xumTRlCNOkN-%H_bj^h3LVKaN zlg^Fr((ZkC)tc?zcT;V=-Q}L$?sENJ?fx=t=)|D3`%c={iA8Du#oSB8Sp%z-3Z9%yO12yl0fpqF17)a;ci^D*Y$(Jz@V?nf^AUYis^;V z_%7Zy=+uJI!^y#O16{$WDG^w@PtT0i6d6l@OG5&mAOsO;~yCrO?Aom)j^3~_gwI`?v>ipu_OyZWhfir9nA zQ*oL4!7=L_`+fUedGEKJ0lssXIw$sr*dDLz{TjNk9SYBM&HE{OuPrylmQ|HL}JDVBBWl-XzLb!qF>mcFaOuBgG@5j$sQn#UN2-dlv; z6J5W0)9Ur2gJ$u}X@lHEo8{AHR}p6u%e%HcQcD{J(2e`ppCZCM+OMI3NpsrbziA7p zUnX=H{aj-`-^DXI=STGTM($mysUQ8C`pUb4JxN2}5xPklJ84VVH&<+)p1q|s|BYssp}f*5b%4AS=d#k+@p@qQs%&}HS2$st?F#sO7JkZDyviS z4QlF~xgQ64w6mYF^jT$kAlH~*m^vhgcACwP_rZ_v$4+}cJlGGLMtHCv zHjT}La?j>Lxwd)G_b&Y8hsMHlcQJ?ie);s>c<^V?`Mr2>0qsdY!juI;xwd3Mwp@!W z09QvzN6J&oJ;9pDjeNP5ci~IfKT)19iBMWwZ`PtBtz;P zDx$q`N(fS`H5_i=UQ5<-JTC}?lL@k47o5LzMKOVuS8DeAgAO# zKYb~wJA06K_~2Pd<2C4ivAp`8db9XKUhUPJy?V1(ZwAjj3lDz8dh^%&@4b*0d-Z15 zyk5Q8t2h6WdQ)uhGLM>1T?0XUtn}$$ENb9UvSIfBxt|WKSNWv2KkUOC#HJoJs 
zn;%$v5xUt|CzE~uPH26MEKQ3vvDbs_`J`jpkEG9X2jK=S*|;Gs#h9L!Y}}~n#y8SZ z8Hd)53Y8Lha^j6=He+Y%dCl48q6#GN=z3Yi?V=cOk zzi7HClWuu`HM(i&fZEVbWg+X+KTc5 z+Ny-MD!8^}_@Sz-vwK!M?&WfiKHv%Vme`No9mZZPrmX~%0sfb1X(lF%8S8eo-Fufo zyToNsk;F25{Q2(AqP6hcQ}ABu%dcEozJtG2 z?}6`x$1J`x`H*&6cDc!SY9-`_st5=dZmJ-6d!1pufPw z^LjKd=;4R6k${C}mn@@ZEiuO3EXRzOF8g_=e!S!fG`KW(! z*bCSZNh`9>j$Y@uN82R1)H!GRqj<-e$A}c|Y^LmgMx8`I$XvNyUui?BZ|;YxFIc+h z`f7i*`u=r!^Fq1(fp|0bPLnT9{sa?$J>Gmjb$Jiod>?hOvFf(4`WNF(cuDxu>Num@i2V zotm2xI=3`6bZ(Dk`tHcw)hS(T5F9*%jDDTIV(GBWMKgyzS@b5laO|*6MZRGhi{8M- zccX7L`T#R>77UlYivJV&{jDWx?P*t1U5ktTg5Q`H?b21WN$=mFA?IC$?8`$%+T~k| z4(ojzM$s3OK3)nTm7uh|m+x~8FrHw3Q9}>P{_#I1%?AkoO1jtSNp-oh53%P)GtC8; zKU0){c|%dNKA>R)X^kLl-H~=Iy@AHdu0dv+Iix9gKezOq>l|B07L-TE<(EUJI_V$2 zdFy{c8!faL-Q*?&T9&U1&<|cMb9cuo)|GNy&0hCe>{T1#tU&gseeo{(s1w+ymOf0K z^t0tm&wDtxFq6G_>)5|OnezZ{z$TEnwn@;4d9@Bddsmq2DotAVX*fkc_$YL4N(zP# zrR9g;;GCJKIQR0q!}G&GqL2Rl8-n5gTNJzg;TMD9|NO`Nu9FQ#m+$G41{9#{%^vstr-8d6@<+M zJ3Vw{JUvBvZcVFu8eP1RGXR9X5pX(|HkwV_)KDkCoXtQRwNcI>d+!(c^TTtoFJ9>% z48MB$d74%x|6M%Y%H+IwIV;J}E@vhA%_2XUC#})^hB;4)Eijw2m}DGdHhxZ$k9o!> zILO!B*94QHi@#@j1W0d}QrCzdS~1~#J*&2Dl(Rgj(`1DdzYIf`Tpkj;$5*80-4j#?z6tM)VLo5@r1@K^p*QYR(L*b> z&?`O_^-WUq`cnQ}-f5YvqW{AAxYbEXh6j6no=R;zge|@L?fl2T1z$C|HDeYwAwGC@ ztGqpyh5At%l!P%7|&#mOE8+`)kWy`~%^ue1G)wS@nYgNcUUWL}IQk^IDp$)5Wwey2s5^at(K25}Bb=%5_AjP|-0A&|lbgMvag^uhSF6zOVOr>qoNu_FeLv^x z!y*fYuRWz_{+2g}p z9%Hc@?>yMeC% zoWt@9w+j6+TMNC+Sxoz`)k7P2Kgd}W-L}C$Lu;Y&QevTTVw+S1qq*RwWhHSEabg>N zgSL?82C+O(Y_rEmSDrskEYB0#@F6fGdG3{Wo+h^P*vnk8;9zeWF=_6_Cr5Cww;wTS z?8V;;)D8|c%c3!9;m@!` z&jz?&%y{iA;`Td((K2*i8P`WLW`vG@J{Wy{b})MS#p%e|smR%a&=W7GUh|fo+0Z9H z7`mHtALdL5hQ^ML@u&7u*SrHk75eXMwa~utuF!K$oHq?NpOW(Q4Mw+OTZsH@dpbYl zxlB#|17(Tu-+h_SXh61ykiC5_qf9QZaV#Skt25? zoHuwt_Oj>_e+TTg2>wdC*K^W?n@Pz-R zLgOA*ozfd^=)7V6ezD-~>nLg07+!H)y zKX9;6b#5$DofDUA;klyRIo!+PUJmzEcFu6MEoZnq%g47fIVt$sKIoV;Ex03RXEtNI z=bsnzT>56&JX7+FZ{&RW0_?`@%H4kZ-abbeWRKw-o;lOH*#ATRZeh{9c~h zWoy#M43{#B?-cU8!$C(fiUQimMc@hbSL zCmQ*|yUjD@V7I5VF4cV#&&VCY^91nIQ@VE9o9L$q4x98WGj`L)S=6i1!nQ#ySd>_v z**LfHJ&wMkjd7QW^ARS_OY>R7g{&34XDMH#nYIS^bxp`<$uF1h>}xyEGAqwqzh+*})|k8$ z+~>SBqDZ}zQ)Kgp%C67Hp|-%ngFc-$v4%9fwGG1*#W$r8b!jkR{G?M=+`p5WlU zt_2gtw3A?`7MbY$MxNREIP2FgZ6-29VtL0=Khb+;{SxX`+)cg0R=t!}r%m0|sobiQ zyd!1nE;jLR)xl3}zbE(YG^Jf?;cGWCIY_*LXF0UbL1Jki4Vjk5J5v5)+Gq3DVByHj z%DK}Bo40OYI;$$Wk&wgtn$$()1bkjhyF7^=7~xh~Pfk-izdz@oy?0@n7tJ$wTy)X6diQ?-R=)buUL&OIziWztmkb+bpKn#b(G(+BIbX zYa(r$nR*+0RCMzK>Sf=z>m|68y4l#1dW}`V?K#S2ELLiJiApjq_P^PV4!xZCy*zi) zR&-#{+xcU&UbLlIFJjL_!N+skA6l1%&YeviWKMD%Hr`zX`9>$W2pZw`{uSXzF0Tmh zd|_14;TJ|1-QIuqwnr}Cy=^!DoB2O0U*;R#kw$RU51F4u8Dt!N26bOeko1bB{Ruhl z5yMSg^s@^xZAwt>GkJ-1=3^vAd- z_H`QPbCxmZJf2XMylcurLhyy0B6lCw=kiS%dCcHif z%vE06#cSkG5AJXg?6NZc=CF0{{5KOT*Z@LP4cZA!cR^FToyE2vfxTBkxZ{E1qG^PZ z2a3}R2&?{d$C3j4QwmAiIOAaqmj^2-^PvqWWHIXnfO zI@;8$tRw0tdPj8xB^LVk!AB9fT_N%fyL1$Is>~ieTy*TJonn8F8s3I%8!>1U_!==> zC+y4}IlPMWWE_9Rpq$~s%oX!{zx#O5rGz$Z?!Z?R-8uo?`a#=8(#b*I46WTv0~c)_p{OA4;d z8J^GgBPxUbg=>TUsoAb8r~N7W%G+PaNgq|Y!GHVO4SxAfbJ!rZ9Cdzg%ky?qrgoRs z3esBT?>?Uo_e{!HY{}R;(B$3nNUV#{Ic(W+8!bD|X}>KnX`Hr1CG{f=TT+Z2m-DA= z^inq0V#lp8ZMb~sBR1R|vEfYJojttRvfrv4v=bZW2>m77eiPeQ-WU5Ohqm9O)XzB2 z-B@VdE8yOf*1ZzzUJ3U$Tla3a?ych9Q`SAxUbE~uvGwESsa)lkXNl!mz zW19M**kIzLmX*yp=%i=Tir8~<*}B|iN%f0^s?K1}QEW5u6%1D8F%QhS@j0=6L>@NP z0(g84nD8i1W2qZ|0ZnapW21>3wTUv!7F&-ztDvXyT$txn+C%cxi0wSHlah+;d?n9< z+KRAXwL~ymdp`es(l+xaU1$D^u_onaOifciv&$iECN`%^RzJ)d#u{rdZr5FGVJWvk z9SR9vqz|<0b=Jl(=+LVKf-+Iw2QUkZps+Ip3c+s$Z9ZL%C)R%sAl}zOS{dW-DWOx z)sLg?G-z2!`;~M3DDer#>ld2z%z>UcOP++D;(s0%-$U9|Ff*SvtvcN`@Be4qF|iyu 
zhzDnFj5m!G^bsr&Q5=PgO(LpAi5%Suy7h-)C(huva711qtRPWg*Fsk!Mz!T+!wAW3UWX2 z!kD71+`riO8GAQ*J}oo&TFtovGhdNiBOW|hIF&Gxd^bOckFM2kNwfOEBdQMir&b01 zBhy@2Q>!-k?dKwo7%MW{@CtA&y3nq#UB@)~=2AcD!(N!q3jf9aH_MMK@4j3s-chbi zU|ssuEn|YTX+CXQK$}jbO{dYO(`nNiXww^M(;2ksOxo1MfH_|OLGdsL9GLpT!h_I6 z@Nf`3#ORb9$H7kMG&*pwa3*2X3jq@cyI&YxiJXgm2Pw z7#kBBi@uDt(LsNHlAdM1cL@8)ZhMzo%Q{rK!k-U6j|2lU9wT);U$0pEx1$SUX+3YI z^?t4;zhe0G!m`A^f48z|=(US&b9?NiYma`Qb)>ueEIfYZPA_vgKIU}NnA`E=uah1+ zw^FZr1HOGzPiZ)-CpR4GtA`FVck0H^PkaG?!aQ0_#fmSub+_T(cmKMUkt+J?I2G-p z{hJ1=Xpr_jJ(734%5a~{tLw`jO>SKk?6# z`=?3kb<*5`Z^@gtGKWQa&A*!(+WH>p9ny!9R{smqdgH(G*T64lHa;^$n96EnzVKG& z+Gc1&>fKtu#@3zpdS{rJ#jtfS|5C_mrtvWt6(4iGSi6-_hX(qVPpUU(}9Mtd%|F7UZ zsrQ+yg3;TiG6&6^d#hd$ax(@lWor2h^pb1xJvAnXZ-$Dr^;Pq#3seVkJnD8;exc_7 zR<9qRhBO}J-b-A6<@E9w=R>=nt{mKn?3#B$S&Q-Sfd(@x`q#Hn_gg&p-Z;wq*6UN5 z?`CdMACgeszLd9r!x847rOd~!VowpueL>pLA+++#op z{~6LeNL!7=N5`p44aT2j?L<4}J+WtcDD25Fj#G!@%hKz^o^<2UGZinMfNmZ595v~K zDEFWSA8oV(n{pQG1;l45z`X1U{1`-ky9t@D!K@wV7x|UhU+ibBaCsZ&GiT9Ae`^c# zv0iMSRN6`>yu$p$>!kfE_SG4^Ps5v9s`2NP;P(EmRO6>#3(pq{#J=o zjEC=5fn)poINVZqw6@=NW(dxi`4|Xtt`mk21-;R)z9LRwD0p-c8}VzNC>v zdIL!NDESB-r7lOAM}Gz0dzHRu;{VUM`x(-v|LN&(>~sasJxskTlhn@^uvQ`L8E7c- z$#Jt*Li9EE#vyF(v8TI!m`vUJ65eq4HU5+OtoTw@?(WjjKl$H;`@E{|Ny@r~GFP~~ zk;BNSZP*a6;8U@Ibh43kwEg#;Jl#xNj-X8s`(kY>_Sw0mMaY#o$d^Lo%xvV%Ear@7 zhFV9?C`S&NdZV5aL4&fW%lt*vek`k(bFJ})_^M*n9y{}sG26?v^-<3gN zw3T+uMb?N+je_@F-vDDc_j3s?S;zzU@2xUd$O9(6Bk}^8Uxyt2Fa67%eJo&1lHBUu7$cXeGtX^NL4{OZgUO%p7YtmVBi}`!R;<@KO=dF3fwSE1;l672fv9-EIFJ1`0dP>9>(TA0-vQY zPV$RE`GNmn?a$YWf`Nf9mvMB67Wf_i-zZZ2*NvlJQ~~B#4}AB9yg&gM-@^EG2|@6g zX7&$UCib7=n`FM%`R4n)x10a{G&NcFRMEX@hJ2f8@uTp{OZd7b!y~H*$u6~h8vJxe zCgmkv4O?DuADy&ad}01eAKZBXA0Pu)BY)eF+qWRM+mLIw636wyV>g7l(+8ppj(`_a zCZZ2TCeEO}`a{=!C%fiVR}83MWbs>|Di{qQ$Je-q8DBv*iX5MX&bdwW82Ki1oeS?p z(GO8DkMAq;ynli`AA&p?5YI0=Cd<$B1NCU7>~UR28d3C5cj>%UR)Am2fOz?(>|b`s zieNXg!sIp5f2X{d3*Q&P|8tN7g~)^1$ctIXi<$W1C&lE&9u0Y+v2MY|J38+s@l7(a zg0%}PwYt}_T|`!#LRN^J5WA%l86j&Fz77pu#e2?(ogd>EL&<{JNA#iTm<9WPSdk3~ZcAR`agR%20{H2~L zV5}l#*S6BpyT)g(t)AO&?drLUC+r$OXziN0!`2#eUHJP?;{Ip3pM>B4b=<$6`*(eI z*Z41R|BKvTa{aFH_i_EtT&J)u;5Od*N8VAchoeimu8`{y4@cK#*3A6@LGt?z@#};q zGHd7lh#>jDOnjOk`F}g}(Yf0R3+}Cq{(<-{f|TVunUBr=8R5@_E<$Ez_1sAWDccu_ z?<4ffTs`-*1S#VeiT{}(WgL{bX6|(aDa&odOXZp}4$Cy=UQdv+|08h)p|`A`9J->{ zI?4ycur2uR#Y8WDKS>x6|G>N2ShsG?Q+<-k-domB9Eeq@`WYeV#kWjDtaVG`Xa8%s2Wijm43rm*O=ie zZswYGvFfrlb8`rltO>2DQgti*_+RtCgs{RNyZ0pTKE=A(RovS`c&fh&hk5tAmj}bo z^ZzK}yO+oA?dIK`l5StpCF~^K{k*$d(&c{%0k2K--XY$V^|-6Jw}tQu>7L--S0!Ek zA0@m>x^hqEQ=8X!&6B=Mo9a^*#_qRBUm%HhlL@w+Z}&s&euv%9NK9XB%{lMUzP3)Z`yqC}!=qy9#Cl(9y2^Nz&1*J~*}P@*)C@)2%lJcL8^Z(A=j(~S zg8A<7{v^0u0e&|Vo&=YIYxeR9CxN}ugy+Dd;8!sD2>+jUVA8~*2ilAhEG~a-4)dx` zEJheBbC%QYOWXZuyUhio6Vazvqff6zpH4xaI=>TqiSPKk$q#9Fsk`7xF!jFE1~=mw zTf3O=W&U3<;b#oiSk>SI)}NR0lM~7Zh3FKMbm2h7Pi&}{ch-3U8T5I{u<^G z!h{!ic8q5=JbQs>$9T4%`zHviGF_&RPT_(se8%zT0OR3R_{M_q+L^KOz%5Dd90Qy) zI7WZ5ab6?z1^>4EeUtwK6ZoP1vUkd&(g$fD+EaZ!SSaTiX;$pAV%>^`pG8ieAD7#T zU9%NiW-B(#*15gu{B6?5zc zwr_2l_9^by;6EU?XWG&8wx|Cp%l5>_T;x_C_~UAV^a-5uD~w#Tb#_nnvH77N*cU$N zeJ>_I66s>%13va9e@3vvz#M?h~zWpmhs!--ep5mU}_D^w7`a!mBCwrlY zZ8rhi?%mpY4r^rWw(c&Dm$xC9Ci0PTW*`T#BerJH?iru<=Vtrit03)H?rgsv=wZHZ zwOcj1UvyzW^kGP)2_J9cp6I|ZVH@|v4z%q$Kl(}Rx?0+#C%Ym}@2}Gy%oR&}NLzeT z?wj;+*cCmMTi%P?74VE{Q#@$VkpDu%ZTuJ8O>D6V_>2q9oN}yOe2#2>v+Oak!#+;; zOnTrSEco&}@#QG5yi;h|Sc0LR?5tI3F81@neu5t1&sla8zU+x}rxO@^5L!9yCMVs( z=m%RCl;ZnbhK+Wy_EGP57wQAsZv3d}V&V(mGSiM1`{V<*r^%0&TUikQtIzPdK%oLGBL{nJBvZGG8OeLgME&GvKZpZB1L`M%vw2{yw8_5pn-bL_{` zHDU`o+ohI1tjK^keG~NEC*{6L52wDd>6cI+6W?|}HbHMYb+qg=B072>V=LY28#{f6 
zevZ@XV{p%;hg08lQ(k$;);HA--D~UG_pH0EyMpelc-=n+_j;_mqih${9ok@b%&%t6 zkBnpB7xkNq^>r6tDcR%p?AgX`R^!P4MG z@f~l3SBFX&@D&rk$zea=uzq!9>5HvC6`hQ)-@lx$Socf(H{IHRMtrhEuj4x{d%7;g zPfYf%m3C{wx9`~zu{}~sD+bjEeQu+fJh%RTpLs%_N!lCm0}Eh_w4`zdrx#y7e8*%g zw;BFu!AEU_J|MD_^$O6cPF2SCL6`Du*ER8)cO}u%v9W$-6-2U0CZ>z}V+PiIXd@pGE%$Kk@?=8~JIYbbR?T@a4=oNeJoj?XVkaiFZ@_aA*hxRz%g_UvsPD9`-$t$p#S#s9ArtSo}2f`gW?ctg#)J0$J?=B>f# zkA%M1d*+@@^abDWc|x7E_i6Thjj*Tj*K>k_mj7nmqc*^}Pp%td*LTSEFJjjva=j;Z z{h(Yw8@paA*FTM2FPG~d$FA?ZHyDug?*IE(e9z!me0OpzUgC+x z*p*vdssfMq(*i4RcLnMn&;#GNDJigaN^;=Qt5X6sxv7*lxP5iDo3aMCSIrB3u#R!#YpK`trOj?q1B`T@?Sp=s2@mjpus*=} z9N{a3hY0^hXe8Xn^SgPsgLpKtw5g;OJehh;iz~R@L--YI2(}T*@fA+jgWD$(ZY11K zSV?%EP{#B7c=rVHB;qZ+TRlnzT5iF&ccK=kz19_2J4_Eek(LyAWPEbq@hK^Rx_+sF zRqR73dubM{L6cj_`v%G{GGk^x);8jE{fw;FJe&U_YZ&Tgj1LA1sY~s3!9We=eJno~ z*WDb8zxfZbcxC@s{K)08`0+`>K=sus@Ypmh@Xa(=;E_T-U|b6xZb}Y(t6z%gZ~cvl zv9hqv^T2$OL(C^u<7?hQS;l(Z^=|lc9C0;qVP+p=Ek5Q~@qbQQAA`+DjEUaNdVGT? z%luFgWfa)c? z9$1BZJV`l44m0(DpGfdGA8N!ONhG7nZwog^RM74G`+{!C+nC(}Sio4)0e?lyu!P}Cupa)t;} zb}6^Cbw*Szt%Fn8&q247}v6IjXdLw)ydUbx_1Wg$&O{~YBDnoH*&O$brwa0 zu$I|a2PO}*&tVI7&{n@(n6&y}VF7yM=IUT!HhaXUtPU1d4ST9+^RO*N6Nmqx=)kb= z7QH^~>7rbhc7S!xg%iE1Q1;B8K|q%rBq=Z$=m1gif4` zZY;tFeohEoY5IWI(gx3?0U?0@bxo+X{T;1$_x ze8IWYYwYji-*EqkCC^&OOZNGnL0u%z2((Avzqp8UndPILLH0B+WPhX;*ecF63RwGL z^X4(~6S~T~l}T0GL=LoK=g1!49xx(l8>F2^+KZ*m)SdN~z?l8N7qv~1Z)6=i_YQ2J z{Dsg@(v#;rn^G+M&_WAIFBcs3l->j_7`<}Q;?CM@tG3FznZ)!0JYUUz;;iX6aacx~ z1czeJAXn>r;3Dn8lAjn8t8!%>pe<|d^0)=dDux@==A9}wuMTFCz)mt4N&!op=djcr zlJz#&QlV$CX9MiB?q=O`zog(g0~uE4_SMT-ZROCSNcm;I$-0(Xv`}>l`|pi(g>L3- zKiOMV*3V6HrLp!W%{T--!A-{;WY7%OiOHVsjbJJTTUpkX9l_qYQS~)W%?k$J{AzyS zDCu_I9t@nhD|UVMfneZ7QtbM0Fc@eWmLFKdS~NDW2z=GA1^)gHSKuG>^}rVwB?abc z$$?wTQv&xf7B2hnvaiR1{L51V&$5m~{8;YL`WmYkbGyRTm;H778U>Q?%KQ*(d4BXF z*t(ItUnTsUa4OpM!`HDLW=#wJ1df`xxgej!YS` z_~ew4i%(3M1co;z<%OS2Q{m^q@fOa`I0%kkpq!&!8DKcm7{eMmIY+}!`ZpryUnTsU zAn9qG7x9@#f}N*U1v`IB_)o%TZdResepw5N9w=bnVzCkP-%7nk?8>9qPR|p>zU*_k z3Z(ScOx(#H!0e}Ko*g0kX3BXO2DV6=EeEKxc^(EbFMm<{lv!YP!NajVCiWv24^rpx z;BB_YI@iLj$Ii7tR;+?n*fiG^qWj7T<3C%qwF>!Z+iczmRk=ZAUY^hzJdY!9FZl|t zIeTJCCHoJra>#SRH8S^_S=4o`#(D9iHIw?-I@5U;h2nRq*GEm**!}t_S!9;%AUKD z*q#UIT~pV)R`HX70qN`XuAk~%zxq+F3;wX{OnTQ(^{&enKk|>;diM{D>r1hIs@jWB z4}DjsJ(76MTA$c{1=c!BSCW5_Qs(|Yff^)6ZD*kUF*z2U8=lUqY zxkgGoPC5j8eN+M0B?RZ%s8#&mLa^6GJT7LF zyXn8g)=fniLyE6sU_Hu5wQj1GaR{e<+0*(Rdp*ksUZVwV{ub6pRl6+z*blHiDiPZ& z!0cv%y)G&V?2RVaYoZ?E`e`Q~nTu$Z^?9s`+BeGbi?P;3*?nf|KgasbG2AsWC-21F z#jGK6ekXmGcE&ss<%0cR`Y)gK^-z|*Y>(77j0#)iCW3E zy&md5*4LQr#(EJyds&HJ&IHTPEOAZL*O({jb_|3u$8yHl;$`b*-O#7une;1Uos{UL zInK7|roQrS8sl76o5a^h(I)ZrQRspA+Ndh7?R8PIUz_;j)+Vftviaz1_$!>}CB~qH z{_v8rd~iPL>!pOg6QFNT^%2_2Sf$X`4LuXrN!4)gg@pA{%mKvLNR4-{ky;a5BQ>Y+ z{2Hn5#sS;k=Q^qQIH5VGYRkJ=8<{>N)|ziyHc1@!rS4rnW$oA0yDm)DiM-!+bEaIv zUs(9&FS&lowh?T*SJqD1_O5N`KE*xpVHW#TY|^`4J@3ct=S*mG-9G-G;`f=jb}DY8 zcEdXpSf}gc8RvCR{9}(>_O7qGa9w61T}=G2zRJXp=rw!2M}j=+?z?TwbBP?5H5E2K zDnqfgRW>&4byfE{Y{YPO%tq{KZI##>GS4D9FHzU+!-uBZ_D}Ff{##pLCGA)4Y`-4p zVYau`ZlXT}qC>5X;IC3cAePoL=ZR<>QzQ+?iH zmso49gr@d7E8DgUJJwpovEH3s_X%2mCG;`-D7O5teZpBUCbTKEY&MY((!rr-ham%+q2c`_Vg2xnW*0(YsMctZ`ai3#_XEJ^;vOyrW-5@|GyXhY<=5PeLgMEO?`y# zcwB$=z>ju)&G!@9NoBai3xc;)~n@}GU|BimOZ5N0i?ge%LXYGj{Q0~-WHch*;0Yrz{d43GmZ*^A|lScTI z#dTOW<&}4A9cJn;YwfhH)8}+Y59sm^o5)EITStCO*Ll5z9**)}Ko9TO;ng2h$HwDi&dKjCQl; zS!G<*Io4Ne`Gl4C;1hN~zDnk}NUsmrJN5>be#!^+0W;ghOS@R(sd0X?e8cu7%)#D| ze^b02dVRs-duDg%3ue-mHR0d1#%ANRb@+&-%|E}7w%>^Ph?T#Gk67*d^bw1XB?`~5 zU#Vk0`J(0@ZNHY@{j3hV*=7#=Q0$R+*oc;2m_5%f>jq5w-f0{5 z`hgivKQNQ7PW#TLt7YG{JLZxvuNb9(R#i|d#3z5o|xpE>*tdcSj=`|?rK#gupK-z|8Y 
z@E7(EGjZ#*dn~^g+wR#GIX_l6CpR`$m*_XQS;2)N_Cs>Zd zH*DXvvGFE*Pq5f{hP4;dUuh38Ig`L1$Lihxt7fk7+>Z#~&U|$4cEWcoJ?3HWaC^P8 zeLi<5duR-1uH`K8<-~u@{lDUPOz{7*_XKOV_5|zB_sh71zF)Hci9K#_$G!evs;;N~ zzvAuY@c**+1Dj*{e%a&GiTi;$$3uJf|B^F=S7!{$T$91rhq>Ruhw}ZBu~g=~WbLT! zpCo&Ng%kb0>~SaYCF=G2dT+lkp)2FAPP(qjbeTSna_+-jj=jPBYu@Sil{o&l@BR9G zImWs7;qzt7CW+(oxe5F2xUB#0cOR(UbgJiauKHa2oM79(<7AE30;(P1~=JYj} zIYe8A#@jVv{CTjC@`lCG@e!Y%KbiI=$Dg ztNhsHg7V{eQ_D}@IIaA(xzj_ZI4AVY`8S3-@0<}jbN9^9SW6}o9!b!`!Q^t{V3xrHimoszdpJCU-tM~?fX3YF>(ogzWfd>*>i% zf7$bR()TmZ_UU1tudi9-T3@?BhFE70_MZKC$$Ylrc{<}c8i4AX;#WANV?ea;v|Bkuw9_l0Sjdz@lRo%_mj$*qx=fC5$ z>&|Zb8`;N8&c<^3ZM9E1zh76Py(R0?y7SwL+gH|{`UM!ok1Otnaglnq_qz-Ah3z;0 zQPaib!H>v)OYD|o4nM8(-0uCf;$vprU`~8AFVIi6e^3JcZ2P1qdVX4-o9%ar{I_fy zBHV+Ymh^#pvK{=4Lpa;TzK+vBL0^7S?!QAv*?3E+Poj=;p5JKe`F6`sYYrH;{j}mV z?M}yk%J<|Fo!u=hdeYf8?QMVS-hH@U?%JatXdUS;KYO<)bmmTPsB^wA^yaN;p;L4H zp>s>qL+4iNb+79w4X5$~Z6B-VjXhiNVvABni%&(vu3*%i`FF$9Sqr^2r*gYznXkTWOfWjj<1yMu z*R!l|{S#iU`*IIC-;@=M+Ij3l&WA3q+@71P4!AY7CX4lwxq}P7-qu$|$6|M!_64I^ zLDez)T-Ur7-80+0Ecm#*=iU`)^pejrE5F?Nb&uDGj0{HWy>6o=H5g5!9PQ(R(PMcx zgxa`Yy3AW2(Y+0~`P_|5eQCyNJ)@!J^OTvg)Z!PvP#f4dKS_18d|pL;tW(~4Z!nro zKAWgZ>vB~;)#ouC%X`@ky+o$@tAf$c<-zUa`1XX$ z*H}uO_NH}CPE(b?X&(ojNUv>~zy3J1`4we8Fm7x{<;Cxthx=#wduab zw$G?&N2Of7hs(V(cjw8Cep%y*xY_hu7hpn7Kc^XbGTi0KuHCB_}4CpdI(~Q!jwEA0> z+fZM>W51sz#*_a*ALdR$5<2*Y;bu3hBUOIUV<@-DJRO5TDbYls1?Q*3?cCRQpvl3d%^ASl& zMMr6$VLV^iHyED!r;$sh{b|&aKk4bl!$X7N%`a^(dhVsCidObj;r*N?d^B5iY$A{2 z)azN7J5o$OzwhcAC}~NVT#N7Av4?VvaE}cT=wYCN0t<8uj1DyT_pKgT&RW z!)Y0)qJp!}tI9734NvP+LxLy4(9*XmUKD&*=$^>0*dw$;S8WSOqeUMI&D5ITx`sA} zmiy|rg2PtuGX#A6tGB;lV``j*rx=!oz5`25&^Lys!EroE-cC#%Cy&xpu$108*-IS- zOV5C%U($BRz|v~+JU&!KUsyJ@eh&CfQ~jjizAqNly}-zP5*GT|DnkI}kk zdMGg6TR+g{G2Z5#jSt?w<8!_w&i49@iL~YGL5y~Ot6>&7H=}K zDDMasUxk-8X_>~z%=1_*=Kay%u83oC$Z-ZQI6a<{dI>f({5?eX)_HujXV_@ zEwZt+!dKr4ZUVG*EAqLtLgcgCkTR5Z*?zSM9-wGcfGY=r6 z+cvU)^HyYCmPg$?n`@ho-bBWKj`Fpnf-`XHCBHX^2BU&$Tjt*ePnq>3z4^L3vY0WB zR`Q;&d)l&1-Wt+4So5rztPg2yb@0|Ou>4K$KzM6_DX$x$g~;k(E+10A5*hu=&>ZKj{ul7p3$7uJGvKEg@RNsjS_7X%pvPk5?lJhtST>~I@cH1Qq4l%iqcr}P znQ?GkuAT-D4KaCWuf;<}aUM!D!r*$V#X~dTp>lXA3=g%#LmhD*8fY}@X(kVqfq#)R zvpjx-K2TjAJk$*4%i$pxWxERAX@_^7(majr@XnkcdraQ(d;P{1E!}8`cf!|s>Qh{P z;~Ts)0bRXdQTdMl^%Qu z@_zFA^ZcW)%L|8**qz-{R=5B7tn;XCNb+k9T*P5AY6it5;RL~Mr(u~-Su{t}xZ5s&AwI3$im z;el=Nkij?mEqkF9d*Rr@l#s}Hu^WB~2E~?|DRLaS{42`8hBElMeY^)zmu`oaRnTs~>=UPzuJH7M1R}Rd*YEdGBp_O>Bf!mW{AMu!X%=3%0h|b>2o0TuGS@ExH36;f^@2c9)GdaaDnRW7`N~gH?j7uBl1b(O&Fm zA96MgdFw~+rXzndu%k0W=k%ny!&$D-DQxN2v84~ucA~%CT2fupNbKLU(;jbrT7`aV z+0pF*H7|huEb`b#y-UT;LS0v51{-n(+4O+#_k2D9^|Zgc*PW8MM<@eQ;+c|66hhds0IkH>{>DMwAE~6@v8SS6{X#I2vuI(QKXMk0*dgUygY9w% zJZ0smjuQN63Uu9A=ye%~m2Q+Ee-FA`5!cG8XXKp{+CC$X&u|Z1i1bK7$*ww4sqE$~2Yo99mR@%q@xY#)`6$J4AQPhi=k;5c^qV z##6+R$ESzdkhwFkSEO$y^3sj&mVVp}?3Hn?UGu(-9U^Via@F)uF1qqO{stS9^?`=q zC>OgU54$6ZuBv7JQgEGT*&Tw%(~qTuMBkT!!(y=0gpRbaXk$?NVv@F`*HW%dN}o)4 zL3F&>%@J@}#<#f@1MBYs|1Gp_EpfGlw_NnK;7#$rTJW|qHS`#GyBEBni`PvBYojc@ zHCcG80S_YgM0b}EL#G2Lu(u|$|Id7FaAZMe*StJkkL2!Ix&BJQo1PvSz30*O6&Bv4 zFXl!LXM&l@g13h}_1^<;zbD<};B7DQ_ml^`rCWFlG;9TLl@Bi7@pbrD>~6u^cfi}* z;O#i=FL)Ck*w2}Pe-^x9D+}IsJ=Cv0oA%8wTVUdClW8kw?~rl|-kR8J+s0dyOEVUD zm9a@_MzL1|ZwkESpOJTLyfus6impG3KG7X`OSF>}Sj!s!KO=O;117q08I`x$Q&FF8BWiJObts7hUCS>(oWOfmlA_XVT2ecXa9E?w?M7NhHy_p5J37Mr#*d6jZzYXx|G9h^>(F$wan zaK-FnTQ)~uF9>z^1%qH%Wb-We)5fD4JbIChop%<5&XTTR^$M8X`QN~!Xs~jZIR7}Q#NDkG}Hcv;0x(b9@Ymn#N_j^xP0D@OcnVo zve_fDIq$q|rcd#kZRAx)Jb(?1xu|juv@AKj*-8^x;p#3t7q&>BLUk2|wuMHI%Uhk=H-ZQT2lP4n4a; zupsSzjIz7QH-$dw0^d zi|=*Vw3Bv783gAYjB$xwD{{3D_^$KyF>;j0sPchx)dzbv|BSqI9_Q|c4(R#{{fUw5 
z&db$AoKK=ZC^%nb;k*O8s7nnp#!YX=lb{ zkdgHa^7ujUor%0y85YKaY=}*Prk2 z?0GydeBDaM&CkKE`gaM-Oq9LKC(aW4e#G z=*rZOVxP*`?*FtbHOF_GQ&n`|_?T^43eUEPti?vnfX`;2Hw(}^ww>Cbf2mW(cN@q< z#&^xJUH;3st!=Z087FSRHpseObsR$Xiv3#NpFTU^y1|5uyS4bh@Mjo*>r1R78$}lS zrki$ZD|j>It~^KA35FSe=$ONJL(51NHOFj`g;I}1JlpoFDGN#GDe^o5zGci-Y*oR& zd?&g@3r`J{i9E#q9s5x5aT&Xcywx>NY*pzmwEh&k1^aYAGBKAvl^gjNBE74`76j+5 z-$)HD;$G{GDmp^`V>`4ODWO)z%NRd1T9ES{^fhgpRrtV9J)4p3hp>5DZVX1nZWW#o zyUMm(1K0(VT#R*TzQ_X8ZuMce8td0$M`XEtk<{JBdd3Uxt;H|3Kjjp=)eYZ>tX@w# zGagE-KZz_E?s6L^v9;Y2Gv@2|WE%ZU+jV?H(}U$Z#C{c>c5>i`AwQ)prGK4uIpYxD z+A!oAmFX4RL}UDG6Z+#UG(2qa){o%D671^l^ktmc<&9LryUjB836^t89X4(0h#gV~ z;Vl_k`xjkZoew`YV^?f&X^~$tzR;{<|K<5XU5nVf^{TF;(>x8EkOfYg_U8_pc8_J# zKCcEFIhO4G?#eH9W@(uwZxt_3sTbLP=)r$7d26H!{!GS&M=13(v6FJpDPkjuZ7bt( zQvZ5lTRwWLg3)|*rW30!+E4oBxk;AY3(jr3H+N9M*A4bkyZRyRmKpGu=*kwb5y$;h z>D$L}UXA>|5a<1vGYLlJ{Ot|2$tkfjUO|47{sv2a3+|sme$Tjz6#0?tKd zJL@2GF*A8T&+3nfoQ?d)^pK2i2&PS*28RZAvElI90r!~C04IGHxKL&oq z{uO+`3BIR*YZ(`wh5rAJ^mmZ`HH=Zc42JX2y_J^k7p!%FwT!q8pw(w%102$G8iEfN z@5p7`K;*XYY^7TnPtlesD#^GJ9o;pI`6F=IU-j{xLEhKvL$C)1Vuuch+XD(r{uxYG zsD6=+@NozIB^UL}SscS;G4eiV@rWH#Cc)$(^u;&9;KCRtnYT1C z=~6!v+e!F7Pjkh`xNMu(wsYH3MYeP|E_^e(;3jm!Ty#SbI${pCZXver>~eHRd|cR+ z@5oSFzMD2LeKV13B6CB?br~0KMJ}5%7`eVscNxy{;aV9VrX2S8uv6BHj4rKknd!-# zf=A>c_U0mFgUHFx^4}wJ@^>jAQ_tLN=>v4Y8S*@aUJzSSbU`V0@iDMnLprxHF53cs zh#qKX%tB z)&?2#>0fU|K33>`BISGP)=xx+SLm6M;yr8EXIV1b1wV%2moV}+4Bw3+o$zvxDZ`T) zcNH1#C!R=uFpLcUE;77b44AGD<|QV;V!@TG`zCUHIVV? zL7k_tSK5&)Vm}Mk#8!Jk`m54Uq>MNDT*gLhEp2hEpI8OIXp5zvIMn19;kAE*U)Cy@ zag#3-e#tat_(bc!Jb#vZHor9Ke#Sxch8@Uo-C;i`JNQM1Ut~T-!7tyj?4ynFeU}<& z^2VU>JMaBz;W$tq?GNqdG${5T{`jE%Q%?O=B z|4#s~!b{IU&so^sqTjGH4m<%K0$}qX?JDB~8KfZ?ih!Z-fWhz6UL6JL_4ibyBb%>8 zHZR$+6Ku&`>L%pGM0cj4F{UMQVZMbm-R1L618ZBbMP3DGN73DavlD!C6uJC7p-UTJ z%H`R<3?mc$Sj-r|%pq&wta#DH9a0{V%Ws35mEf$Ae&B5QJ`a3~TrSgN|K)iec94y; zxAoM92z`~4$m^22p6&3mDw)e3Y-C#FUvD#JJ6|Q4I4fH|yuQ)xLoP2jaaMv~iOA)J zn);cH;gvzJ3VnE7E_dn4*yDce@pNQu1~NAj+1m#hd>Qt5-_W@|F?;+lwsb(HHtu~$ zt8ZXjVFs}wV^HPFZ0j`e`53yng?4c>h9To;O`k_Ti@lEB+`?GepRm(khF*t|J!A3f z#~-30FOzxBE7iOS)_AhCbK6x`TF4}~meCji1MS!kw0p;d35+9vzZUG>^kuI4%yTZr zn+A0*@uW8%2fJ>O9Ugb%mFP>E7Z%&S?J8_{WcD-gKs$Yi<5OKBKX&{H@Hz)sw3oSt zmLDgDN|8zKe0X4FtPdb|LCahfeS?0T*gJ=ilcnT&Y|PA%sZ)6NBd^bpxnX2x$1~uq zeH`UvKJZ!ow=$k=Fm_UXZ!lVk92Yw&hjO=3ubInKePnJhT0);=<}!RUsH6C190C)u z`SkO1eoh*%*)%n9Y_^Kd7JHxarx8~}^W3+(CY!cCX$P1mx^h`s{Rz@M&bV6uoBsrP zwUT}Y_xIzck-@#Ih)*)N-iJ9C(MgJDTc3U)leJMZ);9kJ^3{bBHgV||8vN#`RiqzdMEa-@N+Z#{7Y=A`B}{Ipbzd>nm3=h_0!A^ zZqaYWwo?6$B15;pn=)3@1)quCwAq_#Tu1qft$uQ|@F)G`gVx;ODW$H?()7qo+Gr~N z9h8A})^ILy7>4?Y;W7tu6@i6^p z@jEGTjww3&(j)0h%lP$m`Y_|q@xHXyFrR+IPW0Cy(h|(A5u4q@;0$|wS}?77+M2M> zzq~B}ap`-_Q}{NBP7oZ6Zv_3eQrgeNG8hql`XTKk`d%=ci(PXeh6fw5cW&);j$^kU z#y%oHdmMWMZBb_7xe8tv90w}!P3i{6vGMJgU4MjjvVD!5SQZ?wCY@aHI|KZhW89=! 
[GIT binary patch payload elided: base85-encoded binary data, not human-readable]

Hm3D3{QLZ!?C%i=-W+LK{b)}I--8oY7*W=Ie9a!NUm3&P4Zb6_M|*JnTPEe< z7z=iv$>Cx79Igq=j|0me-W>0gXE5dQWUtDXx;ZRgw<6=pIC7)R&{PhaV;4ssu6aJ* zrR+hstV6WblJO?LzAp_tUdL#+im@g`|ixQ&7M z5>xT(Yp;O0jM>)3_~ld0&<$YZ&G_MO1s{)P3~AghXI=aX_W4`Z%m}T_#E*7cyw#8# z!GQ>WzOZ$D{P`bi4*nZH{p3r0`jP&8`SOLeg+n5J`pPk_y0ru=w5aX+^oPo)U$0O; z{RXr_RiFN0_7hdPuoL>;7<-ZLY|5|eH&V}k)psvHw%(!sRIHm#yXf~xjW=J-KKl08 zLCVc)#lPt{=IX^dU<(?qvkOZm2av(Ks1G|wmhn<31}Bn34; zruhiXT_~1a-)bSeMZ3u_pGvz4?+%Knk24SD)@PbqpGaSPd)oggVm&9iKKPyyc(x`V zyxLl2_VYerRi~{eYs+zA-@CWIi;v z?hbxU598O;Wax#?GlX9+@xLeEX=x5l_%nz2bq~Kz>uB`7NAnzw&Cs37%hAvbbrrqV z4BaPsjT!1IT3fz%DDu2Bv{Pdjzv8(qv5(f-+j6WUH+Qz=oBzdC*sagxSfhCM1UlsR zSDO`ozQ41z2p{}Xu8nD6R!CO}=Pm)~E|83uPB1d8*9L%fCGW?8b#D{aO+tt8zjQ$n zR61l6RQe{So;6Q6S9;?x*Q7U&L8Y@xpwbu8)zTOJv#h1)XW2?o_ywZn55R_=hyQ0j z{yfJgfDg}y&u=cic^`hhxobp+WzEGm@56UL7vH?vbm!;fpNTfn^L_APk=W?I+vA!OxwcF8+0N|2S9ozW^2fT@Drg-3WE(1swcLAAS6|iw*i2VQlaiRL{(S z>X{`_J@Y11b_@NOi#~ipSl~XegUUA+s`5PtRrwY{Rle83z+ebr?7WBl*3;Z8{QETb zD%R*}?kx>MxmU48uYiA-GVThwPx!Zx`-FcBxvwl2%6(;%jnMcQ{BgJWt;byaJJiL$ zLxq2bF%E}RgMV4~yafFF$`~-pB*p>bu4DqV3G@NzUu1=x3;8OFY=xi*ylUVv)+J_Z#=xlc6B!PF~m!xy}r-+5f$41Ef^96AL$ z13DEt7WzE24|FE96EqV49^m%^u5W}chAx3Fg+2#e0UZrp1MLZ254|1w2DBVn1U&@Z z1ljVgm#5~2t5To3_T7#2K@k90^JQg0eu~M8af|(209sf z4mu2ao*c5^kx*;a>4QmLN=<0@>i+n)e?W#g_zT_O$)u!oYDqQUN@!(en8xPli`10Uey;m5yo{NzkeuPgPeqSx;FN*GzO zJGX#~tHa4dA~5oMU=X!sBXqOVNAcnQsjfeCpD=R*n7PGG%*}$CKkdMNH&LuVP9j$n z{?fpc32m>RRbByAA8pkq@(beGQG#58FV(!UzjnQr=W3*Fk_Goea*v&6o^ke4|3)2b= zt$3JAe3Q&y2-C}_IThSIM|1GtZeg2BA2#z*H>Z-j1q`h5egpP?Q(tmWYnhKT#%=|d z4NbdPpJf^DxR+lyGp5pit@xxxx4GjUAGR=4Dmf)0{ntL)JlgSJCs*&kW^BrTE#LJK z@)J4!>y%6V*NUOp=3>L@VrD|%>qfLyReb#tF*7QYpYbFAs4%wt*O&OMshj7s*1g{a zbJr8zkQ*wB1sqk>~3m4;JF6rxPu=kDNbqB{% zXU7Lkoo}Q+=SBFSmnOLVi4R)k+F67Dxx{UAFHYtq@OV1yEIj_Ii^prK>om8nk+@tj zGQ#CYXs1r#!X+*)Zys+ar^o}$H1eHWY(JX2{KLl57x#Gn@K(>Ck6wPBu1lYvWpDd0!QAq_%OAY8lqC)i`S=L7a9d8DUlk0safxNFVO zYQqSv?-gSeWao8U-3?zad0}7em1@mPNw8NxZ-f?*_xLsD?q6<@Vy_s^J?Q6^-O}vU zkLPuKy;nol!!VA>Rb50*>J8+iJ{T1rjN!X+4RxLo%;;*`SM||(dazDE)4qD3&f|ia z!zMU;t{tSD4IVS?tO>eaE11x9f}PabwJsA1)2j>`>Cu4DEpuoa->qH!Hm7Hkk%7V zjf`op4UVxk?i2Dydv$L^z1vdnE7fCvR2|mo*9pp3v^MIus|F$83OUXPtXFq!=NH?q{?|Sx$c&C1T-lg~G9f9(T{pUUVQ`*e&0v(~WlU zwd2bvC;lOkwp4s3d7Ae1lplw2Kaw$j7jkJAe4!jo%5|bV?cs48HN81{sRzCq{@;5@ zx{2|t{rl!p{(DA@Id>;IsmM6g;ZF2XG5e7#PIEcsTS*S00@h9~Z^qbUpRbq6J=6*Q zmrutef9gHHjNtWYqN%|qf0_VI4&J~zEUzBkn9*7*m0Nm5Wy_;&Q>arYX-i zqIW*XUa;Qt+TTlaMa_we)Vf^VH<Mc%4PW-l89TBzs2dl>~P#-4tX&>pPcli=QmM|qQ0j! zO1;20^809CUV}Nm>hchlHtviJ&OsLY$%ndiR8jQ6{r`+?PG)_MC!5FCAe(!i9KUZg za#?$SjA4A<1AT}wDtSD%23dR;`SvA?%gNP2zKFNKL>7NRdxpnw!t|NQ;yU?`k*57MzrU_;hR$c16?5?& ztj~6JI=0DtWVmCCV4EzI93QU#@F8HEEHV>pA2EOOE3E2;Okbh@9(DW+Yx}3z>yYif z%*P!+!>hy7?AImVv88;O;~k&F9OT5wvloyYo-pkT*XukjSlYt0%YUfzbqm@wf*Dlb%WyVSBEFqua8Nz|NHSI`_1fR+iI3#FS$9@wr@+bmmBH! z3yeAS+fU(f#gD4*`nz)ehw%D(%BHfuLfMOAGuo<7jQzd^^xb>(TQU9iG5vIuK8r!- z*GJ~ZvIjvNeNp7b8oWw-7b(_(_MS(37vaMxpuHE+-mlT#Mfh$CXm7n+I-vl)I1jzJ z37zm3Izj8a|IyoTd%94(dpX^x4mN@onoKo)T31)hl&&DoN6#_NoNq+pT3t{5AG zT1LD1uHH~=zvOQcIwKd|k?ZP>06f_nouTnAxjgzZIs?q*=nUC2esKNW?6XsgdPmW3 zcO(C!>AM*E?rvmgEa!3XTO9o6*=H-k{9ZiHd~}M#E6v~)=@rMGfmg6+B-3Tj_{lx4 zZ$y(9NAHhiyzAZZ@NU(ys(FC#s$cS4{i60Zsb6G!Urx^yp9$-kWOS-@KqC3M6;JQ! 
znMm7VVKsWj^V@4I$##(4u*^p+TZHYfEWvHNl#8}QBXr62=pVI(_M7zdkLn?PpnAyu zimZpO*QAH&S0{FUdlfz8*$7@7N~9jTl8w+4eI)(l*$Dp*ebh5TAIVnGK8v}G=@jgR zlkwwMjcSuJD}c=)ofH?Fckgj*hSBJxyWxRH82=A7h?{jKJK;uGH?>R&+X=$3%C+X{ zCfNzH5%x!=24yGg#Ga6yFc_XGB0obh`)Y4sU+shVG2e@d4>q1*+CN}V?Dvp~H?W6X zQ}&Q+-%$2LM)2;ge(ZDA7w8jFO=+vm)Q?_5&I#(I)ML18KD(j;vIWpeL>9htD=#Wv$>AXaP1whieuS)hkM@%tBU>UnE^JFQLS9Ao!TVXn7XQJt|DJ}7qV3~p zXRQ~#nRc(*wy!m)xouw`aItOQL>{+*Z@jkqK5dYq^7yveD^}k)f{ci)i^h{zm#Ss2 zIxd_t>Up<+y)t^b!|Rhsd*v`XNY6<&MSk}H->v$+b+nc94fK-t4e#BpT)pHY4#JOK z`m?K-l9^lf^incwuTDgdUzK|^Ha`66=_ctUt$UJw3iI1H?U{gU&lFQmlfBaJ!OwOl zx}!LzsH1F~xA3nK-|g_t$`tsa4)#n+UdI)zJ=5Ak$@dTmz88B4|CW6l@sfPAA>Vuk+oL9XXppOqCQ_~z zJm-{|7->DHvi>`5qHX^}Hc@V)e~(SX=6{Y&6ioYeHqnjTC!46)wTb4Td*-8i7NC39 z!L#d;i?OVmU@f(sf&N*KZmGq7UsoXqYa<6Uk%O-yKih!2fAoOgeieDzj{Uzpd+0V- zPrZhI`WgFy-OoE-L)JgQo?y~duVYV)XFo5nN5wqG@?W>GyeMr|7jf)Yvn8I^z++S%YLdKju)RxpUk6A=F=yyHo#u0Kh&|M7QTqhN1qhc zALH0lE9sL}^vP=jbkA7FrdrcC-k#&?xq>Qo%=ckenI-XUn^1p!W1?#pM%Gv3H?qDN z)XS)*UK(pgm3lelMiy5sx2K;x-4|(p6&-c$FFjYK?O&lRF?>JIwM!%0^%cIa?-d^T zrhS&+>cJz-hx&=pTJP$?e}{dRgN|U`Wau9BU}MHf6LN~mPxk}H$t`KpkIB-DmG+tV zRci^H`Bmne7N8#;p2bF6*-F+qg6~C)5AH@C{g3hek z&eEHC#BUU^hHU<~)SZUrKKMo>>0|dB(w~v`y87@+I&`obpY^@jdgw!~KiCjq>)A1C zYu2r}w%#^yNlmt%FoE=_FoA46>DNemuaI|LPS1YdHlJ|7ziRVc=&XHFqRD?2Lw>_p z@*KvI?=YTRqzU1DQR0cGzs3k9lV?ylNuBtu+Tr*uZ~rCjOVA3t&cgmv%+|x$ck+kJ z7w*je2oI$sJM(=B_`$(1eH9}qzdHM0ynP&>(G(iu zIeznf!^fO^ha94Ryw|k$8!5p94?^*IEbnDlYX>m5x+lsK24CKv_>vA_Gi1U@`f?v@ z0pIQKw+q`|{($$4A7@$t@;MgsuI9u@wuoUJ>mb8=Lu>Y!r~OMxw6%6P zIVzgPTCWUe&@aT&YkxWJEgeRkz79soes6YgLW66^&<88jCg~+6$C7xN`~;Cc{z4BT5zG=z+9F=?p@3dg<)!PfC7d$0< zY^(SRJ`^65T~-}Wodko5r+R`xGic9G=+Dnwo>KgYcq&7A?BS_H@YIdmH^;~b$4Q`P zn}RJrXIyA+tfu%=&h#}c#Yi;8kMQLH(>ll8RAhY6>OF=mU@UpVa{bm#e5aV)G8^0bwYC>N^TCHv z@L@E37y}=Y7pgz8-o!WWV9cnF;x#9}6ZxTdPu2H6$M>DkPPY54yz>UWp}GxHd<12g z3-2|ezS=iHb=^g5+Yn@l=P$Kb+c!U=UUTn`w)C9%v60J*8{x;HoO}LI#m^jvC&iB$ z@S{myK|LeA=Y4Az-y00iMScfe6ViPo(Cn+&8SP`XhvO0PCU)Kmc=Pwv^>nluIvpS8 z&7Kkd&NAxn@n)pIQ#no+B1bj1hEHzq1kF9d)0f0bp#P@9FHc@U2UfRM&x@5P*Y_iM zR&%zxCZ5%rJ@KX5Ksxe-%eRqwatV5}oPFS~#J{Iq{#_Emzn*`zy0vkFwJBSX-q;E98yqfvtH3MQS%`c0m)@y#5aj5y_Z>1-jnO4iHda{|T zCtH1gdU7H>x0N_smD!@qDx>7{R=%H09N-aT)SrHCvKGX!=Jxklg{;Yt9P;#|cv5RX zs_IEU@6|hp*N>p9q-wU zTk)Riqqr>16@PP$z~CC!2X9G}gMJH|Al(y@K9qhaS-!=~=GdhF*!m&i7>bqKG-R)%`EV3=*qj zt#jtsF6u(fvHfq+g`R$D9v9YyUVW^H`ba;C=bHZ*eghxB1JC_hJVz|!8hCCQxxd77 zo(}YQuc|K8*l&d1^}eC^-t~`~^`7>pk=`5ged|5=@4LnbR@HkM5qhuS3iE)|Xos3` z>i4PVs*4r8lAb%$=S|1vQ(T^>$E3fc$AmL-nIF*lwW;6>#R6*W4Sd71$8I4fw>RDopABO_KlkZ5hkx*m>KWbt3g7j| zx>znkpG3}G|GyCC1s5sR0>c)Nx9;!cVwb$klxj{W09F2&(bS6^?yCWvF~2;*4*guhe zyGWT|HLhZHL|nxMFtB94FNye2zOj)#LiGN>l51S=-fUJMSD`v+Et~3~v3WIPvlh?i z;X}|nCA(Gb8^9~qqoX{z`f`Nq)_oeU`gUXDIXrtM!yT`ui0?a$??Crytm}Q=dvxt2 z*htUnxdj@xE)M$M{BZxjj=8vWeD?&)d*eHaIakTma^_t}wo01i$yJT<%gNLmiG$V{ zPiE|X8<`rOpUnv8v|$dTDcI#!lMxoD%qv^S;?+T zT-DgsI~|+QEsy4kB`2$vLHFT*xvZ?sc4cj`?&EzoP(SZo z%0r}gl#w&Tlf6s7Z`li$__p)0ri%*(Q`X4%)R8LtzcD^_8+iSj@u`=N&6W{%Yjyfw zXbe`aQQpQk$P4PV~a4E;*~)i+ko-0;WzKh_~$Tl>YN z)(w^u8}OL^yV_W}cEbz#FM!Rpcl&bCFhmYg$;1}k3oje->=q|9?X!Zpem!9st zN9XtJobQc(XHI^5ol`EIXX>2t>-=mQ=hRc@YjsY0=zJ#UAK@QT+ik$7oXfSbX~|%^ zDArBlE5c_xo4p~wj7}?@!#b@La?%}+&M5qQbgjZa!B-{tmZuQgdmMlImiUBr|4IL; z&yw+|6N8R#RQ-`r>OpXGg1Q`3S|i>KwMa?i8)Ye#e3%u$OYhcPsG z&7AzH_=5C|&U1Cndk0g`<~rxwBRSVTE4w-v)*gJynqxzz?^SHqo4lhJI$rgZ?Y0=K z*@QZ(Ui#gP+<2<*c5D^Zx74lc5|xcIO{G4qQd5G-)OWaB_9=6xCvc zQQlMz;+~!UZJwI$l-b(@W=ETei{+~>qaWn2*4|-GnP{`cuKza|KiU+x4Bnp1Wwg7> z(u40eqz#%EKa;O@94+~l?9frPiORA4DdU{Cj)SuP-z6jdr{set6UM?7fd67?Fd9uKh50VLz7qjV;*~kgW 
z15Z9kE_gCx+{W48UOp^{kPn}b}v?zc2aV)wP_uCL-sWQFc#GjDinU_Jp}p9r=)3lYF3@mGXgemF=IC4>7?)NB$%Q z%P2?ia`M6X*1skns@wZ98Nb)1cp+z>@IWj2_D0$wjec}606ZoPFb&M?VSwq#w&xfZ zr|9Q&Q8NtcAj!w+jFgbrR@6AO{?!br}N|NyR3IVf!@f_ zb-w=y=cm~7{oVeCbzo3fhs?y6@)@~{=I{>bknQM8pQ|r4S0a7Uo%ZkM%4lyK{JZpp z=KAMW(G~i>rz?7V2VGI!zJWFM|E2oipX-11`Dpsuaz77jbW@({tw85>ihqqEa>qaWI=WP z|7E}b20!?)7ox0+N$l?kPAbQCI}`0I?8JGH^Uv82At8HERx#&w>DyO04oBB6JQ7{E z@P!pWoH;h?>L7gd+2Be{!u|+-{9uxej(;@a%y0joIdjG98`*=i=HmO2uXqW^3Ep!O z45ztc*40n9@uy8CzOa+8PI&?RC|}Wo*qmuRo5?*l{a5U)*2tFAJ|lQ{N=C3xN_ucF z=Z~{ATDj`w4nKwq1Ig0hsIUf#w zcnF8e9+h^n?7oPy9|rHM?DIy9KA)2?DC@})t*pU; z>F4XdML#$D4*lGz#(wri^t10f_Oq|re(v-y_H$&JFYV_mm-(OXXJ2_u{jBnP{j9kN z@$eFCJN0cY)ag?<=B8na@mc8$bLzjze`VLqso&g}l$1oj1x;i6Qs%~N`rSBYj21s) zdrZ$o-^Uht6%i8x}$rgOhya3mKLJ14m?7~4o|*b6z5xu*R=?|MA<+%%rUu9`j- z+0f8Pc6d@{(|SU$Y!msW?4dJ#!hVsO>WOae-IKbG;v3VT%c%2m&QJ3#&7mcD^`>4n zb-x84UdHuq{gFrAqm4Fk$e+!I5$u{0AG|XqE?67+Gb05(#oFaLQSs#)ZocoUXzKC_ z?QxtroX=^i2cMl#$;%&4?PM$0sP>V6i*etGw$r_8JINunT^#jaG(OJR$Nu^!9&-5l zW5cL;OE}3$ryc%3_Ra-9s_NSRXJ(QxdCnlOpcoP!Lc~~URkYP+5>P3x$*pOn?frK^ zY*jF^z4qVQDkQ;ZfdNZId#&7i0SlFh#M;~Tw%otmg0)%;*mAM~bK`zFH}22eHxl|(;-mBg`Ts`x zNBU@?i_mBj^R3C~P-oDG`qmlrp;-iX=esohJe~8j==mmS zv&$PjFM>AsZ$$6syqJDm=0m*5Y6zW@gY0DIriW#}qjcKD*au0<7^Y3sen!Z=i#41? zWt&Mk%ZxHphBCd(GdV9V<69(e-}LY!hqYT|_Uc z|2l1q|K+qlHr2Lzu9rfe#ptg)T;8yhYvxBjRVMqprLD@LQN5pgj`tZS`X!O=Ab!Y( zjVSh_=yB2E%$Z{^qDL+E;{^3==<~LfBR^7x8L}1)A55bEJpj+jIOJXWv>!01@H}ZLcE?X2){ZT9 zWh$FRY%Gb1A>#|-P4#}4J1lx(4Brv#@zfcNEqIUkJm|K>HsRSOlHZ*fevkWhNd1{J zX=N^B8nN6*FdtG+Szo;S4#kHr-aX>Pl^JrM){+H}w|d4lj&N)E_%Oduo4*a;$+^(I zj`Q>*^B#Kyp4sNg2uJOWEM;$q?@&Eshc%=-epni7e_+?_ML(?JeXID!3harM><4zo zgMF9p&P@x?NAHYeza7u)ubr5M{&@)fBV!Bq?2#w%KWV7P_8O}7ZP~=LuRs&4?InCO znzChQM(2e#V=r`{X2@$wG;>*K<~rwSR+@xnr+(|&wr{wr>iC^*`UDSsLmFe~bjIZw zjLThWTt0^S8#;G0{j!Y9M-N#wEpy1qX-D+2+bZ}$#^>Yi(cVAmPHSjpj?BBHtnHMC zn9GdUUGT1(q+Pmp+S`o1M_@Cj^Iz=EK9cV$eZv^f7tyxj4=;OG{EBzpPr9D>Uhhe3 z813P`u3OYQ3ongK+vOS9IE;6>EbooJYY_Y3zxZZ}cK3Dk8w{a-op zk;|%}fx?}nvZvJO-E$xF&efJL!sZ^`JWri3l=A~M>U;s`(r?JVP@CBkFo!kI6885H zKLEjYeGS`I*}BN=H@%*4277*th2Ph3?d#ax-)G;Re!IW7UFJFWvY#LQLhF)&=t15$ z20yJMtOLqEQ-`5VEprR=*(>ND>6aunjIlq$QT`kFRyjZX#X!@CUoC5rbMRYD$E%~~ zC%zqM`gmUSy!o;~)BY=?=LfD0G`;)TvL@}=&gvr4%8cup0vuxIj&OYpKjX7 zahE#(Op|t;`$;P^ZfFW{oT<)lY^vqBN}YeUX(Pv7>b$f``-uBVD>JmF0LPi?+|^Xe zag{oEH*Ms&OPza~;H2sz(#njqCZc#(&s68>O|=|Xsq>7cjU0E8WY4XY8GV{$KcpSl z(Cdg_{{;2_A-)Lzh(FDf*vmh{mVU~e9$qeE2IBdRf#zS(*wdf;{vxy@4fbS*KTY}y zNoZciet)$Yo7O#@w`tu=12?VPQ?O~B|CUYbJ{jD!uJX1`>%JY@w9cSgAf0+a8=+eu zoqBN;x&_jy7e}F6Af0+a520HioqBN;x&_jy7e}F6Af0+~6uJe{`!r2*rLhN_SITk2 zD$M%<*3<{2WHF zT!DNnK{g)6&(H_oOMh6)o_h0h%a}J~Z#$kzV^52W0`^CwJ^VMbKP3BM$$5YHdLVpl z)N^Axd3Z%mRlk^NrL3w8L`^1d5ZkzORdO)~0kZ{uR}c6< zQPIv7rQPk&&Uk;o!p9;{lc=K_TfsY4PBD90R`r=cFg6SCf3kaiOdB#X?{qHOlDswKjXdS z$7{CF_VfoA<85qaojJ{wxp~2fvSqa&&fEU<{gGea%J^jAyAN*PHzqUk1pSlDdo^c! zB5S>_uoqkM&JX5p-!&*ba`?TB$j*Lgkt0EGWbbfy#77(cp1%JeDUZJT6QnmQbwSgq{Ee-UNa%)Z?(P4PrNxP^TZ zJr^^_=4(7Mo;AC?KH=+p{lntBGR}8Ncve7b_$xjv67N~weXY+M-hnU662=L~J^dRe zJ{VXgu>&SPICt61!rHdGwE^LxqFVO)XP<5v4|DzvtzXzRthVh4ShdDS0>|FN|E0w3 z4L=JFu1*g`9_<^5q=VtycN=?Y-+kAyeP0Q*UBOXo{e7W8TOsG)|Luxj&-Vn@mwG%c zwaDLZ+=E*Rv9*e!-zB8uyi4f%8+UF?0KNI@JzC^{py_X1gIjj;|4$R}$TUfwHjkPV@zRb(caJ{yF7%e~j^O-va?W=@#rc)c>JJrK{dX{n;z!$ahC_e2Qm{Iv8ywWtVbGS*47!chVm! z+?0)2LF6;OB0ef{u}L4@a!cg!xImkudR~DijeF(#ZCrbsvTx&>Jp0N$(P!j-!6D0k*{}UCIj+bvG-1whC=W6J@!j;()px+BI z=MjL;QV02l(ERs&Z>T$`WhdwIy$0&=HngCw>YiFXzc1yG{k*t8BKQ2S+{69<&2bw? 
z?rVEPKbO5GFNb!5oho(Zw5)_~3x#I9a{>3csi%B5I!5|C`jQIzlpuY}H2RpS^jA|N z^f_vbG*lbflFK+G#BnS07$N$aRn$2hpZwpDC%UU}!Hy}~v6a(`NZRW~)3jq(Rj=W` z0A(-Yn&g+u_1A}S&sEi0VNrp$uBbrnaWU7Co)$Q?mG&sQe@71Y<_#-!U0F1G81sDX z_sQCY>;qiLJ$x(ARj6I5jpjPm~Dv-L#dYH_EJ&U{vo#${YObe7~8w(4HNK#+fa{~Qb`8n!)xyu!v z1XibPVNHo#FPHnETVY{gLE)6bf=SFLiC>@4Sl%yfS<)}smIp72exr!C-OC!hK|jXn z|M@n$$4=uC`s8%RwbJGXX{&nHhlb^BS~sHqrghS`vq<%%HKg*_hgU!J`pD|?!^5YQ zS=xKn%KE2d9j#EC-=^9gJL>D$Dzf&oSIbaXwB7x&!;-x3jyuw-zm5-|Ryd;Ix7tHb z-7z7pnsuse=Km44|7o%RC-A)W{t33{C)l2!V0)fAZ%g;WtIRV=y9x~w@siY2@d5Sw z46@c4ZKtBg({*_g97<>J?(OTU?2LPahpTiRPNl9p(UHPuJ3T|y(F-pPg^wpAcf!xu zf|bI*a+GU!-nR1e80K%1@%3G03!Y+rmHpynZZKQT4T`_v8ClHPfPB%Pb!A<;Xs zb|JJZ<7mj;PI$bKvG$C#x*Li(&g7`%2EJ1E2z*^wA2>0iUOOTAt1SE`@+&-NysL`$ z7Q&}Rnmat>el2H~mKN^nfBD8PU1kqT*=+AK%4YDqq1XDO+pf{|+f>~?E2HdEUl&KC zjH2I$A%Ay}0$WB*E88-1n&`C|k~p8aWwp|2yO2M((rLq#P8%L@RTsTJJg-RfoA5e) z|1jio7D@Wcfd8RTt^dcNx{8NFWgMlPhP`2@ljz-H=(<_RrpUDY+1|LO=%(9aQ<2k- zzF{qU&(e1d^c`{f^sRut(D}6JYk4-V?mcj=Q~FxIVWqFeuV_e7wdTLFpbz|enc`vg zUb;;BXsuO~qsT;|R_Vd+tU+&9;bSynLg0nk!pqe9fBi=6Tx`Mp$jh+&5e4PmVFe!2 z&e6jQ>Uf`w4d>?<6$JeALnC=^rT*L+_1sAPxsfr?tW^gJ|C@*tKMckdT62N&xz6qEwJIVbVXWV!|CV>>HnpCWt30b zSX5BxEmY%rvH7a7uf>)YyH504o!&=FUv2H9wX}Th$*&%^HZbK5QpTIZs{6b-y!tZw zk5Tl~{x?Td_kD9@^;KJ*o;G&NkEdO>cEyx$kdz*#@0LC~mv$M3{VogEF1UWorwjHl zj+HiR0;@0Mp*s2(84m^UOTC?U%K1w0$kLu-XGvQcZ5THOvf1Vn-n^>%>u-*&u7CX} z({AGUb^fn;{jzBqn0)c!=-M z(%Vv=G1}5A8Cpuj=i(M2(#~|F%41ucLKbD&;SKv*^ll?8@79 z8(iiFWz21*i?mtv-olvsX4>wXk*1Z19WU>&+Wx{9OK2Bq_af}s9UOOITf4APO1UPC;RQwTe?8|K=i2BI1+Kc% z65g$z72c4wE#tY%d44j_moKR)DMt^_@)i}8UpKtKy=mwRB?ES5!rR5co%1KgInV*`)IsMAXuaO>kv+&A#@5;P-`0kb0SLeO-`iQ*mkI1QB zjm}*|{u*@dBd@Q?tABlD-XrAie*Ln%J2dU2)VBz~HdSZFd>Zpr(Rs5w|3&yK7rqK{ z+zMZXD3ADBnfe9V6v66#=mMy?s(dCL=Mlf5IFM`>02KjEr6 zFwR}|ZmFm0{aezi4&irpm_4*VWIu^xcX}ho@6C*yVC_}pXe+u_Xt9K&ApTv znc)TF`lL;}pa1nkMpW0sBTBCpdaf+zTRF^at#Gl<0gXx)?wArJ71q_Y1xP=Atz_DJ zuU$FqPbB{zOQ!vF;fg8Zza_kwBV%Qr8S&j6IX96WDNMWaCg!icw&k+CHQ!y4Blk&L ziQF=m*;<58WEngs{cvyAL*~Y!WqQ$8rujKNl`nOZxv^Lp#DC9DgEDXr&7%VFNf~@n z4xfyNPbR=8H^C=2!zcJu+&K|G`5b(r_=-8-=p5kb^4URrjfKyeI0~N${gd&T==yd( zO9zi|=fCVHy+tzkWCpT286FW{DT7CfNOx~7nYM82bJJ#Ty>i+j&J}&&k!#8_tBe19 zO5&EL%&V>` zOuzD#!i+2D!&6Uh8I>pRUB!EM)0YU(r!i+Lw%_ji=c_p>gU=?xXEI+J<+H@~sAAo$ z{kt!B_}#O0pQ0mj6Hlj(%bOCN*oW{;^P=cF;YHT_Q_^`wUDSC|eK)M&TJ+s_(08Kq zW})MvdT&_4{4=QU%4oO1Y3RGI)Ztv#bu-U`t}D@X-9eqdI@5LS{FPWoB-eFUqU)aP zvaXY{K)Wu9)pd3~Cw**3dQSN3rWEv?jE~yC8LKy}-#nA|h<=*M_(=3xEKQP^PoC+F zCY#QJPOH*&T1h85O>DS!eoNhk6Fp|wao3^OtTx=4wBJrwpNSosY8@PX7CT1B)%Dp{ zn?8$<58BsJ#cpfoEk~C*y6p7Eq*XeZ#v) zz=3+JPB7i9E>m0=XP6Hd?0yFxyCtu-21w4wxjGlD|5av<=a%= zwr@E21@>Ot2WDUt$71Gc_j$lmP%nuywiEyQT_nM}3O+T8OC8)Oe)pGguhix6@S7rD z{Kk)hIT-31)Uv-n{@CU0k26MV%XHU%{;7Xm_|!4Cck|HkT5JEGFMR3}FdRd%=a1sg z_|a#yw#m%J#FXoI&?BDVS;-L)B$_v-Ivo@ECPc$_8=|f*SwmQ=ner9T*xG`P(#k=s9hwp#P zyWfG9duZ$T!QH<6qK>>%kYylD!4NwZF`MG;-f9Ic2|IL;hdKYYn%i1=iPSSuGys#E0|D zWxV4m-Z_$Q2rZ9-r+*h5iquca@eVSy2mFBjzu!yz0%9pVGes+2$=)QIyY|MW%z%g-gYzr5~zD3l#t1!&ARVqn?7RF0arUf?MVl3+;Y7#Z~-g=9iz((2Bpo(Kk&i zmU8Z;{1x>z#r2fGod2gN?<~gC61QMC7>O0UTYUGuV6fw46*&<6_1nn3w7INZoP1y! 
zdSfd3V+wj?GWui^7=q7%d7B8<+M~z#OGDpeqHnUep3QwZ+@H&H=$Z$7k(1nK=$j8j z-yj=r`P%i(PV`Si*EJE(;A5lNUqtB}cy14RLiEipbcU?I?nGvf!e`B3%|!3?NAK9{ z(yns`nYv;Zbvw$t4)e|$bcX01O@H66+ipVlB!AyqVBD>GN8WYh*7S(9QwzFhAZ@dM zRC(kEbkFEr-`(y7=h@8u*Q1GRb_u-Qj6VGwvb^uvj7Sc=@doYO+-H15Y zSlnIkt>9i{f0Qiz4!@~g5-tP}J0CqZ0ePH^4e~Ybknq=8r@Rjt`xUXtMtO-b$a>$W zDqY1NRBFXX!L5YADT;o}fIju$5yj8^8OrfB)^2`YSzi3B{^N`PoBbv}JFC3-(|3+9 zE|L5%xQl-^z*C$Hj{84mx{Lq)Yo6kR?3eWOFKEU8G1J9)X82c6e70DyK>z+uX)*6) zZ>6lT@U`r{Ig5AP!L{G<-Vb=L5AQj`|6h@cxON%G$6eaE0n~jrYf5`)8^IXLJ#tO% zljjeXf~oW7hQ9^YW{DP9KdXOW{ZV&d{Thy}y{#YKUt9kxx3>Nrj#YVD@plHgibIs4 z4*kgeZQRo)n7SC=Lfv=PGJ3oeQ((k+jFGlt9Af=a^ zkq579;N}6e)zpKhDqH51M}9x=;^mpTd>7Lf7t^OKVt>Vn@Y<|gZ}@HaVLJU#Gx*hX zSAJN0>YMMBeq}&hf43Wqn(@6tbb`G;(pQe+9jXpV-tjiHl=m2A7u`~7(MN@e^iiJ) zk$7DcM5jzcw@gLHOhMO7Mi)&&7k#ek6#GfY+Gm$-AM}CU_5pa-_$yr>HG{iTx(FHE zgPe*kQnn8=>h%paZJ!vrDBEXnV%vvze89V6Y@fe~{^|H#akkGN)0OR$eW_M?t*8Iy z{q(;R(LZakeI|Zw;Bv34|7MBHdkyV9(a=eIJ(20?q|qPMfC28md32^XTulF6oZEkM zaS?k~7iBINTj^5K3)cQYYzKIAq^kg3CGB0%&a==V8PC2=A8oajl-}DrKJrUw`ekfN z!F=uj8@r!9F&1DmHKY5y=(+H}E_mvC&%n*4*i2Xde8E$HMP_8*hcW1#m~w5!R@zY> zd3D|(Wh=G(B~9s6qkKoCY_|zt4~*k$*|%gj<@*S}JWk!-(fRy@r$EV7%y+2&O`aj) z$+ z(>C0y?$fwW<35f1v<)NBfm#4RxHN6U^S%1eH+rMxY(mG|Jfr9+WcsIPRfta=r!|u#Y!A{0?>7Nj>&b zkKNGcolI@Li*M}8)i(T&Z*SzguRQ1~evLj*bg~N^w3}zv9*NpgLKEqe`d~N7dp_Vj zyP!v#D=j?CL)rUi8$Ok-tuOLXH_9S;s;=bU&i`BW`UUvT%hc;F>avDyz?Yjhfh5lhM_S7I`U-td^80@X+ zyN&4Qo$&E!u*!zb)$ERr@!D;z!KSVCF?N0%eU)Kn3FfE`+e@&&N#BF4M(wQVdp^+R z@MGk(jj|u+`DSP(HqS?w5IY0CH-t76yX#u3-G!YfcGtzU{q@qX@k}E&rLw>1_ddkd z{1}}tc;J7+rfOPJ8H15;5PvbtZzoOsY$@h=yI$|f~8ifoU(Pd~R z<&*0oJCtWz8~3e2AG}Q3#l1D~pX}38&UeRi&+quwGaNs~`-{jEn{79FPjHm>#$FpI zdpy6*o-JbAiJd3cpJR+gy)JlyK4T$$>wfrJY`wpDGQ%Iz2ef@f z`lCei3YVz9(y)>CqW`7eXh!aYzp(E%|Aszr&VFpXd&?sWYA#ux>48`20~fi6DBJG4 z^!3WF%Vi_ z&ihmBxVc+)UfjFRf}Pi126*-yvh&)@C39lEXXmwl=heSGJ1-6Wb_e=RY`qb3j(#)j zy$Sj?vH9-MuZi7Ph`tt^Zy5U7u=nc76I)O0yobmy>LojGr*7kkUf#$zyJP3MdB-r_ z)*B}07JE-@K*QE6<2zln^V)f8mCS>mvv%H}96Rs)w)14%Q-r*Utyc@5_OzWR`eCxq8YZL#-6Kgcz)`9wd+HN)-`n@()Lwxt~o*X=#S z){}8>5SJGwx_hohYHBF<%>p6Fh={Tp43 zwcI3Q{lKt~jeT-?_qLA*+ud(O}W435x5(dN3pak~7{ z;g^JRWTnd)<`=dqNcih2uBle5WxtKuw!YeB1)n4d218;m>_-nxI$6FfM7+PL{ax%4 zpWe8HwW;SweWWBkz|u(Y z8voj0wVwY)^1n^~ZRCH4{51Xj`AZiCud4_L2QLi=ulFwsX6X4}CjWcne}()9$k*Ht zw+&g^7`&n4+2Ey1pAFvVZwzX~9&Y<8dGp8%kyk_BQvc%M^A$^i4gMv;pK)%~>pwX^ zME-X2Kj7S`=i~l$!CzD~1)uOY1%Ju8QSUc6|B(E@kbjhOqux*Y9}WJx;s?PW`F{{x z&-sj9wQc{y@i=+^OWsGE|C9f@;0qN$4gRzLr@;-J|CRJH|99~J6z6{ErD*Dp($t{c z=gC(zC0}T2(5OH3QZ)5PX=>2ui{vYsk}othXq^we6ixk6nhNc{OrD}Ad5WIUZV2>J z^z=vR`BnZaTJqnaWmFyPJ^Iv^@vjy}~IoW9BFo5bdF z`uXHGlk7!%&l|+j()FpMKOOz)*!IzJkz?CO$E@dy{gZyx>0)PG?~LmkgU+~~c_(L%B-r;nn{PTBdQ{@7ywASxt)xSwe&vM)hdB=5 zcrEER(lP!I=J;vSS4b!Le<{b$l0u}3?9JAMFL8`Mw#@?=wu>su_uP+u5HtZT_khSm%B$5OeW1QG&-kVrsmWqgSDlNr_AAfW*aY!olCdRmFGXXAb8#Mg~7p3E(~6uzaTi| z$+f{7rbmL8J{bw#n7>x$*wy=tdEo?e-RipC-Y~xZWd7pduct2w{wRM*a6RWUTy<^# zl)oY{OwDY6!Z96|2WyzRl|MgP{$43B z?XeEtJQL-$)74H-J1wt9mkh-Yn+iS8G%s|utbzp)z1J80bu~%!-E}0q>S9~Zu=9wWH|%_2=#qn;oW_;#xLud5ME06k$4aUzxTc;%POZ8i<~goeWnFZP z=qDw|>Fh^*y~uIRp{?u_Zs`C1)cG1xKIwYW5Ymk#bM1_^v?}np7JB6P!QR1(KGt`9 z^tV3N%zV(p$NG(LfIs46P0R;9e8`G#aQ;Fca_)m3KIFuAX@10qoco;jIrQrazexXl zpygDh*c}IB<+DflQO&*KH)5019OTqTpYNm3_tEG3khdINPIKtfee~%*`g9+Cx(|6% z*O1d3`fML^>Z8y0(Pzg#$A9G1N1yDYPgc(@F4G!HWjxy**_=Nk*FQ|jW(e5~A)6s& zGlXo0pmhk@3?Z8#WHW?phLFt=vKc})L&#G5plh+eNa}s(e=fKC6;*%1hPo4T0dk{;X`bACorL0Md_eH51+TPzL z8*f?rTZwNj<0|AZvA>GV7O{_$?6!%0%;dZ`>PzC#p?B$^^a8PBrOz(G545-FE#DEJ zPg~!e(8hiadsvN|8E0F^>oRWc$o`h+UY0SlZLI#X*x1C~mND~zot824wb3#2RN_e5 
zZEdTpmw-)7Hiu%Tmwo(amviU-Sf8`*(ns;{2fEFP-*Z>#tNJbWjY)+sJK{a-7_Mq~ zbPTsdVt_N2l{q|P3}@x5Dq?e3%A1mn?A+h+n5?Vx>41MyYL6uR)8#RVF}ASsjLi2Q zAtmLR6m2(?CeHl{#|mAgPg0&qk-iCe<}_?&v3cwL?y$0R8JFgMMz?RljN5&!o0uOm zbdh1-TKOth_Bgh!4JiAT*b{U3jxl#@tOrE#BX(Lr?7YDzSe$sjs>}#dfsXG$r{_o8}-f;0!%tZKEpU zNm;&~uq+UKq`nF1pQIm_z81XVe|9yV zitN>v5O}3$_6GiH>!1|zS1%ZA#XA0~CT2VmkG~SXje@b&eJc`Rummeq@TuxsxfX}R z!sq3e1=my$s7Qdt;@Z)Ian-kREe?+*xFOQ1rLqAjf6|TkLvcL>&7JU z@L653H|He6XR$wkxdxJG-xc(yGwyfB{m!`GnM1kiJhY|GhMqmE_$^Bt8SAz7xxpCx z7WW%-i6(x_GGA!ow=DBpCVtB@SE%q?9n2GoZSLT=qS!8l-a zJ__H})8&VEO?+1`m)C9w+UcwCT^8D2r{lY}Cc}4~L0;)<*}IMRvb2R=$Ju#B;k_(; z(p{fF@drHT@LqoKUKJL+mmj=Wg$3^g)(hEb$9sYGLT-}dy`1+s^h?4oYW=)3yjM5) zN6qEM`-~^_kI;Q{&uzRHSTAI=-3Jk@7qZ!o_X6vMY_{XQzTSvKUI)8~_sS(kX(AuQr1NHi|FYO7 z%0HI>#yHDvt0?@JWz6NgHwE02Ltnx8cZ<)Io=6q{)eU;fcg_X;*F9$f|7D?*UC!-t zZErv4{#ZP1XY^70+Zq0=gZ5DUSc?A8XAJ+Zm+z&|O`HwpiA zd5mKCcb_)?s{>okq)CdlpL73$5Vl*=v074|Ns+z@dFC{1=d+IgVjaD2g$4h`|6BRr zDgMiOE-{|27wDpRHC6mq2j7?XoHzI{OMh=-UW_<0HhcMezpmQ1GyIo@9_s!S?VTRM ze_3o33#}9Ot>^po4%#ANeUr3BLfgTx54w#1vh-sneUtQ4=lk`f^iNXXg!E6+51&u? zuU?D4C3<8jzF-x$_*>+w_*>)~7$svpTE*YOCNMBZD*l$tb1h|lhkSEgcPX)={CfPY z3S0aw@>TpT^2I+v#orP?gQZ}+DlGB0$W!sR$TQb~SqJ`Eg)RP;_}q~`Am3QSHrFj# z-~MHVE&i7H-;s`zZ}?4^et7uat*@}e-x6Ou(nsW}_*;VSB5kP9<8KMRi*$!nRA8Y-$%YdBXe#5`dtI<;_zMM3r!7Lt37vxrljk2+NnKvg`T7bB+rWPf__3z z)6a|lH4ZJ+{$36(_4r#N>keP0EaNHTyBr-5{55)0#WQsDsna((eN&LVV4Z$mVwPHL zCb8KQ;=3IE>F7_1t?StKj&1MP@m*(Gf10=heXMC8W5(dS^zo&ATxsIFQXEsBR{Sk= zr!%g%#4&X2P)CnCb0pC?u;|dUZHJx>J$lyhTe>|Phu_lsG-DppK35on-{M}G4~)TY z>2rZG_$_@d(8O;gnFkcx+re*H@LS!9zh&uj?Q`V@ZcD}Any$y+65Ps@;I*bFiNA%7 z(LmDYx?}NL`kb~g*KOj7B>vWPOZ+Xt6p=Pex5eL*vg-U8htJaa(U_w)F*kxcqwF#G zEb1WTPcS#%{qn+_G5D-rEU(>;v(wYWXX&);H2zjE@#@;k?hGUjlwf zDw}rSl+)y>D2$m*q_0vCfJ3TVhw~wupV4WVcOB{FOdtGJT-!`yu&typ8kTDDKFi zL$~Ol^g>tgSEosD`Odk3zd9rFx1fof;Q4z} z^hZ9U_^Tu~vW0(QeR`eyeb~(sGunuUnG$_E;GdMAPryG3eW-Q*Nw`OZ;a}Y?{8bX$ z&7?_+ww`mpj=xGm&!jw)B7NogIG#BTTlt*8U$NFLzQa-bR*%2M{|@6k@r;Vw-Mj776Q{q%9KK4u*YjTKFryA2aEjq@Oz9uP3E{lKLj3f0BOqe8OM#TKugF z%Rb!n-KGz~(gneP;F=kKi)#j!NUbebSoQ_QAHcu|DJ&M(%=lYeGuLsKg2$==H)O=$ z!XMkf8-1B;-{YDYe~W7-E~~;4e+%r8_#S+9X=896`DXkr^3@t}g}%qA9iK(Mioa#Y zXI1F&x9s>V@>TpTJ3gyIj~8Rb6OgCkZyETk3O)XofzRTXMEb>&06Uj61Su%V^rsnr zi}9v2?$;XL@UIJgXlWONCieD`HtcE}skH#7&vN=Kr_XBZiTS9rp=Zx3e#@dejP*tP z+@PVa>~o1Ge#vE>=G4!D>^scj*qg~Coy;~VsEMVTUI>RbUpr-j^|1mf6KzJG59Xa z7}uENRu~@Yq~dQ`@mt^>TUbcA&y{S?H_qT^8DQ8h`5y@=90B z-fg^>r7i3_&dw_e?`7eW3li^Dp~v5<(D7asdi<>l9q&afB^7_mz4&?6|2iD}O{BC?Ykf9Nt1Lb-e*5eL z4{py+(;Du1fA01)P3!-b$J+P2^Dttrf_zjf-=pk|M|ai-=Uw~sRPqb!*NJnH+|Zmn{5nkPJ|b?){#lsAX+ z=DGrHdEcFN%y;*kWB$8mA6s&r7WqNI74a7F?pm#3G4Ec%ySJcUR%zaG&7RCgsmr8H zt@4|^Th3c@dGE2B?W6m8!k1@KSLpKN46QQz@VxDEJ?9rIo|==HvpMX|4p-s-lFhx< z>7MZ7wwmp)xpFpNnVA=UjqlCjd`xC`cn;?)w7ixBqgGrTa^<$X%kfcHUe*dNyQPKm zQu5^5qaJM>&ty&JnD_qN$HM-+@D}Pe$(0knf|TnGJa#Ybu@oA$dh#0IqU?bMf1Faj zV8@i&v z-f_^QHAd34h9wUNj!h#~U0=7Bw%%3>o$jHHchkmF?=k6Gr7K;#R?!1K$fVwqp9MXN z)3Y`gADOqkn7S^~GQ*j!%Y^KRydowcq#J|+T>kN zPNSAy)8x*5aox)F(x#_#=dXi)tuy)d3cb8ply@KH-4746P~N*J?_|n4=3(s^GO}$b zWnIkunpXSy!@*5!H+n*E2tSA6=h5(U=83Xp)Tu%EdJZ(4!?!)WQ{=0Px*UMVhI+MD z;i-erq&7X!G(Wej=}}j9_@x0gO)p;>XsS)qn&#)Yn(F(zn;seBX{y)UidPp?AK_Qo zFQJ^anQ^kuwZ`-2GvvPmFAE5-EUWJK|MfuRd)3kN2fr7H+%>H%(omsAp8SR@@^Ga)@}v7b zkw>PdMV=~5k9_~{Ga~Q78_TEkiAekUGvKX^=)BZY{|hG>z;r@PQ`?#L5=@8P|)BRX_rTc2y2 zq18{(pJUyDHa}?rIa7C8%cSl6N9=##@JyV|Hnao^IxZLkibr3z!+$YfXIj=V?@09$F zn)lkQl1*!`xMtJZMOSH&@YT`s)WREH%3Cb$JECr_;ty#9_(RHuEWx*%)+xTS$m9&( zN51ge4Z>d&sjtBwCm*;OUbqRKm;i5#heygIC+;naz%vcPJJK(SF7}d&56pY4c&>N( zLT_ex%=Caws 
zxf_t9)~V=q;lDBHfW7F{58$Oe&|xDo^*Vjk{JcQZ~kuR!(Wt z?*3ZSo*}NLmuai};FGM|oNWZ_MU!#q; z!Y76BiPU2bd~!K-m`wc_Q?6XFYnsIKqE8mPa>IsB znGAo3oK1#D-$>tr&QiQl=&HSOChz+kbf<4w#^{*)h;XeB@gl#RVnb^fy}?#i0*g3KrcTk3m;)LCLq`&sAJde=PP~#RVnL#GU)u zE1h^jM!ifQRq{U|-|($7{i*!K1;yvw@aHuB&&fYZzTwYl`k(uW3+lJT1r>|{c^{Fd z;(`k9+)rFk;#>VyVq6J^ivOoLH`gTmQJNaGGuJT5zYm%kG&1Mp{lp7{c5%3K^1oxD zwTV0TM`BIP=uM{&a{8cX+$E_M5BaZkB+QVF1!?oR9ll8Bc$=^f%E9CDbe~+I3_%PNF-F$0Ut(s?-wYZ{NPF2c$ zd1+?gP|25APjSsVG_^pVGuP+JW!^kGUtXr>%b)eD`ErHXHrsV+>^!-F7dPg?4Xn6| z`I)cB{1mM0lf=x-PZIM}FxKt!*s&OKeZJcGzKLO$n4kHUn4f}UmN=F9(U_mPHh#79 zV;nwQx3P@5XcO}*@jt1D8UIu2k{_*0f;sr^mz}nX!H2hx2i3dn^3$F2+U+8B5X3m5z2V62?>2>+-#I2$e;dG#T$KI{M=c7PB0%hBc4 zM_=!wulLc{`;b4CkG%Tm&wcdgKKk?a`^l%z_R(ki=(E-R;(Pgcmex?}3ZyQhdf&VZ zzliq!ne}YDjM{w@{lo+H>+wJZA1?7g{dzo5!G}vc(0n}}sNlmT9%z1&c%b6@>AcUO zUjlwfDx*Ebk4a=Sgp7ue(GW5kT3J_^tIKEz84V$$A!IbPTF*yDL&#_d84V$$?e~+9 zjE0cW5HhOn7ysPHGqi?V@rkL|`ib8hI_Tp{@ky6OIdY$o?KE^ZsAjNKkzbxfX$(DBRU*Ka- zz=s{2aO~Sv`gFigDYb_@pMalI9=pVhMJzldHp&rFjE+h)_OS9$ing9fBj^5vW3#T( zCn*o5NZ%wpl(KDY*vj>OH$Ldt$Bb*I-k{sXT4K9+N&oi#(6EbR{I=mWyB%DWhraqQ z{&m>q#yoJW{j0~jWZurikK=D7zQxXS#8o{9=%RQP{J66JWG?44+f&{n*o{v6(FFG0 z(>8l;3v)}xn#hvNqW0QW^qS0__BF<5IzJ||)0)7unD!ZXPb+`g?K2Y-YsZi4bIj-a zb=AJ$$EAI%#6IeT9_s!S+R<+JOdTAxdzR?!By-$Ke;DnQ$ljryEageqzh3CqJ7}MT z^hnY^arTAPju5-jSa*nNlSKA|wM~-rS7w`Bpx2YONs{^|X_L7Ax;q$iEy=Gp@Lfv= zq>k@8sN=hyV61K6x#rA6t{2Ac^JUrvOJmomO+N$jCB~?O=d#2TNzo_I-lsWuE(aU8 z-SJVrpnQ}C&(#E;%dl&Wqa*tczRT!q?S72LnB4B8WAw!i#@NAY&neRy{+96CVyip& zF5=lc`wCcnTNT#c(VvTg->V1*zv6fHXi)K&9emfHhq89Cwb&WYKcV}(JGRra!H}~* zOR!p>-}+m!ozB;>roCUckB52#C+pz5;`ehB+xtSscZo00FNbN{ey;nD{#^GFPlWrL zd9!7WQ^$Rk=(w*Zz>ci_IEwX}d>&)H*hetY{zE;Emty|I!FMf*tTB5@+Fnk9^yweXC9Bo zjPq~lNvxNWRrZDcx$dK6%Bl?mwq<`i2CV6aQ4ClU_H;)Wu&Tm#e<;J(H4z5P^l70_ z&K(rL_9|IfmUm4>S3olvmVNY1cCstlc(H=B;$+`c6!Gp!w(*`fWD^{G? zl1^}9xdY;DXTcO$ZRI9#VP^k$ns}nq#UVM*^(?MT^{?j$J}l0j)3HT{JvYUI8=IO0 zH)gfts;pR-3l#rlVs(<#Pql9^;ltwWBBQ-5d!yy*xG@9YXW+(S?4j6pN@x#R>Ulw4 z@1UI$)>qvhkK;>-6N|GmjCRuPjDy+jI$!B}t9@Z@n2h zC-7pufe$<5@xQ?0s`y_Ow)kJ2;<&ba87b;&A ze9M%7S%oeB7x~IhmwW>!r}l3L7pCIaR9NDFk*DH+k*DH+5$j9E|EkdAfAL?%|Kh(I zg9}YbV=c5(b8|vd(tYF`G*WXILR0aFgmy9Uzl5fw?^tN9e4>P=r0aFssrX+)PtpUD zXT<*!dXjF0b}Igt(2`W+(6T-LmqW|oKRA3DJl+KvkBR>U?HpNuH27;rZw3!JeUQ@! z`PT-ocKY++h6J{e*yeHgFh_qjICiJjp!~=k{psk>b3}jI`O!X>G;s}peOH|Lekh8{hu_%Mq- zYxpbL=lD!L?br1*&&Zsh8UM>N?`OvUvdmMN@xLr>qvC&cFyAM(_l1lPON0aK(fD7| zu1ll+tbHC_#s4B^mWuy1U621oTqG6$YkHFSU$OJp?HDV|e6=x;tzuLW`|C#%|7*G> z{ueQ-RQ#{$9mM~#@MBE;FUuI!n2T01o}?ZU|BHH<@xP=l66cA!B$$Kme%awoGyYdE zm)CB`*=eide_80-Y5cEV)utS%L=t3r?eMeHvX|Eofe{{=ou#s8YF$NwVsmx}*2JxTm8=Y0~lZPd!yqW zhYqJl2c;)c#ea2!pXED$Gx5JHbh692U9Ro&?A)IKFV+D)75{dI|LWxV1o4`Jsm6x3 z$HA!Tjrd<3*wPk$YWD+n?l;G7U8PS4{FD-X<@p5sl=9fc8n+`i3BGHYj@?NZdpH^1 zs{>olq*02tp>zL&&{ZAKGbsoxCqee2k;^*;;&yiT}m_4slw} zbBW`r^#WZKucnIc>frnGp7SRDm&G11u_;!3)CF{1wQpzmE=zsY{qeY=ID5!$_w;D| zFN^(RX`h7s@A-bcgZ4>S-z4o5XJ1(Dh#ramW$EusnkVVc&iCs{+ayVSle9@(f87&! zE_)cT-oSVDdi*cZH%lFVFp0fk_s0{ziLUzO+2;@pES!B#%veLW&oMZ7E(aUu;0(`g z%w~u2zby8fBm2bub8xZ24;+lKgE3}3(ZOp!7yQ_QeHUBZ!FM_P3Opa{+u9!g%fWX! 
z_%7x-&joz!*}x?@_^u_6?d0sw5_I;uxp4LkIvaY_!FR>)=OniGg^cft+n=&G<9}J^ z(9cz@ml^*nc7Cco{+EOA3jUJ4-5h*ZV2Xdf)?i=9u&+~`OZYDP-llQ;z4u1^FUz_( zwDB=UG5y@2jqf@Kk9{I}!AD~A3u z#w$tiVJXG`>cHMKc_T&p(YgQJ$AiV$(*`e{&G=s(;E5c3*tvn5Q+n_m!H31!b4HvZ z-Ja{Q_+J+L&BU@KvDeP`>#BWw2_F_`7a8rY`4SK-4d^mt!>OS~`gRJHjJ_uJvW ziuc8TwXP~OrC%Fsp`DtO6q=HMANdB2)EtD+l=b;*pj`|;OlV5}cPzA4@xFwne&T)U zv{UiEgr4L*AbCc-FQKQOcwf*?#rqOk^1sHRWqZ6YhnB&AaQHHKybChkAwCRRI^63(-)WQp$(kzye0mZ(Zt_CC$j)65H6=VHwGUzT}5Gya!l-pY*sWoa7~|Eq)fKe4?pczoDJ z+P0tX`QCQFyI=U{y8pT6u5HtX)wb1YwQb^;R(JEMN|{41%?uox`X#Nh)HUx=Nr819 zy(~J9?zi}!8|#xxW9QH9^W!QG*mOM(SiT+yY`Pu?EI&ycu-N(S_E=h$d2C~TTVa`| z>v6#HEpfo6>v6#HqjA8trfZc$3q5~M(3iF2!z^P|V{TeufTa8q|BLdQ@xP=F690=j zB$$)$ewk?(Gya!Feq!R4^-y{3cATB2D*l&+o}I@3lJYK%+Fw1*4_z&5x8r|VXm8h9 zcHU6&zbt%lCgXqE*S-ZGrer!jaHvVghLxm>H!;@ESWm06;!Uhrp9($xmtT+nRiR_U z{CfPa>3aMxaAMHKcf)i&{uk%?Z$i2@`anqEj9;9! zZ!UEO4s}~jt$qL%dJHhX9s{gG$Ay6rLw|*?o36(I;~Y5+A*az8V4RzB>bx(6oQ9Co z5ONyD)j9Nw=fKoQ9Co z&`LeOei`?WkDP{(Q+2=i!YXJt)Vyq`+bP_6QP&!~b{(jij6^1_0{90zc`mUJ6Q`LW;=rTkc=FCN%| z9}D9q<;UWW+Ff>E3%l*2a8H&omFZKh+x5mc&2BF{?{(b|^31ogN@pWX}Y9zay+U3(O=g$4H zda=9oQT&@2r_;Wth=p(M-&1{9yq|1bybHY#3(|)L>BEBbVL@!iDG~?6edVDbc zcZmP0aGpC;^i({YD*mg3c9HjVH$Ir^XJtRul)SeStO~!P6 z4Li=luS>j9el@Wt#`=kE+;*W~SMA#w{>x%Nsryr~mrT1z+RLzu^!76BqJ#O;PK?X- zc9OBTU9VKpPL?)E*#BPW*E?vVg!D+#MsfCsX=g|q8Fq%=Mq+0i%x>5F9kxr7{>*Hb z3-o%@c1cp-B<&K{pZ5gr%ZL-!TllZs{;A==a&`RIGF@jc)7QL3mm7Gnmbo!w9tRJ` zUYKuy0sGM6w`cfc*!}m+cwvtJSc82n+ORbpS~@sIiM98)gi{neSjpvDrF}lqz|Gp{ zZw>z%`}~-L{d91&4kp>bcPq@i|H8+2i;eE!znpyto{#l;ZI2IT!P^*faE{&S?CBu! z3+z4|#$F`7Wp|zp+=7GuB0iY2pNq4fi{IHB=da-BKfVC=89Ez!)WLtn@BbvW_l1oA zvg5qUZfRe$Jcw_q;Jl{lI4@(KT>OcmbLCOY*XHvW^Hq`&?<0_4e#*grG57xq3%1U| ze_7&#S=OTMbCu^3{>xb7F43`G2Zyw;r5e7a`S>MVPZ~nHQTI_;rmw>q@^7w_IoPj+ zYu)gGgVj2N*gmy}eSu-Wl&?do@~N+1FVp?$lFDZi9{~8(!7Mo#w_d~aC6&(};zu<< z9*@n8^I_>p%$FV8HRV7%wri@6?OM_Swkx-=z3=S^+hzKw=svPm+0GplZ@Y?*tLYQ$ z;Iz~nxiKeh_+HrUd;7Yx-G;Nzp*r-9?zP-obWr-@0>yucU+@$Q&a0#ooR`?%vQLB9 z-tFUV_AJNt&dp6t{_Xs3=WlyEI`_x!McP&RDE>Xm_%Cy;r{la1_HFNPjIo~h&zk(D zkMXSii-Y@8@xOZ%$7SKAl*W!7@KVap-xKc}ALn~-EF_$`xvQna0u@=q6@PryGZ+t%VYZpU>^?Eu%cmA!=Q zdmbn~W%z3ASOBY@+L{;J4>~w4;&h$I_%+3|=Lo(_?6t)f99LBm9GA?!B(u}D>bNcw ze^@so*NQ*1+gm0^alI zurCDHC3Zy8_DEzuSlc5>e`U(b1$sScdnBoElJMi_MQ+{grt*!shy6>nS zzpJJbyq2Fmc?!l>_hqe4`bGo0r1t#g+IcJIJ|6~KR@a3h8g5TrX14BPk{S~gM zI8^S3+lDM{4BkM@sSQJKtiF`}XeAbv>|y&=^5>C%YG`S7h64L0{PeSjG1u&!npn z-wXN~G*$7v`0voNotGV2Y7Guw>U;ioLB>0b@8!sTG(HXVbo8jxCpmo*bLvk2p3qhj zn>`NS<>*gGe==rPe&CKx@7VN=FV6-2Y3E0UGqj8?&G=rHai!WH#9{}j_+DMGGv~Op z{>I#?Ix>&6m3;)vwGg|X%To69M1MNteP_JyjQ5?n6VV0d+dS0S(6eV1|7B?-!%xpX zCuq*uGDrIl+;7Ywn(@CZ^Mq#nFUuUE8UM>NN2ua|bud3Dw)usO|B`j|U#1fOYeG9N zZ0nb!xG;%Np!VQXv4HgWU-*g8FB`FYqWfJZ=#LZ4t*ba-5<^So&55Tav9yLfxi)wM zF|^dYIB}CCmX;CU%9uM=v9+*~E%V{+v9&C=iQxyTuuJ*G10yEbdTTteeByx-!)k*b z4{T}zY?#;|YQ5INuSR+7{A!d>VSuQYQ71D#81=eTuT!to0p2y^gY|NG?RKD@#wtFT zg}&Em*mPpY5p(Tq^GjFD-tBl{mbS3-jGb3hyf6!&T+s2t{KN~ZSlAx#%1^wjiinSO zHy?EIU6)V1FmOfag=qYzeBy->GYp-OI9{0ZK8Jou_(iRgcZM132LGrzy?9JY%6^x6 z9HMhGUKqZN$Z5Mjqn~(J#JoaIqw&J>i5CW*3OS9&f66Cb7%{_;)5P(@ocB5OOTsTn z<<#o`(hdGeBBycwGCheMGi`3&=jrDkL0;v0dzI=5|}g?z@q6zK-#nb&T%3*AhPrdK=?hhrUt&tj_45 zbVjQ9v2O6QeCJ$>A9jTWSJvb4!z^^N)88)F_VKZEKk;T*%QNO5I-`%`-_G!39khq) z)8c({gN4Kia}g)3h&W-yo}HriX=g8fSO@mCg{Rv6j-C6>@myEw(*aMVMBgMlmGXGT zjO)=G@xwZ>|4cfiXfHbVgIDW-o=N#AMfxV;qm=FKb3T3;>-2pqEch$_-^zdSEAJ4O z<~(QikH-2>w4dyGdMX}H6<^jtyU2UaoA_ZCd%?uaSn*{S&~??mo#D$Y^;P$$U@!H0 z{4k4sV`-y={qy;Ly@NJNSl=XV6lZ^!c1BOe53}@#Cf$?tcjx={r0tTVzDe38u0QV% z-pq&_W*mD9f3{>`YWTBU9e-x-&6Fs9*gVHS&F;fujDPLxtww*ZVq#hR1`U4?yZ@jW 
zU#ts0g!VDFgGY0)eGX1i$DdvxILVm!Vix<(k#$Gb9a+DyWZmh{qiYI1VH=4J@8Hjz zeGs0H^`UK#FXrIS9Q+ycpbq}b^JfQtwm4|4kvZf0=w3b!w*TDF4Gv!}3ii}KNOsw_ z&)M|`&ey@8>G8)b`%{T+ej($};^L3p)Q&T&!q-*ek8M5O_+!igSux<}1Lmv=JXC_Q zZF~GN%N{BYzN{U?=itjM@yIN5pLXBNa|vH&?$vrG;*nX_)}e`yv5e_U=U~g$`oP#a zSh_`lguVw|>+xp`Sf_~XPi$=Ph!gK*s(49+OcC>ACF?kayy7ewuHSLjD6BiHy+s%9ZP16!wr8A z!@t9B*E%>a2j_Jz;{KF=xIpn^63=3b1t)gK;*WJOCb8=ayPlvwi&=Yc?vKUG+UcYC z_blVb%&}c>#2@QmEN9`3lx%tD{slgCaMFEOI^d<0=&Nj@c;EUszir(XIy3RdIv8u1 z{E(t8=-hwqr|Ix%)HVSSUdO`Ltvqw&Wq_Jc|9B=*Ajem!Y>B&lzb z_K2JN>JA?4l*hg8#J%pw@jE?{W1+Ojhqt9i4hJ(LCm-k&Ir&>x!x2xP#t%HM#={=1 zu_-&y<}EBNC}S_r(HvbI^R?RlT;Eo+eZj}&%a&&Z9vjM-c*yToJhiapf$jT-yCUED zVD9!G9<6!oRrY7{x(04;9;HQ&dj>S7v1es|AFbiO46Rk-*zNRO+_>T*t#z}Tch}@E zUzz0%@8Wy^(>nHXxz_9pw5`b2T8F!|%H}?Swm`Nw-0T_HC^&^(r%qKid${fy*s@X6 zDxq70R;x8|+$QB&$Mr8~4-PLsaq1VlJcAm4$2)mu(1MTIXO-)jl=t%1d5=v#GH-j1 zD{HemEp6IxQd8^P?NZJop1ejWXGtHe@;P^IctsZFH##PKDL1Iho# z*FQP!7g;N&_@PA+dh&fwPGhMnt3`t@{tT_5)#Gd2oxbUYhk2&nyK-7R&;8`})zenL zK4RMM_`mwD`e`HFwKsC_Ho12wUxh&-me+zLAsT(;5!5uh#C*Xl*}!DF4`7pVHbUAFJ8^U!>p9)|Pr*X$_et z#&6%(#}(OmNm^w4`*XLK=4h=gmuPK!`)F-buhuGyPn2)puW8}^{j|3Ip6t#0hiYv( zF8I&?*@9f?yTJAE`j$%qZFkT0F5mx|K-;9u%kn zKdDtNI$6JdBK+WWX^SM@`^3TJ-`h7xYnz>~RUYjZXd7Cnwa#Ilx!IF7**iC|Ozzu1 zMr-qKx_t9KzEjM18XuZ|>~nl4;u(DGpYpuneeh=vWoaH7Xxl%Ivdzs}9`StXSaGH| zdXFz_^MM_=MD~3mT9&Hgy#G^yHn(fg=1`i}y6+QO+Z@_;LFV|jJb389 z&GyUZYRjMHU9&Q?!o_o4s*V%*wrjU`uXgF$Y43Q3G-|HWH-w(zN10O>Xj^b{!un>) zoXLBd-I<#Y{J2kK-#D#pH1%#ierj9}vbyj8-4ZF`8xdso^0|4-cm4F{$ZWnbhPJ8Z z|4P2;^XBnQ@A7GUPw4v|$M>^yH)qbBvAwU*W^T>0wVwQA&9qDNEm~W%$aayo%}qHy z-&za@vP7{`v5{$JTnL9~+dO!~2D9I|EBpucP|iFu@3D`dQ99q@hElzkAFJJ7}-X@W)VO z{&r;kV#+y`cW2l1S^iXZcDTiz9S(Ec;kmd)t_^!=^6|H6+X?8AuaR2N3p-sGhcgz4 zKDZ2hFsgcn=i=~1{P!dG6>mI0?Q!yc!nH@lS<-lOoW$j*V%KwB%kE%I}Mb}V&g zwKVQIRoU=S&Gr=@ZGS-N-XWU@A}_yp4^gsmIXn|VRvM9$>*1}ZkQFQ6hjpGm!uy2Z z7uF0~KHP`Q&eQokH_GQbg~t`IU);E7RG>}xy;bN)xvYGDknb+&qivgb(Xnw^(EdQF z))p2XpPRk>0J>oz?>mUBT`@0rc}aFoIEVZza&yAP^SsNO;O%#_vp4(Tp;wW+t6Y6I z3x5~8yy0TbBk*}EGPKXrck@1YeILAD46oPKWJYc4U z4mwzPzsZ#rUYwa1UJLI}g7<#|?;{(HTMwVAtkHQtgR(zk@_yFlwaDT=_zWp)%)mmggWPa_dqr)09FfltbJg~3U@aLl<0~ae9cv+W$9a?tF zN@U1;sd+2k=hkLkAeGL9@#g16%%Czx+W~cmL%s61I-WTqp?RR(vw5){x z-OyCtKbAgvrK@k89Ng~d8@@@Gg9%CGU;=VLAJH&&n6yWxl7oK}Iq>8*PT(m0%>vg= z>oe1}zbl5m`>{caU0I7t=|8;mAN%3M+1Mk|KJ)MB|0?JQgY<{f=ohEbXHJRi8$P*; z{!;aum&|qD+Klbub!CN{>F1RGq<=H|v}XDl@7zn4?}KNh57|e5=B>Ge{>&SeZ{2mO zY*|eot@R`Lrs@QJ3FWErc$Ew;Kt7edC%#q?v(BU>WlyF8cZaz4P6^XJk3Qs)D@Z2zAAOWOMn z^KzEAKp*L|4j|K$YjT!nV{2_emM3NAgs0H|&*A8%y{@F}TU{3`d43I9*~0lczIhyZ zKH#}{^8w`fUF7*S-<+)AG3AcvwG<(tAo zA>{cjX%}o+kMO-N$LZ;rN{$a8#|w%wmmBZSaQVaDK<4rT$np32<~aK6iO8{c?!0By z{NIQi|C`$zev`he?ZbJGiOnp1*(~Jv%k*V6+9j$ly9+t?s=n;57wYHSl~j()RbMvx zg@@*heqo|6$ET1h>B|ZzZz1o_tMM%V4_99JU$P3q&uMw#N3-(iGYeYe`ir&GV*0ap zJ!y^ix(dR3@SiaHv!JISe2D+a`m^`hC-1NHXRqn~*~gy2ji=m$TV^27uc0^Z<=bMT zTKlspp1knGgQNXfy-V95*X{jTeY&>cuk>fMBkfRN%JGMxnlj92974dG_Xr^qv2oy?2j~vby&FpUDI=nVDRW zJ5m!at%M{Bl>#cr1W+*viZ$nZJUzY+AhZWaP%N~i6(uNbg-L)^?6KhM0E#UW5``AO znp60i08-6`OSM*8e+@T9lW?n45(|gl`?KeHl3@rpFTI@8KjyXeerBG1U2E<2S!?e- zvUQKshk>k%IpjA`zhtc(xzCJG2;K(0kKf2H)cV_FayX!@8B1E z+4f)z_8{L!%TND~))v_$PxaH-dt2?nGmplA^W5OPSk|)+tZN-v-{M&3Jm9=|a9&Kw zfmN>1pTT+`p$}T{5qM6GdHj8!)$#{$$sgRYCGQ~{zX4n1@e#(`=eAnvuC(%ogYUHN zX$|to21I|qiTx0Q{uzw^XVUlwj@R|F@;=PZ3Y|ipPO?sn>SrCF!MgIQJJ1r(8u#&s zS=8UXbRcVVHawW^>bZC#>+=BC=N#sWJVP)G1#@5--XpFf!74L-$q8@~f%xr#a( zS;I|v;4lmMchMfo*gAijoossBCXBNaQ;>7UaMVUSVSaW(LhkNNOZ#uBYR zV3#^?LU1S8@OEEf@D6a-1lHlfeBx+VKI6VKJ}EdDY@f_s((}RktHFt@T-F1b*w)w< 
zzn;uHTN8Z2Kx`m$vd|4&13kez6Rbs=uW18ol23l*LTc)=tsYDbqpSi!WQ3fcMR_ExP!0QcGY(5D*Yy! zpSELHNyZj>WLJ3`g{$90?=E_{Gjk9hd=MLaUQF-cbo?*#@lSjRt~q=ZJ2<9i!zB9b z%t<`+;mpNecjwXz&ByIz^ZJ7Y=(5`x`)lp- z*V-+6;i#Dht7Md`b7Ljr4-c8~Po}KFjQ=F`uEqF2;Oi7jMW5(BjlX;{DPSUvzw6JU9Y7yqH@v=(@&7t_VQtj-Pj$xz7kXU5 z@7v?Qknz`VqQ`$B<1ag5G~++t>fAd18XFJxtZe6fk{{cyO>?3*%SnSy`*t20jP%mUlGVG>~O& z(d{zxJ1PYm7W;QRw$eW{M;{{Fr;x4Y`?ErliNz1cuY3?3{}h;THn>81V5ZN?6R(^j ztH|j-BCOyH!>rdBcOw(E5kp&&aBDXCr)B!@5>Ly#-1BU?%hJ@JL76 zQ^Z>5@W(_~e4_<^5?|bj&MgFch%ZXO8{!M;(Kf#5)tHKoxm3QmS8{3dh3aYJi}+T) z_k?REFS=0E?O3FkX==3$(tQ`dBOp~1|P zH|V7exyWxOAI(o1Sb(!WB}MVbqN_(^D)W=bdr8dCAXie*>KMpe>P`wC0~2F&nfciX zt#^gzXU{`jSEhdS@aqSrPud(iW72u$R`auedg^BPjMUAlNAt56{Irk$#2~k$k=uUw zr=~{9?IZ4(;Aqd;cvQcMmRsS`KKKVL=}nibb$&jLzjYmY@gM%yD1U3hC-=8%JpbWu z{fzvr#2p-e>!ZW_{RRE4gN?s+lYFiCTz`PC^%MD9X@7hER{2^b+w#Z$&G=j4`A_3- zjpX}(_**|ie`}NsJi;3D`TJYX&;Osu-%20a^S4IJ!9V=1VSlT$CVwveR^nK z{)_utqvUura=dlMU&`OwzC2&Hzm-0;=WmUc;N*@bd1zJAW(kcB%eWr+N* z__F=2lyN?PYcxN7X8u;@<1+lMhTr}s{jKed|K<8yDdT+p*68v7U*T_cWcAYgt*yNA zH|1}Q;tTonF5BNq8Rzr2o`)~~dj3|)^QHS+&7A#J{jKfH&t>~tDdT+p*68^;ufNsp zChpmRxMxS=9&yA!Jj6lbiHCTJd-~WH9aHilaa_ec|77BxaW#AUu}?s;&m>pqO=6wS zdNqzTd!Ad~u=lv`Bkob&JG{Sj82;<;d_4abilJy9sp6p5yV>`1b$Gw&3&cQQQ#_RT z$b-bqWSbIqI-WrM^!2@L zWtOjd@J{-f%18U)6yGl=-l@1{a!i-tU}B!DT)xIaVs7lKdLV)LpNq1dW`CaIosT@! zZKd8hKzvgBU+|BYOi3k9M_jQI|NGP=7csGz;G@Zb%*c4B_Hf4HhjaFEKH|4BRgaf= z%t7Lu@4DTk?-8&5;oSK@`dNJ3IkC=XD<^K&9?G?qDVq;Q`Rbpw0$a>?#NV{9?peai zigjvlU63~YmRP4VP9L*J%ZW`lGH2N?Uu%3icC13e8Bsw-0^0=motxN$4Gk<2fM^MWbPEN&S&lv1M{(;aXm3G|NP!7bBWI? z26i8FcL(z}h>wX?69-egbEm6I@F=ks#i%ARZ=HC5Qi9jS!2-;eVqp7-XKKC_*Y2bF zis{{GG4}T+bs`pG1;<=qzT^WN&3vhR&DVb7q6Zj*gPcRSnfdy9e1~&lqu;mZYa#O$ z9TWY&Jzt8KE@ZBVjT)Vt660=M5HVk^u~GI1wZ*{Ni;d=>3$oD(x1k%xp(Ac3HhK%O z(VJ(Ual6f4KgC7|FmBV3@AtKb2pRv7y%1(^5VGlcsMpFDh?l-fyfiG|p5VW@dj|JW zcVyhOnE7$yrjasVj=YbX-*e@Q>_Hj;|GdxM`MV_l5pmQoKY5AM#S=^Qw#HE%-Z4Dt z?0INi*B$;*9925ttT^fhVGQlV)4N*Ry~I)*uNxDRuE|%N9o`vFzpvqQlzkbJd309E z(fH0m#oUVEopN|*6tOqOQB!Q0S3LDNG4@nqb%Ws{y`$Kgct|{F!9$9(OXe3OB?tQu zPwRI99x`h?JfwQWL;Kj%xgU8y0AKcphYrRQOOJ}v-E8yFSa?X^MaJiDws~kQJY-_) z@KGNVU+;C6%xg?c92L2Dcu0HDBKM8@ko)f94}0D;k1sKgnm_4*_YHq^564%Nx(DB9 zpTrqr-zSN!W*%X$80DRb3Ge%ioXfWOYIi%n+WuVH@l`XITJJwd9}`MrKiPEnWHIsF z9Of>A&tc-4lKXPzPC7W7xyv^()?ST!k$c5hpHJ`v_p;9_i8)hjRcBr_XPWC|_SM$A zx*NGKNlHZ5Ck6Xmaeh7BSN2$AC*>U!O>Fm!wbbsIP&HvHU@iD=%QFHc1d(QeZ zXZkL3&c0~RS%2oN5A(&EvnRaXSgmq@W}D8k_*v@{kmE$;ISIK=M!q@6^57Mr)9l?n z!210m`#yCBD&2~!8Niwz|4_%3FM8sGo`+&q7J1nBFvHp)-;ZYj^NwyE$ey}v_EoTt zr|g-12b*9&a=ZG`?#=)2(H_l<`>{SJTgQEUt-MT~e{o+` z)4;yH26xvQoo#XUOR40Xix1!$t%m)!*dv#iT!cJomg|!=d0=TaNzM-->_EhO|okCu{XomAMC-G{n3~6 zQOPk@a8!(STzh&m*-JN(^-yO}V&Ln3-%Gjc(bxI~Ct-v1V_%Qf9`W^P*87F%qKVo^ zk1l#QF(&vdd!nAXfG&C(zJ6M|h%xh_i?pZlJ?zFq@b$sv^UvDv`ijlZ+u`STmqmPs zUHgj7&)eZ=?XNmSJ9b#T=|cyzXYXz74vnGa%S~H9WRLcyNB_C`HhZ7XC|8$;RP@Kc zJe2i#h%ueZp5qVLhuV;%{pOrAVt>R3@PN)7q%rq8?;w4YhF+>hN4*LEoWv$g!!D8D zX<OYnXqvb)YM` z#?RiZMP{9$4YX+>={nl9m~%_!3=?!c`_P-*{+h$So;ADRq2s?C7rN$#`Pedhk|!#_G34S2V}dB z)_P?1G(2!1(F#ssU)Gch@PO8%d*OjI$fH4H<8oBV5N!hg3d6ApXH}K_{_MF zhjV!c&~4Ib+V``c^(oBDJ&j&#kC(MiRXX`Yc=_1#w;Efik-Col;+By1C2Id+lh0d| ziOxN89d;FCp6u0H>kl3`JbI^8ryp>JSsa!JlbY=v!4{c z=z9^n`(F0e3oA`wFa2or%NX_|<+Gn&{JNg^r4wa4Ne@oF0Kb0y0{m)pC%h{BcE%NB z_*MHtH9pQXdYgj z-wVF*z!$yF-p_dGOx`+Vf^#y4$LHJYV5D5g9{WJL6@xJ=Z207GKk-_xmG4t`)77_PpY<|*_`uhzW*~gh$Qc5)uROk6P<>khpX6ho ztwNVc9>>Ec;$P`9*=bARljmQ%*4S!G;gfUiv|ff!248}m7Gv9Kt886n_z+u7c3Nx{ zZ$#Q>9in(6(mpf%3UA2HU*+o5s>||`ec3hAI|u&zOn30rXW^SMw(sAxZ>CdpY{aqU z+x5+u{un%G>zcn=-%ON`Z!5fXS-u(Gf7Z5<)?ypg4RNNdqye|6ssbJ!M7 
zxu|cZZ4OyWrQ`o5d^1t=^$7EIS-u(GkF;mjM$K2GJ@XIWOq74t(E*p{n_2cZ>zj$< zA=$!xFUL2-`!`4VW+G+&=D!EujC6sC#axna#?0kk)i)D0XMOE8|B`((ydSyt%Qq7_ zXOV0FU%)qG?#TFG>zlFl$S3g4w2s^V7~f13Uq20BKYb~_8Qyy($~W_M&lj&6ON>de zpjZB0d^5I9@(Fx1t-SDe@0*F@0j)>(UW#vq_abc>`DP+{AkvokEBj_z=jwmFZ^qVb zm+6~n9iP8h-%JFL$~U9)F_-O|iQ-rJX3od2|4V!`t@G}IZ~jO4W+M1RJQ8EuX&3d) z@Lr^SCf`gXZ$#Q>m+YJ2zJT-H70^JuV;b?BQK{DP13$Dv>qlx2%$V1G_&*zokDtI7 zp5ylhbBP-kaj!%Zak25l8q1U>UUqweRZ^=ozUe#g(H$i{p{rGLXHR_bUm(`VJ{cFW zwSRA#_j+ef;78YxFE}#q^(Oq=-{#(lJyWb@sXx8tG;~9sj+G;0u*Ls*o@b@vT21(; zcX<3k`O=9yA3r?Y%G0^^Y>$;!lGq{G#J-z;#F?6ir+h_}nAlCC_zg!uZzQgHgEu*N zJ@=&y_jV8JF1f|=7W+)RrL#|;*|f^)(3nkZPkyojt5dMp6{x7Qtcs!zfr{E#tD-E% ze&(^C`K^jt&J&y8CEL%sK%Hkj>}S2LiUQ|Zs{O3LRgvpF8(=@nuqrZ~XM^l#L#ztF z^K7{NY=l*@!*ZGXkm_SPHLk&j_)Gk=8yTl5@tuMji8l;pT)(9>v2UdxR@xs<&r_Pa z3H1CyrMraF-&49rIQ?Cvdxz6=No%ZXxc}`qer1i9_D4*yVuCNk#03Aqx!I@qJjmy3 ze6HuygU=4jV`6adgRbvmE)|_ zMV-k%Nx9vjPf?!otD(xTr99==L6t9~Jmoh)mES~p%D)6velz8s@`vgtSuK?X*6D&P zdA66bQlJMZL;3fi$~RGl^2eacpP&roo1w~|vMRQEJmrgKltPUuh;rSJhjK_P%^)l zb$kqW?2Wp@YBA+p0)OqXI+o05-^T*(yXm{wD(}lZvF-8N4lAx?B(WB)1vf%;t(CWJ{Qe3bvA41fi&rd3^atyR!>6;B=d(`ND$QE1 zbPee=)^go1`WkVo6!yF16Vpp%zl+MegFP|3dGEO?*0IWGZ#lh_b-SuV<;Wwf4Y{lh z8(BN|SXRYG*3Lb#Rz->O9;otutKtFWlcCCYu_^{D-vg?AZ{CZCra}{-{drIM0Z`>L zcu)C3Q00g4p7O(?%8%f^PS60fD|9UHDL)RXd@k=PKLM(I9`7lCH&pq1c&{gP67&nu zsl2EBG^p|gyr=vusPc1oPx-k}<>y%yom|`<#9bBT+&falC!Y`ZRI(<<=w7uF-TRxO zbrJrmcXenCnzc2lw17Pa(hHY6UR9pLkrx}2`gyUT$^q8t=)TsU>KLjYgxrj zU!$KikDBMC*uWnH@UPl9kiCr0F;6?0*XNk0oy_YJ<(bzd$}_L`DbKv#r#$m|jq=Rv zHOe!uI||uXOuV{?kK)$sUn$|vmg8B(uAjvw>67dk8qKFg@M%!|2)~IR7l|J|?2l&b z{HL;BAJ4i=T(zVOUMz$cYvIKO@M05v=mjs{PaAg8=KE>GF4}yp^0fI{I|AeMK3@ex(dk2mLIdOxYkAs*`?ZC{I7n za7XtP>|tm8#8U-~h3t-e#-e5j?IK?@9IE^XtD*+ob0&fN&d|T}A412&U+KtF&E2#e zS$gdr&AF>Y?aex##NCiqm-V!9lHEqr&m!w3(@*;SEcPmU$A_kHZ(AdLw+Opl*dvYk z4<>qpW!SjdZy_E@+ZbHo!`3ac=iN`)rcaEc=@aGUGIweGen)Ef&L8gM+B2Uy(>P9) z+%Z>8?AL6v=S=An%ujf4PWyK&Irl1w57e&(A14jHi+A25wywOZA!*ny^8D_Sah7k) z**NRaC&XF8R}R(^&JwbU>Xan*2e)%8)hYL1Po zW_;q2O6ufNY0bXv!TJW=Z20wq^xJp>c!BxUomxHSu4Dz>#tNdUgqi@a{-;3ia z2P<9(SKZ4xDqK}yp^K31f>|GB3}@ zK@*|5Rz8~!R1 zmU@eFg{9u1JmvR7l|M*%%D)d)zKQabKL%C)1m!8;3|0P=RZ(eq$_s?EQe71V!dd;n zW6DS1GQaZRGK0gwWf{tY%Q9?SR%wNCS#CPdr}6%^&;lEifzb>m1EU$t21YZO4UAT4 z`O7Q8Xa=8cu<_X@zQ2z5!D|`Apy0JiE3v#1tR`%haU;)~!x%0ayJd{xyHl=;tI+F3 zZ~k?8QGB=xTy-EBy9~bkOY@{~o$60zU%4<>t@!D4g`|xw&&z*iU6%E)NR_YR~OHY?ahUtnyTF;Kle^m@v`Cgnkw8LA;MGDhz1w&v2;n z$jC-vx&Ty|ZtS@j(BPm6yl-Q`2W$*zFyTGCcM%L|FyAa2^UVokzRA2(HP@<`3=XWC z$9EAJQ20+cFd749!gIn`kr;3zw)utd^M%H)KG|x~_?2k<20%3)8MHMx*P8oCoans-oM>=ivW*LwdxIO9dxINkdn8U2Mif?z#EJXc!HG@qZhJVf z#>R;=ZJhW?aiMVFAjWVv?Ht6I?xvj=!iAaOx@cT@Kl6G%EL!ieT7*Z15#M8M)y~5B z%fpBPFi=7SM%)iZ9Ko~k@R%^-aMI7w24VPe`Kum51I@_d)$<07#--pj> z7I8T9jM&@^;&GFS$C+3iv9}|v4|n)|!5A>(6!2mvaAO>pv59n(kGLPY{huiN+sIJ& zV?X<<@MGx|Yj@V4wvE(zy3yeI;dY#^fpunw$J-jGt21#rU!!7erTnIavmvF7x!Urv z(qMk2|Ex4IDy2^;&A2H2-%1msQu;%siBTziKxtxBN;i-emYM-y=}!8q`42J0n~MAK ze_X_yGTh1M#G7)RXCC;M`Bt2!z?ENn~cS20_Yof$7|5cQj z<~^dsME?zX7g%ykeAi%hd=E1Yjo_tY%&ErWC-C-fk@w0#;fTt>0zQw7sPq?(G+2Z6 z(O`{tJZY7c3rAE|exJ_|M%2&ax$L?^sBpqU+O7O!Q03q0lvWv7L|KbRRAx|??6FGQ z_TSELlz$Z}EKtihD8C!3{5!pQ03S1yDvl6L5D)iToqLXjO~r2E2%>mWIJUm|0-1ZT7IkiZm9Ba zP`2`KL6v`pvX$QpRsJBqy8-$>^d@K%bU5^wi#sh`B?Th_6<>os$mc0Of8bMlf|wU^ ztJyKGk}2>h=Vdw17>@1Y$39-p|8Ypx4ztGSPKXS*s~uUhpPeOZ_Or8O&3<;4tl7^} zBW2Bgc9yK!&(4xH`&pE%^}!!F%8u>QA7Z#J?l5-|({&NsbrIur5$kmk^L3fnu9yGP zy58N5XWjU&8^7tsZ@ckMH{R_Q%yxAPDh8ka7WYb6=gQg{{MFB}E)VntDyjok#lR$J zJ2I!=tzmtUjkv+KmP;VvmsJCMC<%T+ND*{hCqRb(sgfhzBJRm@O6 
z8LE62SH)uGk=4b@+p=1XtWHE$Cn7@=dty)Y04MREjRTRn>HuXUbJb%hQ~7aF<#Q=h z`3X?v^C(k!WOlLg$ZWOplc35^4a;mbGFywxN?uKUov5$3b-W7OU?8$rT}XY%UiCui zQT{Qg@su&&Tz_#`B4C6mDyS&VKt#(;amOT8`@fMtH9iYxZ;Osj0Nr>nhgkBdiU%tPOpz zd8**qKCD?)u~tQv^2lkH@~nvqlt)GvD9?JhMtS6Ojq<&D51X$l6^e~l)t~p2M=rCJ zXANASJTkdJdDg!*$|H|!lpn!+*nCw1C^lc!Sl&~998~#S-cx=8RQWvKQ~qwK^7rtb zFxMogFxOPxQ+^s$`2yZkeil^uIlO0Mv^6$Ha}lfSgI&Y_Du&xcM%ox&9LRTG^#= zEEO9_crp_nH#mznn)n-YdpmQdc-`&Ho#J&%lxJ?2D9_v)Y{1+;r#yWT4io;$mi@~| zu{#HcT@CM)N>|zOsApn5!@kKgKfG4k&bSCS=>O(2ng6;}@CEZUQT@J8Ot;5tJLqrWe&tEmgs9NHS*+LS%5s{ zf)N!zlua+Yak1>ih*;ubWTu>0pB+!!P-x4`0_3HM@l`z0#P#T(i61hicQA&EC*HxB z3BQ^6A!BOdhm7eR^jGo3JLqp@JW;X4>~`Xb<*XCJjkR_>alMI66^@K97uh@=jwSj^ zC&h$u;!NIkVqi{8?Kax?8tuD{_BpXc-4`-La|b4jCEYrnu?t2h28Z9u#NcX}uiL-{ zvfFj{KsI+~hZ->^l5oeqEv-?#{Nir&|lo^^J-Er)i=uciOC%|*8cl?FSFN4Kt5 z8q9P%7|Pkpm;-jI<9Qf2d4tZrMqwx6rt82@20MYR{seXsZW3nt4)16$W24#2=xf7B zBf>E^VI=M2YgU|yan&BZ7NzZdd><=q@8kQk()3@yIi<9{kMF;gw)gR!P}<(dcYw4o zlJ-k#51)gP%D)(n&H318cn-dHp20sRHVuF0IM3j5^9&x(cb>uL<{7#p=sZK#&9nZ* zi=Af!>}MJLj(wG;-a+=WAyD>TnrFl9XXs8RHuti<=TR7G4;ZPDeV@ZQr?3ARzFriJ zbb}}u=|<=X?i#s?eV;?TorC@C_@s|JB%HmB>zNOqo0v5+?TdrLvz3Klr9!Y$;Ugo^ z=`B7V&(TM|B|Hq0*O`1Fzv~WN$oI-W235X@u~U8-RQct4 z4-8efctrg@)PX%#Q^~v7V>R1(NBLKw%GdIa^1GqRzrnaD{}xpFcPPt>EB9e<5tFa* zVQC@$39C}9{VgudF-ItGcj(uRq${a&6m&avDgP=|`C95yem7M4H>gYbx1h?uLs`o2g(`oLx|Dw(s(cgA zZ-5?y-UL0tdtj}aW>-ZIaFy^@B-Y9Yd)1tBRS0t}x3QLkv$Th{cz+wt(jHnLG1qhD z)qdu&<;D-cIKNA_pLKyc&(4xp``uLgcl|Bo)qXa>ewHD5wVw^LpACVs@3B33jod@4 zy^+f=v|ku`u0Ei>w%x#8=VGnpHr5i(@)3i(n9K@G9bi4YRN2+<9qhHj*y^s5C!wE4 zriG)nfyF)x*^b0y-!$!Wm&{W8B(v6MFZ04>Q@~|D$!g&9m3?8dEMjFo$!y_YRtE^9 zU1i3zLrHH#JC^i<8jOZ7)4^!{uJjD+W5>+OiIoYXDRvgdXg-6{*4uW3_D^02qpf!^ z8W_;QXo{tbXDu?Z9gBDk_LKHbnz$J9Y~o_ba|!mG_D+`Qdt`Z|^2qT<=V#$ zFJ&|)E4DFNfxUlHm@Iny>gTya^|4mV#m1$6o;w`>y2QTMXAozj|0WJN397bFWt`}% z_KH5Bd_j0TwxhE&zrtti`#Uc_yN|gPKD&>(yc~R%-wr-YhnFJpS-%dRVGr;ulRAiD zgyTE#moq-XXU|0AGx%qw@EZIxQ+N&j(KrdGEg<%r$=ug5UM7A6ubB7^ykg=v@X8|L zHh5(byrJ=Wjq!@aYvOkYuZ=Ny&D*%%$d+$qZUkNvZjgPh_@3|@_PW{oc^LU|@R}3X zi^gXr=Ej(s{h$7G@!9=0KD)mie5O60YF`nUz}fFv+zvjAtW&(Ui*a>gcn&rj-wrmL z&RhtaO=rG^&4kOaL-WkONajcwPUXEudBSSKX3UYnX88u2d4uZ>HuD8@3^ohjQ|0WB zbmChox4l@F+N2oPL~xkyAQErPB>tt?A6T+NF|j|wyO$K(a_|`YJaxD3nQhwt*nvCE zI&!yJ9Cw^~xa*AnqkYgDipFF4;GfoWX58bL%Y5s;Kb)pAj=$bX80MV=A`i>wp{i>!eDgc!{NJ4RD& zT+^=~gJPP-%v%KR@`0EJhUR6*gDC9a7t8Q)D7w2Qun zvJNb%g0c=6-%cu2x}-l8KHoV23ZL)HfFheaSqGLNn>&X<(OWxN2bQ3>cCro#Zx!L! 
zE(fcx;SSiBnN!72r-1#-z@{4SIZDHmNccca;X4 zDm|IBuxYS0ZsnXY3){l7i4~7fTtYUpZ0A0-&#{jchp4m9m}w1Y0&gb}k4V5yzK?x_ zyDaXTgrDzY@8FRRP!BX7>W3yklcAlUU7%f|J)k|Iy`f)#rb7Eb`$NA79RN*(Wf@DoSy6Ho9HSMX!=`h$He z_A1lX8pgW4I6`quSlrf~uZnP}Q>>s(PM;s-CBys%JG+^{j=aL)SsC zg_ec={+aMxT_w-px4P|6_^s|$DEwAe3x(h6c0=K}x;LQkTishw_^s|8DEwBp7Ye`C z9fZPfb?-yrx4I@M{8o2Ndlk6@9G;8jr@9m2m|Y}aedRnnHJ`nFicJ;4TeSgVZ17fX zJG`aeIrwO^v6EdT{{^jOJQXLdWjqxpu4OzGC$42YqvOQ$*<+{JRFU3+_loomyjP@m z;JqTf1Md|v2Z|RLF$dA{VwK^~T<)oeNdj6QC+P52~`^O%wlyH*@=up9H-c zI`#bVWS#q}VqMQreTBq)Ro_CW8B?h0D}t)NWl+_(9IE=@(F}N0apervw;HPY)!8;{%i3a7*=8O)l#Da9W63R0t-A-oJ!`dBkTYe6u(8&HeZFL4AK7K`-03SjP2Xp7 z&+tZk>Wf&jwNG%p(yY}=he#_X<-@06WUtw}12};_gDUq3Yw+{DmrTr{oHcm0y#}vg z4L*#$EPwLgM>tbq$7u42%P3Bx7>%$%Icq=hh&^XmqcSf?oapovgF^a^M)v>TN5vrjK5>t~;%wyi6~43D-c zW;K}eBKrS<#por?wfMmgKC+lQ&0`q5aNZv~Vr`sf*biuX0SEFB*gth1DE3c#``Ap}sR!Q+$MhBsfk$23g)jR?cdx2^_)uj=kIyPQc4hDx<7E8V zo6a7mRbZlhU?O9?GhXS8m+=*2cga_rM!kw%E#kaVbnI#^v8&suUu{x_AM&@PT!OTw~%;jI)Vv`N37py*WQee|3H?8czv(R9b~2qkpIP)lv4p z{eSlSuXb5ORc{V!(Re-%1~TJFz2@v0^_p>{UI!2Lk2o(jJz~FKemnd9bY85;J}>5> zU6HY=Y{m?nY0ifgQ~yNTB1{B7w8qq&m{yGGlSNF>Q+kZ{hw)K1_~tfn&N%SSt>B(p zxXbzG5ID%(<1Bkxb2}D%^g8ccXs_Q{_=vrJ>qkZ4Bk<2r8yn@?bFX_iavgjm4CLS= zuum>JQZ}~k*vJJZEh8BW@22owy&e#MGnFi_oyHVh>HyD(5C^Ck@REnycM0~y?7V<3ZjYz$;@ zkBxy0?y)hD!96wxnyYeb3?y9TU?3+pl>nc;KrEm={G%9x_G391NNtS7Ksq-i4CDs` zmB)bTuodfEV3CgWtpn5xjfW;e6QEZ>J3+fayFz@!}>e{@`~ zR(4Xr3frDzp|X?4L1ic9>i6NeT#CmosPmDzVMMPulZX0DjV*#{%$^+4Bf~ zGJ77uPulZX0DjV*#{%$^_BjWtHRBI8;}13Ce>U&xOq1eI1uElH^03NsaM#18eeRNZ&|H;m!JBGdEL83DK-Ioa z!P`1NrT9~Z89#VZ<&TA`{Bcl~p9}pAcwFbK6o2xYv4$7TSVPTNL(N!2KL=h{{Ar*W z&kiL64DDF*C8*YT?Gb#&>=8_2FL$72N^j4w{_Wr}{rA>k?#AD65ge8ify2sKZ~Wje zmAi!XekrzueCU4GoMhHt=MD?!?g}UVQWSx?5{P51!cG$A>J&q~!yx|-8omgiSzv_f-n#`UM7v%|)xG1lxKa_H+20$rC z`wCr@Q#A-mIoem~q8#ljbWx7>6}FZU;F-!83so88peiF5sxl@(RYo3EW!w!_8TUk# zF^Oj?V=7c-OoOV70;tNE1yvbypekc7RAtNymobnrWv_dv-fy*-cpm*S@x1;}Y!aQ* za^iC)4u##KvulH~sTG4Wu`Aj=7`sI=I1{6z-GlA8iiyMN9olK)fV9)Z^=PMw>(NdV zr=y)FPDeYVV{XadvE$&eN6~HK1Mx)yc&v~))LfQozgU+9gU9-LJi{KLPr_sI;ISb5 zlYuR!wK@Y^tTq;kEmrG+VvE)Kp|r0y8A_kb{S)-Nwg-9oRNK1^kCoea>`RpIkKsGY z^ml|(rW31r*^X72JqM|*wJNheRAn-j=Dvyy{oWmV8Cjb$P4TqdQ{v#WoJf2|e5yTsru#~Y!DrfEs4@0ZW_x(7C<2d-XN)!e!eh}` zOwWZ+qA{89Nj-6+Xv`Ij&2-jmpFQ8gUn=i%@z>e$s1B{?trWYt(0Qv&_#raZq&QT2 zF{*OamYK-DhZg5#F_^bmsts{6Xj(C(ObSUa< zRq%!|-ts;aw^IKD)`+b?1Dw#QSuk%#w2^>i5sR3+t9r$4m^71G2+)~a^?ZwvxJ+_ZKYjoe?kNIA)r-rC=R0%`H zQ*n(mkjLlw%^&!GtmkQ$?pSoMPqedQ|@t*{QSvELM#yHh{&dk;MkE82GKC0W4OHd^aHf)yQ`P z@?YIW-$Rk_2IRjQ`EFpotC7_PWVISuZ9rD5k<|v|zZzL>0E<-*f+By_L!e->>funZ zSoMf77CXv+r}W`&7-6yhf)AF0JsZH9IwRHq*3`MN2C$~il6?#wZHZ3~HuJxDC;9&s z_0h-whaF`fullKV?tHlFB$&&=RrnW=&m=CTGcMYz+JJ1kz*N=P3R^ihlFgV@V=rVg zCe_#r*^Egw_ChveQjNWk&6reUn`JX5)!4?_j7c@NaW-R8-4Du`R9_8cOmv1Jn=#QD zhHS>9`Z_3MQjKk#&6rf*2xUyFM?o1A#lZgH?h+hjh2u(3xw{fi>PkGREAgbR+{4&4 zcrvCdzEW4Pj`6DGULdtm^QJax-qc3To7$*(QyVpJYNO^&ZPdJ}jhZ*LQS+uYYTndF z&70b&c~cuTZ)&6Ft%|z|7psm5R>fk~kq1>BcSBXjJy6v#3934#LRH5!sOl(ys*YJu z)iDREI_5%E$2{qIkGWrNhRS`?KGX6PROPOQs@%0umAekAa?7A9cLP-AZi1@Zm!K+l zGv%IYyPGguW$m@XXI&0LRo44ZmDL1QS;wF%>jYF~HA7X_DT{jpJ?0*{foA;db1oTB zy*J1{hcX1J_l86D-iWjBCiI$da8-ECI6(E=xll6>XWvcOa0B1H@63A$O|h3_Sd z=6UB{!mPg%e~be6ICm0$-h8r)n1SlkorGsNe_Y9ZmY+4x?6TY?qrghKlTdY>i4WgN z_-XlP7k5;sT-`~yKR$dX;pOtuF7BlmW#377rjylT%HdAJi}BSi?xGlF-$}TCkyXCm zRcP)cjOH<6nLLAGTqR$Hz6UOu0M(kP|AtxvmN|~kcR#jeAMT(C$CTn4LmPg$!i%4~ zal`jl==&g8rT`s25IafdG-i-avtvre2X3Ep^65T8${NpldLO^#Fy3z6Ee1U{nf0_0 zJwFgVFTJ;v^=6lc``lP>cKM;KH}YFAWxbK#dMWFT&S@-Vz0o<1D(*kKL%$is*$w?> 
z2volr4%Kf)Nbkg&JI}7w^Skkh>-jy>8*O*$6)mtzl89f)=dSY}<_kIyOa~7hE^`(^iW|y*7>Yjk5td;V)UvArzsH=#&mQl{`RI6ecAW%Oe={aZ#qcGJJsG6FnP8DpU;V;odvS zw3ac6XDVYVRAo$qs*D1t%9sUJ8FQd2V=h$fm=`WXw(9ffD9x$nT-c-%+ju^DVSU@IpGaNe#SUut74E_SJNO(!QD= zP}*11+rcK@@Lm}EuEKqZ!T@336$WU4cL#Du-EDS^Cf~$p;@Hpj!xj1PruMI8vwuw( zAO{Rk%rjyUyLD~`dTfEYvoNkS+2+@4XoL3qTtgeQ-=~WE1D9a`?qqJ$T#1|=N^ISm z24>pxGJJFxUyp3~TG=VI^BU$~dx)-~U)n*bt> z+6jLjM$g{??sLvV6-1neI!^jHcZp?WobB>8|)LAEUz6q?; zdhQ9VqjcnXD05B+bB>O9n9gL~S}LzRIoviotTJU+1f@O^sBzlC+j zXsC3@BT(s%tx)NX7<7nq$7rZ@$0JbqI(;h?zD|#UXH%gaq5Yv==m2OUGy{4C6dq6S z28GAddqLsx^sAuoxH%6|slITo{_TRB*Bv{M%soh!+)Mm}8+N?QMn>D9uv*uK7 z)|{%%np3q|bE-CLPSs}3soJbLRhu=ZYP054ZPuKs&6-oSxt=@QG_N&2o{fVhL35#< zp%b9pp?T0Np?5=5p!Yz*A2pMpV40e!P;f-eG${C^rT_{ysF?)?8`R8!f(>fsLcs<# z^JJ@dLP6EBjCQDwev8P9h;!4<0Yu-*etsz zjF<9N?pyfbRPH-amAe386($h!ocb6)MqmvheROMhiP6c3)!87cc7!M`*=>K;MW z@7yE!dGn_35!4x~$a@4oYd+OIf~v#0NAT0~tnLvs_hUrdBY3&|t9t}>zAEw_!He;- zaNHMc9CxL`ah*zgLbbjhz%Ln&NhOpX1k)Yhe)(gpCGvOc4nge=44;ciD18nbr~3q* zy@54K+c@qu*~yGm5Z`=}y(A)!$eQ53g z#P-u!rU$V7be5^TJMGMQox6#h=r{LBXN9ra9=$u)s@S9F^VqilZi|kA?X=GzDF)`; z;m#fkV{^+lXwG6;#8$A06$3Ld7!MR1Sg|h?gGq*}y9;?u-FY%2N!?+=Vd+N_pmP4azfjYfzqIV69~Y_>IaK3&rMD49t{~ z3uO!y12bjhK~)ChWA4Xbd|Jzx#50vK6{<3(LDh}|sLGfHRXgTDRR&|}+_9lDbZ1~W zI?u$GnBPPjzX`8|@mr#aElmNx-NSs(Kre;BZ=Yps3A}Cg`pHgqVmKkgv+;&!BV#!0 z?HG=Dx=2_J`&N7GK1mE`rX9l(F1waF6}ETokBE-lXgnIgv-j(F@QOLN1g{AHx+MqP z6R4QcEapIW5zb-`bQj@Xdu$8sI8FvwBN|_vZx7u8`%KY#+IbuH`=y*IQY=O`q%daW zop}>!f8xgC6>0HcRN;s|j2-xAmHZrbEJt_d-Ol@$8p|o84#fe>;6KFy%jmmei)Hj( zvBfg@PqD=^_)oFLGWbui#WLhTvBi3@{z9pc0Y&7FkEP=VeZ1l7Akpn7*Wwg>MT zI}Q7?9*mg-<}Qu` z^d~-&A63J*ieW7S_qzB`tp?^qajOab*x*1gkz!gt=22KW#hU-eYHRU|9CTlb75L*V zt{~@II~iQb|1TUL-`V2cgqYG>*=O0np1%*d=dXdYPA5c(!+a!49Oe-81!7cPyDNnwS-u9201{H!&-7hBqtJXa!o{>z$Q( zu*>MoBmO|kTeGZCRfa3{_S~2d`?I%f8^V8$54X1bYJ|1rcOzm$)dQ@S?RQ&SUb)BG z@_T>BP!D``TZhMmyeA)i{e>7WXVtyj@8u01vb@3#C2w9qKltB{GwicdOtwCTeGg#8 zX8e`HM2l%lzTKW95p9v5fmlNbA5M#W8e9DxGk5fk%XEB)+q?KOoi#{psAO++J$#`) zXP!8-N%hR2zSJ1c(9&zF)-AcIYMs;X{dPUo!|AKX6{;C=c3)=`n?Dq=O4MiJ$koT= zhL+Ms@K4Kqe2(&dbp~x4;R=aQzfZljM|D0a(DE%lAMw5XGH!H<##!Slf6D^oSmS&( zGOSp098`RwzP|B>6?&7t>b(Q}F0#*R!_N+%86rKV`fer7U4x-Yk9B%`7k8$feVsOZ zfQ=D~lYcF?aS?ts`G(hTShzy^+4zFtVVy59zF=&~be$c?uhzu6QpDOhlkWk18*E?j z?yxU7wltA?Zyy;54Z1lH+8MAyyT-agj!#YgH2KxkhJ0)k$EP-vzRyRe{&qaUbDUbhYgX4eVTxW~3k6C?w zE#mId+mJEIdidU*Sfi8WKh&5+_?=82y_AoxZvoS`AD<(|P0Pt$K>R%HZTSJ%!$B)l=x1;s`w@{ZuO5gf7As_>s{|#K)zVm-baa*>bpI$*f z;d|eL-MZ!X=qJvDY}uZN+@hm?g^qd$9fh2?2-Ckc&#I8FxD;JgZtJS`#1$M}<>&yl z`F`5&w0B=bTldpe>4sggoS#=)V>@JS>(ViE_t>!R+M4V|kG1J8oi+KLt+x_C4C}21 z>RxT@t)=!DNq4C}XI!dB*t)Bo@mP-j(wI1UOk*P*VjXMKV`{(J?X*?$JC}B3966Ks z0^j|Z&v*DV^BdU#V~}U{TW!$1Xl-AEY-ql&Lv}PzG0bgeqtg;fe{;7L`t2mfz}AD& zW1;r^qOnb<-9=hDZEJE2;}h0t|3rI^qnkE%4C}PT=(KqL*T&Il(qpofoLE;1I?d?Wz=rL@auKomc#fI+4@U*?X>|`$kAJO+B(a}TwjXL3ZF#}`^F>nQvz+1UXqTIe>_s3wdtq} z`@}DF=G@UydpTcj{@XOtp6VB&r#N5X*i+b2%L*gzfRlYCy=6Ie6}FerTR~SyXV{EA zh0ePb9p-1AWK$i!nl;qbW>>xJvh6B#lyHb+SN#He=|fNF;3jnTa`fMyJY9mD(3eVo zq%`)D(kDsh$^PmWXnE3$y@eh26H)B26{6_Cl~An{bFig;@$;Xo_|4B(toZ2XD_1no zmWiFp{)UQ*I4w_IP_PpwWWG&htTgwbPV;BEj1)AG!R?Lv7=;Dy~#(m z)Jbfq9&!8!S{!E<;F~kpI8E46Ev&Ek@aE!<{8s_(3DO3&WhV4FKD)sH(Py)&(Hn|s zM&7fy)$hsN-X%U$XVYqibPVl8Z>mi?clLXDTm81MtyFIvcKrdyWsJ)+bTRhT%p0oK zJ%3Bpx+-tgy6Pce{Z}(QCbY-n4oTMcGjC)57&mk=IwhY^IiH<;q<`v$hwUhxX-hwL zCQouK+vy4Vlj0Bmr=R*Y1l^*3DPGiGpBx?5MLLN3+WXCK+UfTjmNg`@@4_PgPFvJ& z*;V2H1@sKtOMk24tYQC3{o<2SY%A%bAbQEcz>)e$zgJ(*V`nX!WXs58;%d?xva^hi zYRAqxpKfyOtXx|+t%nk~#Lg-lNgTALO6SGsgUZ)lMcG-xld`G8I%&Z=>6RHhlWvM7 
ziCqqHkFkw=(RcGeS!Qx5*v=b$Vjpdq&HF{)7xBB*B7V1C#P7D?ca7~JIUK|r^{!F+E&PZ7O~>!H;C~B5{BMDl~#EbOMIMNAO@h8C&k(2Vse~aE#jB! zMf`G$h+pm(gUsWX_l=N$VOhibIG1Ph9VO)~q~l*(!q~?zw-jJkEQDHKf_-rn_C@@2 zy@-Eq5%JIcVp*IWe+MjouO6W~+%_fF=9o4m)Xr0z@VzY&>cnTaR8yC@1!{Q>b&5Nw zQ^a@Ii}>yq5#QY})=_6BtjCVHpa1dQ5#kOUcYAh@t4E~cgIgTd!RNNLP>0wGwY;6W z#BS;m@!|C%KDK03>TU?DTaSe5dWz>BSEXR&m z!E)KDq`c;Q#|``s#7SR=RCO;$=>&?lv{CR(C_6}%xGlLB%@7)!^zR0 zFKmY+$R?4j`@Fbw9ov1Q&h3%aQLQZP>kFZ4U5iA(K6AR z5{;Pur$p<_|5Kul`F~2Z&-|YfRjfK3PKhd39gd_#!=jTCEfcM&(TMqfYP8P$KQ-!@ z|EEU#%>Oq>70(WbZ;mRS9gf@_RXjWF+#D?vt!dGS`F~op&ip?u>X`qhMf=SEsZqte z!{OAZV&36MYE&`ru#*}s6Rqjdi1~kdw9foLJ?fbMr$_tD|1+YBhlj&6qKb!yBQv6k zhliaR(K6AR8I73#XGZJH|1+bG`G01#&-|YjReU@gPKzo&9*(3%6(0{fY0)y#x+NMh z|KAd=GymTbb_MElJDv!X3uBwvR9Jsh4DRlGbLnH5#MJnYPhmWkHvXvF+KJ6dP{ zXWr8Pv!i|He{%lNCn6s~pGf9?%Q@s%FuqJ}KJV}D@Nz6JTy(!#C4Is&=Sy;Q?WP3{ zYa_`+YpwU6BWs`WW7%yIht$1xn4 zKC;&O!;f3onK@#aWAO(btC1xk%k2M;etdLHzhShD8I6<2hkv0y|1Pf8PYn8xYo71W z{hMnWiZz>9DcVK~(=+HF**7UcN-Y%MD5L@*?oxW_wfkU)#<7%A+Fp z-X`Bb?cw}Xdsd1WUj9TD^9zWT(TC_8R5{{8auGQ9l`HWm^IhD3D`!Hv60sZ!bow5>fx=a|tr zfp1Zjp^g19I>#;S*GA68*te)2^9L3c5!)S4yZK&Fs5jmkme#S1d)4vm$7Af@xsdH2 zKE^(l&fy~u;zpgLkbT?Ac1!Dcp63oN~{V2F{h=rAN#r_xl+8 zGI7*5r_S+g_F=gY=KE>vU*$x6ST02NC=Eh657Hn^E(Gm%^B-n3?)LPD4#oP}C#_sB z86xMO^oI4Z<&=4ixT`b=;nb0{YE?P?A)jOLI)4E)Aih`+ia7h)~B z5I5=#e@HHbIq$>Xd0*h2_l3IduWQKCk93U9c~<6~XXUJ`^S;76|0~r8jBAK}KT29t z+$gQ->C2NA{U6<{qi>9GtS8ZbcSW!zR%1)tf-SLzYf0R6Q>eED-PF@JCgDT7>L?@D zQ%2lR8SwyR#DnCALA3ATu0!N)=(|#XgtpO2-`~1vW=Q%*x6V!U4dqEl-%y^!wO94s zsR@j?G?8(aCNcg}GUG5YuJ#i*HB{hRrER=9WqiX=(E!enAJO*^ngBTx_r%85K=)YA z*c#i2@f$6HJc(_bA7&cOV;eDkGfkevHm(UXO`gOyV*F;BJc(_@_{}tV65ELJo9RVN zbFP``yO<`%Z>GtU*hY-sOp_ zeRs-7j>J|iNAoaaY>+E43++$(#l~Ig+RuANzxo?DH$wU* zW1wE{SZ9^~>Tj&!H~l`w0*yb<{wN(F4iVHhskm=W)ifOlzp4LD!pYe3%PpCs}g|3ndOVS>F>pLgfRo?^ zI0f>a-}Gs4Dx3*8JK^ncE)2o>FdN?}{P*n4e;UA2pV!7~)<(@k843D*f3T*8AgT197x+hSa9z z5zB5H3x|?JH)=;k_zLqLEHS)j)BCmS%y#mgtI83x zTBmZvtk$UUdQ{9WUZb1NcF`?>d0b()+Fh_#eIRI)D!`@4NCmp!Z$PyS#PqcINA`O>9Z; zy9T}YUE9q0z%p|_K-njz)F%7wQ1*!_waGp)r8e0orexkjJ`LwE-v?#CABG??r8h3P z@uUXXw}@-Vems=@0F?bCDErA!_EVtj6E|wm_7gX1ko_Q({d6e%8891W5v$=H)~%`| zObkVJ6hPHc2vtW3R2{3J>L`V(gZR)^)j@n{tLi9&s-qmLjtZzcD&YcH<-WVh_^2k2 zqe$mpggB7SziO!SZwu7S3vo6-tR$3j7smT9wCootDk9y?V)dLYk~A_ZLN^Lt*sr>x3zUc`nI+ca3(wn>D$^) zL;AM1K1koz)(`31+Rj4ywzdn9zO8K#(zmr;@!P5NLBFs6+DiX-qaM;W?GY!{G9vA59*Ke(I&LrdQKfsKd{lFPeuQap#Mj{lBahjwT_;% zk7D-}c4V&ywrTya<@MPC`fT0{aCk4kAvbRm_mk)l-(>%o_C(k|wWk`=9@$tsAt_q) zcy1`X`M##GZ(p9)*F8^J^jc|yY&(|QpSM8uNv%+Q(h&WAcn9w%d@WD!3*@6Oe3Sdu zttx{L*g6*C1GcIRK47cL-~+arzLCB(MSZ&K3)H8p{29}}G3HxTN4X#8Th@GcHomqF zu9IDS#X8x=SFDp=e8oE1#aFB|{UbE}BlcIT&-Z)=eSSW=f;0-#_ebc{&9w*3wFk|$ z2hFty&9w*B_v1g_*z4o{fC){#_zl@|kHxs!$IUj5t^F@BWcHWO^$Ylwkl9~Q+lXHY zX&dn?#xLMkj9VBRn*9pReuZj3 zKF9b6e2(!CY4BE-2kBeW@ee8FNu=>hC0D}JCXVv$qfeN&1ZoT5faw?M`%S+HO}_|D zzX(mg2u;5TO}_|DzX(mg2u;68y%qHD2NT})9rcK}DAA*lEJ_H|Se8_(4JAe;4zYQH<(eLErjK3*^@;Bu$ z16Dx!n@X4itDyYN2K){0IVd;cDBqk?ZiM=oUy>K`OY$PF>?>|~n|u84ksERMlE9)b z-ES?L?~R|)!1x)pe6w5Y?Bsp25MxvJvF@9^e;2QLlcPUw&m>Ko9Q|>7CTiN`=#Seo zLDMEjf83rVO`9D3aeKyV+T`eu+mpcbY_4Bk{3=~QuKoh*n9Mt5Z=*XW^NpuI#?g=t zp?ry*yJP*6-- z@#yTC-Zy`U&8*

LuMVY}7*8_&Su0XP|8SKcj3s$1>S?9?C`ol#M1R8{dYq z@jWOTHk6H5M%id)nQZ(3%0?TMjSeUq$DwTe2+Bqel#Ty)l#MeilZ}6dvM~T<<06!e z%TP9c31wp#%End5aVg?lyWeNoYnNI>YnS?l);7aiEc;FRlVu6Nzqage>c0n}{`)+n?^%{GA++}I#x*>t|2A_T>A$Z+{m1@( zl749!`#XGlpTy&4*T;Vt;7C&h`wtX`*Z2i@gr+rvFzKlelyhiKY&`F zeRrGIXW!kX_1Sl~(bp|w-z|N_x_TSFZP^)?;oFw|GsL$oV_)5dZ(GK`x((m9jD2+* zzHJ%%>NcKzmL<#%Rr4OD##EZcbFQx6LGBU1m`MB>zoOg<=>x@6CvTTlkbM*VaF}nC zbKkezN8FeDzU5;e_kGI~AoqRCCqTA;`DDoUFP{q8{^ipl+rONBU%Z+3YPqjlKrD#+ zlTzKcJx@&Jdp+*>73#yjMjy6=ex~j3#xWK>G&vFD_Pn5J-Ww0Gyn*R26Q^9CXq9%7 z8_~(KCi1wx@Z0-eXKdlxuhIADeQABaqnX%LGX2rd=?9Z&OU+WNww8FX#tvEUn@oQ* znf_-o{SEPt$-FPixBRbh-`Rxk=_PKzo&KhSI8`le`!sE@rEQ<4?X|S+)3m*owtbqm z*U&Dr4(&JV(0;QH?KkVt{&&}z@S^aUH!I}Vei{ohP`iV+n0_D)sSf&H*`4(EZ6yUqh0cbx~=H|K$lJNu%TeXQfGaW0-? z|Cu}#j=?I*+;~0alP9C|K<~6mV_CVBYxD2?4Qr=zU(0{e6m)F{nNzMR}ouZNsRqd4I_`P zXdvcZ>-g9HxJ&I`YS~?Ev-amo`@Ma%GzdTY?j^1FAbNoI&*%B}73cVhT(_@|=@iAj zxY~a2SnC*1MW=X)``O~nzS=ifz5^R8HoN2TtoW+?y{Hvix!$__ zRG--aRsOg;jz-asICtsui#Pjgzhl}FS9?(O`D|Mw`eaAHz(eO=OafVW!b0GRem<(`XZ!%Ev%c(07|M$;&X) z=n|RaXJnC|Vdl{#ipj~)_n4kQuUJh^Mk3>6oEuM`q?d!SO*YUie*IP==a)N9#u@IZ zfBjZ*gYq$cM%;t*_Hxq~+;K9F^1OfUGvVO^8HKSE<9ctegt-|OQe)Nf$ZsFha zpr$$Zwax}jqcLgvF-@aeX!>t7jc%doFKQYet?4RFqg!bDQB9*;X!;A9Mz_%PBbr9H z&~&Ay(JeInIZdNmXj6x9s#(ZaNb`uj z?~I0eIh*e@#dM3m_jC)5tI@~3Qx9WQ1pVZJa{m+@2f2R=j)&Yo1t&r7pMp~$_fNrT zko%|LOi1iFI2#fh2;K(ofOo=7I2X=?^Wn$fUGN^b2!0$cf%n0D%a+FSPF!I7KEJD5 z{Mh@Zh~8V)_eJ!b5q)n&-yPBSNAw*MeUC)nCDHduV&5y#H%MNiEz&L2F2{PmcHwAw znB}xpx#Bl!7zJkykv)rV zJINmLtcdIp&x){b9pYIL_N|jlYzSS$^4`}iQ<<>O9p9E%Rhh~Vt143&VpU}-vkG!N zoKni7ZK&+*WOP?;SHx%cX}hW+&kg!URiC!22J+nC?1VfwIJ+Rv4G!@seW%LU2+{`C z@A%yDbcsU+Revi~{luYys-HMiQ1ugs3UWT^J5@o>2glPmEYohjwPe~2O}n9KH#F^r zvO&Dbk`3ZbmTVAjvTlVzVle0&vn^}b^1fH4_ww|;D!pH)?^WqNIeo86@4M-HReEnt z->cI5XZl{1-Xk;Lt0ESn?^T&^VU@w#nJ>StbvUvcp+4EIhO)Z_%5DvmUE)ZN>=H+E zWS2OSBfG?roH?)#%5FUj!Tk`u!}7jcRi`=}zDuP#TA=D^g{p&?QJv}_W>lv-h#A$X z4q`@is)LwOo$Ba=s-qvOjL6AWQ60pJBC3N}QDgxmR^$*X@_Fydnz5US4~2Ek5g#(& zaU(utzSl;4$c+C>e8_xnjrfrHej4#1^BpweL*{#C#D~l`$cPV_?}iZ{GT-+iK4iYb zMSRG7D~tG$@?gShXM|;HXEjtiw?MVC2Abmq&GCZfctLZ#pxRjn)y{gTcJ7C2=K-j8 z9)x$pLogRc-FIUxKhqB1rWyxZ;CR>yC&6|&1$M(}@C2L*Pr}*oG`tP=!8>6;oD0vw z`S1d~3l73X@CsZ4hy1qkG5*lLQQx|9M*Q~ZKJveEpEN-}{wvZvQX5yGb#z#~BZSt$ za~}79FaXgyI+CDoZL}j<|HBmh4^#C&Ow<2-Gc*V}79Hu3W1%q}S8yyersE2Zg&EV4 zW1%q}S8yyersE2Zg~oKeQR|q>eRZGdKP;~G_vSIq<{hYSa&PU#{x|&#>)oJtXg$Sk zoAH}J0Kb6h=q@-w& z_gRCH`#&?j$P^Pl@j55x7`8AdE1=?Id8j@A?Iy3K5CHjwi_Qc$a&k1j~b*8=?;41W~aYb7m1DC z(2Oo}r)g85_I9Xexvnr{MNnr~0i@2ZLP(umC6GG1Rzd3Q!hdy9XV+>-on82^F6!(m zgY;Wn<=)uZ6%cL2Ar{g*nf5VG!>hFKMe6E|F!ls>byh>_>f8dUtFs1DSLaShU7h%} z7pbcgpY|ekb>h=rq^{07NL`)vFa-B|<8dFr-=L4g#)N2QY>A7+>YLF=&O<#%b~ubp zLERlKkh(isq56b&s6L?^s!zbz?NFb9uiK$M0bjR+x;y$Hb$9eb>h3rTv*Cp}+l=Ks z$hZ{U<7+odvlxl9HMWFuHcGe$YPT5Qn`^c`9&*jL2O!sMdlKXxs682S57eFlxd&=b zg~U$V@qr~l7=-CC9cI7`m<6+74$OfI;X?e;Uf3ig|xkWHKgtB_`?#~-d+Z2dwV&g?d=tiwzpTp1+WU<4LA7h z;#|Ieyb<5D3EkTqgQVJRQ2o!3F(_cA%4km7Ws_v zwU>z7+>b7@R`2Qat*UE^^$<6TaO{H?-?K&^iE#Xb@$|JE|6l-e{Db(j2**E&KZ|hu zgZQ%u$3KWai*Wpd__GMH^LXr}2Z=FZBRw5rBRvCRBORX>kB#&kh>di7T0Az=@oDkc zNXMsHF&kl)$wmQ`jY23JB~Uh2LD?vUvauS<#+p$!%2*~Fs1) zMkpJbM%jq4Og5^aY;1wDQ3GXTCzOp{P&Rf$+1NYEMjgv!qaMn}ekdCUpllq3vT+E? 
zMik1%;ZZgmmdQp7l#Ny>8|_dwx}j{GfUy$&SQvtc&gO##1uAxlpMEDky)~SYCXA9IiHIP0b z$T0|p6Grm)664bPbx`ZqL#@9bYW)LH>$AUge;Z_f2iI6vgDT@Ntuif8Wm=)iv_q9) z{{~ft{Trm;3G&Xb6$ks+zhhxPOoV6QM0f$-1P9^GkbSCWwIKUc&uYOD#>~=pEBJob zO+0t&T3$sCM<4f>x+bNe%t=Y!K9_y2?;C9;-jvDxWTwS;+qs|2jECG$W(FYllbK17 zc4Q_)+L4(8X-8%%q#c=QmOUrp#@3TP4Mq7IPjS7@$EG=^98Yt6S$8&ZYP}aDzdJu@ znei~@rC&e3`u{odHxyq{d|0-n$Ncce(qpEdSO1tz|JY97sP}uf)2B7iZ-toNrfH+c z_|4cBYn8Ks9@B&#qi>Py;2V1yPh4Y(tDn;t4$@!N6Zd|~i-jv^LgPy`@zXYp9=m%5 zzDzM}=`W9J`)S_}V(yQ!U*@yl9%aAGXTLqlewojHTaVsg)??q9b=Y@i9rm4BhyC&H zI_wLx4*SBali{tCHENw4Z=HqSI*Yw^^4xW#i*ap?$x3;T1Hz6 zDM#CeGiZ-=lm{odI!Xp@l8%x=n=;t;4B9luakZ3hp`~Q7O{z=r^>4LELvhEs$mBd~ z<6CEioEy?m*7A%pm*W{5S6sOeqsJ51m=n@XR&cJ2&fSph(ch2C=DJ$TzL9P+fo;!b zA4qeUz&2*HA9P;RJ~K{6Hv1!6%h?~w^Ek&oRy{Mk+>RG0C#@ue4QV79l$#TDHIh%F zk*p<_IG%gU)=}eHw0*$17DdF*we8By(6xRJZ8bpq=CDsjk8jaLeH!0_F&IiSX{W{t zw~Qw8;bVmB-gaKt9oJ@Ffva_77wY=I-Y?Jg@;G!anqB5<8rkKprjcFYY8qLU>I=p- zXx%xSM{{^yve*XG53sFw{$kRiL)^E|iTG-N&2g31kri>Zj;v}|>&V*TY8_cMoV)a~ zbIIe#FEP&=<~vo(DKoduvX`?z=hj2cnYsHR=lk3PP-PCf<67WPhBFV*Z+Uqf<{Wms zbGQXkHnSC~FKmaD&FqHs>6s_cNa)i;^yzaVev`j3hdd7DZ_KgW@x(t&A9-h#Hqwty z@Kfm{=m;;6$02=W8}W7NBjhZY81^}|5#?|wo_!8&WShwgU|M+_>^rj@ZA3X7W*Tio zIUHtMc^h7Q{T$lJHqJ4#9BpKq7hgYzHliGk?aBwp@%~36QO?E=;^^kvQ|KgrL0-xE z-xfT59-U-88i@JUDLToSiQ)gA#ry@d4~=hO?4y?`f8*gYYcSmN$*a=Gmoi@bjKR15 zZyb6_quPR&a;7(tZk{o%rz|%biS&_2%-9z3=M#wwNNYH)Z&P{u;^e4rR{dzyH>*x) z+S?beYueivKh(6hFOF;4+ZWxM_Vz`WroDa9scBDZ=+LyMHMDEm+c(EF?P(3>yIG#r zpzmf~yWhH8OkcB$Z$zDcp{Su4ZD$Rd?Rj*a)#4|^!+$QK>y(PwH>7 z*I6Z^>#P*fbv`Ac>#PvbbxL60OZPV{hxdEqM9^QEZQuifrDxE52 zcjaVA--u}&yc>1-;#^m2t>JwNjTb&0T_Zw_?w|Osd0!m&VZ`bB#zNwBeTk4bUEf4V zoUZRCNSvRba=XBkwTgb26<1ADi7oh4GgsS5TR2@T5b&M!yFu6f_ zEM>A!?5<4qQ=#mqLD>&N*-wYEp8;h*3(9^Dl>LQJ_7_9h&vVBD|H*!9I2O0-2-|Ij{;Ygd5;uxDn>TP1nZ&S1wFY+qxHOTkD{t&IO&aZ7_`_NmVuOQau~eG1J!g=U{Z*|`E`X9&v9h{i@q zZc3^phOlu%mJn#I;ne5~^Gk zRJjdMtIN!=^sZ*H)P-PB6l{o}eCJI&NFlC989SPjr6z6QtQ=Uph>l4R{ zX#Fct>kmP#KcYHPn~pHP_Z!5U%FOmdv;E!}8hN}g8<^2_gt5JU67>y-&2dAoFvm^* zr8OP##`Jz8%5t+E=r?@d}KOCymU^D6vP_rLk%j=akA^Pi$0zQVcKOdN3^_x%p{ z4Ec~!H1y0FuQPw2!>Cq6;rcE@sJe&(Opc$ql7#_hfY(PoB^$S(fk zMeKFrFJ8o6C;s9^>~-QVUc_D}{-Ozcu`#=qgK`_&^}iUi`+uoCeq#saI`A7iDA$4C z*g?4t{6-DsV&iqka#w!lPipKk+cBZB1Ab+z)$|p#BKed`%1gtjqtYOL!%I9LD|XB|XiHl#_u<5!&3!oWXLBD;{Mp=x z6MuH_UD6iJeYoz!@Ll|O)xlpm_$vp0<>0TJ$q;|#OojL>XF9}RnRiR@SG_^TdoScZ zRqK~Pt-lIt{Zgp)SHoN28t!9LxsTO4Rn)I_HbAYj5o(=Hx(^LBH18zttp9gG{l6RP z|GoM@vEj&SYdEasQR2*6ewgJWNe#VaY|lwzd|GyzWr0Zzy_+UBR7Z$&{vQ2CV?ao| z;r;jH`NVH0@Ls**#973Diht?qH_pFc|HibN7VZzE5wviBAdR4f{Vk24h5ap!poRS{ zji8179nUdpSqwQwEqOeDc-l=%m}Rn20A+(?U~F&;84UVI+!ErP;c0<|NJIY2K z%VeV-%Eo>u8wa3l9E92*hoJUH6w1coQ8pZw$wmv5jaDcd?NBzlp=_LhvT+j1#_3Tu z`dB6#{ZKZ}LfN*kLW%I zy&<3L^c;OxOm8@95hLS%@@PEde)4DlazA-A335MqG#PR~c{BxbeI8APTyICyxL@SF zR@^wIH)LR2<4bg5^C{ZWh0UjEOBXhuqAgw6e2TVoVe={4(uK{ZXp6qT^b~E;_m`wQ z$k)XB8TAWY)T?hWby06u7UUZ4%7I*?8mrp83$WO-JGt&U^IRLcZ(NGLs(Us++of&Q zGHDna%vjy==hc@Upf6QFy3Okow|RZ2`b52hub5eg_t>v-KXi=mPF*06WC!c$ee+8C z!id)wZl*8P`}Jwm)k*uMA*lV*5Y&F@3~E1p;?E1eQ zpIL|f|L!^*17n9{VC--V%sLzcts||Xn{!h8|0VPV*`r{dCoXD zN~N%e4FYa z&Z!EHm&NhYF(}kJw7XF2&~E7%&!LU$SQvY>H$(r?-q##gvp9r?aSPk3ZIFghYR2l0 zKfi_ZsL`B9*ENi5ULHuXcRoqOI7c4Hdh$R@Sf+6@h_xP6to0oGXCm$Bpbchh3)+AW zz4|HQ{hj7JVjny{hV&ij8^p0oGx<$qU+LV7*_fw!_QO2(ODFr`=ja+2I3B9+pDb&U zaz5HvUwVf2o30bt%jUd#jqBujV%Ue!Hm>21)^mSeLC!~PY>ai}f++6oey7UK{}|xi z2#t-AfE^vfxom%cSm$f(t7_uG#EOsS^K3bR?bbWj`d*dvjqz;fG1fni7IJ;;?hn^F zGWm{+^cbCAx)0Fx*jb3~!7=MBf%Flbs~~+uXDP&AcdkYcp^xZTLm$EQtbXPZjzJyQ zuEV{7dH1FY9fUF+8z5ymHll}6rhSveyEAdMF+D{0jvA|5{ff?o6PG3}`X73pi16(k 
zmTL^}B(|x2Cu54s-YzJ6ySX;7cWkd^YYcDRyE5lyYz*)AL+Bo?cPxtT!FopzTeimU zHoAw7H+I)1Si{=SM^B<_uuj`)^~L_$HtN;*-9I!;Z2STIFkM6AZw!ypHh%h-uC6hD zqpNG^J7nk3I*MaCA7~uKv78Syj^bF(2O39lEawA_qd1oHfyPlB%lSa#D30ZPpm7w( zaz4;Fieot+XdK0{oDVdP;#kfH+D0)sAIr)0C?UsVMJ&eyjpJeZ=X2;As~L+UQ~4g~ z8|ur?p=W4J??vQ&od5Zz*EM#xv}T{DWt@M(*D$=+9jAMkXQk)4rlnIWz1naedB%a=1oi+AHO}HyosY+$NhWib7yc`xZ5R=1#24Zqp&_K2;hXoD9_z`Zm=cql4(3fzjW#M_awz@s6A8QHdUs$njO4$Ei5()ya*RN&j82jLmPX4Svetk4_bL1EgVzFvfHcU??$M4H(9(l zY+a40&MiFCsm>axI(I_VxeKb!-B5Mzg{rd-s?K_-I`>1>c>t=;gHUxIf~qqLRp((# z+k3UQm1jiN*A7)*H&lHmpz1pbRo`i-`ud>i>xZiEEL43Lpz0fhs_zO^eM3<7jp!LP zxxu`Toa}SoM^1szAl9NAb5Wg{;R8#guemZ5j3Jn^%-5F~zP^bz7`MjxqyDo^~ZPUSa3l_!4I z9Per>5tSoO7RiRh$&5~N5ULz;vWUt>p~@Ymesq=Q_?w%2=qhC@LyW9!0UU&;z0kB5 zn)a%$)TU;%6_Z~=EX?GW5DPOJ3b8Pwp`foc$IobLKFPb)RV*{x4b65#v)x)Jt*P15 zS%Nyphw8qvRQ}Zcww~#4<6Gv6+x;o~?-pWxsyo6x z$UZcWf9t-y1&!sJ_w8KOw=bC(r8&Oz$L9D#b9~_+*{>VTW0TK5j?SXG&KR9#a$}}B z#{Sw2_%oh2?tn%INwkJdUJ8A`$xGP_srxv6ztKbJ`;8t#-*5B~`hKH_(DxfXgudVC zA@u!54>?Rei#4uRv1@5bv(Z|3wo*K4*yN?0_PKc}_yMDZ;0KHrf*&wi2!6n5A@~8K zh2RH_7J?rzTF8jc?y>^LzRq817jK)meX`Lq&Wy2|y6_QQ_=qn0_%3{e(Ma$SMkB#T z7>xuUVKfqagwaUw5k@1yM|6@Cb1l=&b?QZHX~yP@*y_Yzyhy*_iNAP}KD-luVe}FF zh0#aw7e*h!Ul@G^f6;;7B3tgZPH1d3+d8rFHTeC>*6?%KHn}7C5|cZEFEQE)zQkxJ z_!6U?;7g2lf-h;umt6Z8-+(5Lvl-20Gd3#Oo_2gnB|fDcpJH^AD-b`>J{0HbCL?h? z-zS-}b@V?Ee${Gv5Y1$H4$ryxm*w1p9K*jD{RIDF^b`EcF>*()eVlJNa6F~8yju=m zb4+>*bx3b9TFNFr&&d;Ouib~nr|0D9*6>p5SxP;b_?@HpolN{rOmnF=`y;9GD~976 zBZi5Me-Aa*`0KH;#>owtk{4L?UB0Kq`=pGII^%p1@2eii<=y6spW>s4{CT*}K|X!M(56uY_8^3TpigQ0s4mTA%)2 zdPzq<%SJ5we&R8)aVdjN-6$BnM*a z$U=y%Ba0!nj^w%efUyx~nQRn5+2EKN8yquZV-=K*QYag%p=_)fWuuH`vQZ9YgX3sy za2$<|DkvKpplob}vaxBDjR?zRqZ-P_7O1w=K(%Ejl#N|bHg-eV*gMKb9m`~+9?Hgk zC>sZ$Y#fBLaR|yr6w1coQ8pZw$wmv5jaDcd?NBzlp=_LhvT+j1#_3Tu`dB6#{ZKZ} zLfNU~~uC(3R%uB+{|fhjfi$ zSvsUY_(QdprF`>G_i=G-lZl5C+j`98iMV}blNsmZ_veY^lP4mLWptiM)_d|qW*B{i z7`;1ghuWm=O@;JFY7^Jx)%ji<=4)9N%TnpH7Q1;A8lU5Xv=!x<#PpR;;?%vw0?~Pl zUQt7zp!|^!(^mHBnkOdxt~SCp{;4z;edFZ<=U}$R`1k;gWeOV0DxQ!2B^rzJK|YGc zqGMCYxzx(B`5=wu4By;ho#70QQESHgH5QElay#1^(^x)Z@H$*x~FVnL7Db|y=qVH>|z8>i+tb3!bB0Jc7S6BHAx=Oy6 zGjf*Lbxc>$nBzB&{}CFMGon7^;Y0hLRnCZXmC@sXNLSN1AdJ8L2jz??=6Z_lmyYrx z$K?;zQC?Lal7k!^hI{En)RuBE)KV~+2Zmh$xAq($|5Za_8dfbl*O^zPc_Gvln`KJ$_i`e*7RB)WnhdpI*HBQ;a5JFxioYUU-stCaP}a+>tvM`YFp?F^=ym_9Z9ubc{R;? 
zFVZ;wuM%xJKzf)T^tu`UhNO*JWqOi z+&vA{zm4A`{C-aR`3`!hW3ex!{%I$5BwfB%x{Lq5#P3)9ulS9hXB+LaH{-*vT$@N< zJ=c7HApFXmY@7V=IH+UawaN@bDJ;;`6j=%Q$P%YS!1W-jB|`4ZXXNepu&o0?$3M{#SjHzFUSq zdHFogY{ggKr+4oht#{A6Hyld-vv8<0Y=ycCe4&`$9r8`w9^j{K8J*Afh@R`{N0)Q_ zq-7s~(!4tsTOsb#uWH#K+b(@u+r8e)-P=a{L#!9z7f(BCnAfz?zk}DuWKx@3^}R!` zneA+UA=iqo-vDha;+m5lI9l&s$F(UPHW8aK4g2?L-pWOk&i((f_b%X3RagK2nVDQB zSAyIEVlG@H!$sglLHc4d3Cb-PywKXx{tE%BfdJ}@7Z5ZA(uxkKRIC>K6{1oYU{ow< zXt5Be2o0bVglhX20xAgsA?22!H2=@{%sI&r6E3!WpZEVhzdX-*_RcwH_St*wwZ3bu zz1QCR80nvkhI8IS!IB|h%3!c%5EyeC`yJlu2XoB*4r^V)9w+x*JP+f&aNdsqdpK`N z*yA#4e4)4lR@C7R7&6bo9UEs9+Y$%Xm>2_Y$*&wiJGCD{3~ds=BpX3&xdCiRHiRwD zfGyE=*b>)*xkPZ~PT>mp(S=x=3S_81 z5w@I*0$acnVM}#XqKPM`-(0w8>RZ2Dv>Z(74W=a1wo1mzww^Q=oTZP>(7zq2-%H@h zBJgA;@{xl~Jn@|jPZ@G@Xr8x+_MPv*{Z7C1_UIQ6rkuRlQ!=-|=VR}6#U-<@+pu}l zbsPR@ByISldVpukrlbw;8Oa;|TKrwlcFsp6He&sY$WaCRz=q;VZ}4RuxFS1|GmCZ~ z09)q6CmWIfRXSHD1Y4$9b+YRdT9@s`!MYr1P?t^Kq_6F|Z1N_5eSo_Bzwoo#JKr$Y zy+R%u?2ZF-^iBB|OTn9`z?z)slC(3ly$oFF`x7uHEGYlG++YqEdL+eIupj$a@^Ab- zvaj$@byWR&lXAv_rvw?4%+2LH-A`VOJ^gq`aWw3!JyNtj#CJN8nsGD8(_t{EbF}x} z-WCouH*qL><=&BoXWx$<9{u((?}4Z5OXfOz{1Hr7U3MR@$D#+x3yZe2bo5X??xP;% zF7PM9&066g_9$j+7}%2r?+#(DFmv|in)9K^XfpBorSwPi*Y&s~ENBL=#Dg`xZnCfj ztP|E~52u}Gd`%o{&4YGfDBjGs@TOY>yy>ZO25{~j-{KAv404uGH7Y7~A#>jj7qmHx*MOe9@ZX0O?ukW$?QTGX{eb zgTRa1z>QnMkAdJyrXM^B;>s1;GrEe`5 zqkKQ|$H1$RmL2qh@CrNVC8gLwzfua0Eg}`qoq6+xMGt{Nd*1rhq9bqNe^4jcHl~*qvB^5SMtHIrTWw21(5^XzJto^mI$rpp|KGRl8|m5H3%k^|ZN^wQgHE`}uJQ4IQ+CY?3v=>J%&Cv#8;Tq>jN>yg=L+qb<9t)NvxBl{rephvr*3>};i6M-zKA{Z(xTnqi0m1k>=|Umwr4uS zFVFnM>v`#3d8Pfb>@Dqoll@uy z<=NZaw|Mq*<}-G~hWBPAZ`isidBb0F(dlmE-S^@9k0&`jvT6P{&E>fgyXH7@FB@YL zc8zdFwvBM*DXWfl{mPI})lGHUZX^{S@FrD<6Jr;_70Gpg^Qb@JceV|)j6Ch$J=wBh z^qqz_%)v%%7{>4}vSG$t#)k3n9p5Ev7}Z}kOh;1NhEZEOHLzhq?HBo8vR~N$w1<3B z^`C6v-e62U=0vYtW7#n!2|+t%0(FtzPqXY8VT{f*xRf0uIc;di2veG2d-Uq>^?ZQO zBwtZuQ1;kXV>V1Err77#stn=E2Qv-N3F;PVy9}qVZ5(OLb{S&fNX!-DNF;S`%wCCu zma%8aqk2Av>oYJ1%Xve0~4#v1J_CG)`Pqb++U zv?cbhZQZy>Vx+M=E?|e!`Z7upWPHY1Gsp_gfkBkZqvf>Nhn`Z_WAWpiQCQm^Ov=8`%^e zCkB7_3F9;S_?_y#<7{x8q?ENxanxlD_Jm!Z7pTud>SNd8BkHi1@}@>(lSB7|rz6dG z-wP`k@EYYUv3v>DX9;op8UsHlsa$gL+?g86mQ=##-V){!g8TMVN|A8#1+@{7hgfX@p>)SEZwG>%aUr4597f5#)>X#9> zBC7+D*-T_N13%=w6*|kIDOoOUM3yyv8Y;_@VeKn61v!@NY8>MZZZSpB1@hR+a&^96Cpot;lplz*j(EHwc za?$gQSxA2O*?Jz?-MWdf4&+xlQ1YvB#6;xxh>^&cWg;@1$o|ENbz=+mxMdvkPtw7X z-)iiD+mV|ZuebU<`WBnM#|&h5Hhljxbk@tr&sOC4FUEDnjB!+dvWany*a1EMIxDGq zbHaCf$R_w3;|XzmU%$0~Xa8oC_d3QKg1^}`E9qi1r{9*uI(zJiH5gll@97IK@~n5ZBF{@yCT)|P zO26uNlIbG&NOkN2t<*m8QY870&{+qfM|eSFma3q_Vl#cxfS=C|7!uM{n|^Ba3iA>$&QeCaVudpyC`xAy*Hno9~D zPf%SY(+&3@`&YSF8Ur6D?51yda%|poN9mW-^ ztEHQ(sGrKUWmvk|a0D^TNVp*hJ^rq}=FXPkqHp1!M^SI-*QWGx9QBk8UuvCQQ}TS- zxd!GFd@a9eXJ1M;f5{xfm+0p&@!=cl=v`p9bo4IcnMEI1a}8S^&(`Va9*pzZ`uMlV zwEXWDA!B?k8P{8fOiR97F?acWWO_d`t^Mad#*W-)$@CHYa9gh5W{yEJZTjNOEu3TA z&(_hBlN(GO9gmKVM@Pq_qvK5-E&Z%H0L?ANfd|sjlIyQ0c|GUHXL$6#<^!K_qNkts z_81TUKZ88YX`RvI1JaVQ8J@o#yV>)}XZ<~&Hkbq0h5u~N0es+1u9m!)po5#!*LGPd z=P$^bUB>0+5!4px@xQ<;rSPV$!~NtnpK#3b#g7rQCTx;FF8!?e0sEWM>FEr(D#l)VUj`Sl8sI_O3!OwAW)&Y^<3f&qP{*SFIr4YAqpbQ%;^jct{Ke&3$zzzF0?Ek$of>yYMn$Z!(7s= zn*2SLuSWRu*hgft!&Q3xjc~utb9HmS{EaYwE_-9-&U7y>?`YH?FcK)o!NS^#%{~On8mD>7e*Fh3!dFL z)PEww*u5{)*j;g8nEw!S2Fx8*1@PO;vls*BH=2{Fcs1I8c4L%34|z^vePB6r0@*5$ zIf4wO#7+!U$~f>HN?Fr*r_wIonn6mtu`i_3KI{{zw5PX5<2>d4*gsO~f3hDW>G#~R zzVdW$%{{s|(979sq=UURqm;VBO3O#%LxV>fuupEL4MVVzSHLSnT%^P$<-$8F;HPqA zbOk)MLMigOLidTinydebiyNZ<@uhS1e+uub9LD~)DNQ5(O82|NgGzhxp6=hseWk?A zZPV`u5Lb(RIEppCdC>G6bD)|BC9Y(_LQY8?&QOr2Uf_=_dpUyYPV+?6Oa&?aJ*lKX#CE~ZwLeJ``gnmDVe#%$0 
zZf*8)C2^Y3^zB^Nth-8o;$G|~-9b9n$($hRF6MntT+`Y&7oPi&|EuC#`^v!nL-dRI z%nhGyd&0f=Fy}sQrESY;V+-n<%y-H~N5i5Y{gkHv=_j{R`l(#>4DBcVPe0{a{Uq9o zW;P8^P`>D_eIG>oWniKFl5s|a@5pn;g3%76atwGh(h*kK4LU7hf81lNZ(ZSb`dUoy zT0D#VY5eaq&}A-lD4%vy@jBwvhd5gK+~Aq?T5?;)GoW7?^eaq}uNt-T=E#J;70^I3 z;X}un^nxCu*D>g^6B>Q#jAM_7i)nHanyjIoMYKU@J+}yE}@lOg_1n|6KiR&0bJ zjtH}j~dbgp;Z?qm= z`sOFn0q7f3@1So?z4PzTH{fiYzWGs5-}v3G|5klt>KXKni4D>>(r@2V-`Kk5Q}mSd z&C4PB#?#!fV2Pz~=2-eBe?Yyyc?A0-W7;Dd9Eqt(2Kr|O`e#ds{t@p<|7^3c$fWDH&_9ZsyjuOkUeY7)w)9WF zrGIjtXtlWHv0Rg$(nXHxT{q~N0e+uCADu=Y1wvwpq`lgAbvf4x^ujnEI(>@i-ILv4O$M%nR|9#ZGIH}^bY!IG64fU1kUrPN~hv+M{LHf#W zgX-RxzEZt=TKcNi5!6@ZkB9b0H1+muXol|6dDthx?sDvqZPR-dvws=7E5YoG6OMY_ zC0pT&bk`OaahXxfXXqT^80I%(nTw1gE;HU=4xh_UQe609e4ACwXUI>I&w2DeIa3)w zNPg1Oq?7T7j;{*$PsUGr9pC25cf$Mu?hoVs(RW<_iP&EgXGSm99ygWjp)(ObN&DQa zwD02!{mIVb|7ic4qxfQ+0bP0&-FXh%K=ZNkhtA;#$tEth^imvt&{6!5Xy&rSXGhU{ z(*5mZQ-EjJDz+DYq`gvfRFYD3RJ>AjltU>x%B2*aC_yQ{P;;eSyfqO@@r5GIbdbLV zX*Bz)wbb7D_(XA}j7yYOAFe>8TXI=L7v7zvlz=&Ku~VXO*Id?pKN) za$`5i?!FpdNBZYwt{Ijd(i#ozgm%hrZ{5c1&#VxirxMJR&vT4aI_Eg@HV3}3@l$i(RXi&LuQp%P#&>v>Yw_0j zHol|~Ikd5J8+A2pd8PD+@J;@W@WAv#p>L>vV`HoQMEQsEE9L9d+5WSFHnVUVMj=_K8>2 zCevR-)`rQKp>B42WdEx@>F5TX&5&-j$7!|4MOm*-3)=o};gQp6wI?3?yK8YeW0rry zCb4}PwdY}U#ZBSt6H6VpQ&-8b#=xZ~8P_V+n0KWseC00YFi*sUmB#R%##ug`m|OgS z1G{TNZgFln>uIq6Ro`X3G#(pWX+ZzEj%eR#>ijfgT=Jtd=B?OL`6Kc*rNeTW6WsRL z#Nu4a3gq@IR!pqEXXF0oVD1afps$ienZo{+Zt6k3G)^Y}V;J8~HlivSH)q^BsxpJ` zWl>%_-;RU-)2Qzb>bp8Ot++l$Bpa-~9n2jL_GW>>!aX463dXixAvSj-j=dYW zJONy;0GB->xZL9kak&Ca_E5))RZhP!c>-9iH9W#(GY=+D0Fx`w1C(E?bv@E=4RN{h zM?=sXtyktjU&WPFAjeu~p+2qb;`R%d+d@CZnziG;;#IFz3NE)-3N9xp1()NMCJUcQ zONh4fN7Z3-OqA>2g3BfzgUco!3zvnz;IfIw z;IfIw;PMFFzY1Iq^$BcD);N~@1D&@dTu!%exhJ^Hg6%q7uE0)hh|L4Rw^tj%X88w) z9*3+de@ek%Zan7_Z5P6voQN8y75!!X8QmyoNWVU2kDd*GA1QD$%e8qTl6^r zW`77R8pCY2bW_Oqla1LLvkb*-ja3S>)i&A0YFj#Mu1w6v{*?}LSeWeuvtOAOw10)! zwGM}g*=fv2)weA^Xgjm_%S7<|klF@qh2I~YXx4W-^)+SO!tGqzRLZ_^!teRe=y*(A zX*{~+B={X{W4IYlA{^fhE>?k+x!`y?IIg{O)~P@hX3wdVJ=diOw+JX;*aV8JD~CEWVP;=t5chk->YQ@qX6t-$S46 zXZ`*?QKZE8+!II2`u%&Fld^vQo>ru+-@m6VDeL#|xsEi2G?layX=lPbbfb`Z?$C}_LJP{#iJ(l zH^=-BI1^3T`ax7HU&4$o#X7@lCGiV8;ExLWO7vB~6@jxEF$w+3TDtocwKe)tR?BV$ z*cj5IjKRcGkK^>o{q*-q`dj0`C=p)VFb&hhE;sCZBL_ck%p9axSTj{4k^wU=Q zX%PLim3|sTKQRxmeh~e%m3|sTKW(L-2GLJj>8D+MGt+41gGbC5nq_X_>}s(*ZjBBO}2V<2;JZ_n{GLk&H8qOj-s3JGn8(^9?`i99F?u! 
z0sXQW9_+w;@@9B&7rzrf>5SB2y=6DYnzs5YVCxe2O1AoF;#6g;3*Sfc4bh7Egx&DT zI=j3(CiE^=EUDS2%yHXwE3$0$Jn&iADE%rQA{&_yjtl!dd`tC6HexDMko{ywjA^s) zq@MZIfwlXL$#~a~nBJ|R}$6N1fsu@SycJNibDLhruOq{w{Vcv57(?=_^zeBaij z$b8>+q{w{VBvNF)ZwJy;((6gV=DuA?!REdrCIhN8fd(@7|;DI@5RW(RZEcyZ7k3&h*`T7GC#u2Ju?+Z^CPEbL7w9=}Pc= zqlMQSExg`n;k9^5bRPs>M_v=VxYmd;>Hc%@dMkK6C@RADBzWBg+}1PEUB40Ch4puX z)xY7o5j|TF?G5a8C^Z5{7!#Rl;q^v-C%V56UHeSDuXxXdF6Lb4XzmNIe}lc$1zvcb zv@Cb3N%yVHgO=p>*`RuA%xIe}@5rt#?>VRIG2GXPZfuO!!tl${S{QywT3Q(1BrU=4 zi)aak|Bs+0xZV|9ucKuMuBRTY$Msbqxc((rE?c9@;$7Jq8i$UBj-9~CFsHG;6Zjq$ zVXRN+5XARPqq%twwvF+!;a>~E_$6SxYc7S=X{ce*>8mCE0Iow|-7um0EAv2cH$xrTwce+&D^SlC|x zUltJOs5!JU>~raW`P8i!ZMy>;1{?ZhyS9_u$_ zjh;08iZinH!86FF;xVF_j~zm)dD)8JJN-k5tP zh7#vr(Z#44>W=hPuot}gz_#s=FlVb+p<_J<`PE+WuO!k=ikU6&WU_C#_Jh_`LQF$_YYV5(n`iRU|_GT9Nm4&^T1%72=Z)SmCS=gId;8zy*W)}FBg}s>t zer4eoWTAVq@C&lgJz4k#S?HcD{DLfWkM>>6LicFj#VmA>_Fc?E_h{e6EKB!@?=(KL z?i$AejjQ4x>>lNcKwsbwGoF#pzKoAhWJJ22T^5 z*ZQj2o2kszYNhdHejueVJ&+2fCZf;IfvdvJCC0UtUgS9jy%kG)Gl;iH75(Y69Qr_Q zJIQ+D7~a+S>!E%|CF5|b@HJM&C-n1O>+VMeC%v6O%6>?1wmzQR!vAHNTxvHEna@qF*A@2grYHpX&hg zyQ}Hv-?M(^-WlDCz2y73+L-5w9R^z#qX)mHkA580-q+vN&ZjkO-{+Pt2096IbK?*W4VRx?KM)V-CRs@2h{t-BI5^ zg@&`ff7s_|+UDni{X_m^E8mSisOulbt}fX>=+=h)GpdpP`OhovpD_Bz_3itIc3jjy zbvicKKc%15>)AZ^ac-)Au0+PdkuRM~cm(bUpFCNFCbrq zDIxOG%aoM`*y(lhl7hUvfSj~OCN?@bpK;4C7wP{}`eL8%VY442{}OT%j+`hyp!gr_ zo^I`G?K=P-NmgD8kM-4nZ=<4E+X?UXrymx>^D~gAd{;YDP7)bs`2l<8wBg<~p2;S6 zVIQ3Pdim4mumR5@Cx78u$9pyO!+B&S1zGVTE0Po0^EUozOvMXk%h!0D@AaGTNb!rg zeT(1bf7SO4^AUFci7ZoEaT`Hfkp}%-Lx=yyj zQS_+RIcD>J{rt{wY=A6mfnnGLL$M8pFuya{kF5}#-?n2OBWHp4OIQjp!M$*f_hvm2NzlHH(jG}#Tuu^R%|3dj3kH@uU< zygzyD2JL}zJ9C=Hu_wNE#g!h%c36$=pmD1+oMUcJB>K)VmvxSNzsJ`-$NbhgXJY9-{Ly{z_WqfPi~oU5@hNii zsbtP|t!Yzyg-vn4tC{Z&=8aD{TKmpoQ|LDboXt%A^Q_Uz_Zc=tYh-Gi(b{(oo8km9 z$z@OH2g;sG51c4Ko)}vLA58s|>}*{*3x7*C{{!&dt+b<1{X+deqi^y#M^tgEvMJtb z!luY-me8-Xy}REkkI`>zh)q%YEp3Wdpy#dF6rH)YqNl{;vMUtJXk(si3Sr*E7Un$+ z=G}x%@h~>UP1qC?T3b82y$i8d} z=G{!4br6i)NZj!|B|kT@aU;68cyll>n_?rjgfOzN!|D4KbuO7097A{nomavbf}YEU z6rS3+>7wtY>wblu@fBkhvLO<&Gql!LcE-!t5--DZOXzRe8EwJLAA+wpb8kA&G~O)R z@f`m5#n>4Rc80A?_2maLe2ru0ENsV6?0htcox9Dk8_Ul4J$)=X*V`HFmjZUigxDFY zuF%d1wG-8+ov=G@qff8r`evU7?TpLyDY2?`cE&R747<*5%gL3x9>B{_NeQ#I@=@IXTHPrw#TJx z!%*9!2zg7zF6;mv6wzlLvG;Fb zyode#&fi-|IW!;o|S--cee)_k9*ug2p1S%Fub(cSoiQlWaV`6X2c}J~dtYmC50ej*LY%h(AYrLj+W-!)4 zbKByR1B?;vliO3cFwbw{X^f_;q<%% zp1(0N+@rXQWM^3AM0lhN@89_S3{PL;Jok_FdOjYN;rR~b{AjAz^OL*`PY=o}it~E> zH)VL1|Jd;SKFs0y?HH$LL#)fQc6^xU51qn2>wXa7`Q43?9`)@S-MX?TlZ^qePdCx?e<(BXpvs}u~aY7H3OB+;P zG%&qsr{ZC3WOI$-eE4<=WBmoxqsN4a#j7cQHF`IhIq#>%kE}cFi5+;7eGLQb z1@!s=het72`c|dB1AnOerR+zowWGS%m6*y+>?@!$)IRMukOg0>E~~p6o=SWlvwWVV zaepY+3fir8ESa(JoA%uZiQzKagY(m@|P*o!6tzI-2k*v!6C5yYbJxx?$(!k$tQ@a;!G z@Om`A|Hs(i_-GMpz?bhZJgiw>??~*BRK&X4wd@V-NbHzYlxDp9PIuk!lvFgpc()e1 zMPutkBzZk8?#l4A;yaSlV&tza-}%n{UeAF0GCWCqXICWOoRQ)A%?}LE@7Nz|`H!8* zo(tIvL-xWwYezR0_W4>zB>xG7g{n6^$^y(chkA5c^jMv&{-#f+em47(eF~CQFL1KE_S)iI z&ie7JrcMF+L~EiNlhc+Va+)GJvuMB`A@3fuWYs}EQxa2>kn49>@x4mMB=z3Ymyp?$ zA-d3(S+!T#qu4IR)1yb~d`E}t7hw4FO zS@D;)9#ouU_bb$eaoEq_S{Lp@r^S2M-z51$w)&xK&hh^nv~~Far{}jn2=gd*Tr%{| zfJjppOukZG*xBn@*-#gDvUFkEH+A7))?rB>+Imnr@or@FQo2z6<%tN^*H3+=tAxRx zu3pb^^yA) z(B1%Y^*Y~I9D}|!OtiCfpT_UHGVgGVvs$q6Oqt4|J(`D*4l>K+|8MF$m#OdO{ge7m zF%bVQeb?=u)OS7pS$+3w^bzvW5_y@;K8i1(n_8nUwyp5HAmd#>4e2Bi2{5sIB_BT}Sizs7`BI9)tGhzfzY8ORiRzoxSG|j7K}neI{dw zNwMus=_(W7&|7)vs!7E8%Qx4#^!H8ftnB&Oen{cZbgicgo% z)*BhJ{kHk04U$k<0IzkKII+0T#QUpnME0~-(?Wc<*TJ*poH6lR_Ir9AJ++Q~7}wq$ z)KiL+O<`}T)QLw2beMQzK*x!d1D1vvfmdQ30j(p^_XmNS`u;5ZE%n#+PJA!+f)VbE 
zH)Y$}-!OH7`j0Y8*y}}Q^=7}FW$?88s7=*DKPrQF^c%e&hd;0Pm$H}4yL>Yv+_K4N zSE!EAJ|W$qo#qZ~o6Po)JdPO6gPG^{>;{uKG z)?|27ytN8n-}GhRE!&q7Z^@6*I{i9d#vBs}^<|<%e3^LP9_;m?FB9Y2Oa7yVzDx)n zivMgsW-UCpt`Tf^zz0rv(KYeQ0b%fBk=qEon&1e03|&;;JK;suSG?E?8aIa*$Kdk| z6Kzbs-pLsc=p7p`L-mW^n@@l0I~}-GeuBlH!rY~2gFc4%+R*@W1H`83cZ}EFvQm6% z*)uUAzJp}l=F?E$p)o$SefbP{LcW8r^>uuQP<(y85qxFLsRq8T4%}?`6}Mya@2tz< zXH$HPpK6{vP}+cxZC{``m7Pd*nuVXd!!2-o(7nI{r7YjD6_jufEP4##;7}e4TMF@WRBaVVA&6 z?Z4L$GsUOEO!2AkPJC*Adv6&2E_7Pn;Cp&czMF8dF&sU3u6{h~dfL&&s`qawXF2uO z@3p?j##PND{Y!BbTLAuJOf6_DYwYQ2adqOq8CU;wHMq*W+2#1~lDG;V|5Lb1A4bxL zE#Q-}VAoXmu_b=y9Bk%=+}GI7YoRu?)z7xw46oK>Yxj$7W{nYD4X(DeaP^>tt3~ju zurR`^qp-CP^{d0zM(QG*75_GcvndwNI;^srQ+5RHw98yWnZJn*j^nJvx3qCrdrJPx zaTi|JSj^@SySA}0n{SJ|P4W4^8h0gwm%`oJf#JkaHX~LdftZQr*caDev$i0PvZWbE znU@kg+e&+=tzfN|_M*|A1kqK-f};c3yRD3rHLB6ONm&aUT|!E{Li8F^;uE5aNQp;? zUO?K7w1AXY%IF74JCf#*CX?Pt%3cN08Kmq}5Z#-U^{~;Y#)9&}?5oq9w41~4#+E8) z4EOUn(f%@Dls^okA*waA;|teNYn7|ouOMtJGm0gP7>M}0JleRMbjXd$|| zVxX5Xqn5t>TZ}c=mOQm+?#;n>^jzy_&1e0MHSP86#*k;MZ#AFYWUNWjvtA+3h}$rq z^)}Wx^{jiyGvW)(XFZHH4n0c?dDhc<<}ucU>shCeXWgu4osBhyo?&CEEoME5NifSy zHP*!HS^JP@#3Go_t~1ue=~-gPGh#%{XKjr&@p{%WA; z=Ej-?J!AjOx@W8QO24`J){5ZXXlrD{pK^)@wF&=YSCxo zj2f*6G}|{g_`8<+4Yu0^uFtR^x{|iyI z=SccjWBtvf-&cM=Ywlg79~kR@N%|-}bM8^2=26bbT*zAYeCnb6ROPAfqkESuIzmiH zKJ8Mzx9-z^LPRgHP9y1?$D!)s4=&pP~dHP5BdCJp|${Q+^{#Je&`A4bCHs((9 z;g4MIr6Oa0OmlcR$K3osQ+qt^;hm9APd>b-cRr2b-eK;IcX{&RRo#18_l|Hcg}vhr zm}RaPkC*?I{Frc$-kG6y_Va%)-uZy^FA<*o*f83ol)Wc*S3c^k8O?bBlfc){=M3{# zJfDS}4fS^cW3@)EAeJ>3E!_PaZH#`U$e!eF6?*PzWbF?2Q60iw>N=m)2S4V)J2BBQ zzUHhAPx~*{$3mCflDUhXf*(CRi^9h4HkSL)&Qs!B#2&GAzv+G#_n~iI$-G7L;6t51 z7t1**J(yb=O&`oFnZJlNlnb<%dmMYV-tZsXr=J2P&n$Wd{?i!^@$ApslQGKC^yRLS zXBRyS@15X&v*>2NF8Ir%>F*OoPcC|r`X;b% z(*6Dj}a$X5Jh`Cdnv$4=M0JnMmp8(L6us*P!*=ds>I^dBBde`q86 z`0#kK>(ZpuzoI ziq&tpT;QQi*?!tgM5YCcCN>$&+1e3LHLeyHX%baLG@@KCx~d#0Mt z(8+brz)$I7?XPM+Lnqfg>k-t&+H=)>mKO5N6V%1ppVfTUDdd@@i_6i!<}>85u1rf8 zm!p5pXJBC6Gh|!3xE%d!KC>{;Y#%rvU9A0L&1c|X-S5B)>0<5iXFfwd>Yjls(#7TI zU-KC0OE++pCUCh0UbTR*bLl<-J z8@iZ#m(|6*^Y7HfwjP#Fw)O8*7C%bwO4qpI0T24u2QS*X+SbM0(7*HGLtF3Ky0|;~ zcRoC5>s?zH_dx$X1OM52*Ve^1pnso*_iVju>*Aj1-zVWaotdU{Q-sT*y0{myf>*4I zUklO20Xjc^N zNTZ&%PM(DBRXyEB*lhG`UQ@c5I@Rf7@}lpR>*D&em4{IWAKz(G7n|StcDnfdeH+lx z4s^8>o$U(F;V=iIbDm}24rPA5E3tUpdShqMhdrP}JpE<*qsRxkziK{qneC4*z>av9 z{x{_SJLGwE;REckz#cJcP7ove96X@2loN>Md{XzL!S>DArnZc*4tTBZ6Q_A(>B2<| zsY6T7N$$hF=QlsU=rlIW!`Q87@pELqR%4I!$5wm{dt_=f@yh&8`6KMHp|T&_E14nB z_a7vGf-?+1A`WQ(n4n!oEamPh;?)&DDZ8u```;19Sf%X!|7CXB=@A3z=S=!L1DouR z=KM-i{ja_k4}6>cSD)Wce?12u*nM5s|J;8P9x(BczP9^+IrkUB13H&R{cZQZ`rYP% zsjQD^vj35Ry8b6$*Z+SE>3{OyrvLx9U_oR3fA*et&AHnPGo1bl#2Sn5D;bwO%{rQN zjWLlwMNG0gB_&Do2CeAZx48~-=7iRcM#uMBIF;CD?r22j1*$xm{-$W(JHQElyjUpEzQ?yzdP3W_~tR6neJ#=xfFR!w)PQ-=eLQxbHn4| zi+iFgPC5+V9qt6*2-cugPYuR-_h@Y&wzcS(e4Vmtgq}daCZm{d0 z!F!4&ZmOTv$E&P9KJ7B<`uU~?{rp_q<@-79pXlfGCi~eD($9{6vY#D|_H+8T?B~!r zU);}^uk+RSv!k-9epdbMem*^7quhc+T0`$9~vufd6#Xshu?=@DD%-(GVay!9c+^Bs;v|sSS0@im0_q(Yo zg3nHHAG@ehd-gl|z2>arjb^29%xt#!S>&|V!5(a^+k-dF_Z9OgT+ke@a3P0r2*nYs z7yv!|fd*k|(VWckp%baTI{BL;if!1t-NNAHSu!XeQv zg7z<;8DaVi9Ui;aluM?m12C)+la6K62@@f6OD@c#3$?Q3luGk4|!!MxK zDPkCpK(D<?mgs;BIYQ{ z_e*Rh(M2>WAwIk*I^=}3Hz%aM1@?W~Rbbzz{zdkE>R)8vS3OgL@u}=xwqPCCR@OLV zVzX_6#TXC)7^obqssyecXxi2%8*i~rdU|zSey5&&M z47<+MA;YS34tcFb8OG&~3x}_bi+&&an-pSDM7Ndb)46=JEQ~#yct3--rO^Iav_FVZ z-?YO;=P1z_+KS$l)LXw3-6-ST42vf7;K2gb)uM|q(fr?{js1Tn{f|AhyO8JU&}Sal zHQwR&sa`o>H8bnfux#~}*3uQm^3M6le!`ZUd z*JJG~^V0(X?7exGy{C85!CCDuFdtqX%U+(cnZ{1+u|Zg!I{+MmCvwo9+ZKm)to zORaV{w4XxT?!Cgcd#q 
zK=@!7eSHj`l@FqGDxYOM;Z3gRusPn~Jgs@`zbbo0JfU;p>o=vj8eZt-k@D*@f0_`(BfKjS4jBO;qMMxF3?3#seo3FA#Z+&rPvh8tMV zd4Re_!{b%1-YYvf4PsE^S1!R;I1Jw@FS-|y4`=QD&*7Qf4$i(ZZH;Kt*7$5;exMM) zVHwx>U#4GddtlQXnZ=q0$}8d#gWA(fdpV{Nd&BzB=QSoU5nDrgAP*e3 z?a|}-Gm=}aCyhbokAv&8!o7jnaT$SUkhed!$`5S4&KoeQmrhCLnnf(Omo&%BI|8#v zi_E+;u!*$H%)0{YA7sweN{?!;b`p9blbC@qUq|+}%Wta8Lf%uXZVvGSI^*P8=r;s8 zQyn$Os&h4k8)kcGmp9BC7>+E>j%&J}DpQyx?2?@`2|BXQt*?GtUyp*-IYvUi3dS-N zZy5z9>6@kKrC6QiDcV6p={Li%=@!R?qXR;J>xN!-{CF%lhn><_@9CXcoJXERym2}7 zRoQ8&O_!~6yHh7j{F>k00L^dUx06C+)9teLp5B>NXndWcIcVy-in>ONKGc&jW$NjD z*D-zE2E&`0l*&a}-Q8fZJ=biPrT6rXZCh<}jow%5iXhhO8hEP(Jk}CkYX#4>MxNT3 zzH#1n48~5v@nIb+XC@fCJ*)v&{Nm@tbydVT{0E80`keDuvr>(b&9TdT_(_x4ugYsg zmAc`{+0gSS^mqz>)|mw*(^nQh%^rHku_uoAiT6MMTJ++lX~#qK+Y8up{n)o=8hdoe zXDvsz`#YSa+oYRj7&VXao5frc+28G8dW8S8`*o&Uxc_`dm;Wo`!^}PSe&nrrImU_Y z)2j3wSASQl(qWucxRAXF5?!r)f#-k>!{@E}6ZyYTcCxcwrI$T! zY}$-=@f$GhnZ8cX&n)^3hc=>7`SKBdH~rsdLRxVJW%Pq~71SY>|83e;K-cZ;_5KFf zQh0;md2x`|+_$S-3hvou+U2NRvn;C&`?sQh5x<|8ZFqFA;QC-0-bTu}l`^`iK9tc< z_2Aq3{Q~p%_5G}S^UOLqsgujoox1N)n`j5V#Cq|=Hu`^7$UFK?KL0-*)66#Yg`-8On|IFb8N%5iZA!Dq*Bl;9zHO;~JCU&m_R!jW zFny5!@I7AS<~IMCF5a52Dl`3KzKHC5JN+|{E04>~HQEtXxe1*46MggrI_s?Z4O?m# z&)(;r;(Xq0&$>xs(t1$;9n`(lWvtg+SD=^S`Asil0plO*k0OuTXs6aSw4=_7OV|cR zYA=H?kR$zH9%uAh&+q1e6O4HdpCJ$RF?Lx*Loh@_> zUYL)ZZOWZg91mW6fX#f8b6%^k?X>==IK|*BZCB+c_-XBMhvz=(@w@3Aid7e1Ok3YX zWJ_!5w5|@jbvNVO3l1PVJ9)nqcAs;m_jzOH6VK0a8NRdBqnLJYbjDOJ&-SwCMXWD( zqSx~fb8EkyFsArpr`OXT`5J?7t#{<-JPM7z&!svT=(Km3OUq|WZWejfXBu=wnprbK#pf>b;#k z9CVI__SA553Cp+Rf5%ccKbQ91-b4)c5b~3`0?=&|WyyZf-eAMA?T7L|Ht>R%n}wO@ zc&Xp!Mj3sd;v3q7Xxp?^#q;PdZ1_^f3N+1Ny}Q1v^=hiaEUOMnc~^C4&i@B^wt{Ca zhvCt?f93yKJd>?*C%@Nk_K{z}J@v~n-j&|&3O&>Kb~@j}<{0@Dc|YHt%ljImTtPl) z)7^u9{XTm3d+6Jd=-s=C_qxlE9yV<{_ZZ_tqiXPl755r%x=V!Xw!@(((L`3vL&VHcu|*>^>;Pg~S<=lZCMu=Pxgpw<8mspHjFv&c3&k zdf0WEOkbt)EVQhv{a)}S1-j2-o5*!8;V zGD7NMmr*@>1H9&d=a{pc;qqVDf_>m>zFT8Y@{!~ZrQxf%!xh`b+Qm8TtJ4k@au&6_ z^41mJ^TLJldyNmpcder`)*py49xUOWe55k;&mnxHeulBo1CO8W$hkn*8VP#0wxh#; z?l9jPZTLUqn`5y5GzThM%Ecw!^##9`FXc!(@uJ4EkdHr`^PUM$BppTPR7PY}=Rwy= z;K97dTP!YkY?8@eqL=ZeZ_z4e>zHG%w62^76O$MBr*?VR9_ySjrN)MwB*~;~NNnIc zHxlQt8eUj2=f~9^+UlXL9@?7ja95Vm7U;3y!i<5?C=*&`AeX`U{+~zmS~#X<^*i!U zDxlR-Xmt=fuMB%u=j=(gj&mNPWGkSQel8;J`3Uct{0;A%hQ&48JvS*@Ql6V5)E zeb?Ek*$dA;oW1YtquGh)re~)ZvujdPxV&6rX4hn%9?!cE@ZLkbGmYQhOa2kk|H>YC zt9we$kg;{LK5l8@qQY5>lZXcF19~rh!d7JE2EACP5-|H43H!71XTIaeN@ zbgleeZS9L=oSX?~jvZvc&%t%UlC5OnnT2aUQsIzNJ+m>+#aHe+Mw=MQoW#f|NA=sn^Oxw?qv&>F z+E}nZMx+@+?#Ic6T9h_8%Jh>F9QP&@#E(4w+6m5>9#;a97)&*X6&1q zJ>a21f$z>79I$a^6xcF@OBl6qA9%9w(d?J`|6Q(6xoWv=Y)RZdJ$uyt8QCxIe?0rs z{rTC(-UqT%_x@LQ=H7?0$L)P2+s2;4z0)x5!NA^CEZOR7YuEyRR-UY|@ zf@6E{Vt{zqxcAqFa0uMZaSjTLi#A|3Z(sE#YD({c5f@IhFwg$J8Sw`pF|^$p#2 zB|JFa;=$W39?S^g!A*_u;Q!^n{~7!z-6B3ZXc!lyD}uVkFic$G98)*;)fmzx@O1{* zbb!=B>cD162Xki8-^oTw)%*s1Jietj@aC=Fz`gA=0&fk^2t1dtX~Y_?F`Ei%mU6v$ zs}Xnz%qbq98Td2zPwCk~o^ic3+!1&LEc)G~fq|E<*)(E3SMhjf;1klXxPCXu6z2?9#R38f$a|-_$SO5Oc*NFZM+v< z>&p)M+oEg3_pkQ<;>47weeBPUsiYb{xof zL~xGjpj$d3932scj$rR%(+7&dRtcVG!x=QtHo~N>@IbW9v1psx2yMqSLEHa7(l-=4 z4gMP1hmGJ(j(=gg&(Sd#Y_O5?@fuJC*4F z#`ye7bhq|yGy4jiEjdUft>@LCZ|ZG;@4dkGY2&^4rWt|9CS?SSUDL8tcTLZh9Xa!% z%s}R@$Fj%mnvpG=@`;%P1Hx+LVnN}qnc2&BJ)WI{4v;QL0q=$TTX#K?ePmaD_P?uK zQx2}sW|W`U3pt)<$+G0waQ+}!Wn>`hie0OxPf(VRBg>8b=AXG|*4Y}jibGZ!E6c7E zvUK5KviCx3>Lz;<*$mo~Jsp1W274@ojJ;ee>#ueE*V&Q-oV^y-4m5VxMjADn626HS z$2y8nzlj-F!l%bA+fw`~J}m>=>-!u{-(KM#7XN*#x0}OvFDWPdb<~At{CXsfqU`rMejp=PD#$=pK(hIzFk) zJ0_{__we>;;)`rEMTZU*UNT+^m>88XeRo0mDRZK_*H{oLX1?1WEqr}XB!i)$*^LaudO`?$_= zC4TzAl>VPSIAzqQ|21XCr&FiAwCkbl^}8O<{$$r9*nX&I_8KA~x>AQ(R?pGAA3Gf^

sXe!S;to9C%_}kbi zk$)REWjy)ocZ|>Ovg4=O*RNhOJmGKSr!@cDPp9Fjxa4*Hcw&Rj+Q(Tib; zus&l73*!v#mxR<)zLWk;`dD^e2E2GcZ6&QAo5(QfB>mBte?Ers?INymUgn{=a-dfc z*HUB2fE+H>M{^bq{PxM{zcI9@h-)cr$>B16cjlnv=bHN6zv{kO zPk2n@CdAe>`8~Uywyb#Ls^5{mD4~wxu@ZP}X{@nU^DUZJRovcT$}?r1H0tydzesyL zZ1F^BMUa^pgRYo=FKg8li#@M-aGWEZ_-e&~D^4%Uh$_vEb!(p+=8L0!#GIOAAz9E$ z-z2Nr?-R>-K{k~E zw%PT_;a$x`8{x*2c}%%(QVy&IaX*|Lk#t(DF?Bur_rxbSl9XU>)!t zkKtx*udDJYY%!GY@{yZ zUr>DgSmNu)PWjFMA=VgOa)e9sHp`g9dF_RZ>sJ7cRKGIz8(T;H<>R3b?4t{Rrp767Su#-4o%{STeM4BVg{E&^u<~$Mj8ITRHxRkWu{7@16BYRLf z$JQ~EDO0f{dZ(#W<*HvzT}Nuar~7u9hlm5O-QwI`-o==Fw5Mb8v3}TX>8{Bq2Zv4m zJMrtM?}_lA9UbYf=IrY8_ec4^zT2otr%ksr)?>Dt)W)Jp<{V8vK_-g|x5hc(HR?l!Ahq&O63szZO_B{Ce&U;Y!2F zo*nmPui$=H?hWVazGI^Kevch_*{ivK1NZLW>a}Bv`F`&m_h-M!{eO4)#9LRD!IQC! zW6!&g8ElvO`tbw92Xb|!U9UdAWOyg?w{dmGMp?%H=I`i_SZn;b!_N-H4g8rmu48Ow z^1z=hiPLjohH#(x=Bgt0ZN46vSO42|wP_m5xVY%4@!N~bnH_hv|C{wfXEt81+25=e zxFC!#@8X4bGT@y|cxNEIb1S@a8@w|J-Wd$<41srs!aF9fnLhTn<-5r`+X4R5IHCAX zG!fq&fTuOj*KmG1NT={RzT1S}?%dAR9pB;vu!gfx|?!mJ=^?c`p z!RI^wEBJip)a)?)%76CvCY_;OD)OJooN+1`5XwU-Av~0UOxXNWfPJgqrotPk%qgp% zWZMM$`9NGo*uZ2xFW~thY+0LsobHU|Lxr>BQo+WK@R83-)`hg@+Cj zukS^c&UNISc*EzukiMV=?`$KELbjc|fOxd&88rvl<4kL24iP(c*ww~dhhwnzM)%KA z7QTDU685H4jGIAuW%~#ES6rsNL&OLlro2x0M0QzcK5+S$ur4609cO?VElZCQgM3d- z{+=O@c8l5bthC}X=;jZZBt?5GQkzeLwEd_L_VEptTadC>Pt6zJR$oxvn zmDgK!J>Sz?Gio65{{490mC0UhQD&Wj``MA_yQhik9?e;~?3FumZl+O%&Z%)bXWcdL zt+|U@8|_LDaBna9ADw!9kM{Na{hYSd;NQrL>iVmUnlsQ~Xnw23M``=rah$6cQs=Ku z4fIz`HT(xT!}j9(wxGVPIAi`=v(CrZ=T|hosBGxqpYy}&pSq%azb7?maEY?i_9J{- zw7QSTXO_^YASUqB+WOXE9amyT)Z2> z?^;ks80EF5>?3?fbW~e{=Z`z6gJHHU^#4B8Uw!&^Tr18$+;q#&X!oozmnLu6~4TYP7N773#W- zI_Ep1D-VNFtFhTWBW8aIU(6mjK0(wxWh5QK9xOuVMaE%ay$4k9GuAlZ-x;U zJ(TrsLs;)-oz?9M1COG>qiCMT@LnwM$MHL`Xht()TZ4FX4m>&q9({%^oW=h=bZrom zz5ads66a@Ms17 zmDdo9T)qj$PtXG+daEFb8PcFnqLd*|rIe?rc(_|`mR z>LhhJ&iasp><#uMa;9&(ktfCA1~`L$7wJjl;UH@^v>u&cCe9Q|OMNMnA1taK*>^^jn?dD0$TTds+KO3_wjGS2%H(w<1qJO`yJ% z>%ez1xQ|Y%(OETraJ8+zNDjcjr@-H%eZW>Q{3y6~4{4n~cy^G#F?}Fi@HzZqVj}ub zm^g&~Y6V?)o~#`?H^24b?UoMp6cFbOjz8^e@B0zhC>$S#&KV^=#&^TXry+Ya=!Y73 zzWhGnc`&ZKUjMWM&s*1hFQpMIub}RAx{NYv&_9ip^HpA^UuD_#SKWUYf)(CIu)@?Y zl>be<7=hepBmcL91H-|CEbxN8sXGpxT+6y!GoED2rPm58c2-!SHG0Ac*)7#zgks`9 zfrg_fM?5lydaVb~hTy}QHi=0i*(71eZQDBeKTAFtxsAa`T>-}E413N2-_sV_o4LIGM=);nF87nPS3t1+2WWSQT||FhpU$KCKV?f)C5(EZO>YfFm{HHWFW87G&E zD~v0gD}pPM%ckcy=tn!E9IR=#VtI`oK{|9I6&)JZ|8nh*32ApxNW1Ow&GzFj+5b08 zzjga9UcB1zk6Fen<`QPrG$(FWG}1mVN0HBAi~%q{RJHS07x(iqI0oUhT zCz0#_kG*$+kE*=({`bt}GPz6=B;1h%uq6m6yeepgG6|^RCRS*~_YtOzu`+0tAt>1dq+B@*;^0A%Fv2)J6@11&1^fT)E`OGC{@&4MAdCX%b zR8oWYwc>J) z`4-1qKdoxT)olM~wkM3Js<@f$EBS4>e;6)e+ie_UjQU~tzKq@TAIR7<|0fxH=RchB z{CwT_7i{}2zjgno+4q0huKT~h_Lum*_+WAPRrbHm_P5#gKHJo$582kmwu}tgoUzkt z+u#h^nGtE*B(`6`Z{7Di_D^HG+IczKzr}C0?P~V_Guv-w+ih%9+gGx!h;4ms{qpoH z9IQPs@Fd>*FwuwKFB1mH-?Xo2KIxCXOl|LL>z7;C9RKV1Uin+c_s05N$LBsroEXg~ zKDO>+9(h0?uju0ye>=YRd2rniTpuTo(av&O-r^B&Bi~^e!1`pCg)ArdJp}nRmTOo# z_nw{{^2xVOU=HVFI$_5)+TP8#ZSHFEZPvw8Exv8@Sn*f!R&mvC-U^>}c&l_( z7;`wQIIMh$OR=-NIBbk64lA}e(|z54+?mTdaj9|IuK6Qb_|`*c1F@>UGnL2hr7U;) zBHMn-v5yhE-o>`ZSsoi;Sl>(h8L{lM{CPKIn%c8M4pG9Lbl|^IG9p4ZKsjfP9EDA#YxrN-r__=>>#+}5+^x?F@JLhK( zR?M(-{$Q5#SRc+d#S!Y~KldDs*`Iq(-+gK=oOuzfq*h3i%w&$bXtE8B%> z;nl)vJnu>Iy{+l4qCW+wI~7hd!Q)R@hR5QP`K4G~I={Q0cMbSt9@gSm;8C~~CQrdh zFUOpMzg~{{EZ6yaYKt|V$7x|T9vXN`sYbM*Zt67-I#YL zdbMFBIX5I91u5ChF|8bBtsG;G3Hz*ZKwd&k*j+}VRoB+~y)NFVq<#pU3@oRH zN;-L=z7H9hJA1r`IkP?X96~e)I0wCrA$e+lU z%Wn(6@;UM~-(tCqF6TQqj#qx_SDhtJYRo!jl7 z#67TcpOvdP++!ZxSlnX|NsNYuLcN{L@ z^tr_y@k70}-5M*$|H#kF_k0Q;u>3r{O??}!FB8b=ci+eMdGhH!Z9C=pKjqj{`u9_g 
z{d2CXXWyXrh4DYp{pE}9uc-U!__|I`rc`xtA-Peo&(tp`qhihr_k;qrcD4j_>Bzz4#NyZv=RMSbif!?L&{Y&3VQs8{hp5QQJ=GtElZg9eYaqKjqjz z=eic|Y}=;TQ1=mrE!)<^b6EZA*tQ=0fNXxChkns_6uO_wv6cS(te&==(*935wx{!Z z+IC9+eabcUj1}~W`eB|OoAq}7Dg8lzc}LC8T1P#_ow<>C>@Tg2F!=j(>m!y@6LA*x z3iYaMz-PR{`|&?BJi+H!8d;KD9==QK;k(hE;0l&IS$uj|hT{Xok=s+NDlX$)k_9sdU;lmND&&#Os)`c!|Cs&1MgCu8!s%5Nb?iUD{xGr=ne&yZ ziqqn~*AKkNd;QmtUq{ZKP*rhx{NU@qb67kUyGMRk5G_ zFSGwcWEb+j@y1+z)Bgcm{=}9K+w#laesrb%;M-jbd|v9p*Y!JIJq8H)*bGzQ>j>O(JM zn#`DvN1lZg2J#ux*^KE55l;2!n|*&jkag%k#MvcH!7ZOCKDb@te7u;td7)X5}x zuOE6b_(Fai`NG_)iYpTaUw`$*;0w71`Obo>ihpN+Df|B~avO5sKUG!yf&G7EeT|5ALB4%8^eZcO&;8k0N{O8>{WSpW(E2|bIp)_&0^jU);`sJeO_DjdF;~w zY}DEO&z*_9vpDHQ(f!9(4=Bx8GoW-5%Q5~>M24Ghu(QM;`N957Y}01y_a^j^n-GuD zH^6$cZ5zS$Pg#yq!?nZZEjfp46{!|5D(6&5UkTfl%mo}^9zd9WlW}|OEMxA|ZdZvH z`<0BnDzkr&$4;GvU8=BuUv?3;Y+hBxR{QrY->j95;)wk_pEi*P zt7x=;Z=g*-r%kQ)Z(%qD=DE()gDmd&s;Q${4DOl1GtFfEyDb08@&rqqy+&cn_e zWN~9#Mzds}Wu#6b_cN3A@3Q7CIEaJMy zB7jas^U>J#zjS=M`p%6AwZE5s%F;X9nit|a8{s}qKODlA zM6l@Yetzk1>@!FIcAnqCrGr!9(|I<%XR2qDUhDX1>{wWcdKM$W2q}+ejkUJh&yby9 z1oW;;mTU_8p*E|n-8w_RMYqdYciLA;F7v=0%t7ma?An;N>?-ZrXt%3@W0e0-Xaj2Q>VH#pqD@oPFy{jc}d^`5N!o9eaI_Y=UR^IY<8-Otkl z7Po+>SYIc5DK0PzKeilMh;)3s?kn!$w0BQTdmY?3(rLfr+pP0#-xjico727tJzPue zlb_Rh@?+ii1#`NGt}j35?EfM#=^$o$9)Ow%AarGQ0pW*aptaR*wAK!3UlrNFZpN*c1BS!2;&o*x{ z=M{2ZZ#J)Zf|07+F#mI?{So3O*}i$2a{y=3k7(N$X}7Dp-EFpAkiAm-oc1iS#vdOR z*>Cx<#n=wjA;j9P;&>ylAva>T^lpLfr~5d4BYzg#C(*yvKYp(5-1`pu-j01*>?)q7 zyi~M(DkjF;F!FC>pNc=F{(a-7lX<)7ns>d|5<90jfx36i72W#g5B6v9zp!p?Ub(-J z|BJQ2=wG|;qe#tr%8b6+w7%N3zg=zG7xCVMYTliEb+qnl-KrO%Rz&Y>^t}_&_pZs` zI|el8*jNAF_x{n};=QiVQzzV4|K9gr-QWBBik}D9Uqt=;x@2PVsJX~kZld3Rjrw<| z=H0lTx^xy|sz|*%d8|BsFJ+0YcfX6@kFz+n?my+YXZigWOLVQfv5D)kRB}D5&Ry5z z_fnSVI`_Ny{WyzL<94ZvTkmv@)M8{b(b8|2*ffue%%j_y*QPEUI5ueC!2dV_8(UzDN6653y*!<~43U?PR_Bgu%iX`5|%&*YuNPx&R970=3kcWR-ZB7Yxx%XxVLuexbe(& z$@Z&Hr6*23x%$=WPqyu*e!PPEam{y;rz-iTRX0AssvF#b(Idrk_SI%Q5u^P>6=A->V_k3WyRm*mtB#@4qvwQWwl?Vnpa z-gjQ4f1SFpFSpYfw)vk@ z>uwz%W%GOU-Lbye$9E0on|-nN-Wey`H=n}wu=m~lALE0*EZeRAin8~;wcFAU+umEY zKE{_u`%K~2scjUek^Sw_U#hOp8BeFKu9sTwztwS8|B#oGeesEN^w3@%xb4j{T0d+4FkpkDg+j=>BkQb}zNq-F@^|KCajPh-$|v{gJ$U#JVpqCr?>N z_urpe-|=brdEWz{JwM;}-AI1^7Wk=?j}PWy!}`8K*Y^#&FY&kil#ichzdtVf-}gwE`L-z0Os1E_ZeW4{IXx4_C5Q8)M3^38qu_sBQr zoOy=7$NanIO&=I#)cx4575=eZE8J6Feo0hb-p5>-^73t;F)x1$`Bp2|>C44`rtj@J zdCKVdp)ayl!HUKIPIB=U22TTncFsj}sL}cPke#33*h_x?7IMPf*kZnS#`(>*?uyp=!oS**~ z_po!4PToE>DsS%_`N?_vQ}~5^fRhi5$wxTj^^d;a8sGn>{CupxmhF%A*&FS={k&NF zAMLCA^7K~SqEp-UF<HWd#rC&^DP70 zPC0q~Xn)d%opQ$AkaFCBPTwhM#QOi_?S(1>IEE|-j~mR$w}bqa=v*CXhSpTCjY7Q*Aj=?(q zFUi}h=7KqNz31xWmi2CZC^lykJx+Z&`%ll=t6yVs_RjnDs%z1=Tch8x57_V6?}^IU z$Idy0K5M={D!(>?_Z6M_>R9Q_TRVB9-tretzVyqM-~LB`K1=ui9`g69{c*U5osZKy z_Z9pvcjw*7J)iQu8P;a^Us2WmVFYnkpF<3GME+dpQToLq2> z{g1Uxj@{S$^_ERiKRb5b?q|o&hwR+EeD0^_prdSJcOU*8Y_t05FUifTe;k`__m5?> z`||RE)+u>`qtj;uj$bk}@Td6~neQyT*nDr%CFY6i^34x!m}Pcc;SZCCFRAn_>dwpm zcjo7Py?g^d)>rtx*Z2K<{olXy`2P8{jN19vPpev??|XfV-}*m%S7+>+|Icji`~H33 z_wW1g@%;S0_dNghHbQ>9FMr>czwgW6_q~7L_x}AC!`(hlT}t2g@Bg~rzmLt+=SJn} z+s=%<^S1Fb=IL*>-`$V>Zf)PU?~M}8|9p`-_&+ztZ_QKuo#gDD`Jc8yk#{pD^pc;y z)t(n|aHaXA?svDDGwSYxsCl61{PaKierrtsO8I)tNd;o^G0wR3_zmB_JpCu;>0|Tp za|0*8c^_CBd57Nj3HkWIXUxZc()aEs^yTMEoLpgV`57nQ`(>*?ux!KMI6rUYQ=;N;;lRJ*d&p7zCj*qIAU|S`S{qp zdmHt@t)BSu$Bl$wY%ZmZ|H;zRyYk(B&CZ#f(vR>IC-3Uy3VXBfeaHRv^Y8KtEAUO0 zok`+iyGGjrNA1$2^#B zgf-`MW=G!sM#Bg;rWoOn+aLC3tQnDJ7@1`ycm6hMbxLXT7=L)SFDcl}b(2>2FMS}5 z_5N(bt~6x%!_GbS!XVHk;XA zusXT4#gklqb-K59WqNAxeNTFM0fTRroXg~{*q1e-Qf?L8}oiUj&qN> zQ)&x%rkd0fQ&NrM7x16q5Z7y7?JGS>-|V8z$EFzJqddc<>1n}qw$Jya2lY(**w^ey 
zsck;T2)8HtYK_(Y`lcx&JA80B7`JHs6p zJm%{cEQ>ShHpCkx+YF=q4Ue~cBXbbj;*Fb9_&-1iw5R-?HTMrNQft?7y?pv*pvx63 zh)XS9Xn2Fh55C)RdwP7Z?7cPjf5dafbKPg-jFQ(qgM;;kuce`*Yr)=C{Y#&t4I>M$ z>u`COj5zp1qhn;@n2v4qW_ z2W#nT?H?8Az4R@{XE^)U_VZ*Pegvi0!m%uhkc6EiRsa z568CBzqcdT0qeF zogVbMhSfHLyGHQj1y45>c}p!_aHR)Lo*~DF4uQLZw^#3P0$*dnSJT->_z)Ot9E8sJ z1_Y0wKkBED%N^X{OAFou*3j#c&0y__C#l@`9{prvEuQukgSDn9{_q%Aa%~fOx9o}J z(yiPp37at4)i2moeUUl#qU6#+?xf&H9JA&2?{s`EJubNO{WbS@c!rc8W}Fka-{W9# zsWCYCFgV<1q_h~|a4R@W;8}7Dzh~j_4se)T_|=Zh;4lbA>ib79mfW-u`c$tMo9V0Bhi7q|j$_S3mIElq29RHP{6m?^9^nKGq8Fv~b&)Qf# z&U4p;5n=HqaHVUxXxC4dE$LWWxTG74cNUK9$e=cHvyow8v6f@s_oS7}CVrQ7+0wF- zyx%6p8{Zg<9cfPYhsU~-*H6bTda;Yk^?!`OY3IsIvE%cw=X0^^bFlBT%?|3Vnnvdb zu=`fM)v(pBD;hnFnF~9OJ(Z1?Z7e8CDQyBbV;Sov>~lep?6WthHWYN_ZYoMN+CQYv z_t3wM+`q`u<>7`eT|9W44fu&zxRgu z!@~5f-!qQOuUw5zSyWK-;^UzdOyRGO^aP`pAV0sFAG^}(sdX&oVLQDGZ&+(+9LqjbcsFCfwFdsmN+_dd%bf*p7dCZkm z+lKDU`*o6~JH9ku@NrjKunpY_U653o;PwUY=9mfi>cz_f9sh^lA8~GnXFz#_tv`9_ zk74W2B4b!fHsd=F{dp9dS+#trr9ZjoPu23#9T(X8(*?d|$9=R#`g54;u1)gz zZvD}w(*X)k*M&r1FRHlUJuF z2j4?q-%l{w_q-wQa4Hsy(b=7FhFCnF#NyCyEJ_C|(4ipb?6vtq0eqpgKEafY7jM`J z2F0aj$&O=}chUa!w86*rv%rj=XFmIy@Mp5q8QA%q;BqXseG>bMZLT0&J`xd`1%9q^zBLr-#qM^7^>Ts_{6D{pzdagrklS8CItW#5G( ze774{+Y7&9;i?FJ<8TCVuwro4H6soloeYmo!Oo^)Z++O^H0*CWJUYYd^u(1M&T^aY zz@^`YOCMtF3b1uvS6oTMX!vi(j7>U9=aS{ zMUL@vjz9eG+^6Z$nP&cKZ|VE@UTiK+^#*TFOAfy8PL|ziZ{vTf$rl`k!*qe|f8km$ zVXK4K*cxz}gq=0eCl_r!_R6H)a{( zEb#&CO$z-lTsJ;(k?G@F$FOaUaNo0JGvVpt2WM-2q)}3~cJPUO#;VLSLm9iM^k^jvk~n9Ca!Uo8aJ;tb%jZW4ZfWHx@+mE2M( zE^H*XlvS>OXjzi}S9EgAn5y*;ZF3nn1(-)2LHiiHpz-dS`+vYSy!aqKZ-c${W#(hTB(kEK5tZ^l2F9{=pa)?4WOsdt4TOc|DOFGw;zp z>=_WO_xM_hi9M=|GE1)o7bBNl+pQxbmyPOBJJ-Ws4uPqxen$IJkFg-n;|X4y<_;b< zJi(>d;Ci>a#l3dM2Rddc;~%lT8RdsO{mS2m!_2s^sGCcFH;TI~g}dYxV|!WNho@Ir z_Zd>2z~K$~XJo&KmiqdVk37SV5tA;h6&Hy%T_Y`Vy7#_s0BSrnhi zW_t18iska*EGbQ03$B8TXp9=qzR1kMU!KI^kl+;0prCM+1NWE?_Xy*!0&l@RxXyH& zdkB;7-foj(7XiqE^`q*yeHAX5xw$D!Au5=4+a1(`@YUI($+J z?J1CcpcfOc$3K^DAic`xxE&pNaH&x;3f)NJSqkyfvKPcUbr%rFzmG0t8A&ZC z;A)lVgNJ(!V}3#Q`k9eNsW9K}$u1WbH14gm-OGIwh)WlPjWNWA|K@Q8Prx^>hQodY zej4#Z!nt(LYva7g=x_1bCE$D#*Q#26QxvbA)S)&A=k3gMiQCGq_5HUHujilfm z>EPVx2dB;bK*yZKxwpKXemz1=G5Y?Kb~P5~lkSY*{4N{k?Qo+mW3XjcN5adl?E&YL zhzF;;`gQXTjaf5#-nnb41|M8j8xoHa6yOKAbf=JpWAGF3~>(&{(Jh6;D^Z4^kKox z*raQVulYd7IQS3;4r2#D1T&*Ioy5m8r6*&a>P^!_7p!F-jb$bC60(C~&%p9+Mxup} z3u%vg*~$)2;mU5m>n^;o;{td?mwQMzKDyS5kEXy!Q?X+{>{%LiEgky?7hT!U?8KgF zUiS@R{>MCnPaIBR{svBZi0_#;z{Qx?-PGu@IO;xeRL$@Hp>d@($9pi5`CZjbsOOZS*IKAhw#1P zup6n1DdSu(n9$r?V+t6C*S_B$>A^P27N%Tean&a9X4zfs$JYtN%t5pVm~&_xZG<&% zi=Vfiqea|+b#Y+fIhwOt_K<5m#(m!aE~ga@bateV5B{jh6+ z_^RTDrr*I`;HZ1Ci#fzrUTmPr_0AR_1nW(ACz{LH)^v#x9;M&8EWre`X%NrAF^$;z zc4AJ4w@Md$JnKR1`yqI5<0byExU2L++{)puW8nsq+{}5oQd;~LcTIu21|PZ)E|KL< zX-V83e2962n>LbD9Y8z9UA^dz?Da!jCx2CH>2d7I2)8$QdrIabzm`<~2=*YF*FF=)YpZQu`>Zh}IMTMiKU;g{i7Z!!rC&?dB$Ud&AG-Z} zmVS*k{GFN~9%UGv;z}d&E88}2)T4d?os zZ5uA^KMCgZvD>-ewLvyoem@51vfI(mpq$Jsj-L*`<@aT4TYh zw*ME_+QC|SHxF=?X2Syxc}AA|Z(q`p!~B74w{*5R)d)VuSSA>8!AtPdUCKv+%K=8e z~}U~&mIf8_E}9cq&>c?h5J zV=y_1_^LgvpTz@&$(y*Jerx|WxPgPoLtwNWKBM{YXijiZ6eeeYNpXTaFzIlDLvV;L zW2lA6o7T8X>);-Fw_h877VlWwC^{7HI0-R16{>vDLn!*!bzWm|fhAN~gZ;4=Ke zeEh?u_=$P&-MR4HIRX4f_x!MB<42#g@fPnTK9gOO-8Hf8njdb$Hd}TW+kUOb9gLnQ z-Ux;)4s6G2(e{2GcDkU*ZCy{fg(TTUIOj5KgY4va{7%Yc+sg^2vyJG7e1YaJWV59Yw_yJtVBVq?e-T7C)JD zCqHlq{5vtC&N0yQ6nsJcs+3aQPkx6yS&$sl{fD>Znco28lUW+jVcGC^vEPaZ+t7_; z;Ql3SBAmTM+)f;4DR#~48XR0ijJ*Z>Smf!~5~wbDXd*Ve$dl2sr24*xvTPgfMju1y zO9*=#LhsJtI-xa5mJN?*{#7>Ihnz@E7{Z4C3>)6&8H){H-Z2I~B)w`wufB;5SIi_` zIgYN3aQl+qM_2Z^2Qg1Q_{2MKmNx8)IJB@PzV?7(R>g|6@v?Mxa0`6PwLB6l-hsZj 
zmS+5IqK7dQGMznOu)WHJBf*l<06v<(+8 z(AWxB!k4!PyY0np*E0Vw5WDSQSJ=;?f8AbvY`{Z@2FIl9I%UNc7+2ICHo zslm3N#O=Uf4K}@=F?vtD+_vfUQ8r!Jt&QO_5pF)PJEswgVcB?LSUJ!dY|1*?t9h7N z#F4Gz^38Yf{}aHgbm>X@b2j{2{tcc{_W*bp3pVQ+SIrTma}8mr1q?k52DdU^?RjaX zHx{K~o6o{FU*Ay)wvK`c99Wh;jD1Yh|=2B6zuXE z3ujBIFOglo)@5{R9YDV0-&vgHMB#_cuGMBb@v& ze0r>rSX;BoRa(y6LOwF6IR=}zrh?CV@y(5li1NJp@QLevX|+eeu2;6h=dC>pf2sVi_kRE}8l z98ZF`wsHQjYFoGQyNNmFAoC~d;5o$`@BFQJ(n#9f%yZ3JZIrgm_lK7fcg$K%jRw!C z8jV9>B9d1>nfr_Gq_8sJVPqK*Qwt>?3!ZneXc*2oY7gUQ%jF= z-J{IEjfM9g<9L^2@$y0GDDiSVK3946cMRj) zESINc7UML7T8~HJT3O6_ZpV%dp^ZzKFF4G$?YG|^(I=aGlUwXme7n>b$n!;F+y*$e z=CZ@+<6+`y)u1emo?DEHsYer2YaaUo&+y>u-7&SRd=arjB|hvB*AnhD-VP4)oq1~E z+LhE?2}i$bb?zp`UkeQC9OM^-Wz|Ux0;`Olg=a7#o%$8yB%d!F=fG`F#qp3}s9EKxtKW9FBGhQ;@2AJLoevhMv8Pt><1iwE8zlWI%7Iq7%CE7r4vcRqj$zUEi zl3!$AAM9%GN!TS0e&Hst`>ZRuwruV9&HRzXAF0YUrU%Q`g57D!rC!4uTzvad3%d!mj3Bgk8MM%*B%`0Howj}nzNB#u;v%Qv+TZdSJ9Yh!A9zs*Qv*l&#CvMx9r+O&5Xxo z)i}vN)PU~*_N0awK>pxtFtC@tjP(is`PBKQe1Z>ua2dWJXZ48EGW14vHODogwocrL zIVi8aeJp*d{F!+Blr>;(5WG2&{fzyqaH)2D(6V`xtoS2hGt(@aS#!3x)bTAxQ;hHl zPg-qTI{pP8Bl{Uf*N>yOj-ScL&pe4;E2J$?vi|~d=dDv_neu`1GZvSo_BIQBY{L)D ziS{$nN&HJW=g*+62dPmw#F)yjMCR2`)@=PJoGQ9+Eq%o1oQY3qq~FWXN##C|k%RRz zhg88e*U_d{_HE#Is-=I)wdc+-N02QS>Qi4ZgtU zZ1L{2(f29A8@du{E>9y*zf!%w$sXAHd}rn*{X5+8h)V( z9DFkx)8I>(-X%<1Tq%O}Szz13G?>3eb`(qt&o6`LX0Wwv-kH7Q8QpjXEX#Mii!FEj zOTHC5FSKKE;diLzU*4TVJW9TF5w`RLu+Y{J@h{XZ2udKzvE+svBvok{CXKX*%0S^)uc4DZ#I5wtdE-2#G$q0z^`H*@wcOl zmHfd5S7uQApBJ|R!y$gx!)uQ+ZzBBa8RQQXckCnHxCwt!NuO!1FFVz!HH<;QM}oJR zBfsq}%_Ub;O#D&A^Bo4i+u^Cx)2KVdAFMSz$!CE7Nc&T|;|;|c;xk>Yp_V@wL%i`# z`18Sn2!2PxWeygc*HMp65PlDX-|5WB3%_IVZ)4IEi7BaB1;653x2{PneL8hO@Xp&K zHKvc+`28s{sJPS9Zn%sq@f3fc{OG%2@dP&46O|u`#qZ#?5&SxN(znR9PIt$3<5wId z3G4|c2eIoLK2m*3VqM1#gYXN3@ef1r6GQPA!-zYEo71!D8{!AeQ)vD@INMk-u4Bgj zb*nt3vd8(Uah45UN4veN*#>7{=e5_d=j&4Kb>fzF8TR@h)-#Qgdj{LT`}2FaQIbrI zvVwkqNY$FJCnxB0QOiyrwTw|+ApFPf zpJiIN`)8Hb?fzM*b-RClpmq93&w9Jo?SA^c*6Am0zm0X>t8j#|;O!Ln7=4lN!jCOt zOyZF@Ad`?qNFVZEWIyC) zUP~#G^ID!ja$d_*M)*HG{%~=kzcWZXTCM;i^hwKgNcyPdHY9!4ayODbYTY?*I_TbCN)6&%}oGm>Ll*CIK#b(0akWRpK|ak{_r+~@p(a~{kM z46XME23O|>P8;O!>^$lZbi9-sc<;@;zznW=$u)UqZpsXEJnQGK%`?yD8X2tjyKsit z#@t-X2$pM%aLXcpxHj7`Uti!dt0%h6edl`2#v7=Y$Tq@_3z26ai;#X~A#x`23S>U= zUgTorO5}Ht*C7j#^N}l%n~}xHAo4EcZOC=VrO1c-nT<~(xA40Z`55wUWEt{i6LQ_@=JUT9VOHiC zo!iG7ozG1)I$u23=zR5jqw~L}8l6>hjm{mH8J%?tjLw&@G&=wIZSoj58lC%ZF*;xQ zzR~&GDx>q62aL{rKQTHFJZyBn@EfD^rQaK!jn{DP4sosG%m472(WyEA+3*78s2--K zZzapsEElj0W_h1{(X)oP-Q!*6p7Y)2^L~%HZjrHXgsXDe>lww< zdh3(=RbI0$-w5~AH}$Jh&H9->Gq#VaQ@x?O`;2gOUsb0jhw5hzG@U-H-jEuqUR*q_ z?iC}fKC9l47OK9nc$%KQerA^W6mh0_UXIK7mHK5izCw1*pO7&{w#)DHPBC6hoO};@ zet6TxruOZ@cBj75wcrf;Z4>>mhGj0x1eP?GWAurxNsV>;Xs)Mzy_V$?mUCH#vUDc+ zJGK82csKLcmk(kg-oAV&%V3s+=@*-_mtOj_Cc`lIf8S;9KG$vTor{;XT$&H6jeSf}|rYtA>!%2bzmV7S}df0@VXm)%q2dc}OL-)mO&GQN9e zrkdL`eCBNC^~SPjoFB%Yu4Iu;T#Z!zUAiGZ`#xMzI`S+&NMmHYojy5#QdQZw^Qy{z zM0@poHz3vLmm$^flaT6rH&Xq7P|v~`)QmUiQe=b?{JeTIi;&<+;q@72f) zSfs~;k?#{bOP|%(4K<0A=h%L)g0AZJb-b z$}p{Q_o5H?xdL^sxdVS3uKT!39&#DyE##Q$R3l_#Bm7LIJ8;0>{}c8%?dUo;gRy`0 zqpt32)n~iRXXm;DuiWSX8?KT;A9bDg3j3Yw)MvZR7p{!#Yue8C4MwQiD4u5BV=%g7 zUq6HPE=los&SaUyGKl5FWUx%FybCpZww5 z31*Pzc=5_avk)J!ft-Ny0%KWJpCdem`WYq6+1GW!wc6lU+g$N2s)=YJ7Fz*Vp2K}7 z+3j%KZPuJVIrs6q zypx0I;ic26$|hb^Rpvgus;m}UL7|7aXR6z*8}2cMzlL~Ya*+GZW|7W#S!#0(b0;_* zL!bE=-x`h+r?mJbcERD4+3eSO<5_$xMc7f{Lw)LIDWdk_4)P4HB!6cP{iIw!G0^2n zjLlB&vHM(?sdI=I7Tm*Fjb^N@XZA+$uj8LBHcD!UCr;7DRCJK-%=whuVR)xBgBj{> zrd-4E4wlwOVQH3)8RfEOTbM}>W_iI3JT%4YT4a#Z}{otImvys}xVoBDS*PoiTCdXN{?> zNetCYJaZ;7R5P*EnZ!`d#AK^H z{_tY>!1d#D&E};>csYI0d^3_hX*u^oK)&4-g&AEY8>JyGF 
zG{UdGtwoUsv`gf8ak~%?y=)SHoeR`}z!XEAkh_ zRt?BzWb+k9xaLN~+;N%9tV$)mGCbyI2wCtYcD z?))}3c?Gc)oblybnAiWl(Yb4t(fQnc#7z$nH~ob8>S1E5UlCjVhWP9E_|I2}rLN$f z#8N55QuVJHoduR{{df%Z5;4@%EKV$?x}@${idqBd!JJlX74d`kTbaqp@Bp2_rv zV*Bu!Ok%68MmV}JpLrlP^bGNp`c(b+%mZnmXNa%VANBq$vmBdk#ZA-w;RA5T@qg%A zFclx5m?@t{eWN~6U+DSu%vs4s`waYm`Y?~>43bG@qfgSte1FvpL2nb)|O|rZHl4hKDo9szH z?8$^&a|P{FTLKs6n)6xLm|ljBR7~NFYa;8`xT5P-JcFgfY5vZivTltj>xWsl#*_7$ zi4nj4TpV#ns>j@s{mJ9F>)hVQvG)8_bDuqqik}oWNgtfJNqV8U=_TSO>BrMZ#Zb}{ zCx-IBkUlvFzP8+c);W4ko_V~U5$V(OA@}Op=>MnmTu8es{hdW7T`X{tj2*!OD9X#2%}xs{OwR;5zgyEDwfhUb*=MQbUnpP6S$_fM{k?L z?=ftTj-~E_|B5%x#YclH(d!H{P6p{t4_vEnfQ6dQ)g^sevE!uQC%~G8kXN z_|vnb~?mPK}81M!pU|CCz@z@s+X@zXK# z0E(Y(;GUP+?dgY93^&Sd#~8H(-mX|mIErqE;wh&cil=nF`r(P@>-3|Z*GD|Hn0QL} zbKJZQOs~Q z%QoU8*o7V#igZV`8gX?3w0KbY3;bSkJxA9Z=s(uiV^2G0VCroz!n9 zmQwtEFu|De4UThSDF;jBuPrR)3p3zCn3*Na5KGlEHqo(^;+u4?k*4`B_~2f}Pw4rj zjQ5^YBfN+4rnZK0_I)gd+Q{=NE*i)9M#oTR>pD{nQ}#mJk1|%ekKu~MQFD0S4R)NQ zewSTRoTO(`taOJRE6t&;ImAlM08tud^6B8ZUOZNkyxqYhX0>^E{<_?tcP>rh>zk; z#ZYh58{z5j?8E&Nncs`#p;G1;3nsYU)qUDJoX7u9?nr6EhtgouC}A6*C#cO!>NBVx&&QGWJbs3pi~Wr??9_#vj(VFjPM_L^4b96_S`> zu;d{0Pcof%J|q)~yOCLpLHi}V|C$}iIsGTsFTf`@63-NHOyd*C6y&dwoZGk!$@4YV zBDrScekAv3{3DWkHWJ6I)A_d{b^g6bo&P9O=l>F^^M8lb`Og{QJBIl?*Xi7sjc|_6 zy#}dsZ$|1|;+?Mqa|5F{j9pq-@3HVU~{&gMpeFJ{AjF_te zKf9GUZ3jNLhB$3EG1xw%(~1u}&}Cw+{Vy6;-b%SD<*byeQf#Gs)n4*guOpSuip^VX zgd0eYHyhrT@q8mh`I}mDR|QY`%@x1SHE*cSGrybV50`jy%{x-^OyPAcza30|jdcfi zZ-G1EDwA!()&a);CDt`ZrMafc;^L{5#domWIkkR^$J}*>wz)%y}v+_HUzSzaMRT_z*J#iVP(=YX_`kT8}orVv}G^6`ySI0oJzH*Qm+gIwJ zfm%o&gWp2gBtJW{)m9AgOo^7VSXI^R~ zZ)GH9Oulxs-)uy$3I^qx($^CG{x!Os;6o*{A24+lft;6WU+kwrujQ zZ1hDK%I13LM9qtFCh?ZJXDd1a)@r3A!rDys3%k-0Vf>XOv#K)LeEtFSLztz1g>g$S zqzgCldmHi=r1WNZx~Z5)IV0tX6x%4?*-I>>cQ(f&p8>0ibr!q5lMBXGl?BhKDw{C9 zs_gj1Rb@tERoQl5RoRX!s>;U9t144YXpi4zR(jp$4(6+OO^jm>HlDeZ1XG+*^YG$` z@;&l7*=}Qs{Ld0N{*md*|A=dSw)zNti{l%dCwso0MR886JCZlw+u{^5xh<9Yq%tzxk>Ip-o^C{-cYn!gEOuN zuQ1H*gNgZ8xy|}jk-oZ({TbZ5hH+EgD|v!p#wQt_89pPNK)s6La)qw?C~?aB@Y5Z0 z4YPVUdAAJZJZM9v-525-^o4X!eeuZeyB6%E%~kgqfqgZuz<%P5Zrx2So~HhLt+Q*8 zY>jeQYvF6MH&b|K@nQ9Y(=WkY=uB~l>2Q~ITkm=3%wODP z<*CMtZ@@3RTa6>8NVfA-^EpTTy&OB^$Id8^ zGlF|1;~R(FlxvRi`OPzy<(k*c^P9JPGuQmLfAX6@A%C^YOMdD~@>9$y?*F#O6gK{- z{M6b@%oi8Oo39llnCp3t1K&wBXIng*Z`8PwgM};`SYq>1A@C^swT*A4jOSgnW^zl) zUnD={H~ag@*Cx>hn^cbtf^@uAXA3IAgLCJ1s}|f-KG)gVjIaytfz%(w(`8SaB@( zOATRHlkA*{-S*vQMB2WH7)s;&2)X;(Y~HzG3=GCV&+}}tt3mQm8F7%GA@|T~Kb4nrSw49vN3dX;X`!43b%0213<7wRYv*er# zV{%UF=OR~H%XlkhN-hr~&x-fs2aBieCC_$@anbdQiIvpmBF1_ndLWyozK-L*L4IpJ ziMH&tb4*Rtg><#=uEJn+|Lzf8c)AWzc=DXn$$kUPG{RO*Er?6_&vssxklTu zR+e&IY{z$6F<5{2pMJxU6r<_-58B7b$$@3^B+a({HKt#MSLxvJ`@+rr;Xxs z`8t0Qd~1x~Y`Pi#HYhhRDm^c723&Un`vz$HLgHwy*K{wjQ)zA>u{tl1#J;8{jPP^+ z%Wv+4-!-i?!aL!2O`DDI3zy~vUL+RV2`6rPnRmTj%?m8VN4=BhH;;TZ*KE4Z2sfR^ zzO_h>nUUl-bD0xwS<0MBKWc@GkOPpLjBxXQBRuTfRboA<&!TCSkx@WMP(HH}B_ zHo`v|G{Zdlpb_5NKz{ZH_@9gQO4hH5tiuQE3fNXtoLDyi`7U!~HH#DL61B~rSeJ=> zXAw3zJF(8AZAM}pb1=j%33Y9yss&)ZlJ%gyzJ>LyOHFHgG3yKM?TZuYQgy69p)Nx* zJE6`kX(ZJ3lWd5uYkiWugw`urf7D*z60iGp))cetUVGbO)>qo={`k5$os%73=aMwy z>(V6~;_6y|>0FQXr=07>)x}%ai_?8OYZkL_vvV!hud~;)Szj7iSNz#}1@WGdaiVn_ zvKaYm{qA?7b**Io6Rkzsjx5yg%oDB8={NFs`aR%8>n8mkbfWcc{T_UxwN}58)%rc; zMC*f+!%u84?$`0GaazX<#iw;t7Z2!o&KTISzj$EB0b@|dj^ZI5`;4I-FBT8&c*z*n z@pAF7j_3AGH4ki=W^wzP$G&dvdSJR0vo!BVC*cE%`J(F+m0MHnry54Zf=+Htv7c%c zeJK&!QqQ+WSLl7)`QQYdya9YICKppUCeJL$nqkEzxk0}R6MO~2)`Q= z>e?CC=PMKH4oYr8+WVhxh_7p~_Qls7vi4cOJD)F(w~p!Rr}N|LjSo}CQ5}TR9)EZii>`krGKEEXQsvx!l)gH3fXY!lh~6r9rSo+y<*9V8;!AOAZI9kIh2J0JJ0}80J?las_fYeq_8W6n)JfcP3hK 
[GIT binary patch payload elided: base85-encoded literal data (git binary-diff `z`-prefixed lines), not human-readable]
zwZ@mDySZy`)*9>C%l*joQJ$Bh`?wFh5ar3BUhDg6>1#*n>HoOImj3DO_Vi5qx%DZ1 zwY=3=hNaiqvzBj62~YoLLPUCPlXLm|mut&wBeRyjetPO9n6`$0<{-sEw7d5-{;Kpy6&8R;cWbRs0d?8n;1h2*WwNY(Oc@mF*om;ZM9&yy!iEzf_uk^cvMlK=LOUi$L% zhaR5)U$!Xso#gGb;Gri8PFaKcm!nglscP`)B zojNdvsr!Z2II#!M$dOssL|e&^Sr@44ZFeqb4rY06J!g3y*oS+Z%O8{H`#8(8pS({F0x9!-^5O`eyiZ;nDdT9Ht!GQEcG73S?WEEv()<% z&QfoY52?4vMrpY~rT|I_~K_+MmNWLtDuU3Z_XM?&Y5 zt=CuIQU*CTUPfQUkjE&R9JEp7kKMR2@-p!S+&)}ex8tR;9+B~MuJ6PykKw!vxt~VP zwrN~%&A%2M?QGmX(utmSqNjc9Mc>>Id0G5b&h(N?o#{Cl&h$a&JJUsGQ~%Ds80hG= zm+0vq4Y8%Kx<{+{5FY)F)P;4)FJntZ=hh9*^6VfV?Pt`Zp8sF&663xe+fa$VUeqPp z9gFRjwaa7l?p68Z>Xu@yU>A`jZpto2@U!!Lfs`SyRIpUIJ0(RDcbKhoLQ7khK$ z?(UvA%Kb-QTl(sO_Vfd+>8^vmVj~toi&*l=d$CuNzj}|@b8Lm|hf&X1`hN6f1G@5S zuIs>fH=-{GK9D@Yfxg@g4!Rf{yd9l6$G}5V^~Ap9RXw1ke}0#qPCkSB6foC6C)(2g z`&N5;!YD2MCrMiR?DMtskps2#B-*lpd++ww)o&%|kom5>C-2C&sp|W%om>z?GGo9!e6;p0AHrWZpzUPSG!U>fcKDu$ zm+-q4v^kY`t?$j2-))%WD%vpFmAb!(GVXa;ODRm!mKG+-Z_2^k;bG3NKE|e}&hY&< z^`9-@$pSaa{8kFjr5>6*=eM#SbT+s$WyY!&_lv2sS^r(|V=B+3PS$cy=AP7B{9;n? zIB9bm<;y!}y{qk0pOgA)?1hXyXm~|*0%sYwq=Y$(wG{tf=tkk4xd}er87F6HGkSVE-;#dmPb{$XfBA;VB`Hqo zPM$u-$LtvsRw-?kJ{(7W=I|q<6tAryPEzvt^^0|S`zK_1FX)@?9X4vD_oAFp-b?bX z@m_Z8Xz%5duSIo-RhWDzn8aaKO7I zvo}m(9(QU<{Kk7rZrFHV$%2hPDcQL3XC;ZB{k$Z5|76#i{r9+TIC#HntzY;=q8z+!auZH%$jK#%QO zgj@Uq#P(NgaFcA=c*0Nm5Xc@BU|l#&unuwxfMn4Roj!vdvCA2kn4kKn&8`b z;o+t<-E-&#7%=zisCGCAin%p$SbJD zKb*7^pUkd^tixV#((mzOlswesS;_rbcxTCxwfaMUoWwa$P93k!eP*t$XUuMU`XH~~ zCB_@~Q6KL^&iM;$aaGzog~)K6TgJ`Uo_||NUZYs_if7iXhj&cUJ{^OcjzLcI^{9Fs zT2y22kmrh{(?=rX*~oepI{jU>CScH%p$lr+tE#%k;kP8G;0|a|kGxki=BvSeeF=66 z$!kbnMHgFjMtkzr-t*3o*E&OXMdqS(tx<4`*h`VWWOUkCu(H^e7|voJ|7l(W|h^-SxJWnDVrMYx2aLsRI&Zfp_3|$v{ywRO~9s-FYVq!^4U})n>Bpp&}#6nlplBu!*&cO7V;toG1{>! zuDT>I8+$s^8)z>tpFGO@y}PdQ2HH(*qzfDAy7Kox7@E!fiO9AcBPpDygD z>$m*>XO1s8nmA0`iCuNw^4WCPZ$Ep;^~GnUV90yGhCjv@-w!_gDd{nNwsrP~2V6@w zJm^}pVTNn_hKF6kV>j*hcwgeH_|t&uRM9=Lq6T*oVD}4m0f4VBNE0 zGrTrf^K1df`RrAc#a=m-t#!&3 z*pqbZNhV_ki7oKik}Pb=DrH0dqiE=Y>iENNNe)(#$sNjmu+K95Df>otgg56JygAO` z&BYep)WCOvd};n4;?J|2KLuL}pH*At8AKO(89Q#5y`dPBNSw+*&Kmc$u*8z=zWqwF zFE32W;yTr-mn;-HMm{%jUJ2IJ8DnNrMm6K0_%KIFd_-c&aIo}n&XeYB%Gts(>Z5yH zlRs*3UGvest|?rwbkGa2}BX8|JuKU+ExNcl~uj?mVzrnKtt(>(d<&ErN(@+DJ z(!r5)!MKwcJH9Z(nKR01E3tnx$(8oeWY-Hb7Usx%smKVpaggZ#+2C}+>So=Fu{D!s zyxYcaAI<5k`>L+Q!%AHEmN`KZMH!{u>o((_ZRAXrIL76($dQJ&jd1V3f zjbeAiCJaEHQw+OQz&hq4EzGU#7SB=)8|f?8=LaelUq!(C$NRsOFES}(yIr(D@b~8T z#b39hlsuoald-!6SW{lff+QH>Bv4LPV85hwH%CA-J5IYu*9cz0mB=J&Pz*|a& z47wH?bQSsr(zPrIT}>K)m*{KaYm;~79Efk*;@x^M#56F(G%&;~;9QfJ#dkD2(fRcX z`qbp-`wV=WMNamnQoARr=i#^N!6rMwGHvm9N3caPSXuEum_qSCveMj7BNhLbq3g{) zv7U#`appY0ky=LVJjX>`KWO=1Tl;`3b?whxHjXr|FX7n?%kxodA9PJwJKZ&!V=C8| z@$6xavb8f@i`PEnn#u73*Jk;GdBJSTXH<|fv+>=v54*OmEp@Ho*v_@$b^IKHT~ooc z+2GhBg=q_2SsOMhJS+anaEJ3N*5>93K6=2AKatgLqa7vMIseR0>GRM0)EoYpuXC0% zWh`?=8}chL9UUp-biQNKK=i>}YU3oGFbk%jU$WFf^O3nBjR7=Lwy2RrMX zli`2t(b2^mGUr*#yiYALWVNhw6W!i4Qd2hY1oZo>9nkOdM6ZVQ`-$rJ8uUB!X6W_~ z>GxaE@96oH((jfsg7WSuc&EesZTWU*>G%ICJGJ`Vl^@dYCx&lA{4aBOEihag{NEA% zUYGXg;{Cb!q^C3AI}2ao$ZqSv=oTG*-eJKl$>4*rG#5oJY%2rXHbt+>^MHA{)%{=lU=Jn;h*~}3SxNb7J)$d3u z<9=%5RMx9A$I3CA>)DRU%$ZM4s&N)1rBPOqdcR1$uQBehzF)-qMUEogFH-ND-zy+C zFrT^DmFxpCnR)1i)U$x=VWXVPJ7+QP?DLcJenp_KJkUp;c<|nm9~``|mV$5eeQArOJ&tgPEB=eyONMjYz;PeP0*+-I8#xYf zBz|#E$rWGRTXM@6KQ5X6#eF4Duf5;3eCH zwe?iCWd&pB)7cj0z8g}>7g~g`zl?EuAfGt7XQgXWpjyU5;m)Jt$6dr&G9_hF z(#QC0vJ(rFoDTen90inBGs{*id$Y06KKjx% z{G-jT3zja-xrgI;-xqvdgDzCDZ}=SgF{e_*5ytoWe^lfO=bPvAjsDE7OyN-Pqesm7 zm1cjP;aA2l6}*g@lrcN!R6kdBw8R{>_r1V2N$l*luSZ?|eAkPvYWTz0%s=n_NyYfF5gEwUUOycj<{29KT+bL77 
zC7xgUUHUr9($^Y&?VmHV%-f9m{GO8AKEIdPwr#F!IN!$qOYkkeYs{PFfgx@KLoC?1 z&2=N^+xUOk#@k#Q)ZE$3Mrqqh!$&WE$4T_9ecr14^PgUoUv&ADr2brob3J?Dl%!eI ztH|hM@jIG*Y@Ig{zhnDl%w{gFz3+71JJQBNqrb)P*pY85x&*Y*I=?ApWPe^*lFj_$ zq#5La!AGk}p9Rb?3kl$JwDrMPJ8t z7Cdr0XBo4+PrSRVWh({G6lo6C$I?F%b9r5ucFFDFna6pqRg@O%j!ER;DoRR4Zf0|> zsqx3;B)vSdn6lMxMQ(=jTl05=`2ARZpFOvE0V7$x#IgpNh2a1 zt_S#kcHjObv*8h?kDd0CY`zssY{f#Iu`GR%F?Ul=E=NjvdE+0}7Q6PWEpdIy5&ucC z>ko4m<_zcA1P&+9UniDg-`XF$HgF_-aL}M!`eZz(efA$NH+nQ`vX>?(5+p#dG*se6c7e%7eS`X$TK` zISUU8{e$t~CU{Wy^PtR^Pkn9&#~zNo91@3U@{+{qjD=5xpR(YUJ3cOUJ^t}-*PS1i zxSrtp-?)DOIAWjM-b$>)7>`-?c}t z8glTTZeA5VDZHA|L4DcGs~yvqK3zE}=`wWX&(M{kFN@HRKAkx!Y35m?D}`5`C!i~j zr+sI$o-8~AdNRY%le-MwYfn$sFrVHLJ<_(GEJ0814x%UTM^EnWu%47LOSArIsV4(< zVEb&fPU*eKO;SL&Rl#^L?#x zEdHYw_0q3t-0@kqr|#h%&rawTl^%vhgOKYe{Y zwBJSEs8=qHd3IMX_NVU_^ChhH`2rFH4zF1JNjWlEkyX!M6!KRCREJ?&o_=k!#DYj5m^wmO*IC1tGhU{A()_P1O^u4;Mj+Su&Fv$_ApEUjWfn6t4|i>|jb2av{Z zE})EIl-Zwe2rUnhGio>aiKKl}k1fd1HhZ^9*^}G)e|&5g_aDf0w5V*wl}p$&=odMf z=U42dR>pqjU2Vi;&n&w#yo=W8jqB+iK9e;DvAx`xf?4pjjSq8r1|4jABb>HJa%3;k zDsn%1*dz4(RgUiYhZgRBHA3^;!?~+V^GMyd)28vWOFc_yQ#SuM(S{<%_zQ`(*+PD; zg_JFGEiL%PlwV{+a*A$77YHB7{LRtnE_BH_bV?4oWh^>o4EcesCr0VIVswv+BX6>Y zp@X8(LD4*q;k{VikK=dfpXpu6Gt;bt_K6NcUcT(wtb?l2NghN0cE=us}qGnmAEzNqUmtTL>(6&RAwVyJHiBh^qGs>&E z`5N?5@bbPS_lH#%Nm&ObgnOi)L?88}Z*~mG_FRcR8upKPDx&O$9gz?HF=|lMv(Y=IuOG{I=OB}Fk;z?|@R$7u zhil-|-M<>`*+ZP>tKH(=(bUD<6Lk-Bglo}@@$6~EyfE$UR+)@^MzdD*Uh-n5kT-rN zx@{CPIu<|Il-Ry*nLoWPC(E-~_Qi^g<(;ytE`Tq$!Q(YyWmn~sH&k?91ofLuK3U02 z^Ah!#!u-|idD)(~5=MIdTaR#GU6k$l-qexg(B%G&HqTo#IF$;O#$c&Gb;_y6xm>6~4|vRwXCSWS;$va`*AONXj|D|8H@m z^6Uc63v_Kn58A#(t-aJMW#3wPN1n-h^7~zx?DZZS=f0mjT62-#q6GMd{Ju*#Gym9_ zmY_Ahh5T;iT--(T{H&+$$)^tG=*%+cvI1IokhvCehF<<)_l)&B20I%kFfY3U9WOaG z{k90c4rYs9fNo-oBoB>GSNBr7x)ynG=sl}?&{yMjHRaV$%l5qgaGz%%H|$?J_L<~h zjZctc_&Rv4sGGyR8Ge|EZK|DmjVD}BbPq(%Yp05>>CwstZn5|Ae=h}{5ZE5Ep93gE zwIN6uo1vwYW7b`COQuC1rL?7wB1d^z>!MtAiVNK`4jq$&t{ICi8iOvno*c$uK3#OY zcnEzEh=&^C|F;Z%RBJPI5i+<9ITc-`@DMWU=-%6khg#?$v472a$byF`L-G{1z(b#j z{yBD8t?15_6RLJ#ldnVnybKw}3PEqw_Y|de`qD?)x!tR4uyCfu3{!@v&#Fu=lLW1V>%+`eV<0 zfy`_|XAeg2w5V4U_-RwN=YxlPDg0FbS(wtPX8jIG-EJ1X?%9g3B^UP=>h}$N`893Z zYVi3HdyG{(Q(T+|~?`gcJ@t($e+EMbPG-}Smq?9o2=oR1@r(A<+`lFwJ!Lt

JS*T@u$Ew+RP2aguXp1GUpeZF@XM$18`*1X564R!8ug5X2c^zkId7wFa=ipx z7#SPx-b&l5X~%Zju?6~UjnW!*zEKmW9sL{MUcqtHDxzj^sU(;H#X zLhzK>q)6frq?~<}Qv*F3^)Poo#sn#m+R=++w8qq~w2ivRJ=Iq3PvZXxM*Ez6XASN8 zlC~_NU4p+h(Wa5up*@sUVbO7IV=iasBhqHX;>@(!>TjaEny6@-rwa`j1&o}2f8^@ye`qGDjyDqchE^wmYu0Hhr6=K); zO(mF8;VrEnPU*1j~_v(O$@|1J9AAa=GA ztf%l8dP?wES1_2A@6h|Uz+|tmSGr)bI_fcqdenz^bx%XqO+2=nZ`7bmp69!Fckk+U zP#2N$wR#_9AR1op#@NOu<72>guLr>L<@dcBs#~M&4V3K3Kz1!@H&M-w<>}Hs2k|JAdO_FLAz@^2sTs@Z1*e z{hE8y-{7?oiO_R1Ynirzy9MXTGr@N9J1IkO-F|T0&!C@^wtdNYG3U>uPPE~9o7Py) z`DdJkexeH}Mf-Fb@wP{ksZ%++w}9WRfZihK*o%3B`wufdb?EQ}Hsf(@>kjx@u-<3( zDEC2ZK;sX^9<}9bI9J(96C-U$|BKzIMec;Zz;{)D#Rg8>0mi#K+w)lIxz9$~;Zm`1&Q-|}KzY~7V z|2do&hJKrjeiN+MU#`(_Cf*xmJQK_}*?1Kojd_@txzudCfc}dQNl%IH9$kJL5QSUI^!X zw{f0~ds2}%!FpxzX=md+(GOb;tS9;5`%T$+-8d!gh^9l#>=bH1Hhr#}m&;A*3`7QVc7lPRf7{@N}ieCiW?3=R` z9NvSmRv7;7=JD$Ru4VqKigg1o$2c1kBboE-r&Vmk57nD-T+I`u#j1>5qZ(5jcO;E; z+@7Qn6Ozq!D%X;O-?s-`LZ!}F%f?~OADH-9=5quiQ}((;_-603^>)iQhu}NVz|%*k zkEWg1(B4tBdnCS2<_Xi-t8KL}e;w!V23=T(Bs5X9x%~e)T^d6C(xx0)>2jL+MKk`6 zcP&?$R!085vc~S@XTF3(){xYzT#|a_n4{VA^L4GE_=xP27+yJ-xv1ZBe8RDnV=sD& z*n`S+-pk~@xOZf4p|Hy9j*Oh2EPEN1v7T_QGe_pjwO=}O;+YqYpZ9+5u<;+{cANJ> z?(q2cb8W``gs0!ny=3ACxjml#AotS5_jB(wes>f1f6D!vx&Jfn-(}ptfpUIKITI-7 zKFSF*%1NB}VeaMQt8#nKtIEA1{=?h|BI{xF_iujLnU*p!izmIFP zoeL5_%6((v$GN{w{5W?R*Je96a($5dpK<>X*JeAPj$fPm_V`b7|0DjB+~r&s)RZ;; z59hDB_rKixM(*k5jlWG?oBPhhPjde=@sr#?bNvO!xBS0}|C_juhZc&y@jm+c>BW6T zFYa6El>ki@-4lIuH|cc~_Z8i_FLXEQl?d$=edB%f6?)ymJw;pY32lX5eW9D8Yod>? zKjMEyQ~np4hUn>&ix541bhshJpC=^m_}d$b2RE;E56QoepF;Z7M~9Go3fZTSz3(`F zaxgEu^B+xZ<_?h@A|d@6(yt->&-f~Y|Ih02RG{4t9FNNQGH@)~3Nta@3><^DH~yTa zYYCz!gC4r`t8s%ueu0o*AmkSie^|)R^4<1Zohd!rjd(E0ThuMPAn78G?i`nJ^x(LR zqZh~J9KAWd$8j#ll^o}BT+NZpaUFZE#98*v5S?xMHBCQeAl^0on1Oyn(~lYG#}PXd z$m8NO4D@>i`Viam9|}%iEPEx|${MwPWsTy;zSB1KIXB15EdIy8tDW;l@$7ki`y$V0bMHl-Kf?1Ra?Nuuzx^H0*6{x} zp1sGj?OgxPc(#h)u8{9`=era5Zg;-R^$MO#4DTxWF8@#B*+`xhalOiT_9egFBH!iS zi#&gX=S#TW!t*`+cAI>c|F`k%J)UisYvURF>NaGJY9eQgR+$;je!^F4d6|0YS8>T& zp72NmM*{tQDMup56&!szuHx|f>&4Gc{JPoyW`73bgTVd|?9agd3zh4$cOg2QS~{rl zdtFnW=(Y^zy-yXrp&Cpu z_RD8(Egu=rXO1WTyu_+}=Fak4(K=MFZ|*ZhhfbwKyE5KM^lnqeA1R1K#`BT!d~gMO z7JZn%=#i8-CF48;H{^pG^1%)H;D-Dcjc1GGcis8j1b)|@-*MgIcl?ix=VSNtvHR+G zbF#FGYqvbOezq3r9wUB9dlP$Y>XrG}__!aFdsb}xDBs-tlCakE@#8G^{~T=n#T+IM z=*jhnW7vPw?wfco5C;b0yg)nnhcA28lnBlcg{D+Kq))>v?Vnh*C&&YnQr-vZ~e$3mby2FiD!Jcr(I z8CP?%^ih1u{yu_JcM;Edy!4gd-$g!*lf|DVjG9 zKGVQG$E&YHzw^cW1>&;PO&7(Z4aWGJyfi2JeJN*12ld;r?5p2S?rgS`8HqkSxr@E) zO*?7j$71$_39NUZef(E{uKJO@gJPeG#XbedWgtD&`yK2r)8=%D{y}t`zdrw>&x6uG zNPF9)e-Pa+`dee&#_{IBmwic2NqqUY9gHvkOF(>ic^kNC9ym>6pOc1{bQ|ABj5+bl z%aSfF=`pd5ICGvIN*YmeGtXMZnoHbt(#0hcDAO8GrQ*#c7J|J>dXH}-=3HW;lfPf` zL!PyYJ0}+U&&gMnoHxRRU^A_>v-1}edwTM3_*8D$-Irqn(^ZzFP_s5?@E0gvu;?KDs zNI!+6p}W~1E#l9)Z}vrt_;YA#(%K(?&OMXH{`hm~Xwp{2pYy*-Qx$(6qGyGgw+Yd+ zLgA(mf98IDobujg{CPy~~p=l|7O0ZU(*@qw-Stj1WkZs{}sy7go9oAM`y z>{`gKh3uNjThUp5tuv*EPiuU-MQ@n;DsY@(>Z?FMk3T-$;%oHBr(67DDn8vZhEws) z$M7!-jt|AB`(oEse0p~C{M#vuPq*+vVE+gDIRpF8!~-UunDftpb}3Y@Z%!pdhZCcN z8sB#`KK&&4S-$fx8J|vUI&(|S@ifGyGk4S+pH6H#b3@JX>BOcp2h@IidZ=7q{C|iJ zCryWTW!yT4e-iX=Q^teL`Ja~fbo~s+r(5)8AkGW4S1LZ;g85AQZ`yqmM+VyeP`N(4 z7ox+7(IJ>_=xn}gQ#X9qn~0XMN}~=oyq}I!NC(dFBN0%o&SMx5z`Fee}nx8uJYu#TTL9wQXlllrD-# zI~t#UjQ*5z&bs(?OML_B6R7XM>(5m`wilmnp@(|^RL7@VbfcfXL3H1D{drLO2WfAc z^bexj&${^Z&dz@>d^r!jHa;N#xy;)?OQ2Q|6F3FxmPOp$mi{kmnPQa zwebP@&n5nZV;}d;ag2YypE=`Y69e*}%RDm2A?}<0cK5~8P%4t~5C;?Luf_n`6TA^FdJ8@;LalL+b4 zkbM%{AF@vsA$xDcp9jHD3a57@{yd~#L;BSke;&gBA^l1|0XwunDB z#-gpppw{?vV+`t#KM$fmPb&X8IyK}M2>AtGg=f`1b0I&AZ=Z>fpCxy~S?U)!Q+oEa 
z$Cn#;*POQtv~#8(GtiIIBEH=4CANq!H~fhH_;O>6=8sPf;y)Ce9*Qrw#Fw9r{O4ky z4L%6$|3E)xVE>u1116uCe)2$nb*NmQ-#$c#6QhF~zaLM0`3dr~eCJ;_zMOen=8k;% z&n3QG@}EyM@}En5x#T||Z{$Ch_;SgAKCzwr=Q1}OD%Tf36r#gP(V<-#51fNPL3+0- z<3Z-^PfL9Hna+Q1vHyWMFVJ53R=YbB|=0@#Uf4sr+*t#a$;#PsO9h6JH*rU!_I_O?m?AiDjmi!bl&{O98t zJDB;;iFG!0Uc%GlhMQ=~e@5smNXFuhcKmR$;{IS#Xh@GZPfBthz{Itq{KHif5 zoVhwPhWeIyALc&DGk^Ybo|$pfD(-xuCI7iK_MH3v{O8vA^YKRh^T2sg?yLOgf$`@P zjr`|<@#oxE`OgF6&&M12dad!2+*A3_t?}m*jr`|k{5k)t{O4x;IkZyw&!Ll_hVecc zs{H2``l)dPbXWP$p}9#r|Ndz_YZZSEO;!GL>%1uURQ_|hN4&n8e}IlE|G61|&i^X^ zxfy>RqNgwad5E6A{O9m(h(C`{-rI~n56QnTk6}ok`ttLJ?32O)A^T*+M~3XZZ~vG! z@RP#v?Zuym^lM1JTJyw)^lM1JTJxWu1^QLRMOye$#h+WoqyG4F%UD#!pIb4L8GmjW zgR1!RV~jsfD*ripHRLxS|GA}~Oy7yhe{Sh3({IwM&&>SimOk^ZTR%p>DgSB6u7&Jc z$gZ`_t8kKjtuv*EPiuU-MQ@n;D$uWJ>MJ#G$orZt(=GAorz8Klg%1MzU&W_e`ZKWq%-92yPt16sK)Vzw*B6%+qQi;NL5=S_ z8lQd={4C%3myAyzZ{$BWoB*CVWAW)0c?h(R{&>{3;?qOFt2kc!ccS!EJlfIt^kejk zlyla_r(5b9NS{D^_+5Xl`mw$EbPGMy`=>fS-J%=)^bMl>zU$9}(mzOh+oXRG-F`Ch z>9-#)U7s1RHB>pYy!0d4>+jtA(E2V%vgc0=v%9D5nz8<_+Ue_ycg|S9He9Rd+VIeN zyQU?`{+#0?wY+407ZaxCHQ2jU?zP)0wFsxz)+NguZi{ojnBesOu5Xrii5BO6b(GWl z#~k1F8+lIeo400p%i=Zfyz#oXJi+Fz$g_KACWd*RoEYw%)i=T$Z__L0-x}$it%X$# zkI>d-M)-Vl-E1-La9gZ9g7T|1$B3@fWwh4S{Wp%+IHV3v-Eo-~v2y9E*p*9POjx<} zf~1v8%N&|_b|T-3wRs=uYo}aWMRshodqrd9WtwfpRV%d>S3S>>#&ubk=A9X<^NwDz zrm3md$}q=e($0FDmgj&zQPAN|=us=}4X3?vS>6~u+C904(|g-_zIK%-=(NY?b?AD< zEaQ97Y+a^ducNxz-Ps9t_X^q&O&hL{b-1NGV^;7G51_{sE#9M3);8|5Cnv-HW^xqgiL3QeX( zX?Zcw{1d->#`+3q9lqtk^`qgPFxooY7FIEhy3B=tgzpB! zKl|+6D)-ytDz`&}YTEE6Z8)HHalhCT9!kpcF0*xUzd9Noa`~>`xCtKmQI@wL;!5wN zE?0Rc_q^JBTT-U?p3$23$1dG_|4laUcG|rF-udZ|?B4rq;qHY75ACDe{qV{GE#6&Y z@X!P;!hOFj!u@LwsY4b#R1J+Lby>M|cF&bde+Lhxglpb@ak{sE51aRb^X%SqU3e(g zEj(HXA5}xgLilGU#|s=9@7s7^cwjYrR1F_Vdxeh%L9?;YY7TXbgEm659emdbKV`#D zxq27(7WiqK@KgBJ-kEWk-q}4g?-1%jt61-&AKbX}XW6_UC4K4&J=#W#O$RyKUW(yKSDYr`kP-^TW6w4zERc zj!uvC99^wf9I!`L?z8Lgf>!B`aW*g%h0i>`{lWEhX}afUduFWv&7smKKltjQ^$xvfRqX)H^R>N4rSM&1q*iesJdhEm zqv3hisEzIFc<&dmFvNr z|Ij4{-4|W?Vi#x5yw4|0^pt<2P;>*0sI_z0~69xR3ji|08Cl3sHZCN1GSn{)j4?oFCKa7xnO z_`iYww{u=2&o93>Y0bc?Nqf6##anyp#e2y2N;&J2Y0uK5O?j6fCsO94y#JD=yfv3k zRpo8tcPpTk=%S<3uZ1^8!z0(gE2H3Ge-Z{cn zygSogy!YC$;;(Yyt(zk}2lFF6ho?F`U*8?&IWj?OMAtPIaCFmdd+suhHOOAQJ*;vE zd^hH3_WXQZYlsW7zG^@Gxz}#5tb;$(c{fwu{WEqj#r}_Ahm<{k+k@+;>D}G&w(jm5 zwS=mdza2S0nf@GHuI2g1$i_G)eje-K1&7VUU&b0B)30i-#ZvoyB(`3MrS|*azJL7? z<;WT=wciKjwAk-M=5^<>1}nbx+AFo+2loTlU8(&(5?L!`?)M>UgPvw@3b|+Q_aW

yq*&jC*&wd~Ami;~?Zi;)~ z$USqv4~d&fWWSF@W4{mnSNnbNzaQ7f`{?VZ7c^4z;{JUWdinieiR||QeOs-!;{MOL zZ_>*@ZyxWXuYbP}Xs+_9L)#YXtrFSq0~)IRK7^+HFEmyAeT3-QyxuBA&k%ni<6j@A zyo=4=H8{AA4(S7*pF5;qeRS~0|Ap*T$X=QG>1BOD$o{v9rx^ba`YU5h>R*2qWITFO`+qRL5BUi~ zeu7s+eu9vnz*q<7kE1wC{a9y8&vwT8s+XgkU&S#GVfFR4^55b+RQ_87@B00>_y{GJ zmN||v{hERO)na{>;ahC6zRK`}`PWzR|8#tc{`EOQ{E33oWqs8k+tlZ(IA&&*HEK_O zn45A#Rj&SIRc`7zALf?L)pF+Gv(M)KB<_#oei8R)8uy>)H?w*6BF`S-*%F?;$TKg$ zS;PHp+<%Yz+qwTcTh$l_?*;%u%OW_W0O!HFGF>=~v^D zHRd^{KG(pJkX?{;DMup56&!szuHsNW`yJEQi@)CV-TVFa`234|Hv8?(el-2sf&QdG zd=uy&4(vCRpF-vO{4pUqoEkc)aXag(M5krgPaQwYcVu0asdsiU#~7%079qp3-l9$2 z!u>$~;eS6sM~E&G{k0%ktH{)y9hY@uer`VT3N7a7g2{Sd%uYP}f5aR6f5`eO+5aQa z*#AS;SIPb#@y7livc5|8|43|S{|||c3YF`NpAFIBr0CGDtalPW2a)xBFh)KYBOi>B zzvz*aI78O+!4di3hJfPl}xHn>aDkc!C zGF$8J9)qsQu{Gt%Tz-DI^Q+>YYI*tC{J7Ct-cEaX<&v-ncUyD#87a+};2iA##T+I! z=*jg63nq|xe8B{PdLph@OI&DTMAM#|_`t-2fjA)$2L|E>|N1Kf6Nbw5&AW%_aBArg zOm}oPKL^nrEygEe_kSiKA-*TLMy!280$~r8;wY!Ml zJCWbZnwk^Et5*DJ<)QtIHxF>M;Gwqgrj>^}XqWwM487kb4nJP{1m&R)(pP@p1`l-@ z2l;Uganyoq6rN!`v+F_w#}KdF7LFQTzs<1;Z`ly0(p5{P=}62h+e= zratQ^e(|r@2)2I9@Q(%Jp3v`nafzKp7sbo0-%{gz;(AY%zEX~?-)c`U2D8srd%JT- zv%MYH+h=d*qW4U{TnnCU%g(Y^C9oZVcJp8Tx$0Neb4kAzi=7LQ(?EKt_dC#kR{K>P ztWVmqTh=}aq9^@z{1<&5v`>PxH%OnfvJ)q>zUx%P$7dWze7yIaW7`*z^w7x_kYnCc%=`b(Fex- zx#$I@6O7pSxgC{x;%kp7^Fih|k@128oNGV~LC3WyHmg3lbOK zVr*cIVea6Vz#pRy8+xzL64?j@4{-o)Oj`!ckBXw!TRdGQW4FMfw12fLA- z)#qG?u$#ttGWyLg_y76)o~JzcCiY94FV%ikM>pQH(8zA-=O%=n4ATAz{XgW~gcvWs zzUdx?CTr$%$%ohK#%ss(ksp82>&Ihn6|W07#!x>Vd&hbG__2oi@i@=x$K!l3{}Va) zzd<{^emvS~R6iayhj{(?F^2l_IM3_HGACL1q9}(xZbr-MCha96n;Pd8KL;ZN1GWKZ(Aye+_gA(#xgN3+HF*=*4k7(H);# zK;K67%j|WXneXuj@ZBxG%^860dUS&OM+N*~8#-{xbiKjT` zZ!Le`)USmme%0m4n)=n$ud2W6QdwUa?Y?n+8mu3$TbnkKX{~;|ZcWPfAGNR^J*WJ6 z(yL1Y>sYO>3xCfX*{kOR>s4+2{EO$eW&J14n|=b*Pw7FQ_2W@Hp4X2btJaT4?RZ{4evDc_9<}3n{rIu1)Q>mYEria?>1Wd6 zoaxZ2tn1_X=Rofevfd>A-;p` z4xw~MNAq0>-O&UmgsM*)3O`&D_2YGNZj@`I92@1=ygm>&(D&LfXmxDaiMmug^-!iTY$wO_VZwMZ0GY-0J>c{Kk$Y@`Kxv|aV z-JAC{UyuJh(1r7ITl42zcs{jr$=8q9_20;UM!Rvj|Ig>OwY+&9J^1xabF-V+HNE}n zkoxgDyA`B)2t9ha{~vNrLX0=WoHVf$=aMgfKJw!)O8t1^<)wi&jJDP_W8G%XW**)T zW{fFnYXxHthsHO&#;GokLyNUE$>-Q#;Qzw4GwZZ2pTpc^VD8zt-1lrWsUNSy3Fdwn z)Q>m!7n%FDbh#DgeyxVQ6?6aF&a5AF`w2OQH}l^c*N-=`mAQXY|E?ol-kP}|_R`#6 zY+OSb*G^{syTtc{-Me&qVDHA#z+P@`-?zC>zHxr5i4z**1f&1iTxb0_*I5@!&zkw~ zS9bopu{M=f2a;I|{H$kzzkn6^sV3wFs~@lPDH(HE%pCZ@-aL~I7ncrv?S6&kz&ELV zb|LD=>tel-`5f$bGIQZgI^>|vR?elu53S04h**7l`%lklX5UMb*nUUm!gHCwoa)Ex zFoH3E%!nDx{P%#ZH0f|*=@3d^XgLO2PJwZK-(G$Wp)Z=`XLV%$yK&8M-s{KfRxENa6Nd5T>@-RmL0?^8dhGqEvw%cH9sD8%(XQM&yOFg)`CO*AuZ3D*Mh@& zUJDNAgRw}o^Bc6oYsRCUMm6Kds5RqVhMMs>&uhlxe4|+SShZ$+JnEfWp78UU@i@n8 z#?v{iX8ahnX1vQ#Gal!8&3K&G)>=F#VXT^e9B-%@kMq1{JkD$5MsN}Qjm)0Nz>K{^-p+irnwZIlN? zIv3}Ehx3}Q5p6mCH0c?r2W{G>z}_v>HeIo{iDUZK^5ac?VCnCKnGy7gk|*$bK3*H=d-1cLaA%&rQ4qpLl$3 z$EOEAgYaqOqlZu5)3w1zug#<8!#3g{qrb@L4>Hc1=F>IX4cK#&4(EpseBIvG+<2Wn zYcD_3bDGJMZIbsEs@^7@Z*P70pe{fTiOUivzpm!S4;G}3lXrcLbBtOe9`(#YpB&iP zKwdn)L*8=+j8SXE<2&R%XW&@1Mm)YZk$1D*kyP`EnRMt-I(l8fkr*c^TKtQ z&}=slXEW(=e(4ZOZ*(+2htL~Ma6*UY$45bL03UF@q2`=YuHMMI)* zpy>g=HqgmG)l@a|q*2z5vTa_k<~O+lzRYu_59is|a^qXTv~>Tu)`)MCTd49DVNg9E zofIei*SGkz$VY9o%|Sjgug^g)M-wdGUiyUOqc+kv1Ru2-3sD|qkdHKMqvgdz=iFT} zi{l%PX^5|GRIvtC3Fxj^U=|I_aGpbcy@p+?osF>z;h12KYIAw+IvFzJ7xTV= zF1y%t;XK^d{P-5;liE4g`tY2WY5%HbZ`FKjvbWd!8tiR#e8Ap%VM~ugH`ZwSQOBzu z8|>^8xsE)@h=xUscJ@mDpU-V;x$(L=<<~dOzixuDs78Ent~4KtI#*=Bs-xRDFCp|~ za9*y^|3l78i1CJ)mnL?iBlF_VcYeILjrsA_ZOD&bruy#)qZ40pdxc#2K69JF+Q@M? 
zbLGukc{8^=5H~Q!o38ZSZX*UT^SM!n?;Q5VP_BG+pQh^q&Btld+sr$9)!f_lquD!S z%6rGkJIDTJ{(NvxD}Q&7F2`2O|1k491AJ%9pVNFF#(ZTh&S=up-0OKE_9Ghk(#Vfy z{=BZfJngGp8rY|-?GxqogE59`?o&p+ ze%-#C%N567EIn)H&tKX3^Tu3xw=P%S`!hBFrH#4r%hZ^W=C=>}?9JTyEad(M>yc>r z2-+G)!}3PmWYpzG+h^_}2-GdPP;}sH_A4}Zp64gERnFD8+%h#crByi(5&r~#n>q8% z+P&%Tc z=^a8xG|AoS$ozQ9%R8#egYT>6!B@vN0Pr0N)Sy%CYBRpW{#TOcNno)-tbyi1T z&w2GC=f(5 zp7P$wKG7aN{l0_Xd^Bg5^z#wI&ID!U3jIIie1sS;zrN`{geL3Vj>vaEZ}s2n12$&~ z>Ql5>|NZZwb0;Z>lJCL7{&X#;T@IDg4!zqyvH$c~?B}35#o9WV*MLXO;Wra+ncfBa z!>J~*=1=Bz;PKz7#M`F-68}*iKrkN`dGfy@U7mQ;^t;ifzQ%F<((PFTT=1zT-9EiL zV%1a=T8kfQ??<1N^wa6T!hck2TZ_qR?@0gOq(RfKhM${iZU^Jo@nu={NrR{7;5^md z4%UZvp*}q7!#2qw#Q8lq-=ua~JnF+EKj=+keR!Pz6z5MF>%+THA0GAi8s)&^+?P1_ zJ>|usW*@K77mpf!h!6g+R-+HU_u+RvesdfST4}To(h&1NG&ItW<9g6tn-7h4!FRi< zbD`0V`}09>jn=^&ZJg6+9IR&!I%>4#^~~{GqbbjMH|ZIuXKvE7ko(L`{>(bvPI+%q zAKsMxK>cFS(bT0MWPM}WrmROz+ceVkI4d-5{m!g+Tfj~n>$jHQZt7Q4zv}D5oBGw% zud2WGQqiwG?^4H?JilGH77gaN>(-%s&x#&1Y4zcCc(k>(=S+ch*7jO9dxXDg6!vEZ zV;%Z;#zC)|K7$Xk-q6jH<~QN{Vsvw*`AnM3nN~kuH)p2L!1TeGJ{aBp4EVY<|Kj=f zXb%yE*Ui6i-i`BUoHq>zX#An&AsKzxX1jqnib;pFqXS>pw>1a;9Qc`@(@bt`le{-I zH#X~~pO7bZAxH^|AhQpmzv)Z;~6(k-6}cr}}1F>c_K7 ztbV*suNrZn(XJU|&l*n9@PURijX1$E!gHDAGOhzo7cCDEkp8A8|fQ@ zkJ^ldF8TWLIvFzB*I>?Ub9wjXeZiXNZN+V8OHa!J3Q~mhI&A7QSo~EtKjQI_koiXF)x_XjkuDzLSujZ{8@oS?x zeCA$|kb7&|R9{|)LCn23=3XW<|9-ofA9{)9hibmCOASx)wSk#`AKWi%;#U*DHmQeh z?iIb{_lg?zs&Rd4=HHup_J1(*C3f^PHQ_ockP;4wu#*j8^45L_R=! z=aHV%%pRL2`2!u9bKj)?ye_x?lBqwh!ve=zp#>=m7x{smBdiPxNnLdp^Tw@_4q?CRht2>(cjBMhHszRaRxs z(Z}`-wzC@FRm^`Rg84_?%(Bz)e-&eLRUGqIiP2?MNz88-B)`oyC(*%J=LNrC_>S)? z$vcw<<3aCvYX^5olxFO@;m;WWiNVo;XA{_*lwG^7A4{R zY{R>!KU&$1`KQLS>{D@)Kgq>v^3K#}SBOy~Y_p|hbX`>+=C_shE~~_I((s((d1Gtu z#B&s}d+nb)+2qP-$!|wrDv~6B)otk8?5H9|yr(wJVGCRn7gct2-yKTjje)*+PNV&s zB)>)IR+eLBHI+9qf1ZV9FBm%1e|YT>B^l41i*Z$+&fn8}HY-|(wk9~DE>KBhgNUC9>@3ixEoYZOosFaaWwK zzs=4HKC1lN9ZFw3L-EryoJIRz8Kz9dGx}g|rsMZ2JTu1b#4~M0qwzeV?=c*YM?1ow9vr;H{_8P%f_LXQWJ1oW) z+d!vdLYTbM;gYvm+3v+*tZ*M=HHSr8vE#e??DxXhL(%p1*%9dDC$;&{zATQd?G|Q7 z`$Sher)w(c+Fh{73vBkHIm{-fTI^;2cqHlW-sn^1jx;$rO!E9OCQ9DklNCn#^Pl}5 zGz!CWk62mZEHO^5XAWQa_x0Jn$Ks3fYG&`*G&Qeo$dtS~i)}*UwYG_g=>zUhwBpzc zzbB+lOx)<0J8k3CK6M#W`qb^ie3gMelCZa8JY(4j=w}kzj?RxLS`ia1`z+D2jN?8r z!AJk~oHye1LCozCt8MiC`1r&w#pObR9KKi@x%8h8On>#Clcwj2334a=cI~YhJ!bFm z(aUk}FZl1ty$_6jXz$_CuElKRz4KUM9-f&3nLaH>7AFajz5;)J_K0JT?I{2Ai-CP zalKVv-;Ly8AS?KJB*t|LJWH~3bg<;F0dJH1)L|Y6TcUi$hwHNozt7*ZQe;&U=Bz^O zQPvgmvdz+i%SvzXi~?CHhMe3E-dX`!(eu5m^8B}GkNACIez&4tagfZNj4i7K zuUCTC)4=O<^CJU1iR);r#G4fv%)bEhLcHG#ynhJ1|CJcSdEYBo<;4!C{4#hy6TJT( zcptJ+yyIkjcD~B{;pqF?An!+&y$o5b1plX|K(DCJ=q?EIWNTDWWd`O7&;CKcb&vs) z`ImR3$_z3vJSI}^n!*Y{`JQATfy=-aRR;F4Xx}Qxz&{`ZX^;VN%9OgzQ>TQKfyW^O zmMLjqeRIFh|t@cTR8i40?RrGdUxG0@{e)S|(#A2!&J zD)8Y{fn_tu%$Zqt!Tybb9n69~91XiTN~wErq*B>yga`Jrutdz+d(~{=?urQLGa*W@ zfSu#|6ZTEBX%(9hE&3y+48S~J+sO5tjYE6W%&`T=6mpsrw($5{^W}` zE`tjopRoPAlZ2=$hIcQKEr70is1LsXCuH>$o=dV_5q(uz1?1y9vJ)idqP=+dcy^p@ zG9NFO^Mv9Y(dMh_%Cc8*IghF;hyEcs?~d`O;z%~Jk1FTuAm{xY(efV*a^6RkbCPed zQ>6TlIo%rBu?>(dlJWiGRjQnK3Cj5f=zkjXQB}6L!G6(vZ=Yi?@_{~NvyMWhN95a! 
zqMgz52ax4Vhg}{C`=5uS1@m?;-MVK^8uO{ykAKqzE|Zq zEG&}C@lnX}f|ST2t?h8ZCEKLPqN9-Gr|`_%V6TTmj%~ANFPo0vC6MF4T5R$^Vaxod z^PfEp-X~i&0djmVY*{|LirccukYgLSWs~dX=S>bN$LZXbrPj^MORXEO%5gp9ifmaT z`kRclo%v$X-vy`qU(Q7NznN2h+UbPNO!U$J8w*A^vS(l8otO^_iSl9W5z_2gmY67i zgWsX-*>Th?`xorlPSu|MAa*aVw{-XALY{X*Z$5};AL`QBo{bcp@)O+y_H4eu&d~ox zdp19eo%t8+8Ril5kQkKX?{bDJ#6uc?pN1W~gX9~skQ2*l!?8p#Lj?2??K8z?N3Qh8i*9o1LP{PN5$aeefUf zDe=%KyoZ!AL#l1uDTuJ0lHUuNBV4!=vUEs{F0THzK06G&_oOOI z5zXMiZUU2sGkZ}g@Zb-Ey=)<5C=D{S9kxc|G6cN?8TuTyP?MoGstmmc8A^s8Ertwb zav7Rhcg>WkA!XB}mjng7?|y?=?;!o!da;Q2k-_|O_} zzsUSMu435(u)ZU^M>fbt7!dk;_vZ29=2{r0?R&;=T_<Jr$gR}wc+R(r<{d#*BIF0{`f?oUwNSWPDhN~Kb#f1 z@cz$yA%^4;d#Eq4HL%4;<8aT$*l1a@N}h2sg4{D1*d6zg?qcBOBxd)OW31w1 zU5X|ipS`Eo6eCT;qxc89g!uVo~pTzxJ9(nB9t*9+|$gs9} zKn$0+*hTq$6<2KruA*y<{InUkiezlQjc`?zkJi*>1uM$7miRS>a$I&Q}8ZgNHc%U1+O6=${L{%Rv8!Vq)cF=o7k+=sV&kg~e%>bUYa6Fp;*&-Q^LVFA5BtU*`^2<2xe^63p#n|NGz^gfs zuhWE!Fvc8F;PX2;1{fCjcNDPFFEK|aAlr42t=b3O3dzKxIgv%Pu#W#T)`T;$t{^?& ziDucvD@|k-a=IV#c>w%DYsG_TCm^dmnw3=>bOzbKBvn>PclyAWWw2}0@Z4!}cDXO+ zTjP;%^k*?_oyH$ILZpvj{X~3m7j$j`)*i$cg;;M8UyvSc;EM!bGIUH^`C>fDrOFrd zoCdy#4D!Vj&G5zi_VLAT;){Ag^o>*HlJf=VLUZu_hi;{TFS-Z$A{u`Fc+5A=7wPN; zbK)^aHu=w(r#RqI8|FF*^Hhtu_Cl7E#DFa8=gda(jQJ`Tou}Yg zQ@{&Ve4e7@DD+_@Nma`ElaD7|c&UAx385lIIyQMm~-; zF)$aOpPe}F69V(Idv=$i(LbW2N>y1K?u&zx%gW(>}&d4b^g{n&)(k( zdhXINwdrp?M}KQ-gTHm&fC(4S-}(^ux4uEXR`^_>gs=6i{H^GJbN*KHwUTU;KlYOG zw}R)-#owyud(+?Af&Ny53_K1SbN>CUq51!;{H+*6bN*JN9GL#rpuct3dGohI&P{*o zJ+&9y-)fNKbjb0>sTa!M+Ppls+uw>YH0N(M%5fX~t(?CvJAW%==N$d5L2PLHThGGZ zYLKB9xD1_7e{1t{<2qY^EBfD@ztt!|=gHr?>$3B=Lf+cpZ{_2=Ec~s_@Nv8St!N{Z zztzZ39p-Pve6+*gx`FtO&j)-hmz2M?Ir_KP--Zo9vg&)G%ww>C3B?e@2#jZpqp`9GBvr-}1O;L|J(+))Nuf1P3|lt)T&(A!1i{ahEwH+>T^(0@=o6!DQ+ zh?@~MMck>zf%s`fFXXH`5Jy4&5alQ=K`dne{Ni|k=#8uRIXHcdo?n?2_XxvA+YwHm6q})y2*E8oXPl54k*RG+|p&~EF-t8aHSCKD?rQ*c~uWN5dRa<)(gnbqj=}z zv%412JqHn=r2H57#|tM&h|?jiSOWk1#5e&lF^fFUC3*DmPRil5!VjnAaXxNm9(qm` z;xUI1=ln_(iw`4S{nR7*&#rYuG>LVtDafg#9LkpplIji_eD!OXbcX+j_#5TbtwDI1 zVx5#*C!7m^+GB=V9(B1{RHc#&w8++4Fl8jYNEw zVqnuTcUhRXe)#ZMHR50t@7yVLmX9IULNTgb%v&t(&y9`ZaWDzaj|l`oyi$3%oq8#ygxxwVa2b zUDx=B;;5tp8pTn$4xl_dx|eKw6!d`arVNF2O&-PB!8@Zc?i=tqhP(`tdFZUdV~#kP zVs4AUJ6;}VPvCLXBvs}qo?3$#dop5m{lPqxuGf#4FM1F*gE(q zg~!(u8p%A-gvU`K_ZkmT4w^o1G#YZ>4RWu|8_na}m`9pF(gPKoKe`3tt8v}r3gk)D zBldj?vDLm`BUcRV)ms92pZd9Mh_7~2dIyjAE0kl*=0^$DGVV?bDpqAP1}*apL{Z{XHMefA$53MIK_9vuo8ky9RSc z|LN!KT6NB@!JMUFzF>292kZ?C%6_ds}$+G*>{ueQ^tO|L1 zRbrPi+S{V#OHuplkRO|eyu_oB_v#Gf&H)!t&VJuymQA+#YvdJv2$?K`t^FGLvfJ_f z6!Ov@gg`$bb5eVc`#j82K z-HQK$JVFUP_;=*l>gg5**%^WRBax#*Igc*P*NnRQ+ty>Oiv?EZ1Pv*l$`j*|30oY+ z_uc5v2eVy8>yg7#jbmTnkFP~%*)inw$Ig+K&3J$nmB+EN{m9MW_yalkgg<(Qv)wKW zlLuQ^4dwLqMJ`>=$zv4F38J(_g)ydAydTwsu1$m=27L%Jv(_I^HeQ4ZzNLl=G3 z$s(^oPSlGn=%N?E*DsJR0?neq*Ob$E7`X8W`1;WD{6_mX-dFi~Gx+(drMmxsYu{J- zc{BK#@~e)ZA6r-tj3JEY?0pX0K{TZK64BQa$kBdd-Y;wKRP%fW3!STyp+D}K?OvvU zrjH=!xDt7&RU;_h9Q%xrA5jS&pgjj^n0wmqK>8>RdTBd!)L!tj>~dasw|c)nv#89(Q7nZovFgUwaF#GCOj& z7IHg-KA=y%@qGvSv}>AuoIIfATMh|3p_yfXgqkJ9&$=NAfo+4;JdG2k6tqHfF8@NG4OyM%a9#fgMQ2h z4^Tc2?c*UHK;N++;pfm{cR`npflkYUZW|4ZG71=Fqz5{WW0Xqhm&zE-H}uQ?wM?l* zZf;s^Tv;X76a&x(=^H2U(N8lJ;-gcq3{?o9RKxy2Zi91m6k{NrR0|vX9_1~4UZ3s2 z{3IdYi}nXq!WO2@jxTx|xx%z(j=mEgO@Ry(ziGPY>$gP38^f|6h5ng>YyLsG&fvi* zLVQ^q_V*Oejx5?vd`kBr*IGetALS@lLPt&kZ_s{;lcJ++3gn*ly^(IK{8hS=hTKWg zagjGg7I7ZGiTS^ei!bv7pPW*4TN&0Yr+|NN2Ts}xIjzQ8DF?jJ2fB^;nf#Q8UL3C2 zurKc*bQ|e3%KJG0`xM~i?p&`m$IIG!=>&NB_$zmCtmMP9j=erip}a)OKRg;8Rn`|e z_v@R0t3Y#?6SfxTYdDWyrSfQO@eW!isd|cVcR6^J?M&v_yBs`9IEQ$&0e2&x6nxRM z72G`@x%ISGnt)vTbm*51ESz5{ 
zFO=w`%?0(H^V=)GQ;tG65wE2}f6<=IB;u9PAg!X3vkMF4fzkCNinG8Ou0MAgaz70HLyUA8$gASy;_>ZKWJVSgF*z@udblX?Z ziDVO*st0F+PpW$nuO$>!;Mqs7y94+vfsdgwI-#sL_{4`j1k^vTqpM6~dm4O_2Yj{? zx{Ty;6!?VrmvkB7v}eF4uk5&yW3^|%Crxo$0_T(dZNO<36{oFKbs6VFU^T*NRs(P7 z@mZLGH}v?7^DB6RaQ;dmHmJ+;Ap3-CNbel{p}rf|R~_M-SxUa^OWHRx$`~8buza(= z89p9blWDr``4%#=_O#KL%{rczup=ZBfSJ2cF}z^n8Sv6 zN~^w^hB<^SB^`fB_+||A^*H9MUA`IIug5bl8RkomXH4IW!9T0%fVTN&IR9Ldz8M1# z5f<**9^VY^A8PQ;=w*KB<=~qkUBF{8ZSu|VxujUkMf1%V=B%e`^V{s3!ToyMPre!b zoat@<1@O%T*D60s-;An9+UuK1RrimQPwR^D%^3Ll1@QF?ZSl?EzV{8jnfq<`Ck{c3 ziDE(TUoO5G6_d2rH>2l;%icF*-~qBnsPR7Wke;WisM5Jf4L*GE~nkYPPiqbrTBnP&eXf z#7+zCtS|=guxZ#k>99)A?!^Qr3(;&jjV_a6bKXPDbkMUM!fzLhvn=S0A z6Mwz*X-!TvhQ$($tUbl@=1HN{2l6;9YLJ<&Bts-U7TIyxA5#!XsesE zoBTGOACFow@y>Yp9rWXNeDCh;F2BdDzK37?!;0mvJ-1@dYtOGZvf)oFs5V?58|uU% z4}t0&AfDKH!!XJF+Gw|T`7b2zAAjxkK4K{uG~Vg<{C=w2`#mcebhwAxbEtE=r_Nq7 zXxA9#{cM`x-JNamZtTa-Y`Ph~i2>}))`9HIM`Nwt&r=HqZ63?cyni1%v**5WZ@f_O z^DkyZc;ipZ-t(p8czFx67VhnYaUeEAG2tbsv$PNK9~paIYY{J>hS(q)LhiD zA$nhjxP=AtgF13QFF_ynpp9Ka1TWpQ3D@M$VS%yR!##WO{4G5qJX;yNd0aU2=0+6^ zdcYzSPV6dr^6=cl$bFiEqYz=cmH1-eh9_1;!N0t2!~7K^T+uScoYKIrR4Z=^e6UG2 z)QAIJqU+pyrk$L#$AkYyfR}Uc*)+%p+EFaU9VvNx#Y)~%JM(UI3ErpC&z-0}l8M?k zY535XDaMX??(X;CU-e*|`9hd)1m^W?%~=lQCF9(k=uZmzQ_7fk3+7=B8h_fWi&jKIhO{w}Owuz-S-~L68A!u*Tj0M%J}m8n-<^a0SL3-WS(vX3{q`Y$ z;V6zJxc(&iOmV7{SsBn5>ChW)#HcpEOm(tC>5;r{P4&nUgC3#UG*0Le(j%;$`oq)N zs6QMXrayc-{m~nAd;RB<_sx6V-siy6bEDjz=O1u;AI0~H32x7XuIZjx&XPfO;Psl} zZqE@Z-Q&;AQ1;x%ykE=^ydS#?29>6=GaIjGXEsBhY=J)64Sn(vWa^V#t9R2F=#p&c z7wD8-&?$Q-hI_vpP%!B8+!%#)OMCQ68R7_$3QV{Mf=~po5OYi~dm9I|(^hypw4D{|EJ1 z|EWIfl=_@K+0485zUJw$9QuW5q3Ic-3F(>g;}N%N{inWDpHYZ~_mgBUAN{Z+$L>x1 z_eXqY;&T$$?1SFOfDFe8)#3=`#Vv4UzQyudRbj(x02u>BN zkY3?<1@rJV;^)UO7r-pL1Yj1|4@W+_3wrYi^d{-YotSUJEIZlLD@p|wvj}rnXqd$V z%RnHPgUfM;JW#@ zz$&l5_NNt}ytZP+cdr!%F-t9C@24Eh zSd_Bwv1sMX8FuAg102dGt5czVl@2Tz^ac2n#!i@pp1lOId`Ym~`eb71`We72uK~Aw z{EO1{uvy-ZAg^18SiF0(Mc@ugAv+Oq>yyv{lkg#YvKyZ*J(!oU1j)-%wA}|aQ8X;_ zhkM-$$06WP8q>|-M;a65&omiN59Zf&PG?-#-SM zHOQ?fRIy4iFbe4n8Tx~;N^^Rn59*c?Rym5;>f&z$c*P0+g?-nXlH z73WBYP+mYJ`t}cM!_NHs>_RPJml{>CtmAscDi=eR0@x)?js}it2D|XFwFSG7t|2}1 z0s2om$A=^78v3s3nlrQRfF2nJeKHh!WeD_32J&pu73dqDV^hCLfZh>s-h%%{TxZ4g zVbD9_4SFYnDS6GTkqFy71KD?}dPl=H0_DT1*ro=$hsN@us%JiA¨>Y2mByaqiJ z$Fg4mZDOD^)u;vKLX!6^eBbC|7_Wfw zS}aFZ25|h!3P1~%<`cAaRBEKcKHKz${(Rq zo`W8G9{T4EwDDWuns=a+$OrR#_+b7AI_X|~f7F>E{|!267QWANUd8p#8?Pi11Ez2lldZR-FA1>!_j379fZQhlk0i+PsI{(PlG}kv1_P5g!ULE@23@_t z285564s*k=BYEHWh1txJBltBw23F8)^4|Nxyq{s6 zbsBv31u#)L=IMW?ON!P;c#`DpF)Ll!ZNxSeqVnbD=CRFg729m-5$^rxBTj|dCS3Cm zv`u5}`)vT%^g#`A2yu2U$2N_}pzrT;)bpi2lPpVwZ`AQ>*O2@!N81{v0WJ(;8X5=f zS0j25rfD(`71PYe{oQa~lmB;#$|r_#-X;Lk7_en<%)mAep)Km`gE)@U`Cua9gCTsA z>57sm|C9S&fMv)&5#FKNnB-$5+pqV<7_klUZEIMjFX&1>l``-M$zu=;VLi18{u}bG zWVxj#*oJUV75YtjELX)h(ZCZ+&_*)WWu(WfxHnS6G|)rZT4{eHOw%TR497Htmq@2H zr%OhuScdWesSldphIBvaLBcfOpuYj#G8Fn^2=qn<^hY}M2>N98^CE}bF z|Apb2a9kS!-C_&smi<^09gIK?e$*Z(zi+(3@2kf&4%P2F6}qJ=n)#!kTgv-b6w*I@ z9i`%%?W%s+&K9l6Qt{0&;hCU*=?nan1^sf|5ijq@ydOlK|1s?AGp?;}hOT)N`sGdF zoLk@%{wws$UVOjR*_G>;xYw4gxOv0!6%TLt!-`X{{*mJwC^*#la}-x&ZGHr~m0u&* z(T5z)E#+ciE$xp`^-c`*P8D$ROiL7ewo%YM$d|{5{I#?WBK<>aArE{(ggZ#@jB`o~ zeRqI=XMQAkAI5rYZj|Ji4_^cMm0v`?-nFpZuV$uu-o}4b4@k=C>6nj4)0JcC8A|Em ze#)ML!hs(bJUj4{f@K3s3p&?rWL@eu7j&uH!n)S&E%@WWPYa$KxTzq%?tRw1ZfimJ zx?L=xZg)XK-AC-Iy3Y!V26hDxXwTF?ueoQH;8`VjRtY_8!Ofn{&)?!H!81ze8OsNF zwwB!P*}d&2o_%#cRkj}*sC;pBkn+#kpD7=f3|2@c-Ug2my}*xe7UDZ-xVx6qljutH zB|5X>a^&RptqJ&q`oRB7`nd%9ZRXEGBlyj<{ysO}?bWbPsm%&sQ<$d;>!6viGoPw> zsr>7R;CgO5Ov{JC>W22OWK z{#^s%!?G3(B7J8(PZ$d0!J7T?fjeOb2x}e0c&HELlcqLDpv~+12HN*>9au8xgX`go 
zN1KEx={vQf>A?2-i)gGGmLgr&oDQVt5}w))xg$M!3VjUdyW!AbcR-g7gH9U?-8KaJ zE<=H?<5((`Kf0_H3}wVkcm0ZONLJ%ogrW3)B8o)>Vq42rP|PBLpOyvnTXTM*mUP)K zp$lsv1Abtw)4*D%R6o&7tlO4j-S#H%)2E1W(fTbK`luH8Nz-HGKe-+FX{Mz+^jLT3 zvF^}g-8p{JbQ#xUz)yVr1bSwIk7f*$yuVRnV&nsPA~D1B*I!HCx3Ru^DIvq-pDKC3 z|BV|ylt6svV_iJ#efVs*!dJWZKCAaLH3qc>zU$8?hkMCi^d;h9n-tgLJ|e`mwxCTKH{Qd-<60k_Kz()SnsVrx=Ef7M?=yj|sQ)DU zxrk|L_iEP@wpxN`{br-&eF**6FqSqB?mGg1(s*tHZ<4-=!;xZGgr~I_7R9|*YB4PM z{wbD7<0Sk|&p!b_(Okr`3J}YhpTN#MiFnqNiR=vdi4KdMi^)%Dj}cdq4mpm#>it6=(1-K` z$@(I$LkLIBT~9hG8P7*-YXgsMS>ZPXrfC}6YLkD6kBj@CvH!&w&(d(z^}tcZ=!1r% z=zT5UqQCUq6F8DyX)dgROHtAUkLa6ZK; z$S(pbO~ChFPEqv_#mf&vzx*e7|97f>IRXDrXT;BM+3?2|zk{EM>z6j-Ct9~)rm!zmEUA@@2(Mz*5VBYi{vuT7Ii%>*m`$9~V3iT=l1co1g!QXVO0h)5=r(l@58|e+Ln1t+ z$C8gV#Z9EgW}+Vt-z#}%{@l&u>RNnEYnyc0YOc!y@iC&grq3uo_95D!_!#Lx;G*pM z*@5^N^@04D^n4y46PWiukDmu!b+sGQie)Ozd>$}mwH2&<-$)j0e-5|AxzmdP(Zv5e#v;mhQK4FOZ5Md zj->zSd4!iVtVHWX!b+N+qx)#BSczIQ$zhKVEVa)aNdN!at52>V-8QM6KBs7EPmP!L zfxaRvH09NWD~vi$!%1F@GYvYh7W5{4c4pSu;iTwheA}($|7;Gvq&`b=grI2_tFov*s{TsP&ea z|6}I=Q2vdX|6}I=kWR5*TKPXq&HNuT|7R}N0#`u(54Z2+d!d}=pl?RaS#B+6wnX*q zjD)Sf1oMB|j9thNPI`vcdxTwlDt4jo8h$bJf6V+JGyliT|G7f*f0mm0KW6?9VVWx? z|EG^`KNk5fTEjAzL;jC8CNuxX%>Oa-f6V+JGymt(%>U^R+hgYcnE5|fLjI42If6c- zOCbLzP21ak4*5UYxXk%u~)AwMF@Fzr)^x)t#C5Rn)n8A>QJBZLH{hv4_?BI`)FSB!zq5$c^yQyP&Aw zcjFx)cc3m2^*tN4iAtF5wnU8KW3;suZPT+P(RM5KK|tTfA%F3JUGmOHpN;*cYbfW8 z`Z=MKJov!)07;<9-EB4Hl_@;kIT7_^6U4RY)HTk>CH-$wA~&^`zL z9PA;Thv)I<*aa`Oza4qdr8qwx=MUmIA4lYZdS3+Hp2L{Np*{ueyONP-NHu3|*ncn; za~O?zO{F`woy#ygQ%j+91bzC5!aUMIVGIRRihStL&Hq!RlLt zIE#Qi=XN1Vo`-$sG%lJi+AmJe$^)>qutiLe{oQl4m=3@t!G?GPJuJ-(%!oJtQgragSTc zh)wsDf?t1@Dk-<+x|N&8q$~SNauY2Za}$%3$0w%PGCahWN8)EB*4QQ`_V1mW=!$wM z(S_efY%>yF>EjZ~N99VGlo%T|ga3cT_E2I({3QP0aa^-jVxD&<3(DVSSd_JBdlx?C z$ukoFo+>KT_L}$y64%=9Pdt*Eo45tf2*>X=wg(coNP)Ju#NVH|6~F2KHTXXazv;if z;M^MQ4SI8IxbjwRgmORHeG;El$qyt_yA$FkCf<*C#;w79^U=N)zbBx!=X|x@`SBAH zpTKYWe**p&@tgjmnzk>xqCBsUiB@`{y&?GICQsyTro@j=>}4B=chKb~rlV~Oey7;R zC#I`yrpJ#<%)oE@KL!5__)Y)y#JSbB7!S$eqHh}X5zWic?vnQox7$nea@U=b_wKvs zyF>nudnE5K$F;7H3UvCY3OTe!oixIrlgg1}OZ`}ly&jwJev_%#e>M*J&ZnnI*vIcy z{`{zg#}>CLPj}j^2Shd(zRRu&H9Ko}RPDhYNu7P@j>o*Tk*?jQ0TcW|2)_ z%`CtiR_`k8MSHPxxc4vED@JnXxq$dE!Jvyvu%CnaFc-L^)T;F}fY%6ut;Br0I}-oFcNCyG#sc12(hmGb`ALNT3RL_@v?1(A`N`BKZx83rC-$rE z0PMF8m`#%hJ@(V;qZ0O;j`b3+YuEzz^CTLvUrfvEB?I>RSE?Jebkb3aCqwxJau-}r9*w1-huwO<{-#Gbtv>Y`zNEfVz{U_|VJ=@K(Uq{>i;%rMJ8lJn2 zg6&7Y&qXKZexBaE`U$33;O}aC zS^ZJFf&6tY`0H}Ni{Nc(_>9`{e%mk>?cSH6GXxcv`LG7yYX#LWmx+CxdB6|XlOK!f zP+)EVvOmYz#((|%UEmS_Nf+6GC*Wc+@dOUB;TRhlmj{T zP#=^0*U@1%IS*LO{?(&<9F?=UADh-EWqQ=W_a8lD)_$he@IH#|KnC7Mm6@u36UM+i+lHx zj>ltfPqe|e)A4xhjcmK}*ZUgdv9%*R29J@<5gyazHWl*{247qRd~vCmmst4X;^2#; zIq3plTsQK?wW2@B7Z=bUVo-nlOw}I+z+UuB!d~>w^sOV|cd>Q6-(~GcYX@VmFwa@( zxWfS*M?MSCxWn-l@6*AC`P1g6Gv}2F%bNeAQjmkL4=9qF8B) z+SkqKr?G$3FY+O1{b~!|Du+Le?j!7@wXL;F=eNT*rnRTZ#A?WdhP}$p5qmjQ>_xaK zg?wXbY?NeyVxvKs)Zs3QjcT}yd|`Ag^`Gh@5%zKeJK?vN&QEPW_Tqcy+Jn8ep9A*l zlhQHRt6Jv^I~(lv6VzE8h&qb{P$wv`UT}L(W~VFP-It-<1euutU)ZG484B5ay)Uez zu-E@g6A=Gu90z@SV#mZm-^dOJR_gdT=-QDT9|t9wqd2H0bLR_t4b!j}VxC<75%VPM zb?dL);DL1IC)45g1P0qWGF|yNJEMdBiOloY`vm3fi56hvj*oe+9T~2?ksYDXxZ0}^ z$rncVQJfQV;7LtzE7y%qS8o1AMhD{#=6N|@P}YvLpsynD>(u=X^RDUIa06!B1x}D;kLHp1QW@CtJ(sdNqB+S;kulu~jHH)tol$LDGrHb8} zk88G!-5SL;+n)Ef)XaOk;_}{>GT^M1aw;xB-rLf1%zIl(emXPn&CGkFTs<@I&CGi{ z0^htof7r}>>!`d@Gw*E?d=ackUiPIMJ8d2xc6{EO3wzmG#D_ZHUc2{2n0aq}A4Esz zz1e8a+JpZ(IPWbQ&!-sas-dzpD} z=Z?M1ytfAIW#+w|TdfZ>?=6rI-{P9ruvfv%dprAj9zWK+w-MMQL;FHA@!sj^*>T+TIGELt2tJ>bj8!w+P9YC9(woe z7>7$P#D2j3_{`B1u_&&iNCzl|vek1lWP#rncquZ_3kei0KKSqw}Jna2@ 
z2+!-6&I%9O>Q1RFh2Gv$-BQ_r^Vx)CZaq8Et3cJ@F%4R+;*ehi6lpnWgfIX!L6& z=1Ib{=-d)KClmcZ`va+c&8JT9+hVv*t81pud(2bkpXQ#P7i#|ds`F2@eGqfM6#b&P z-iPNB58MDAm;t)FKy#u!(OXaFvKHn&u5<~X_qf$dg7Z%2w0WoRt zYAw`b125`JEHhw(d`rMC+!Wn@Y)~sm{)1uCnQj)zZwZ&q+=^ogjt?`*pKF!QP}^_7 z)*W+nmfwI)bvZlB`BCguI@h1RI|JV**gMO=4F~4HKElr!^HY7dO;&d1QycSBy~d4p zc4n`O`Ki8JX=iqZ;TQ?bli!KGdQ`nfyI#ANuKNV<059pm&Jg|}>~TWuTwD+AF!FSL z_TOMfW;$6R`cPwo-Tf0|t}mv{9B_Mdf}AWS?WKM+*Cso}9<0K>P5#F_$EjcRuJPc1 zj|s|0^!qWU?7&fvck;v8tL50|NcNX#Qibn&I~;}j6VKwFakNhzeU4V`I?eS$)!r=z zu2>AbLpbM7d!*b4c5e#o9@+YZs~4^y8|E{vD1CElYp=o+FM zwNZk(q`J9;dA=4M#rb&FG}v{*RpV&y4940Ab@-NmPw&K77hw!(jwm@3xQgpNjPX76 zIjwZjitWj)P{UTl4_R}QYU#O>z|Qb`9C$`G>KW5BmVi&RG1L7|g9p*2pl;5xc@ z)DQJ<+3Hk2&-R9S3JH*;Cs-B=bq*o#cEXvArn zJkEqXwgsoPMFu+q(~t~y2By(ukj`l`NZ&OXjBY`m9H24D+%&{++bX9m$>9Hj&aKH{ zF6I;K2(Akx`mXANWcp6J0DN@>?NEQl;T-WA=?yo1r)wdXR6mjaACGez>3_&&8Jz>4 z(wah#M@cRVAd7lDO7hto=ZttX3g;H%{?)3Cj!vS?mvelX75VCnRgzVbse^pLLt8z;B(OwjiQ%+OR zhs!$X+MKLj-;As-=CW#)Y0LpxZCDSjRArUw28_VkhUP08&)bRTk#3>?1$$uLwn%vI zGGz8B^g<5gauj&+2I#32=qb9+fwdHk9W}^nq9L=?#y6mUdu6p%yScxyteSSyw40{g zl+L^d8N9%DbHmP>#f9I5V!LYM}hJbdi6`OhxF zdlM{H+vr~SOh*m5V|bVOan!$gRjoy6LtPyg>i!)QV&x=3@Rg(11J!&If$e)?&2m3J z!-Oz-m_1w`Y7dj|5JdSYdj$N@qMXmdeJ}L8Ou?EkX3w-buaKu5}3tIWew=4p8grPlZ zQ)FV5UgwE!sMt9+;a{w`rm7Tq@3C4R{Q-C3XULdx00n zVXlZL)9^b_uydXq2Vb{-?pp7I(7}CRK4Tv)eYu-7dMRY?CCFaH-a{QLcaSf=+;LgN zJijzx;uRow?V(#cG)Ohw6K!CgWWaIBI%=bW)$B(bN5v>yA6YyYHjF`EEv}ip$H5$BqnTaa z+(q&{3A(R^9^5iu5^NW}TL<Z#CNa9CnKA(+JpM8fP?)G?uhh$HIK8 zV6({vb=+8l;8^;eD20^t*@L~jlsrQ~%llO3q*Mf7Iax;0WvENp;sgt2>9ev0B&}ef!to8vQeRh6{Br zs(*$tk$s?8xi(&!LmF@DIgJ;*)b8;L7j3*3S2i2#?X!~2)Ja?}xqN~rE}m?*Wh|}8 zW*f$$@v2Sb)$=BskR_5!E<3=1?UGHBLsd5QIHQql;u?}o{WC7AY=#_f8)Wl5jJF=| z-ShPkJe9pHp3{>=#7V4(mxL)Nv%-}#vm%r;n*^S_L$RxqqEJkHuns!<1lEd^d%QUM z&K@s}{tmXVcaPPhqkF6x{Vg!Q4fa;R8en)zZX(qx{wwtNr{mc1Qx{2X1&s*?GzFHYAT>bLsgJN9qHCT($8ZR852z;0@c<)S0S9v+&ivsK*)sKG* zZ9am}=V7Jo>f+J+#jeFY(N<5iE$Z41wAW4EYl-J=rl3vY z_cODa?sKf{mG1HNaeF|gLRuew_shS7Hi9x0w#kN1TE(1a)36R+NU^)4`Oi+q_XVhR z&ujMMU4f`iyJs5KsC`kV7HgP7T4&QcJ!jy(g)Y?MEkynLFuV_77%&0F+WLb=h^y5& zP`iTftK_tfEk22L@Db3t+$wnw*xlZ5@t&EN@ZQV6B)YwCVvW9GnB;wJbm03NzmU9t z{k7Z6YRjjm;FB9Z&@16Mj(;EIEx>U#|Nb*?36A^u_rYFv8rS2K8_v8Ej^p@u!CQdi zYX05gEx~a=K1UFb#eD3}4HvyLvG!hxxuLki3H0kEi;=&@dvW%oUNnqe=et?B{98-7 z{0Dq!3~tn=Ew`4gAMY$(Kfi0~`oAQWuGjjS9ENA3Kh)RcFgzPa>T7Zso{b~*H91W5 zrU{n7J5ccsgmI|-Uyi)Cama6b9G@5Q(b|KJ47SJ6o=p%6*TNRB!aD$nzCQ42Z_LpM z%*|qq$wBXCz#LVfpAu^Cr#sxqx)3l5pbNr44M;9X4Q#~6V;CwdNl$0qo|E!b|A!ax^CXnLffy;8<*r98B8_%w`j z?)p{qE)R@p1Z3`djA>B;VyhSra^DIi@J%}SM!Q}@pC!pd5;3{^!sOmPWvFZ_NUrzbx1K<7<1 z<}GD{x6sX$8wW7OaXnKO&1bd8#jA=JB8Elz2`*;@-IRHQ@m|S) zla$Km@T`q1-|lCT-Z?&4cqa{fQ(46P_l%Mh*L4zVZrXhX$j$ibEvIr|WsLIumoDY& zPh&w-yDt~|_Nhoo`DrrV8Idh1O9n`s)&+PE%T^mxzDyRBy?7VXjsc>w64y(37e%vk z@H^0&Xgmu?qOoyK@+E&N_@-hWj#eCvbMzGEg8r9rr2ma`_6*vk|CixN{~PA;G4KpM za~oZ=7Dx3N+Yl=zKGxBlY!W z9I3AZaHPHt#L-A^BYlnZ)X}e>Mys=Y{;hOBp1+Om2R(TI@qFHY(1Z6M&*%LIJ$V1| zeBOW1gZCeE#rXx}=lp{4bN;~iIe%dMoF6cL&JP$rpMQ*>=AY!0|H{HOK|g`UlTo=sPl?~Sz0%U@!p3~eI=#$wE_7QKCaK+ zEivW80fMsQdW*8ToE2^dZ@xtRiIe;<16z>(tXk_lQDc=FO&4+xY-$Dmg zVQjm&4s;g(2XvtAk)*qA(1G8>A9S~1lea<-zJTL4(9^2SQN$*iE}-Df4Q~Er?YH3ck5#M2+w#Nqejri** zWW)YL1m_F#5$)*5lz+f~R7N@vywO{LodzbV#C@)=kuvel!bo;&By6+~z8|vJHhj+n zujGJNWUP5cf=AYYM?ByW!ck7NTLSv6WHG)~@V{)b3CjMVG`_fEYJ0NNDW`!K^1v6` zo`X@i&I@~Z0{jrOv#*>Cem?{ZGuQ5}?e9STCG5>~%((}1z7unvBt-hk@%}M-w-oZ= zvLm`kf#>1-*3L|+7i^q1^lSz^QV#l+4pIArYv_DA&VPyXbbaZNz;&PU>jdQ!97`lY z5zueSZKUtJa0GpB?E~5mM*EZp$UyrNRxl_Ub85l3%P{YOb<+Os*WiDK zey8AE3eKhA97|3~Wb0EB=|4AeJHxEfH#B4*%2IEE)eX`VaTe z`{mPt8LcY&L4Vgo 
z4?WB#dN(jv;#J+oCq8Em>`g z+Ua9xhqv)TLmOJZwEncY4P8I9HfX-9kvmEA?WH4)8!_i2@E2%vfHnO`2jMf&=Atif zMOa-1=Z96mT=S7z*3(|PJ{|Kv0iX1fy+*rrd@y0v{1x+)Sz$7J+)s2RfA|AfKWqW5 zj$nOxz>f8edhbN5b^86+Sf@WQ#5#S_L>zlye_!%-iJREG74$9_mi%wJ@7LsOyWly<~RxCDpPq|#*uiDj`W|#=NfbRIXrq#`i$)k>muA#m) zcjWx9j=_PWb{(D9+N60|0=c$9Crdc4#D6K6pM5ye{0Pu#gK-V@e;nrL#h0a#y<7!Z zL-2X=<&_9$&GoOsCk59FG#1hmkn?evmlt3MdfHgh3q#nu^goRivelb-em>@hbe#Yj zO6`;V8dtE{5xl41Nk@Bs(=*6^399`v-f!#!jokxXO>^bO^J(k?pR<5nC(Mv+EghJP zJ&_vCxZMUGCEJ{facSqZaS`9q*fidwarI`>J1NW}Pi5?#3Ct?D_cxzMon1EGPyg#_ zOEyr7s(p)(3vW%9+;`e_1&U90}(dH`yUv-AgPx}Aty$N_#)xGz>&p8P> znMjZ+43Y$}W`j|z16sA51h8Rtpw(LM3kbG>7^-h?-z!xh2)2O(Ekp0UUgN)rigKc< z-naF(`8TOysGlZTVBXJnpS@3Z4hf1=)tmD?>sfnTdkx>;xPEIL z>L5RQ9&KMvQhBp9{-jL*&>m>f4vw%70b4W{KH(!1eDX=4Rs=pWOb9twLZ{%?-zCl?% zjTbY{O=pgD9%aQF{%ZZdXO@w`xFiH~#s@0AB&V+U-(0%R4}N7~1syY?w{r<>_h zXPl+m4@KXbO}hE^SyRW6X5T)mXD(^kYd6o&B|ath#X0b_n=GC-)0X?a zyR1t3_wXT&@V238!@<|kAwE)7*03Q&*Yg*@}ai}Cda%sbpCAe;X8&+9e(@z$+52un}61h2d3W4 zu|-pSM?5nbdpvjW(D}KSWDd#U{h>vHh;Nb z&*RcT96suP-{I%Y7EkT0th*>{S#sy!`DFK0zgaeqokE|S>KoB*Q9qa2dfffC9_QBI zW?FrmG~bBnLmE7P7J6LfYZ=I;434G8-DT-;IrNWoxGB=%EZLngWR|VJEphdobez|* zmmK{~y04y>ev`@AZ!nBX{?9#llYK3hYY*7hX4}_hbL~O<+8q1ZGOj&jU$gWzTc4Az zAE{5#vSi(huFp)aOSjnUs;Bxz7?Hf*Kpkt;$K;M)TT8h!>5B(Q_tOtIk*x39{g91J zo=M#{knX1+oW7XF{Sna7NqQ`}nTNjjD5(^EuYj}>dtUwWXcENgT-mi1K)*3@jPLJTU(opBc6z#-LNpgvM8ZiFhNvI^R#cfX68JqoYZW+CUv9N!R0>OXyD{ zzT-E?G9LP-huryQMoi39=ezo5?yALQ!s={cc2)cLb12*TKIOW#Bi8tB`e4>rgK?+r(uGy;AZ?gTN-z?tO@DkaQ$8y&$$!pLPGZm`PMcUe%V*V( zvhMeW%c|%5A6C{-$`T#;$Y)cxTx6~{s@yv9Py7MW^W5cG{X;y$4Ekv^YsLN4(L|pf z zQ+RN+xA4#yU*VB&#T33djo6>-dW7Dc5f?i41ApkfAI67{Uu(3E5no{M0r7{O%%M1R z+=P!$ZCe6uY$$p<*~2)6D7 z(5V?qmb9LQ+RR|!l_O_$^Vu4 z;xN+isqQhaa*cTo7}N7hpx2=R&wSqLnTc;qeJpty9;2G%oETra^W0waBWLWreoy+| zqPvoFpyy#=K>irDvAtd4@oz>Jgv)x`D(j;h>wB}H(~flszkjN_c#vzSJLZn>rERNk zcf&A$;c;mEy&ohnmy^hxP7-rF$;9jQ44qi&Tk{Td`>wA?>2Y6d>E7PH&_3o){lxhx z7V!7Xqt!3I>l(k$AN241=e702jMg_s8m(=Ne{FxGHNf~D9mX>WM$ms^%$nXjvwfEF zg!E_s66R{h2U=_U2U-=Ux0hJJeZKxy?4RQEbp0r0y+xVpi7k2eI_9t_ukPisp-mrB z-d^8e%Ie#ptaqL#UW2%tNyN+yWGbtX`NHd%Ys>QtEb)8#R5uJ_o;}_lRJ>8%>%}&GbO)S)M_`hLM5Rv&l_l zQ-hC~8v3DuIEh;JFR3GWk)MVYlbBYzpYyNroZ9`~d4bj)ZoO^>WH{{0$t~%9U zK!53&@4kI@fY=P9sj;^)y*SrsfseQPy+zlo{J+Kb_A>@n*KqAcj=y(w(ViLf-5-_? 
zI0o;U-l49ui0`2fCM@n-(n#B{jVJcTRp z6rtW7%BCMeOTT;UW?y{p59C%KoT4n1bAWHFoP)IgJ<6q4GjKqx?#>{f0P@1 zNx`S_zx^p8P<|l8So0(5qd5ylY7oB^=(YD{l z(zf2Dcl^DB|3!Q5x_(L4mi)7R|9?n7N-)+uKwURd=f&QHrhV|J$Iv166H~FCax&m` z9mXD-u462RGN$_y!(%G_?8F^Y;VV<%FO%Ujli)WKnKPacY8aMR2p_WKP@69XeiRQs zO5lDX&n5AEGVj5Q?nogPIn0la!jIm9AMJ-0-bKIIJ1ESP-lt#QqhCUNS9~dp*b0j; zJ<=oe2AC3G%7BL)10#O+2og`~(-}{)FyG(iNof&2a*Q(Gh87ot&7<(6TH0QRT-}t= zp{&&%_#0*IB{sm#kKVbqM`$s0IjO%feK7sL{SBMn!0)ybyOIV*8yLqdc#U{!D|pXJ z>=zuu^(<0-I{X0od%wUNiU$+-iC@s?m%@kt==<)mzmqH8VZCoq(+GTA@i|6I!x*DA z9v;#2KbPN|?;Bj5&b2-q%eOreTSRnr_>r-p6`rDNo;@p}bU65m=0O&y^w&AuVf z)PT}M(3!sJhn`II`!Y{ja2NRz=%JQ%H0jV%DY8F>ILKb!#NZOIzj_$-RDix84%){a9=II-+X&yj7QWpGU%QSxA`gCXWvDZGAX)G_c(HgQ@=!c6kMZhD zzwSKJHobUpzmjL|@0d#httNbYg?Di9d+ z%0HQ3OoQ&HLjP0X1C!wgli(K<;TIE##g7T|i>)5`g@<(uUY_ysYz*(j!Yf$2u++2W zEp!+0inrkv;uF$aj=>|eM&WPt;SH1{jLf87>^7ivHD0lNepy5AVUA|a-0s` zCSt#Ag2$xA8EcNdyJ*h<`rdl?>OgCeS9WyPkm&Ud`{JP)W#UTqjqlKTx~sz^VHO3)70kk$<^jFQXjI~d;z)I z+$YU$FS*s;G}=o_OQpS}fvL2YG&qg+l7^(wUQ%~^yT|vlz%w$uWePF-(pS1l-I(vD z@ot`my!w*zaK(!rti#spmR;Slx?4B-#B?mZQ#z;gO-I+1o+%wu`sK&fE#JciP4_TA ze`dEn-K|e|+Yg__Uiz$PRO{W2FyGTcIwLn@$OWXgIqpk-G3h$eG0q2&e}nWr(s9nu zB>yI91}TqN5ygHuHnVI-*^9CjrDJ?t8&Psz|H*RK`9-a(JUxO%tX((GSTQa0jNr5) z)@?t)^jXb}tZ~OBPpBX57n&%Jj z{KIFqKVPNtdsBYz;Iyjf@_SQ$?@r3+`Hej9ly8;Ivj=!qWv^Uft>bR-ENq)DpTn4V zVm!yN4}xdWo5=$lzeA4YcW7cRnm)YT?RT(z4%pd_&%yCE$j>0(f^2U27UWluPeJ~K zXxrNHA-qjF@*_C@1IKS5UxCVzuRwkR`3U46kS$fn980tv6|HwUyzO1p)8!{{@ZJx9 zfK^Z1A0S_V`~aU?J%tnIia)J-Ike`;LdikNz^?kqwB?-Q#XgPkJKgWgcP$!zlO)>h z2Th7*2a?}N`ju$ZU=0I4L8pEiC();9v#as3XiobvKto0Bi(t_j=c2U-IJanwbJ5r~ z`~Jh6i?$xN?^o%4Xr#*hKJ?MyeeUah>s{{YUF{KO(Mv463;U~`@yosh{2Hz*eQISMt|#F z?a2|wS>OaXODST1%rMRZC%{?C1DuC(7B~UUQnuOmANB-JfU}f`?fX@ngR_(>_xru0 z-sisFx8CKR-d)Lj&=_>okAp4Ys+RQvU~2~0ngO8DMJ$*qYJ(?CJMI zG`3Es52LVE3ASc{tx6kPm0)WI*s8R#RSC9cfDP+D_^1S1Gr)&+A8b{Etr=je{r%oi z?{i=8TkmpD@2+I7^D1KPma%V;;sui~Z)5!mv1RB0%|9njA043i3VU9{-^sj0TBrKK zV{_0EB)yUtvL{IQmPz`L^lG`}F( zr!pKnQ1c7U+`>oI51cs#Ywkq)0p&_RwB}BbK`P(T6ECvmk>(ISwR(1;AAH<;Ir3Td zvieK)kG7ZH{S>7ieB$vt-S0d4fpmQ72))q<93B5%&ii-N53Kr0KXB{t7LFJj_xRj} zo^A2&F7)z;(7(r_8{CZk{Sf;17F#cuo?XQC4P1BhY(2Y;Yp>hSM)c--m)<-)tT*pu zJxf^5hxKOt2cf0cxah*732-J_5Z)YpIT};aiEk%q9Nl`Ju)PX>E?W21d9y;t9e z)~P=7w}emZoR33WQzbVcD}1LryDf*WkzZwu#laeoWv;=2!WzLonAM(KOmeZ%@*88IC0I!LeWpx+UH zp*~hSsY?g_?#puBl5xadN`K0B^rueRqvy^c#d!9f z?ewSmu$b8BO=|+lR=+MWI5v`{C#e5+aPKkht)!24aPKkhNvV8il?}6bGVAZ6_tg~iiq7cXxi$xzUwl~nS-e?#@)zk^bU$`hH>V%P zSBs+bcZYYL1C2Ypvk+gkc&B)y!#mx5R_5xne8#H)85zwVzbx0S{&Ul07wyq=7EMO+ zH8&5_-cjOVlJA|;q*Jzwe?|J~3vewOOP$rt!BVk{H#xlML)-1pcEFz=Dft;=@1 z>K17?*Wk%Bifx|{v95nU-StcE_}4c+##%4 zwZB++?PA!@jbCD{89|yvGT>DTV&}4!ZS8ve^E=`3?s_@LN%ZH+_>n`B=uhqA zVyz>C=R5bUwa?I}a~_?4-2QjUF=C8q9+H>jBgK$nNj*q$q^|nadHs z{kRXg@Bfhd@gH(O;Y02xe#rf#F7G>ScIWrAU)5%kpFRH)*z+%mJ^y;L=U*0kV~iM{ zH2<7&N%PO0lQe%X&-m~)er3gXa(D znen9MlozDDRh0J&$_r86qm)-hd5=@x_j%@Lq-QCwn(}s1-t&~#KzU7+*GzebDX%z- zHOizi#%KdLOg0v;)mTo!-**49!12u_r|wRj_j1jLP3+XwspD|2jVC#EbLu4D_|HgA zU7R}T+sQ^C{5|OMe)h%LOw#xCt-T!k(C_peePcMs<4G!CWq+UJpOI9q$}DEDh+VW} zRwH{5ptoA`-at=+4@cwJ>%#5ME_{m`NB8)}4`&R=#mrBf6Rw{*oG~1iG=Jorj^j6B zIAb`j&-_Vq!u6gzoG~0Xcz(g0aDS_>bKt#BUyJv;`&!>o`&GWXuT`%4LFK8fk-pRW zAJTWl?9(#Fz<$H_aQb2k^z6`XA^q>ht#kZQ+s{}gsa@I&z-d?VDcVK-lbrf%ZFF?~ zsc+I4d!AhNWj?=DxKx==-K_DU-9_vJV&NAa^GW%o!#AD$bl1O#fA$d%WzUE6-d?nJ z6#t#IYhj;~!=%(jeM)wDV)#F)7}kZx1Y?aBgDyhckI`@Fl`eutHAIB zq<2{_vyK1t_&R0Hpsby&b#vaYDk^HNB^gVKS~Exi(o9kj>3&j`^?x74;1n5a4v?xw z7?WT0de+3CYkUn|!;cPPl9qA(mt4;~D^NBsJ5V-}YucN?*uK8{_~)Yqf* zO!_J55z<7SkuPZ_sf@J0Z}_=4&J5?mqy3#qzEjC}D)~<3)2V!iB>hP1w~Zb~w$}N1 
zdxmqqQ^|KK`A#L@sT>C{yqWY<(j%mpe&OeO3<>A@e}^2_;-$mGvRPl5PQS@Mj>f{! z$k=-$W8WM_$I;_|y5k)gKlqi#uMqnCsN<(`DYKqGy>U@L+wYxLKc};d5ac@kIJ*mCZ+-{;Ap&n7o(drUR$#(*M+U&9^vhcQC6tvt3JwTi~1F`{A}a z*mE$aHre;BHaVDau#(Pw(XHyNx;nJn9L4V(x^{BaUt_9zSMk3(PMve$g%-Wpbyj_= znA_Qv6RvBSbv;ZYwaiZ(2JcqChp$_hX6KWkq6WCCv%Rdvaof1hsNhzdG*AG^; zuODo|ev0-b?OYI^lN{B^-a4LC*5aoIuQ7U6=Y0@fQ&@d1b5)_5$#Y11p+D&-`?R)? zWX+4O4#&rKwWiQLfB$)`C3MeUe;#WH-Sf(y$J)W}_I zp)>Y(4zIr()UA7Jj<&1)E4q2e{~R9jY17!JvVJasI2wFOa~1?9%hz-O7EEh*P6xY|++}Y?#ZDM&S?9Kvb#%-{Db_7_E$iFXvc8RT#l0=#{zmR^v+qC6 z{R7;8*uHP9g_VsEf-ZcXD=PqQR63Gr18@lIjz5v&8xj~+-u`L zasBAy$4;>>PI&sL^-*2aPo1rai|&`RZMjnhra$R8oz8ciIQLVogY)7O!@t!Pt*K8m zJ{p_O#>JtXq6O`A^8m*V&1|!;i(VeKuZvcyoadp1$n#v+^P&$uDPhJ1zV&uc26VXtu-pJI1Nw+Pi4E+WlU5P2%U=FCU8E zk2o$-_>HXVLx+j1=|g{stmgy2k+po_H?oc|cb)5bu50}s_>HXHJHJQN`n^E=`n}}! z;q`lk=q=X1f~?=`Y+j(SU&na?={3>%%%kizo{v7G`GL9UBhnw_3rSsFZ0R27lWUHk z&+1Lqnz@U})9xvrb}4D_J)77^aMQGW-Z7ESTKBRASzX8b;q__Dr)hoDZK=zx{R5|` zhAf%?WAf|MmQPzix-l)po`4~1y?+_=L8^nFvK&3e@yihB9)Q2=U zjrx#2mHOEF%coHv^^N+)=@a#b`l7RbaO*FhbX-p7yN>>%cw}dNp0oFcv*$)0`b#1D zi&HO+32P6g5%WHcwFf7SgQKswb$v&-f2b2(ewnSe>z-RzuWM-6)ypisgcuj*i7mau z#;tI%Dac-C%IPZS<-qR&;2LQTkBH6v7?Vi=UT6EMQSMW zEv*;0Gosw?4`Doc22HblN!FS?h<6?evY)C-9Ar z)yG|Z%fY4AE;;LEF5y^v!dy<$UNFLCnQ&>3U!*Q}Upsy5^sUpc{h~@AtaUx|N4Uqbi~g{lM~~DP7Nh%#*Q{hLwH75jcC16R z^~04KGmclf^uofZb%>5^9A)c)k@<7kFIwwq>j%y_d}-GvN`I$+yK0Z6-w;co{=0mg#P}orJyU ztl`Pd2>aNY(KkCCcYC}Y-!N)> z+K6wm&$T(|m4Yp-k`*;T#uoYr1-CVSj6W)-+~ z)?GRLtW){&Wm+Fe-y~Z$fXz1?yP5Tz>KA8yq+51qU8E%^Tx%nnv)b22b|xPRZ9d_w zk91<&)PK=(p#y)QKGyobF8Enwo!3|NzJ+189`zypVLcCDchAYVdHBJ%+IjdC=5nny z?6&SCo!sIJ_Ly|WyLY9u z`GnJ#wm-cCj}@PAzUShxU!-eR|7abx!zVhaubvT~aQLU2XJ&WS7oF+k?!Mp~mM`{; zx{fotFI@GF^hLsmA;q7<8qUum{&>~qQ^y|{{v_;+`8sO=oS5U0)3GtSV~)GyKJ5AB z&-(vZPA4|W@hvM3{^S1d%<05FIDTl)Pk#S6%#VK*@ z_>{Zjkvpzq`hxb0-0X_y{50Z$yW^9?apPb1_~d9^UV4kAuP_g0LYK2# zam&fsVSQfvTWQYRt z@<}?Q!&hvbQG340_Hp9!zV6lyyJMA;75{C;>qo^ZpQOV${Tda+yy|=580O~lPaVVD z+(is?Bn|?bb0=fv_K&z?3ZvH7cgHV()HvjWpK<*1NpjteO?Lcrx3E{G6W3h!A#u%N zIqzEM@gecL5qw0&lSxK=75?AF;)(9~#1H8YHxG5<=)!9~+SlqRt}a^E$=;5ZcT;+Y z`U-np#WSBI zOPs!uEOEs-XPc*vb9UB4IC0Ky+0jh@x@CusU9p~>%8#%5{ge0y_&Qa*x5Gc8=}q17&)xCQr@4RVSDnvv$3J(=x*OAe#$IUu zbN(CS^Okj=HjQ=1KYu#$&x)1sxb|btCSJ(;zh@`**;-fJz5nuQ?GLT^ithcFhYVzo ziW^D4B59wvX7(=WB=*$urT?S%Uyk^sqvD^PJ(&xwy$=_^nfy^>pPlvi-LcQzvCpX+ zQ`p}Nj{-gI|p28el^xU(~yW^k3 z|7)cGm-wV(^_7T!)_N^_Eu$0vEPX!H75lu?(p&gX-%etmtN($q&)HG2&%$?i{B!4f zU_0^TP8_+j7xWi-e`bq!yVesb9?a3D^ zne_@?^oJ97D{}Ska?jN^*vqghiv+Bro zJGN1B{Tx^9bK!@@KD%PN6zkPt{G;OIzAEv}5!`msK8r7YR`JgoGskyikC`LylI_^% zS9rc6huUhidV7SL}21!0=p*!#A8gpWU&kWp?ayhw+Kxf01$df?v1#>yySm zt6v>HZ1=0fhYRi4=R!9RapAx4s~-ETzHxY}-8U9b?T&jcJTzu(;gN5RD|~a>_|V(e zT^V|J#?}rmY$FB{v65G6{$~WVsNU(D!2cK2^bMsT}dq79?^Rfl@e{tEo zM0U3Li?bJW^|jr5L3jTL_(Ri1ckcx~?emUf(_YYJgWGkg&pYPk^KNf<`?~jk{vz*b zYU!8V@z34y&)xCQ{8w-L-n*Z6Y-acV&)xe!cdx^{`!l7n?)c}^kAIebZG$V;dDXbE ze@*kz)_`yBpPNqVmSzvguJ6qY`&eTAL>qUrD-G^{t& z`or5Y;oT7USRw1=l@AMhJeAK|~+!)p^{Q*;#{6s>=E@BjSq$4cX*|3vr!NwMD= z8;wh6W8%bNylMY`dq3ix_5bZ(rub*aN7_6x9GkJT??>C?S^ur6_|lFvW5;gv>@?on zj^SALm52-OB%b{%)jrE+I@Mmekr&^&t-aVIdS%C0 zZ|nJwv9GiA!YNVlcMcEH|Jt|n5qoU9W172+X^xIv&b}eMXYn5wZ*snRF ztJtw@k=TXq{hOynVi&uJw>#ZifTbs*>4YeBvW`!j!7`%dDOPW)&W{o&@7r`o^Sk!7pSuzf$o zfx7p&mkg6X)RARw8tsl-ZjW1*jB&>nM*6KY`Jz}uXZ#$$!ap!JxwE=h_;K&E+DU!& zOjq&B(Q(PmS?&8VN6UvY{&Pr}bK;UCa>0&MbYm{Li#X*-oNE{S@~eLTB>wO*@5TESu*)ls0IolP79O+Bj zpYF!{S2-?OJhRh&=!$1L7;<>#|L}d7+qU|4Hw^O^9{*u{=)E5#gpSQf4840@Qt0hz z$)OW>^bDO?>Ra=cuSe%No_AKLtC%rLi{Kv=cIWWu!z4G>= zJ!1wNEww>ks4m55Z60AvAHltP!wA+V8m(pCK&wCXn?rgUMxRZYMdk4e6H6M;4zx~; 
zj}JCdZv4XDCBIDIxHs3}AGPU$R=)9szOfU&A3C$BJS*1N?e`cf($Vd+2IT&vvA5AW zg0)dc69cX38Ai*b6K&J$eesk03j^!)oPV=foxpbKmL zBkYP7@`d810F$eIXKtGMx^GbF8@}G9hG)!6d9+1* zEO|+wHFRd6d?fE4_9j;6)28i7$3`a^MSo`QthJ2t8W$#)9HMWYqt1s%8?A?E!_>r{ z!4$4fi%$uvO)qe-(U(}=c!AN{5|>is@{_3J*P)K9xpy)nVV@x6kZVvOA@V~sUW8Ai+NK7Z*d?C7UrjoT92+NSrQKBY$% zuI+0iRTooU9{tkK;|b=+B$Zre_=CnzZmw967#rO5=EAif(9T%OeLBWi^LJlbu*OJk zsy*H|eb=46OP-+)nFT+n@c1)_)QvYPG7HYGc#3{}6dU}hp+?J1hB3V--x|dGKk&r` zf9>rVoKIWZym3ui?wb1EQunX3lX%^pkrLC>M%2J)Y^L|jVm3j^J4;^yT zB}0d7pinD-XnZ2 z_Qf|n*VHzBvCn9nNg4INLG+oi;xFDo)uBa+C7Zxu1Na#TKK>)2Z_vTiNE=UKEDbsd zOSSZG7*7Ktc+$7sm^#Ea^5ei#&tsz#Xrr+7C|G)yu{#Ksmh;U+gN)W43kQ`<0pCen zKPNsZ7|(f`?!_fmr+}gD-jtxA+|`38KX|%rvA@Kk1y4#aL>sc=p&@XWe{|8F zx!}tLUk&FOt$V>(eSc^szHe|p^rL<%^LT?R<9h~|gEi=S&3dr5-xpt+{4V`uV=b2Y z7J;>f(Sg>pJqgtf(B0fe5=x%nTk-IOi#@%94O_>D%<%~&{k`$Q4|wMO1#>FCkr)$v z_N|3$kNXCc?qi&L@V$*-aJG>aybl~cWh6Ek;P44>*n_s@6nx*p;Sz9|Q}DHl_24iF zMrwwIahOUy22;kv-qhev6M6)Xut&`5gjBHD11w%;VNuTri*Gn7SEq` z&*BIchpiq{mU@@5=4l&?M`(8q7!ej<0#_=_L%klDJF8-8!K?@t?<&ZwNM&8odLz}s zVl~em_4O#6%mbf>pNdC|H|8%+ENK8YCS%lViuE$`FkJL%tgzQ5S#Zw!F>P0#^6b2mJ?@qT>mo8Wcn@y2bFICkjh zU3mOAs9${?I0L5%eDB>sfmUJK;rTZ}Q&u}EZ-&p`G!NUdfp5?7#W!YHv^B7LfG2*% zXy3r<1{ZA&2Fw4N&>!0BXYuQ5`a-<=*+m0OmcpZ-T{I}dqicNwOY0)E)wcs}?eGq) z&V!!vpr?4oX$5rBM1RbK?;eDXf(r+h1QQdXqd_GTp`#?u3#>e_Hp`a;4GpwtXuC~A zQzJB#6f6VRn`|1&gN6#Bp)zQw85(Md&`|$ioiE9vp#tzPJ~J^sIf!n(W(+h`2j&Z* zAun}1587#lb{_J?S2sgDQ+^k3(N1zga&V)kXRsOCDZ4bjq=z>-_%oi#Mpn<9TUha5 zoPWT($9;WEYi;_;g?~1Pn||8BxA=H6 zbrJpSquiwlzThe&Ik<{C)EK^|2a2Xx^rL4)Kl|Vhw|bI;zBAhCCza=8?`@|a-`X){ zW$^OZHvKgFhLpA$Ln8E}vYwz`H_g2j`nlCbKcgzjtbR(4(9gT{e-*b|P%WEw3^ z^=;E117G#NBwJ^QU*Y#8RZBN1WsGhFOZS5LR&aVWF3>97#ldIu$nl|rHCKiXT|jx@ zHh*C}`a^u_eaOih5)*>&La#@A@E>xoOLypq#Ug0-S#*YIJho$TU<8YzfybbsAn)w5 z^@V)&g@ZLcLgMk#8=eJ&(xoPdkHeRrqy8(XLo(OT1v6^TH10JZ&%~!w;q%XeOB3FH zKKF`jT|vA&6C8?<@AIXW#?jux@b6vm1A@{M7K6Kew6PgIA%^eH13NvuVf~<(cE^F! 
ztZ_*t{m>8c!C39w{EF**{@`|=`v6*#juQl0kBy%`x6VRg*;j>Bb+hq7| zPxx;NdUR^&gfC{zzI1QsZFK3k(53e>c9P$IPt2OyVd%fd$F8e;#0dSx)}x#G?A$5J`3XYGIe*0@m1)yA4x*BEQS=-TnfnXi!~(~gWqZu-7^?6Aill&)F; zrll`{$=&B8^D|SCxt@fkTIswABeB{K&GpMPcGvqxmexZPwF_>kn46Gf(b6E^{i`u3 zc$~gD!X7TtUH%@&-bCKM=q`POHNNDgBJ7T>i~E$^1ST@)-V~va%(+7=RL>RYE_=a~ z#&$M)n&tX@!J87i!F`4=I2-<3h=~48Tv8xwH^yr&i zddqC|mfRxvF6k&b`ey4p14`39gPLlo(^%@WckXO>?(7I{+*L5FLb789{U-ZC`m=b( zL*z~C#)lf=xq0X-vdzRV{m5?Fae3$~BblfFE_#T@sQ$e1p)6!&JN^a)NBjB*g`+I= zjxp#Rt;niX2hckxXN;|P2#-g9(IX^zpAQaaft^}pq=Q8VgR;d`w#swUf@Hk(<|c4i zz`I$C`H-o-ZFWQ+OX!>M3q zwD5Lke95oD+bfiN2)u14|Fsbh-g?@2GfOvtx1t5}Dt-d}O79ll?gMWhfVV@8zwjm+ z*yZUP{JZdmt}MK5zOzqB2IHGkFw?@@21{4Ys8BtHw_5&N$-!H#*AtwXU<5Z9p5Uwm z4|p@cTh4oW#=%>i^j2j30py9#g|}!u*#K*qqyz9|;jPv;ID$71^|=cjd81Ur|>48=l6gybYvgx)Oj89JKolpO^>G&UHK|_^)z_)RCxCkc=%*=B@X7{{!@6@c0%u&6ZDsZ!h+S^<#%O zx4xAdI@TNB28Pw26T`fDwR{AAcyj{0@z@V?L&qssSiM`c!8fzu&1s4L;C<+_1G>nDAOBjkL5}UXCWZT>um=`EBmaJ_ zv1TZ=p?K#4WVHANcFyigvEh$G3+YCD(=qh4D(Jz-w+7)`5WoIYrcolyxA-zjg$0fK zLF(@3yFIW;XM&Bhu?zpr=LsG|*SG=w^#kxzj~o)tMRR@|=ZlTrmQFhhoS#owo9Er; z(rM4HP#uKx7JOXNYsFW4f$uemy@HuWd~iu3I5&Es&*r?RXWDV@FKwY;_hU~CTiec8 zqj7#d_MmXS%*J^OdQqE^X7SZbbh4W|!TI^vf@3_rB07i0tP#5JlAq#(xX$6HV5T0N zhB4FM!c41ofW=Si!KuYJ#ZTeE!eX0^Q(;KF6doDDq#wRoOP@w#=ym#9@?E@Db(39N zzi2>-#>d2NT#mky02az0y(H8`8*kw_8$Mb_-n zOtN@oy0<_0NGP3&58cBzw!qU$!N*cFwPgRI)RNbIqe^Ax%XV%AA2Ij`mZ2*ebH88l zx;HlXQ(uqZBjBT-5f?m0o!_(hU?$(W0Unq}8e`+*GVrm0I&RKiVBsTEHc$Sg6{>^q z(Te{> zG!b1i2|kt#KkEr!OM$JX~*^@;=4f~-OGRH)uM~x(_L5Z zv-H!|(of~v{VRLs%D-C|XS8ZhQ0b=m&}_YUEjnrnbe4zQ%th`vdTOcf`eX9%mhuhx zcP-y8=knV+I%^qz;(Bz0^fQf?y~tkauZ64~*u=YjFd@HNeIgjX0RLNWav!`=yfAUR zrKdK4H;eD;KC(_2#{bZwIKTQ~Myq_b!m!#Ajb}$+wRj=rJj6F&2jBA9N>_E>lPvL+ zjWr9z57B=|+!v2mf*ek3vSc?i+!o5^j1G~BVK(k<>cL&RB{Af zGQ{f-9zoakE63;Sk53Kuv2@pSN^2JsR!D!9Ogqwl{lGskma?xkW_AC*e&EGMYJzkV z5B{$W$dBXn;Xa$To`e=>qgUV88$Yu*p{WSkt&{H)ENAAsblUu(6>5WMOTM*#^BEWB zK#z6k73;m8rf2al)EVJ(-LLU^njG4C!{Y%rToG}4Wm+eQYJD*I+Ao-`Qz075^{$>CM*fG z<{&fOSoJb~vdgn#Y`qtpJ9=+cTJBGRZ?sLn@J`RQdC-?+Wj)x4;C`%Z`!LRn;olu` z-iJApKx;Aldp%?Fw)Bks@NddrZ}V^A{!#Jo+o+eN3&Wo!^P_Msp6zaf=3*xB{21FF z6Q6DR&G?Y~8^W|j)8H_OUL1^13{IxrkGbUjL2$Z~XCDIxlKTh2uk>Hx`(5yTIk=Wz zcp~!uKG{3){+0NsUIN2okiA8=>=)Krz*%cnl17h z!G{>j9!5;?Tgd1(%|n69zDBQv_u%&>zJcfi{n11FMf3p!O#U5AE;jl!-47kNU@v)T zU*^0pCTGF#Gv^JhP@ROyy~vAy1(WAvTeT$gvUC7p@;1JwbKQRmy}-fbUNG8%E+ap< zTOSzj!sJ*mDSaRpOgj3&Ui61HW1xk}+ZK6CcB6OXF1RU-Nw1AbuTd%8NpwHP@hLS#q8FKqF5c&NkQEu9zJOngl| zHw0goU$_CjZ1G_D`b|D>(Cr^yCI2w>aQwq=UN0V z;mtl19zL()Y;+;fRWo$;ZFsosCDFt@u$~@&=0=tU{~1BqJJb)H76#& zRDaGLzW!rqMssuZXZu50!Y~+jbeQe%_IBL%3vP#}*Dyx!N+-8@dX0;x3%k{w>gD|+ za~aVX7LON(HTSt4o>EMG<%5}k4S8^6Ug&M)e>Qj(Ej>zqPDJ;Xd_&LJ{Y&s*g3TJn zRsMk#$`FQ{z|ehQ@CnANCAVkEt&4lYo6m(e&#$NgTbfJV0G}A;PYrtTX^Ah)u(9Uz zCMJvnYa7u;-T-F@kln)BVct0aUw)d@=ILkgGx<3Xz$idkMzPQpRY?UMM>)C6%*24>yXfAs|FxB>d zeSpt)h7n`otYFcQl4^e>fcu#n3 z3OqLz-rEZvd@6d^@VSRiby0mG;Rd2u3Q&NgwA&)#ro;AxjBELWq`1}R3xt?+H zEM1Spt_lzK(ns|uPG3YzNbB*cQ=&5!R{eF*IO#XI8t)#oBFH7_hZ zu<<-}cX;-r&_FXb#G$d?P%?V_VemQyUbLOLhWg*dg!17@{v2pvSl9-TUQj>HXnhB} zPWsMX_+&ocJb3nmkR?;N_hdq1P;MoH7OK{swc?iFoiOzqRZ#7VU3fFfLr;)<7^T>}dx89367s(`p zdz&7)J(T73S4-w?LWas$qPZ8%qlcFEKPK87ihocs8=G>)SNb1&1ASWKwhNj$OdoE? 
[GIT binary patch data elided: base85-encoded file content, not human-readable]
zs}f+a1S?eVnc7>q7Kg*a=jE3L*VGQGN`S@U+L3~>wYPCC4v!_cA>vyNsY-y!;@ZQ7 zpR4^E*J5y4V20Ke-dKA{l?|Ik{>j3!+7S6BrU(pHBUr3Bd=~jz$dAKkfx&tjELI#o zi~PTmABWEZhxH@ySTXo4^8TAV6Q2bZ>z~17nfNUJZ{@#<&w{Q7y<_lMZeUsBSfx~wC`Ghu;*z|GuEJuGj`mq}HW7{)FeKzP%g)6Y| zqwI{m!`G8TUJLC|8|>w$z!> zvu6~)WoaX0z1BWA7=z#9eq%1t#BW*V3r+l%Wq!-VZ&~IF6@IIec|x(x9sE`l+okYZ z<2%+so4=y_qTz=cU02ibTlf&sH^<|*Y<;rAaM5Pgd3J^45*(Dwp@Zi#v0UJw)EqgO zuB3P_>=Mh|cn6-#VxJg30SbQvmg`4gx?=ELV7dMoOqYS@DoubzRO_`CevQF*S;n}= z9Jj*oP$#24CccY0U82`V;k$af{P3=c@9N|7+U-C)eHFgTLfh+feAlLA_^#8)E8Q)7 zkMUlXwy^6sJFh6bmxWJy>hmZ5fM*@v%MaeG%7XXugZHYk;Jv_lAv+y-FR)(7O>(@K z^FD`uN%%#rpLd1#>H+_#xx9Fv@r3>nx^M2;jrRiUg=}{CAcFNmHaqZMV7-vd4!jpw zFJv=0-phHPL%$^al2kUWzA!!DpMd@7 zxx^?<K|1K8k<4!hdzr9;zQp(I5J>;lDbunJxU&;Zy9~ zZ;sKrOP@~oCnfqO;h%1gQ4Ig?Q^tRFV#}E{NzwLm?w=pRc1t=|OUg4T(l;T`oPzCq z#_?aQqxY|};J^5PEC0L1e>u-3#?$oyT@&T)*B)TO_P+lD0@_I~evsxA9+=e$1qAl78x3 zzn+x-N$Q)B{z>}ba|!>|XYsd0k2K&5R%MI7MZSu^MZSSiGS;J2{4Hz(19PO}Z^=AY z1M@rNo9ns_#ESCk@wcjM@wdoV@wdnq{{$6(OZ*HPz<5t^uvh_x zJ$Hqkq=zNXitmDcLQm7ri~n;RTB`lM99rt}w?x(*zD!xhQ^t2WIw1IK^rnhu=;%|Y zZ*uykAbY_&{k+61wb)EzvnRxNIr`JlpAuWwvF#n(-ml}k&anP8aR>TX(>}(G!FTE7 zOZ&Le#CN4QraYziTj)+_TyKeE=-8o-9(CqOqH$o+p=a6-JrjELjN`X-do~WgrT1yZ zJfwZDFb2QHy)qvdgWuBU0%P!7`dpxi-%2tMD7LqQ-?HGhdJ=!j(&yUe$_?C>ioZ2g zkH00jm1n?fO-&Mi3mc=6q|bH7;A+*174f&kuF`E0`#8yNo0#}3eavL~ zK->32^6PjT=e<$fkwb?b(Lw2j?%=OZk>2v1vjKl~TH_af7QwJ_onENd|L5WNo-^b|HS(AI`{jqn->{&j|juRx<~k{B(|GLlN4<|=YAc3m4u#2c_u~r%JXqNa|*WdS%JS|ty_GDqxP*H ze~bT}#(Cly6}QWIuF+UuiP{0ZPZ!0j-N9caX-|32IfcK{`+E~pV#i;dC)ZW`rh>oH z>#Och(cbAb{FQE-SlS|C-+HcJ@1!jf);CF8B(xn2`{0!DS9(8Y(l<#zb*^7eO8+GF zO-TPF{qVVjzv{F2TUC~Qxaqr1AApAW!GGYI8Gnmw29`*zEmv9g1;ro0zy~QT7T3)9 zTU;~OaT~y6Re>8a;&0)PZQzZ*%C(2MX2##*nu*J*vc%s4J0!jbUvFp%&L-cCzeT=U z1Fq8d7`5ZG$XD^V?D(uIJ^q#*pGCfkzh%c~Rq63!tat+QRQxRipH-#D-!kx7{8#a} z415-JRq?khG*z}1`L_zq$v0@G#>UW7#ovOK2EEkWBKa!*mWBQbQv@AV{4Je^3ZF%u zig6-&20jb=srXyaPT{loui|exwCuoVIkZ%Iz~Rf(WjrN(mZJlr@nal)8vM}do1DJM z55B5Tie#<%@zf*)Dh#h{73eWVS$+eT_F!0EG`KFjH|+IwR@ z>P+a_Gm77`=ni9j(LOh5=qvkNqKV(K%om#YEzA6tiQlr!6)OBzC-a12n>+X|3x2C7 z@waS!a-w;5h2xUATSojX1Isnl5`QZho(sD~#ovm~jd$RqEcQtZo{QL9D*l!g&oxz# zzop~3lE&Y%@M{dd%QD6_=C~DxhdQbFTULA*byD%S418B_mml6W@m+mfUb`Kr@Ld-A zDtwoPwq3^GI*q*2-Lm%>?`3HVyN7cojDzNud_1#mc=%) zkF)Hyirq)U^ubPq|8m|NjXUMgp~rMkdLmW)R}bhd-#HubU#BJhmW57sIk(HTeXiHJ z-^A}s_RrPg_}fk&#lKzQzdC6T)sLm<4}IG3U!B;@7XIn*DR%BR$7tQ9Pbd775`B~K zPeLE=KE;1^V#}E{NzwLm?g#(X2|bhYOp5eP$TO#4JD+j<7rw+Q{uXP(QTtYpzs3Jf z@n6n!iQ{ke0bLZYri%aSF-TUixvNM9$i=M+ZF!HQeSm{iuO*g;J+-k ziKQ(P_O0jo^-kI%VSSUdMMB%*l<{Ade$1qAl78x3zn+x-N$Q)B{z>}bp5VU@1%DeU zE7ICtNYkoIj*r`N&%Q^tWT$D3_a2zFB~8-??(le3KZ@?`aX zPj*wK7HALNb??zCEhpUHofG!obMMiSq%oXdN*crY>s)`0>sv^#aehB}Pu!qI8V0%| zjYHj$$0vIt6-8R(Jk8bEoUU#DE#I2Vy!wulCogRAxSOVH{;_)~!vM;XImn~FpY7JF z?@9B7C$`PnGL!P=P~Kcupgr%w8ApA0&phhC`<|nVuhSww2)H8NV%}Y+H7?@a%Xs%2 z=$93mcWjF%vq|bQF;lDlHt&}6)?D6ubk3Gh{XOB!GpQ?d`ALRWoqcHb7P+4Di)GKw z%*@#s_GX7`@PEnX-r96ecv1VDEpNJVHeQ*T7k-oP&E$M^W_EZc=gYLb)_s>PyC~$! 
zZGE5P6Ry0hWm*{@6nnf>00CB#{x$yNj2BkucoaxmqDj{Y2%%=vDABXx>oH< z*RECcfDbaMx8!F*kCODPjU|U?Zz-X!3$@H}rYkeNOv`L_kC}LE9PN}teHK!e1zJ`( z%as+*rY>H}yqGq5-;>j%rO#=0=f1LLd3ss%bGdWZK)<%>e0!N*-Ym+yhw|=)2U;ob zU6gkcWgY#Pb`%-eTu50LalfY3eeqCm!|L^((A&b#Vfc9z{G55bdArfJv8y>)5zYo^1`A|n^@?#%M&yTiOY zhj-s2b*Ap{f1~DFf1~T}y*J1+Ug~Ga#AN6v@006XyTRqAttlV=nAKUl<1U_`$n&F- zu`7_V@Kx}M%Nr(dbBOX7&yV8zd`)YYwvcu>`8(I<)ZQX+XAg#LVu2N2ipCl`RK$(^p)tU*Wj1!Y2It4o~`hu7usY(2Wd;; zq3kh%NbdFJ5g+9_a8n@i-i_sva6pSJ8t;lM9_WsUd_8eP^n0c}!!wz=o^XZVqv{}f zprv1+{R>`iSl%i5>oxDSS*06RUvbTb)eEoEBH^o}<*9==yp*>@+IK|#YQ-PY2JnZJ z4OxP3H>^>7Ws%8gypMe0w;P1NCQx64KTbS+GrVvUJTV^L7zdA3M2_EA9)V{Xg?FT1 z5?$;imF%1SWXUY=(gohk@aO|`p1h0m`IKAn0lbq<8Kl1|g?EHso`BB2mWQ`|3;y{w zyfo&+SzB&`honDy)8%eNj@qW6*M zA1??rZ%g}p^NyUd=A8qz=3PTw&9Bi`_roW0;Eajf69uR*ijr2FBK zr$`x>XpsSbN54iJZ-P&X;1j9GO!(w-=rD=;FQQzz)V~&*?B#m_bXw4r7v3Q{1wA`A zS8JZg^P*1{xpKpXPMHLMh@4G=N98%;3E|m#X~*;pH%uoLrPr^ir!6PK3o}VsrS+?) zp)XV!;D;+H!@>Z4Bi|RDBYY8ftgLyWMc+u@g3eOBQRJ$-aXRn&0(H!EMRkXv$4~yw zy}9Kwt!D41Ts8Z~x@+Dq^VA%;C9UQ|bjYFGG9n*^`bCc3>5Uw_FEeudR;|5Gt81S| z>aSf|@M+R()L;6#y&hN7#1j=uLN0XvKo>ecy=k#4Bm5%iPo(!qhdlk74tP9Gd+1|I zPK;Yp#=WI$wQ6~O$*zaC_}v4-OZGmzWu|LD_)FTrjjKNyx1=x=`JAOyn{!h3^{@u^ zLzMAN13v2hc;9pt7nFQE9$m!+^~Yn;Ra{W=O@Fh7C=Pu{16Xu_JO*9G1ts5#KUZ-< z{ju0{6&I8|6L;=suXN%C8TB%KRLMU`zTsPE`cwId3yROV;m>LMpOb%te8Zp9^gs6# z7u0Ww3n~}^@;)X{#RV1Ixu3Y8#JBn%iE$+uD*m73++36JM`>!%&RoMJ|9)s{(8!#V z_Y*G++Qs3{$^V{()+X-UAEl|#&h+yo?_r^*6?g6@E-17!ap(M> zHoRlQ3wG%YkB97IO8Zzc24Aj^Bkkiy6T6(^*zuI&e^xo;cW3?Qfe)LRgD))d{3Tg3<{ypuM*Dn#G2dvP<1_S?eNHe2Kd#UF#o)*Fd8!!v zxIX7+;>VNB_lfN-_;CZbIk&v7U3+q2u;`27pzFzSu=s+7!MX+7Wbs?6Apdsq$B{pS z{JDDmb38MTYb&|-7}r*DZ6(*5dFD0pcai@(`Mb&gsheM{Dti=`Ia9D zEvZih{-?$M>TSO0Zdp^ogRgd@*I0`~cg6BJbH8dWNT=62pN z@!&dN7BZV;TW!~x~pLTBfF4*k017wM<>feRP=V}Goi_6q-~IXD|G9C`I2 zuRiPmA9jEb`ODGe)kk0Nqp$bT*ZYt^m5;po=+Axh=RW%Lj{C`{&-T%0`{=XP{o;H1 zRF>9Q<_e@Pqk7-G1iy%m{+ab`yNud>6aB;k_3QCK1s^W)K>d0=P{D^wJkWeS9;o2M zB_3#gl6auv`{}&TpOv{oIW zk6k=f{1MZeR;4BON7+=^(FcD>KRkdw_YlWH9IqwaX7s^^zY2YEjK2!<-8{Hs%w->6 z8RIFtU27jl*=-jSAFhw7ocBh3N*p@$Egh7e03R-Wc`1H;eMN8ijvXJK$OeBA+u1(0 zmoaxT+g$FqkLzC(8yp`%8E?xNd(pOzF?P+^=oowP1s!_8u*D7ATjX8*)a?EG{+tfE zH|AUH^s>vnUCy2RJ0HV!mp+Pr!G|kb6yM2Hd0u?aPjze`az1_ z2!C11pOP)@+&|yPoPZBIIN{j0yY%UVpHgZMc|HL@r95_t8H-qWNNkkDq!=BQXzXF- zp%iUBlSa<{3CCvLrB6~GN|C-vcqnDt+OU=9`Q7-SV;?iFopOV27i)>_;>80y`a{Dm zj`7=u*X(w1O&Fxcgeh+i66(`NPLT(=ZLF%7SKiUD)@0_|H)j= zDYmD)N3a`R_M-{xyXS27+8fL*8EYboFOAx3o6u`AciP_=pXvOV$WChp&tlqV;61JU zX}8ZzOspM0uFo-_>(^ELf*+Umtr7dE3wo&gQ)oxK-7{rK)b3fVx0B3qEB#@#QzCnZ zcCwTwVgGu*U+<)S64E0{`^4E7Ry#uMN@Lw2rcDyr57stG(qEZva-LpK+9pZro1{(R z`spy`w)D1|O;l2fyZb_GnP?mmPf9UkX_}*i_<- z=bzU7-5uNMg<#0ppCwqU&u{%L*-q!`SkvCG+s8wFfs=LcUGe)liS2zp;uWKRu06*uJeDWAOc5;KHI~@+iK$8GlN{|H3Z+cDZ-(U$K6Gy-gp*zh@Z#WgiQQeHk|% z)Nx@`ESMfE?l5Nj2Y*@kDy8wCbN~5=1B)B47<^@nS1i0_#fLp@9iLcvDMfqG8j;pa^UCvYdmx{#aAx#g>MwQZ8v3ufD# zr`MCVO_KU1X`8tDwBEpr^#wlcw8#Gfi>u;)RoUWyk+1M#3Up4#`c$`=2N zd=>wTd~<)!2I7C=@2KK`RoUWyk+0%^k#ENT!uLtV|EjXY{~}Mt{~}NAJqq?l?VDO< z+w+=yh07t|m@ibmDEO8s|FSAu{4esApDy_ZPEPIL4lYc^v8l4e{~}Mt{~}Mt|033x zivLxm$N%ELivPubH3k=&lEzqQr{?B_rlkAHH)y2hFodS!4+-sJ;(rNEN#C>3TKPl? 
zO-a}5v{UiFgr1~_CC`ZeCG;fS2<=q-FQFxAjzi0i_+Jh!ga6?0W$;)xWIQJR7qoL^ z{fXeO9laU+(CLGmKFGg1c(v1?2iGOAjl?#O!-qNgv(d3TwMOMf?&wcPf1V}!)6S3f zv80J>u#6+^a;)Nib;GVarTAZ>KO3TBO=BGU#Rm3!A>SFl>-)WQqYa$#yJgP@$EJ5| z`X0_Joe4d9M)6@5d)Dw*w9oOGdfKn+X`YceK{NiBW!}$>|7Dq{GUI<)+D66y>SVr8 zZ13|KAC?FQ)~oTqq+J`L{j7Z+T*dz)W|oToHC2!QMO-8m|7&WJ_+PQ}*c})v%Y3yl zkF8=<5&P>$68~$eCH@yNs#N^1sh!0CvhZU}{4dKG)tHM`F`lFz690>OnDM`)E)wU7 zx+Iu`?|IqbO*8&iAD7o|$JuGC;(uA_+GYH&KIDt;mbJ(6zbv%3>nuBOsQ6zNzUXpq z+H*PnmtWrly-JV&<=3%cReJm{Vt=XlUsLt?U&Q`W@xP`fiT~xi&!@0;4*jC>;w=24 z)~~z5hV_7d)Erzq-XzX<;hc;AMXW9rud7Or|3&OC75}SBkN*WeO2z-0s>lB#_LqwP zH8n~6FXw#@{RC5&fM1fzsMRN>2mF&nM&o=~dJ_|N&c*+-*eZ743cKy1;(uAjRHhHM z?zd-*)9iCU&U>Td9fuC5MhB%QQpJDufS=_%e>3sFEOfHVxm~X9^6cE7058@FJr)0U zh5zc}`2_Krf~m%aw#UJ!>5KSZo!HVAe(LZ8cJ4RFZr!C%C;XHWedYNC{FL(8#TvIG zHwnILiH_Y#7<)Jw-m4Q^&!ka`wxM(X{LodM&@(9yrAXf-Je0C+eb&VPVx7D{Nqmf> z_StHDi;4fm|4wmQ&U1<5sr3O}6tAX=@9O0H@}6@h{+GobFtI6CeAIb#UA1pl_%2I* z)&23fp*VZUZuj(R{4b0BVrid*{qMPcy_5DySl=Y=6K7vo?TB89|7Gd#OqwU@&(8Jh zN!uhzeUr3FTz}mgcrJSwu)e@|^?Cd+(Kii_KbXYcu>0eQ-$ZwP^6Ya61{TgfCuXdn z+vgY@JePxwb8v=dH)gZb_+J+L&5?a#|2eqW;6Vpt>|l&pPjv9wF9tucVBf`7cko@# zz5*}D`nGn&|8nqM4!(>zfvjm;JZqA>5gU*B=b?{yB`#Fj2 zeLmy6;`XQP%lKcGIrOs?>t)9Oik+Y8i2vo_yMn)DZ#M_u6`1Uwt2Nr!G3@IUXA{25 zzPD-Ie(!w||I4y24sCplQA|HKXyd!i!FV~CHfy}N9`K7=(>~8IUUB=}8vV1epMI*b z`mFok4pgxkRG+3L@ee>&L&&OwRZ4M?yQpXs4dU|FYN@Che2h6X*K%q-~R=zDe39Za%Fy@M67z4?E=P zw^8E0?e(~tCZ4ER5^`y6rGs46`#kAQi`g^rMbe*0?~x98JWYE%T2sl1aZAd$Z_`?> zTE&b*=2w40{OnAvZRYHl_{M7O&)narVSezcDjgr@x5fJ+U+ou8zJYU4Yu{CRyf5sD z+6B?^Uw&J>FY*-*hkRrIU4;*;(&K&kE%Cm{Q}MpY zQ}Mp4^mt!>-EW8gD&80W)w-(ClzwfDg?4IAQfNy4{p1@oQgaYOQ`YCNfp#(YFrg{= z-?PwK#rqPP`ib|Y(@w?v5_*#Nu;dx>zJ#8B;(b9o74J)E$^SVHEj!|UIkXJ^gTt4> zW8IMPPVr&T(vf}Qe>r+I_@UD$Ieij+_tj4S9$c5eRubDh4j<;|&*(k{j{bD?r(@GM zo-N}eg=?_%=k~Fs8UM>NuC$LQ)m|grurp5yA13; z=PmKSoIc6vlX}=Eoe4d9M)6@5d)AnDwfAYJo@QRqIu~Qc|FX;jn(@CZ^Hyg3FH75~ z_+OpO|B3B=-s8hA&^G^k*F#(U?g8PS>;C7OyRKauR@Yvq)wPRXTK&x@t7Q(oEHm)o zl&@&jWvjk#%s0h01d z{4dIH#{ZH!Nc=DAkYG-}=Vhi{%=ljx`H6{F)=TBJ+i`Z9s`y_PdUhHAOUm02wZD3s zAG%xC9>@Q((B7`I?7X4ke_8nAbjJU(uYC(XOv!Y5;KODe8&;Ys-o#itV?C|LiZ`)h zeX8{MUw%FQSCx(p^Xu`yrt0y(z==T@-wjjs_+OmkzZJz_IPdeZKIVh3e9VtUv2_mp z;`l|aUw4HK>j6KiIks8sGk$T_zPZd5_^`)vYV`xC(qn-6^%!7PIxY;182T%8-BdjW80W}o2sw?$ z0OQ=0Q|EmlX2*Bhs{$p<+HLXE#6P1xlpS<;JK`zjr1XDKt)l(A&!GMUQ4=- zr2JU$iBf(n(iacz#E*q>lk#KnN9``VuZ7)qQMf0|n9B62*6n&@oMyL|o%cHQ?FpYL z9g!;j%VK+Xm+#~|r#t?a(iK^j@wV>sE@SL9+d9VB&10iu?8O#5#X#i!nglYxiTFA) zZZ(o!PVMq(mviU-SiRU&`Y8TQjMM4ZQ^dlz_V1}aEZ$ExF5ZRShXv`wg7jfQ`mi9j z<7A10Ve%RCIjT?6`OF;e=`q1dQ{XR&)nMU~4&UBR>}ltIuw1=JpH6rxrS?d|Qz?&M z%<+imE@M2R^OPEoSolb6l*6Pr+r-L8DcXA`A366Y9A9*oK1ul~MfxV;qm=Dy(_R)n zGVEoYkDkJgo#%Ilm5t08ckvCnZLG!H##08x+s2CDEc>A3`eOS0^|P`kh_Qa6<5a|+%V9ps zSTiZTG-}U@ttMl-{)QcA;n&69D8HK66J!0vHf}rLudDX$3jbxXpVa*+*h{8eB<*F` zMS6P~cG3QPX(z_zdOOM3+pbq?XeUb>B(6@w_hrNh>nr?M?!eUWU%5K|Yl*J2m*{KW zqRS0DSnI5qF^_`>V=v6N!GL{a@!K=}G3@?(X1p-Rf2`5I7H!y?4lNy=qQu(!Tf!*{ z9<20ot=c{xY2aq<^S6e7jeUO1!G1b8S_hNt;JX!O-hck%yTwL#@L$e81TV+>ymrI~ zv*2xvIXK7eboO+R_yu+!4r4EpzOp;d1a85>e-R(d+0Vt<&&BWTjq^X?=RY|Q_8B@8 zdep&x#qa+lw)gpr|FYw}%5Ujdv)qqws^Gk)=r}K9o?QHiqI2a@%-6)Ui*i3tfd;hrTO?JTu&NGx>5I0 zSfa1P8uD+hlR4P0glpaKfP>XKjo3c5hJBu4zm%^-s`9C?UoX-9>5|H45+4Bg)xj(| z7`Hyd^d*(gUgAeJKOT?GjPqgXP0W`a+ckM#2exa9j_q393AQV@sH5-g4BKV;sOUbj zR@u(IFy3|*A6L^S*uiP3IdWr8-0;1y+xPZ$XS)q&pF?%%8{KQUujruk#d(VV62IWd z7MxdU7dS7my=9*UvAsLS-RxP8?VX#On*7`O-Ok_kc69EK-HWul^ilkKhVftKSWm}! 
z?eE{w-xy;(@t-yMOCRG|`xgiIrQ(10Dvry-ODT;VJK?32pT9TWH$KkqTDOgSUwjXqGTEyu(hw*EQXU`IRm)L8IEI6*3 zBseaadr4-eZPIaFCjPK~TCNp;Xt%dajLLb6|5EMSNBAz&?imurafuCM=o6(!jCM+7 z?>M+FUngyo(B4q@r(j zt>*mH@LQYyk9FTsJ$~1mF7R4@_T(uTQ`?`lI_Vn??2_8^n``$Me5UqRu1U-(1COBg zrNpP^mj&0<4q_i9i92QZHmTsu-Qw)QrzN$e>Dub|?2iC=Qz^|eE)qW;MSX5p(1 z3xf}F?cu`D)qag@Dh`$VvG$=2O~D(8Ikm3v#@b8Rk5*z)$sV>}Cx15iCkxAJL*$z= zsr>kV>p%N*@LaKAB!XvY7t%oCdNzbtcvX8bS99HEN;)ye#z*yiUm z{!7-;f0;`BukjtYuuWf$;=&|8f!c#l#RAgff8i%Wzih1moSXxGWD`W0h#n!?`w## zYpwCX@`(pV46Ai|Jg_MVuwi0@`)El%rJC9;&@@s`yBcu;TN?|-W6u7 z2mGVv^x`ooDf?aOafr^&cwzW5BBvexjDF%>5%UT;jm8VhCtetMD&#a8|0$n%VZ;nW zP7}uqbKd9BFA2XSl~b$#OAq)biJZpy%k(C8%(S_6pQoSe_)R;eY|ZWtOj+qaMdO8) z5_@-_C%q|Ays**~F=bC%;)U%eURccBT37fni|t}xGq>9^cHfPp^L32ptYdWNy_Wc4 z(Aya2I`obDXLUsfr882+kM)3`!dwYpBC?v8!RGDn2R`J#l#6C_UvT6Pdjt*!#c6AEj-oXckJA6j_0~d zpH6rxCHf}esg%bvW?YZHh#%I8{b$lCMSIb?AG}&8^i0Y}DbhCyAEj(>pY`#>Sf}q_ zWx-$Z|5pBsUwNmvH0L?9e>B#AqWxs=(^K(qs`#=_+C|=T&cqM1*b63R#)>aHkFKls z?FwIJsjs>}1$(K_z%Yw!ulp@qd5D+v@?1$ewd{{H0hqCzdP5j zCvBG`^-a<)as7Eu@McEbFyq))__M`>Q^TL->i9ErZ>B`?!)81FX?7nLWBhAhZ#DXR z6%)(iH)!~K*!>61_+s7gA+(RV9Xy(Y?Q?LFI{x%L!AZu%7qi%Rj;uSf?#TN2CF@Rq z9$i!D4cka;cn5#x?1S)ftPgERd@%=q=HSnm2X*jgp1(NwvqeE;jm#O}NB8n^u>EI; zZgBW=VX(LML9)xXea@~gaJ~-yOpiZi*`G>m^Ya;h78if)rVgB04Zf}te{9pK#vfx2 z$ch0!7cghd;Gq(XZ9C$RS@uwI@MRqsJ_lcBiAQFc`?ULBo=x~NbFbFZ5s%EWwhm2v zjAcw;ItN>}+6Ttg!O|@ZB=kM#F2ATX@$(E@*5~oaEIt;{JA`~Xn5ZO}D8@Nae0>t1 z42OR6LturIVi1$cXD`uH&8Np>KjVC2dJ|J-$Bw8U7oEVq+*WQ*C$!PqDL zRO68?*0E&9INb2}F#J30cCCZ+a&TT}BkoV>hw~IaCh;uZu;9c_Tl}$3#w2!qVb>G% zXEAFJ&i%1?Sv!3c|DIv|m^rrVi}+)mjO8r6k&-R%+&|xk4oJGSSMo*lOIyF1)clPe!SRO6@RP~JduMBJ3DZ5O6Q#=_^{I!f6QW^nHUxa zAEsgjDSn9W!{YGtf)6w8p5BZKto%FajAHUBXId-Qfax|0{`RKOv$f00H|fHiHH7v z*|Q5;AKtQOxGVC#gR{2$(~&t({(=3Oysp6;TQ1Wg$2^0Y(%7>yzn|84e}>j3aqPBv zE^1nKf!4Os&AaF1FI}GH4R7arpMG-mQ*y1v7ieFWt+fqzY1J+L0_}lpZ@9%XxJhsd z+fSaXZt-y4Gq`oVrd30?My*b3-swtFsYx`cP~%!TtmVV_m5 zXHwqF+h#vG>G13=Ij*dY?zFVZ;iTraSzDx>hdp^sQqIzTTJ?+W-0-q2%E@=i`fF{k zaQuM!9PoIX^0`;eM{;~AD?5C^lhw2`D=@j?onf^w`nb2K;CXLx z!77gPIKFn>Wd-v_jV##JUn_kLnrK?x7f(EV3p{W$yl@jdF&^F+7dd`kMFd`HT<;0J zee%(?&3lGt*BrkuhikdqlgGV2p7Fys{UayFr8OR6U#*>=)7pRXX#UY1pV8VU9i6k~ zKS|!NYcG3UX^ol3$8Fiu&lTBraav@{fmvJ1a*&^|FUGrX7YWzO*~eJU$6 zyywC|yY%b2+CoGq8n(w4r!yJlo&g-d3+R2|3jZP!liKJAj#mG60mHfgT1x1nc) z_)+H62ioVK7{9iKGH3Fh7I)^xeLv|J*)vvaA4R=ej-4EvgRJiPKet3m`9=hpy?j>Q z((NzZ9Jz;YjHYdB`M;cR`n-93)4Q~i?+JZB;CLWAcVp(PXw1swQ zxkYPl5!o)*HoGaO=Q|6+qiD}PTHTW)T;7fEdAu8UdHOebMK)aN;Wsn=;eSulHZMX( zw`yA3F61Wnqm$LYP1nAijhyzQjDI;a`^nXwsYfqN&*A+-*BnLHQP6dW%N?HY%~?8I z^M*&cb2k3rfx=rar%ZePP!cXm50w5>c2@Y0?4g`_c=nSYL!)%QyE{#5eAqJ}d{WD8 z-S+Xx>eff{m(E1ie|OK!-;BR|=5O5I;RPeTBMK_68(EOXaX9~Hj2cn!N^boXuiP{G zH{k^8E*U#@(Lyq|LI@wYFn{Px>Ft@O{= zCV%x&t#Kyb6kTx4li5`0%51G`KUqBqy}Hen9Znk>9kzu_HOFpu(2_hg4h<_Ahw zUN@p(gr=>N>o;-j4(?e=onDpvG;Q5i(Y+EkcdK?$>jvueD`a2f;0EMh_T)VR&x-8q zD+{!@!P_D~|DU~gkB_>#^Z!4S31l)exyc==371wv5`{_y6=VXa7=q%O-F3Tu7ZBP7 z5)@b4(uxq2w#p|Uso1r|#{w2xCXfoPu4Wh31e8iHT&lI&`fIpVAqkgM6k@Ubp06{X zPcjVQ=B1b2{xOgDIiH!&xxU};^M0N8`tdt`s*T7rLk(Fq^w;i5;pYp`-kIwH=);|TAo#*g*QiRWUi^mPG z_bz|u@=&Yzy+z|lyQ2C25cNKiVC@=r$%$cp#=d!!)oP2!9}JW=qZ=NjyhF%Z_PnIB zoIubH@;o~!XlKszl~usohXaAy6nJPGayQtMRxAF_^!V&d(qZ_#1sOW%O{+Z!uOEch zGvW0G^AjUH$?w!x@#cXsR_i0oi+I03yuTmb{~bD5ykFsow@VU}?KSXz9=!h&ypL>@ z*B?7oFyG<*&b0lGA%ugJYO0>i#3^)f!|#GQVcq zK-)qF#sw1X?rB!(FON$GdK($o>c~KY6=+(C3~WONGLZrAj2RnWo;l;JGVl;G5H}-p zQA!|LLWu%uJEQc^?Uf{5Bq!hp8mM}-+~>n z(vuj&|B2`2|4eYe5N9ns$lN!0yEUzZ|Kk`_l|O_vdZj1r3^};Vn`YnO$idjN$iZ0T zfHk6YNI%U-qLG9DlpJ`I%Ev;r-aO*Dp)#?Pbz3Im+lUR4>G3Zb#rol6{b+;_XJU^; 
z*38>j|0c2y-pYD-3+v(p*3A6y!T#fCv0j>W^U?=Bw;#ZE@p=6A0oFOAKUv@0HSGZF zjPJoq%MQY`T0;)9p84iq%6jIrRoC=WxsT6Juv-2A-^@D6T0(o~dwoU*A3;8iz2osW zTHqzE1?h>_y;-Ecj;tQ1Udi@>z@@bZkdF_vPDswNFCGAV}WdUrhI%GL7F=*$r{ue^y zn6E2nd%dT(k>~BmN*(ES)cGOu-0baL+l)LPMxM7L&r9bgm#stI9^rTEkhMpmJ$%6LZ0_(Ua)1o;(JGqJ9SDla@>p@Khig`%q_dKC)M_a z63d#A@wP$LD|Xky=DL7Nw)vPf0_Lg zE7@M+PiD=$tV!Q*F23co_3W@WzWg4~W%fJ7Pq^#Zt=`M*-;+PvdUlk1@_x&Dw%u9J z{^;#hek!h4(`4j%J9_gT>XwZfy`JTJlkIQ!imYb~J=P|DAG@9{>||~FE$bQc$UI!u zCdVI4x*;qca{2p1*0Gx;-^jwGzE+F9|5SnOdp92-|0w)_z?;QJ8kt_870c=9qrw3R|8(BkRZNlf1{k6ww$AK)q!#))cO~8jJyRD-&@*uXQ z?81Zdtg>?Wa|~;lv27nbU6wkMq3AVIYof6a>5pq49?-gm49Pxx#IX+#^$4{#t-Ud< zwNtik3w;>Gx;UQvdg_;~RUr2{iAna&(7X9fB6gwH&vv`ef0|urJ(yPJ`8swXvZl4M zAF{L$S=#TcjoUq5*3M9C1u`eQa0Rlo-y0}D^!}-WPVnA$99c@}z#i=3vFveHuxudq z;2%A~+DDP0Ok}8rwI*a_2)%<0y~SGS%Fr{8484R5WuQmPk)b>zLo+uHm@)ILGW4{O zp+Ot_<4Pj#>-7tcCvhoys=){dS|}x67gFo<2=`AIdZ9pllL%9X#pnT)q?ium5Cr zf zy&Zcn4ttRAW94T+XKRyelBfD>?EP)_;Hih>z=F1*j(Pk*z-qmWHRZS7_|kWfjbDN-3it@)?eSWz^_N-&!@+l2 z_p}E2WCNnV-@twdL;nCq{{w0E2U_ZTTLteG=7vuqPsdp&M)kK^X0ool>J7Cfvc`S* zUM}_bC?CWcod*x*d3r6H#QHpt^?5vY?NzL~GpWO#=P&y*_W1)Te)}oL+w40qmMf^E zi8b7m2M)84e-G^u&f4qPpZl<(_OsXUJL-Q2`N+Tyb^GVGLl|Tfc3ds|6ktArl(9tX z57?#NpJZS@O=KM&!Y6@t6*BJI6I1LVVEa_=lAaINUkOfJ;j!+`!M4V> z_{9|7*_;%xL-C=UDFKh&FBRLJ_oTZlcsbn)HtnXb-nre%rX8F2TL01FIH$=E`l>?(hgaP=GL-ACtjVGa`Q2e85C#`Uphu>Ufj{fYO$HHVI12gmhloJ^nH zIZ0$b+_~88?NWZh^TC+;8UGT--+r)5*?8u?gz>NU2kjzs*)5FymCpEU?Uuc8#LRU4V*Kw7q}dth6TPSL*Pcu|m`LOAnb)nXpT>XYT^r-4 z-*xWspV@DtcY42#s!!v;mp!I^=tJy=*A_DVUjZ+yjv4=H-gtYV&trev8UKZhzkU-t z{tFp@*$JZ=|M^yzw(%FQ`mkqZJMWSFICgEO$J1uhw%fBeBj@9g_nVOW8DaK? zzZ0;P{*gI)57|D6Y@N6#H!PW0G%vC20dV|NV8Z*r719H<0#<=|e{tE}DZs?8ptb^F^;Vz6jvM>&bj;zNE7snb(s!^4U)@PhGG_ zeav+_^K^o_u0WR4y%AZCo-?21nfco7?TO8Q_Bj*IcjwHLPbOG^vp%IIiO8a-XHy3AlgxW5%+Fv?ifwfc?Em zH_jgII~|YeH?eXnJldE20ZV$*<7u0pPqN>70N(%GerwErYtqNxZ`FAIZNK$X+Hb}0 z;O@6RJiPy(aKCkk*>8PLd#&ubeuur*kG0=Q`#aii)n2P)Tl=wpG5f9X{3qFOjpqBm z?YDl4`>ioD@DOXvXTRTicK-iZ`>ph$qy5%cIr!UtYh=IGU6VhP{Z{1sZ~LwPbmC9G z-x?#wqmkpyGyhcktsTqrMen!LhmQ7JW99gK`>ig2|JCicB0HaCzmJ{CpPst>ERIe|7t<$lHbPx4QlNtJrVtfR8VF zzm+o1w%;1dPoLU;EAw%o`>m(V2YW4lQTweOjsL~&w^GL0_FH4e|Nq2(t1GJ)w%=-W zk-M+>7qj0Q!x!4myXgH^$~fD8>sk2X&$r(ydA{)dRx@XR*8SEF=I5gKTPfpg`>nC_ zbJqP+E;_FCJ^Z-xd%kb{o(Z+P`x7T1-)D*^ z{06>GS3iy;P0Vxa>rRa89{e5^eIoI#!`Q$6#)tENCLfC8NaY8;#!KAK<&k*P=kbAV zlRp&y$OHJzWSio5YDvO>dZ0fstDnP@7Zp2mHn}q+*Ls?G~pC^Ck zLl1T@(>wd{Pb&U}{qfSN8Tje&D^{`pJ}t$APb|)UI5m_L?eA0!XFU6GZXD-BK`Te~ z`0>Z=$ItmYuebai{MFx`Gyi)(PE0t%*ZEAYN9TwRsEaevHS{WDf*llcz+o8s!8 z!M!YBr()}D+Vm@Yo$fe&NQ{>2n{Hyx@;rey|FnJdRKW;*Dw;R>R3h`0>{N zPes1allVt-@TnZ+{T1Fs6Yu5Dqj2QqHck?#YFAwtIABNoEM1|gfi2YZ)2kH7R){G}23_SygB?Pc$w z?r6X1V&=#7n?}og1@b;_ey_3@h(Q_%{~RE8{&vZKlpi(1Pk#J#iTG0eZGKdjcMOlZ zF%NC)y30TEqe=&y=0`PagyQh@t`A?TA75(IRb#@^HHGrC!#fk`_Z57O5SJmDM`x8D zN$g_F=T-vmRKPo<@V&{8n(oNF{HZPY*fa3e4S|RBj(lt4A@Q6A56RCinO~5SYWKyT z*8dzlWY%_gNcD(^_7Ky#7kS?YUk-qW_9x;?kMYyp;PB8`cu3zx`{!|7uDP`v7qg zr|^9r$G4hum{>8&I~5m+`;4B;cK>P*$G_U~Tsr>M^}>rdHUmCcgg*;>7$C9m_bZ5?5hyl~tpFQaQe&3q^y{8l7?6EO(_IYQ{1~6y( zE_%*B@66c%=BzLC#hSA#vffy2a(`-z&awnq>ywb^a*zf;mt=Has7~dY{ zKO&AWgiY~3#MwrVn;+R3&-;nQs3_(!mHC=|^3?tNU~*HG?U&% zd+HucEn7zn&mm|I_Qzpwa_x~opf+t@=<(TKwaRvCGXQZW8OE&Y;A>*Zsehe*44Q`rDJSLHZNdqqRqTJ(~4?A-ZUi;^@&uze|p@ zpCKmdhv(2mPs7(wOBXR_0d$dK8sEWgd>6jnzij?#@vg5p{QNTf{JW)5-(lCj;_&m! 
z@U!Bp-lZK|tv>XjlZn}T3%f&OsQL2J*7u0fe(vFaJaMxV=QGOFwJ`(z@lOxtJ|1RF z=MZyzkT}%F@rpO+oDuO62jKyoIml%0b>2bxC=BT#w5Ou1zAn7Rv{_u}}7*S0d*+BD^5`GL{!o;qheV=nCdvZ5`xEtql^p^{82A zXaj8;M7o|fE#lmgIl}~9OB{N$H&}Zp(5rR_Jk;{v5n_6}>CAGH2;Q_^L7h=oo zMn<<2hkXheJ%Vf>IzeCPmw3Qpj`n;r{r22#eeDc*U=J}Od$AkE1F~I5Ydx}h86Mb| zY}r$Z%bI!)9?*Jp7d&tZITa6hDStnF_dC|3)iFE}ZOc3p!voQ_jMkxNXvac$K=C{} z$0Ht~?VREJJ9OA>=(36Ev|G__w_u}8z(&cRh0ZfJ%0cwY!4&2j{j%rBR#>sQnMqx0 z4}vL%Q-<_SviRt_F=6r1@n>%c%RV{8`V%d;&GbP!=>%)+ONv{1>r_D!^OH`zm(B+r zWG&2muxHs4Vuf{PPHFMc3}jgR=IWxuFL=WhDOSO^&_6Ty%{Ik4!-F$CJ!`vgzNh@b z#IhRksoo>jI!tVzVw4Y}BWJ)HIxlg|n^Zdkx!1Wj>9&LaJUX07?4)#D;#G-dhR3h6 zTK|yJv$hrc2`;F6`r6-ad4jbeiIR z_Od=jc)6F+YaQ{j;#8%R--DNrK6{g~m71vQ$j@#JD=ty-hs^Z4GK}rL8y=ONBOYzHyNM@-FZ!Lw?!JrIdSRuh z#L|yOzlc^ zJ^PJtGrCEs?N?Z$A(YfAMuF~ zKGFFs?D5wQz%L)bC)44R1MrMu^?mS&?^|M$3ilv4hgB*!uh6_sCw1>>2S*mlarqV{HZKiZyI9W!6i_RQb*W@7edT^(>?doxRg=l>%2W@30qws60T*_+}0 z8)EimqGkSuzlOaT=>p@6xuCrnGnaqXy_uLf>*uWb7rZya`_XH^_GY5zEPCz#6YR~H zJ2L*y?#(!QZ8W_a(Fn7x^=_&$HxSbR+K1-i1@1ctGpXT^F)9!+X)TjP_=tc_7-B`7`g$w9VE3`n?%Pw_W7kOxyVU z#qP~S@u>D@bUx;y_hw@FReLjMy&11`d0Snj*y<%%__aE7klxa!`Dchj0fM^zc$Z( zt&1=8y(`EU9iIDIGyB_L=iZ65f3%in{OHDypzHf~t{NGKE&h*leP!}%HM2jx)fcq2 zmyX}L<OAWTb)WTgp7pURi`-`!&a(klWxo4tpz|!-s?2tu4R)RlwJL+|v*FIO z5mx0^%VX|CYKTi~TE!mXf3rWkfpMCem}YOl-!O!6{hHGFzLlP*bTE>ht2B2L==lRm zca5aKrF73o`kPAkiKOR{)>zeY|62?D${H`lM@+Ti?C0a+?B8;3_9;FO@cA;IYxwl! zv(@q$9~^wylEfV$8sA3lqSPLB5$~QkzRt7L#@Bgv+W0!pP8(n6*=ghJJUeZCooAaY5({%ag`C*xku78h}kD|cIdv@Q;wp&#oyLk*um z=|f!t)bI#Y?N{FL2b8uKDQ|cKO1txwH+%u5&DqKuo`BNcpz`oTb!zd*x&Y~kR%KlZ zw1BdezXPiLos_NoWT^7fC|mjIQ00p#Tlv{gx?^pKDb%%=Lw<##tYgbRqvF z<@SI+MS04vget$9@|0f#Rel}iDZd`7{BxA2{EJZKH&X7&V7Otj)ml|#eN=QQ&vsK* zI&?o}D1QK|d^2Sze-x_xG0IT>1XTHxR^?`&ucB(H^-+F*@&i4U`IkepJ(bzY4~8l~ z)KeK$emGS55gy+2S2V1r4bNF0WoPkhHov_RdOyEWehyUmx%zFeV)OLkkyUS69|f=C z*^7KP47$-%DPGv@ORm^Fr+8$nY&4vp{FBy4!{C*5_}=~;d@`RqQ6DCD;7`FXyR5iU z)fZAc<2>s#;hkNUw{$-DWA$eZP#vTEpAH}Gvf@p-llgDbetzp?@zgG>Q|bKPR?8Uf z*c)|;)oRMQ0RGx#buOJx+{Xg$yXm*cs_4f(u^sW+Rx6=&B)%4{1=m4!f9Ew&tJnZvOHE-t(7-#`1bMuzPEMj7cF0s9JK54!)LLU7qU**Da~50bS>#j z)^go1x(&ZoI`J-r`1F#AcTt(Q5)-qN_YVJqb+qc48$U{B-LCFbHS#cPLq2Q62G-79 zmQ}fdwR2azRavUM52}37s=QbERH*V@t;!+F_k=3nhxZbp8PFu?0Nzu6AXNEm-cx=s zRQaL2r~GiJ@*{XJ4H|-WgO24r<;Ovl&*wemCqk7k;63H8s5l~V3(Y01TR{R}oq-&Eh#v3yzrpW5O__)YxysQA%Gd^BShJem92 z1lC>rs-?~FVlliJgcldUi_P?*H@tWcZP-DZ@1YGlX!DiI)8;Fcr_J}kJ3DCeJ@C#B z+I*$*wE0TqY4c;mSRCQru|n=B$>gIxlprU>&h2WT?e)m%3dTowN*%$kzs{M%*m|nt zi%07FLyWK5w$f)^t@r%IE)>zGI@(xBn`&ue7I_@om6_zX(Z;PItMnTBl0;owyQbJ0 ztLhI@hJA@=ix~%fKZNul#$Y95ARaRI6=fLvl`>2n^s|UEWrJj^PWqXxJpDYy9oIi+t^HsPZGM%35^KsU+?@L;ucy5FHPHWg$zochGiZY1^He zb5E(-o7Gw0(tHh39ozA^&P4L~L*!995nasbP?6=op z<0{@lJd(M=ULL^KZJT$>Hhp3oO`j+)pSjEA_gga}cm8l6*RJ`@nZ|LFWt{_8)nDI^t6G8=j;n@)tFDQ`Rrfo%YUUUIytt|j zL&3{N9^mCRtOY-tXYli9jjM#ETwEoLbuqZg#UOu)xGD%1GWzDU@jXATaJi4z)`N>e_r=gFOpAJ>N2;8FlY^d`0QuZJrC9Azv2B2@W}?f9!iSn5s66_$FN@|52V zRenF^DSrT}d^6=Ke-x_xG0Ic^1XTHxR%Mmtt0)rA%J5Vc31-%M1+s21YZO42)(l8yL-CHZWS16|ATNqZxd*-oaf24_?b21_iHGS;-YuU^QX0?CW@TB7)&!v0L^izB}ouybQfw^2VQ+7sZDwz*YNF zvCH7gKQ&JZ*Qx#t;>v}&>cmfCVU7? 
zxDUI&Uw7ZIs~t=@9!!|Xy}`nShl9d|#QZw8cAe7Lx-JeZ1QWXWP?#_P4+;~i%zIo+ zc!3zuV7^o^6ZWStpuvPaq009Gdtqyy3j-R=H<4d$EeV7~h!m~RU2RL`+0r+@>i=ki??1{D4i4vfWs zIq;nDRWt_NfNg#*{CuvlYe=cIy{v6{ae0aVwaj~Ls;w$Jv;Y7`S zG*0wi08TWxFxA0@%)P;l%)P;lv^^Rp3L^?DM&rc29pJ=fc()^*SnJ@#SuRfen7B|l za4=)IlXebfOn1`GbK$}qa9u1eJQo&i@L8?Gqr!;qFt);o#fcY(5kp|0q$rHI7mPT9 zXA|HtVZ`C2e*$j_!&e|@_3>7vixU?sPyIQ3U(ej+D9_x?Ql37~Ql35wCvHI(i_aD_ z9`*283FBe*smYstYUPnhgCB=Ol^7yz9XNc9cL!2CgA2)j=_|XPG9t6L*_^}DURV;q21+N9ckU@4|4P7)^4H-LYQv6)z>BlMjvp{z7lRr1f*B|B&O4l)6lQ$-V9GG+_d~b;J!O9#8R~KL$A2gM*!U0D&fNF6 zjMRC$(ct;vj-RfPb!My2-{z;QH-5T6lYDLE{HB$&A?1v@+VY{&V1A|lpfo-zrB5o& zxG4QcrSVZI{hreJsFdEPG(IY&8%YaG&4jOXC;jF8hnW0Li~I9`T=<)^y{TvToATXf zKKPgUmY=4`eFp!VXYhZC`wST{&yayS_gRMXYyj`ZIdw=LR319F$a0HAxZd{ZBe~f=~0yqWCobSrnh7n#dsV1-L{(@3pNB?N1&c=-tik$du*TcI%&Mw|BdV&t z&F4EK8s_p`c3m-4IAI~}R{jyF@^7bQR)rp=tVJWLvMEdUSe0Y@zszrxe-$b$P{%kZ zzZ0tb+r6o4G4(7N(QuM?Wb<3tI~IXD9Cbr4D`4sOKuaFLv<6LdsYE5vcG*3FRxl6sr6(%2)nLsPa!yrt&MH z%CF{kUxcoKUJYI6sjMzyY_B6-MIFK*FH^SiuR@itKz*=e%oJUdO+oM)%Wn)56pTGpIrr^%Z0>@-<( zp2f&oU-kn>Ilf)`gAdok9p)ZS-_48j^*Lb`0tUKRz=QrK? zZFk=3&b!_1JWqF9KKQIRxmUtEQ`SzgU;RVYK<4zjRjes(2?IicIDyk4)w%k4(-~9+{k} ze2h%0j7U4;OP82-cuPyv4rH&!@>C8&_G;oim3hkhpvnh5l{1x3g(~0GQ@KcaWOb49 zj;z)otCNt`NyyNoUf2^o!AbmQ;~-?NCPdlDT+LX@RDK*(`FzS$ej-%)0?Je#nO&qj zGFzkkWT^7fA~IWp%+?{Zl2=n-8uj(DTB@-P1|fSj#ngxF)hwhQD?E0+8NW`YBMj|adM#Azt?cH9<)h;q z*p}YDVf<%i*Oxf2T(RB;k0q7O1efW%?cCWR9~=h;&043kmFrmF6@OEQ?yq2d*MGg# zW2>YyZ?%q}?ua+kdPM&>!h30~+4$<#S2^o-HS6_Z)`oo6hQ8Q5)$nXz)~xDyt1?%4 z)$Hn zk;hfakKjFQzUmMZo3DB-?;;MyysxFRSrh;;H&G4UBmw?Ajcm3bsoEd&lxz)@zs^!tJ?$qDa7s*Mr#OJ zCmKSY6Ae|+*ztHdKC>a@<-52O4HwYYs;#qC9;O4io;$ll{v_zB?C(T@LS*OIJDm zs2|4rhW#7Qg78{h2je2#p#Pi8VgBngz!%Ja{rc0#N4})oG^@bif{=p?;E9`=^KJ0N z&CK~Wc;X7J{{zUl_^E&V+RjQIr zYB$rqZM5%Z+UNQbbzjI#%^jF9o^;!I#xEEl9~}Eu#s^o+eBBH#kln7k2lBvE`d>z! z&DYpwZdkP3AMwGhH@;WOa_i5gKJC-zNBQ6iHSXZ3N#LkM)IXnovaT2(T$Aai_0a|U z->^l5o%VqZUq26aij8Z`%01oRHlB8AUrYaOn~!d_l?FRaK)0?{8q9PH7|M-h91nJ? z=XnG-`E56@QP@ei=_>G(!A@YS_rXrWO~Opy;2p&>Hknw)Ks!bn5%IYRBPovWg#1K| zt77z8m3HFzK2+L?_J8^vbNDCt=UQ#i9E=H>Oe8e{w zAkOd%eCgU8J?c)ZYk2A`W}=nmU`hOC=s1MnBS&jvcrviTixm8Rao z&a2A*Aw?S z)ZfMK@AxMJ+#%t{GOlGl0$zO9$aEk93eQ#*gO!TGO2rS2K&Q6`0z5|_1yZ16@qa5n z4yru*sH!-wqN-2v$UqnJ#r&=ZbRpj>{|Hq162?yXrBLOU={+!1@uCq8cTxxTSZx*W zVvp6n%sa}z3RS+2ca+}=RsMCxMfo?O%D+unRzgJpdkdd@WdM7tHr`X2r9Adomh#wV z!?vmoQ7*Ps?O4iF9@{KSd2F-s z%43_2S03A}P#Lglf|Y~?3Im7nIRtW|zGRQV#+(<2q9Q03S0{EN`_(5s=(QRmzA>pId^)Hw?J zGIc5cDpdJ8>Qa6uRQcDbOZhjU%D+un%I}6Mzn{94KLAy}ndjF+k3z499^*Z*R_zH- zWlwOG@K!X|Dg=Agp7c}-b1ieQmW#6#L%VoyJI+!JZ2+I^neys9^Eq-8gkRj>r8>{L zLfvPl$*c2jhV#1t7V_#m8|XaCmb^O820PD&LWz6qNM563XcZf|>|F7}$aBp;#oBfU zcb$o~mN{5UI4gh;?tC&UEVYmI@Iqx*zjv|Ma$~D|N}q&&5}6i`+5#5)G-Nv(ll_}% zpSN_j+9#Q{K7E-NE}IH23rJQ&pRMc*ljY(o3rJ>*|Fk+l80|7Mo}Ehj7}~kCH`HJ> z_A*_J*8fu9u)dDZtO8$|Fq(X45sVfv7;UX%M<{;sTo`Swi_yS}09(1-z&H9Z==(jL}Ukd2>>{u@8wWT@Icjd7x{iWR+A`J%{pyo}D${0g5D z_jgu&b~ke=e0Dc;c`^8`umgOS1usS8v;LiY!|vso@$XoUe+T|@$4B_=hq3q!{+T7b z2LH?wUW0!$PQqym@crg6_w|gI@!!BJ#(x8^82=5t@~Chdyz(f#q4CTqJWO z45#w8QJ%1xuo-h?uvwwOW`29E!Daz_yuoIXd#c>{NY}rmay#;6sZH`>O#+AM4kGc! zEc{>c{edMb&)F|3EXk!!EE{XwkCql)yvbupXR)I*oE zKCthm`>&RXf<^vA6fE+TC|IOS6fCkF`U8A43mhL!jf2Cka$+QPKXX!lVu9heL)_=Q z#PcsN%1^r_7s~uESpbEPmsCUHl_j1@FPgoZTxeH)4`m%#QVnGtFnc>0Q0bBZQ22a1 za=ZjS-<}OcHn+15EI~H64~3$)wzCc_L2qqm9T46sVPCrftiFmnU|(WR=O3zgqY^wAFN`p<6{+7~UQ>DMDG}u(>DWru>?KZ!ad&Vqc3(F>! 
zKSF*9+03$?`_ewwK9(P%-Z^8YHJ};1orFIk2|M|2;s$qE+&2k7-%ae`;Z9H=G!Ytv zCP7o7Y0$3FZqT04UeG?!&p|VweW3%OpN9^FWM4_^btrawFq zUpqb%`3YX~_*xb)mdoOT_z!~k5rX&=g7_7J*t|izuSKjfZLMXjJMtqej*IBq#hsxY z_z&v6JTJB?>l2_0p#kV4&=hElpJ1_mTZR8$zkM01-@XdfZ|k64pgW;GpfUb|S$Y?n zbC%wXhw5D)RPP3%dN&oSce_F_h4zG|L;FO0cJ=rHUcwLX5_|J6G1e1QCU)iol{prw zGRHwxWbtwE+|0WcEtA85`zt!)C!f*Baq3~P%0Vw=d-wcJ{>W?Z` zkvqWQxmbRxKNj)XMf25{&cajkiRF`Tss!Gu3*lpfx9U3JE&a~LM;ncu>?!>(XdUAz zKXD!7DL-)?<0(IJ9pf46C!SA?oqSUzdI#Ps(L3;7iQa+tO7srASHc{~UtGc*#QKX> zhU=S(;54tnX+RAoVrqK;ZgaOvsK?psOnn{&4R9hUI|^-?wiUp^Vq3$oS~gd zZ-i>y-4E_rtyn?Ml)a0MwHoa61qb`cE=%N2U)gE;K8JgTH?XJvC~LOj1lKCfTCH@L zw0u$l_Vi1fHCuN8ClNEKau2fxKg)Zm_zXC^vUH`h2CrfbK7>6N#BLe#5N9eJA59^C z8To1CqY)OUVC}~rvFj9TR1W7FU&4;D(f@~7Tl>P#)fRps_*s6fzJ90=nhXs>FM*~) zyF*z&`}T&ie)hc#%KGWS4=@Cua6ih~oMBb=qnynHpp>(DAe3@8XG1Ax^I$0DY-Szp zM>(5WNBdFEX4cWRGD1948DpU;V;odv`)M5xLrfU1l;pep0es4^z=Ol3@is*LGS zl~Dv$8MC1(<9?{hm;+TAb0cL8U_5tP*zhqvt0~xh^5IRvR;aEz-6zvm-_iEI`i{06 ze;;i({yy3+AIt*%hPD|WA#F20KiXz|ezeW_@MxRy;n6nvtcGx2ME^gq2)(4a7C!{R zM;3FZd5mBe&ii9WtWNL^`wneSLN+o-bFU_RlY{+JPhWDdf9m6**gy3?DE3cBacsu# z)RXUpWBLe(z@r}S!k2xcyH`~{e5f*G$7hA(yE6DJ!`b(G?(}h50Vdi5CNj2rh<#he z%j^|ncWJLUlX@NB)zVns)oOfKw@|;@r2bBGu#nDcCURa=vQ~&6W`^Vm{9|L=$i|l} z(tcw@>0DG2=iy}g{Dfy6$&;Tt6vwEzMfsmxd{YsHZ|WIijejk2Gyz>5jctT$j30_| zHvX+3xQe+q=jZ6J&dz(_hH z_FeL)oe?A6PuNKQs$nh;GBJJacW+F%F2aBO@jyW{J{aMiCmh^k@DI4h;2&_0!9V!B zBKRlOevp?r|{C9F^l>AmJ(( z1G&DbB>3!kd;uNdANddzkL6+@wJ{n4>D-hsP!J4M5eKHjR;>4cMLN^BPEbEI5tNbD&>@4uM_`{Sx$A=rz#mp(C;7 zz(CKlhrhzXKxex*#N4MrTZMtv(e66SxkrQfkM+yd$xaGc5!-VtRCdxhsO+SC{XXKC zE79+YX@jw+p!(e-Q2nk1YV0Yfv8VK2yI-zI?^Y4JsCQq6>fKkNdbbX$cXvYd?(0z5 zVsAoai@i-*_~r6dhK2p7GUA~s1KTuTWni1;s|;+@e3gN1ny)gjO^yACZJOVodL}(6mFidsRUMB&RR^|c(2O0_j2%>UV2cJFTQsOTR+3j8tD&j`TQsOTutkGc zLDxfvL7$8G<;+>XDr|G`lVTo=z)y;KECN57m`CuFiFpJ+Ddw>V{G^!2BJh)99*e+F zig_#oKbe?E@RNyo1V5RWNAQzk9*e+Eif=SAkS9EqvFE;YE>SU&I{W3GRTO{rzX0A` zp)*>nBXwqutn>1ye(&O|9~pbxQ@R>jV#XidG~*97;}13C4>jWtHRFFe@9IpG{7*$H z;}i0*%5rhnJkvgJ>0D^O%C_K5wJ#p3_W7V{-zVU0ou88bDcg)6JgM@>LRJ1asLIcW zehNIUb5`;{1g=h>(7J3#z*0>3f7w-I85a(VZDD0TS8|}f~+~ItiSFZ7Vg~@ zuK%SZ3Uejl$6A4%B+Qi-hrh$Zcheb)uGaHe*z#$9@;;s?LxWJ(8Tqf$SZCzFN@Jao z|0<1jM*gcbY|~U?LOhfwOyZ%u>H$#7sU8TW9K{uSD5rWblyVeT=%F0N6?!N~afNMV zgm|Vh#zIxbIH=0VhpLQ;P?b>tRT+0cRmPoBWlZLo%9sXK8PlOEqX?=pWn(m=fzAZU|bf#-B&Oj6ZJx6q`inv|Rt3@k3#^= zsZjc4?w_FFbv?<`r@B7vc&x&~V_%^BU>x63X0S7qGF@NQOOCI~#2jR>)~d__P?gD8 zn)@oU^?PslC2-j)`1EYJ%-m(cn3{Vl7*qMOI`UQBNPAp-Hr3aDPl=1q#z*5b{8Js_ zGu>CR7<^{p3mIcSWp;$eN}}-C1jbn7FFY2D#q?bGBo>nipETe%ip5;9*i2{5_Bit` z{H5|P7Jr@YkLuKR-b%ikbDg)!fghrMP4Yu^Z6>v7r}!CU9KpTogh zTYm5OqsGV|pF(>jCK67Wa)!tchMt+KC;;4S}y31#{puqJH%nc$>G@KiIl z{vmAqMaqMzj$q^O!M5LzO)q~_GkW`$V1gY0TOB2TkIqvO6F3OpQzO{wD)7Vo$jkfa zxgB89-RyN8Vee}Xch=~>!|(IGd{2!r=ctmd7EdKK%|srbUAklOZKY|K z(!Wuf_9^{qrD>niZz)YXm3~uc+N<=hNSnAo*4q8vgz{47>{JjewkX{y*zvIbCo-+P z5iC}NEH;A0YLLZ7uo(EQvJos+gM2q4|24>WBl2INuFo#Z~Mt zw#>pWr86#yRc%DJJz%ODY=zC78_8o#YOojb7?T?8g*?Wj274ioF{!~`$YV@uu+8!q zlNxN}JjSF3+c=Lgsp$`8OlmHNGA261kjI$l3_~7cQgaoQF{!~e&SOk!u7ffrHKU-6 ziF{zc^>(#KSrNa|Q{Ha)le*zg>V`k58}~4FvyaDhW3SX>*E3#K+zX^OYTndF&70b& zc~cuTZ)&6FO>NY?sg0U9wNdk?Hfr9~M$MbrsCiQxHE(L8=1pzXyj62I;Ud*B(W+de zItrkw;|{3mxD%>6CPP)nG^pyB4pkjRP}MOTsygn6s*X8O)iGCk-e>NYo2hc2bk4Lq z1y#8#p(=MZROPOLs@!!@mAf9Qa-V~$+!vuLcO&JVY`>c@Pi5`4B4=IpLsixRsLE=F zs;r|>m30iNvQ9u%)=7(d1AXQmxj|<9oO3SOP`x+UIfpV7s`rLN_1=in?0j1*HdinB#h-TVVMGhVLYXO2Ym-zG7+jZ zQU49K3M|vYp6_04%f8$}5%DP{G=O=gp@Ub_4ICjTg#bb-7N+^I)(ML2|YguJukiY80*arANRSj-s}iMS#Pv& z{TS7;EJYi?*;< zYR~;*+nz#QCDgT)a&~4|l}jmS=Kv^UuyY`kG1!?6rGGo=-%|Rwlm0EGA3N#aQu?uz z{(o26hT$SY^ch( 
zAF49uK-G@9kuqefK8udhoNCU6O{%bs=VKenMs%^s6t8dCQs%rI8Gkev{((2LuuWmjG+>tw?D%L3jgKaQc((5@FN8N0zm`Y*nlQk4Fu-D-;fvU*b2IWuEHHN#CX}Z- z{CWj#P`u9-v_bJc)!ZMr1p9Y8bDQZ&=Il^%TWlJbY1d2e(INJFWW(3VPNAJwF#n1n zx`KWwhA6hoTl78szD3{D?^{$S{k}zY(r=w9+s~LcGv=X$2tH`0U$TJ)@viK_LA)z_ zFo$-#@+Wz-HHNg`))>-$n>ntfE}J>l7~9OT#@J?#HO9hpx_{7>UCG~gy)&2?cD*wc zs&|I7rt*$#3(lgJ3g7ib*G`f?=l;P#{6@A#CcoKsr{*r=cUc7v@YBW`#yrHBtBr$b zqimsApOPo){8MIfLfiSLLFPWf1T&ACd-wd4+wVkY?#44unauUp4F3NW8$FNt(V3?S z><6C0jtt^gs$q^|eOygop!RsXap04iz$rI^S8f2ejK!ZcCVXm)<4Hekh4!>T^d8qC1_aXHBt>8ZQJXBHCd8iiBE#RfE zA&=jOhtG8$>X36Din%f8q0BiZ#!+XT%=sp;PTRRBu#VEv=b_9wDX@;7oAXm(9i`2= zs+jXoiwy2*KM%DIY?0TWJLTB}+Y8@k#r;b}cZ`NgcRU1@?$`{K?ubK&NOz2eN_RX2 zg|D+VL*eVJICwS#+8H_k>W2=5CPTBKmq6k1tnN^FJgYYp9?!ZA3Xhv}PuTFI9o+K* zbGyMg_cYWw_jJ8;?rB8SxuDvOsW$4|Qxd$Yb0)F&pknJf@+~Df7^n!o z^`isgz(6*At7AXh_8`af&+Y`Je|932{@F=T`e&y>>7U&VO8@L$Q2J+o4%!de7y1S0 z=b?k3nb0esInXachd{4}ehGRl^cv{((2-WD9uv*uK7)|{%%np3q|bE-CLPSs}3soJbLRhu=Z zYP054ZPuKs&6-oSxq&;|G_SP*o{fX1K=YwppcA1zpasxNp?5&jp?5;TAGMRAV42!! zP;f-;bSU_vwg?I~sGSW38`Rzp1sl}Pfr1Tc=gL;`g>BWbly<0&Wl+`eBvf@g1yvm@ zp{ipwRCTO@s*ZI~)v+F`I-Y~7ju)Y-W25Yz2wo~wxo@%$r*hwhs@&aBmAfCRat}aN zZZlNn9)+shV^EcQ0;+OP%7^Bw7_TxiIE$q+20&HDK&Z+9M~zn*;HdE`0~|G8Wq_l` zs|;|IdzWCA88^;&nQ?>az1dK`cRy6`fvK|e-rV-P1Ouk;&e@dbp!yy7DxlwiucGe~ zbkBKpBwy}1uP^+W`A|G~N@t{;dj$W~{HS{bRlj?W;AhR7x<^oFsG{!?{IvO0_Xw&E z_a4Dd%Covh(Am>b_My249{bST0f_CVvrPA5`{^uGM|awp^E!9nJJE0M zl+KD^wOx95j#ar!&*u`i0B(!*fo*rrAjt>j-r-IRg|WG{H)zgcS@>44iRA+`J{TVq z8(6+C1IMYaJhsc)A2WR1B43>^_bUXO`o`5iX1M z;l%oGG#-uM*;wC=@UK^Lz&(NT8O>%6bQj@l=0JB5?smqu*zx0JgEeCD#o1!$_Bm&Y z*3!!?G1z;*DS{DAA|yL^l5=(~K2>)=267T3Xl@-41||KwX-haAYa*Z{WNLf@oATpyvi zlMopy(!0n|k=`8&)w{#7J$TpHY1o$yV9Z>w^?Wd<>nF_O_p;Nn_`SLBp5JdnUR=D{ z|4_=Xa^}SK8SXola`iOw?mhASn^T58P5xXs&HNV^I#U0|RoFuqZS-sY2jk6sf41wB zBKB_yc&HQ{lyY+P+AMYvioc zF;V<5ABf_Ic^CRTKB{j1ZuSeb^EmbN;O>PNX=l&*zOtVFo^~bey@qrze=qJ5iEsMa zPoG%6?5F>}{I#Dxx%}PrKUl7{p$Tku9DmLS_~#V6{Nh(b;s3fjH~iGMLgB}fbHh{O zLalct=jP1x=Z2fCQ0qH=a&z`~9i4MH7;1fUwiT|<_JrS>6Bi~vd()Po{MYz!Ytzq1 zSet%5B0gL*&}x184r|jZcUqf%6YL!B$sXP2;R#{?@p-R3ALr++x}W>K{Pw$+U$~+4 zjZ5eU|J!kjI6L`d8{&w204ttgzfzcJ5p5}S+H*LnE!tT=o${%O6N&k^3Q$);^1JYn(ax2d=8h|VX4TEE8U z1HRY3j2B&^an|^1zhwb(tZ}{^8J4d(0V+OGUtfRS3co>L_1->y7u{#I;m3zg4V4~K zeK(QjuEB7X&-&=)uHGCy`wDG1h>a0W(EeI{)1&OGX>WM#`i0A-pUqw{JgoBtW-k~U zGD~O2*;i|3T`6JhoW=L-$*gzwf_Fytg5%4RsrQzVq43}vLgDQpE4*W@C+zO2X+KT- zYHC9vHj2BaHjBQ`N2iTvjt?`}>!9eoR`r!Vwbp9=uLo_`{hU?kUj4S5e&_LCMQPWIb~4PQ1~{+CS8oMD077KKdyiUEd0( zZ5_{CxO-0vqxPN-QNMhciX+fIQ!`Y3xX}A{*mpXs|GZ645vvSNZr4-joi;s%o>?5F zr=*|CrJK-2*aF`(dI|ry^pf^!&aIP@+I3Plondu!5<1A}By^C`NzxtYqIZ31pNc-p z!qz*^JV`dQ81u@Rq40nITW`~orF6xG=&A}wSFOdb;OZ(@2dK^W&~~@Id!pL9m$phb?1<<5yxJPyDQ8R9&N(~B zMs(NaR6lyGU3clM$*&!~mHb{rZ#7c)N=I)!=8Tbam+EuJrDlYqyE+(;W#})BiL1vn zHqs&1(RMwi_N(1)TP449Xh-(pQw7iS-S_!?gU<Rt&DT}P zj^-(jx$RqDdN6h@)SjO;wd=IoNlT|~PK{%HB0BBwX-^BfX+!6T zPFsXdOXPoTT%9I8CR@q%b)}=z0XM9Q={UyD&ZJ-r)_10~U&I&Ns7oxKw zXVD{j!@?@iJ$AtxvQggbH3dCw`sIJ)jvv4alXQ}r?91# zM&AJ^`$~Gta_uT?FQd0?PgrNzj6H?Uy9pf@WS(SG9lD$~)YEQPz2tH1Ds+@^h-+8< z414K4Ul;p1boMgz-}}C<_H*bBru}4*P*9cGz-JbYK}& z>%{%oQa}6Y50?M(r^}ar@YAy8jkIM_8ob56`U!O7apt%$?fe0H?4jh*Ic4apx!6|I zx{uDehx89Shr;F9R<+lL!n>}{4Znp=wFR5%*Vt6m!@c3nA#~SR^wc=?SH88WW^AYM zZ$@+u_mV9&G$A|)TgtVgWK+GtN4C^)Y^k0J{0CYBXBOa_Q`k7o*ix;muZ8gDqR#wR z0qwDAgW56+`V&4o!2q#mvue;A@@Yojv$#3v%XztLVvf$H)eh|(-j3c>n{@8%H}JOl zZDCuf-g@l%eT>T(kMHV5*jKZztzPr&jn!+a{ncw~hDP*X?eMtpE}u6nS>MaNjro1T z)r-(6g?uXbZ095WQ$IXnN9jyk*3nZ1l4IFUPtc$AVB|mj)UToF7WGU1qK^9H>aecT zLDbjLZ+_W9zhAekq0xO87Wr4&qIS!!iu^C2Z`f}7Tb*DH`)BGGpOj-;NgvthB^Lun 
z>m&VMeL0JrwREy`ZgUELHR%o6Sw=^7U}v38H@S9JzN4GgLh)N-XBCgc587I-^J4Tt zaTqX z;I;1UHkRxw`IcjCE5#zpuG+`C;@VW|#}0TwIB$pb*z$KAocE6BJIh^Nlmo{5F}f(h zpJ4xtvhPFZbis$@>Y{G`c3sq+v~$X_w)nq+FR~VGrH&4`hQ=3ANuFVj{Zpp z&#gu89QS(5Wj_s~El(mh#s1u!1<1trl0zSTll-*Ha&x}YdvwlyQhbkZy6q^;;AViPtI zAH8Wuw$=KJ{LZsvH@#kvFe}CArW7z<3K%bi|MKOom$Tr!I>&aBZZdXERGXvQ>$X*G z+<^_Cc;zZze9p^#I)V3S8+z%Lo{88{{v5>^zYRZYEUs{Ho^Yjbo?FM)o$)w6xq40I z%<477n1p{d?aY45PLhrE=Jbe;63$Z@_wjibyXQIRj(jWp^NF5tI^*61#(RQiZT1lT zQ9oo0RUA@#|%hkohwN$&~UMfXiQ=DjFv_c&=}mi~I^Z(L485Uhi3_Eq|v z>tMU{=^g2uCdMQd+qKz2$bJSkknERS*+SSaRq>u#=d*)cT@z$Jh3Sm0amq5~(9gq; zu5mG4tgg}dQCHV=A}u|%&9YkQXJrHbNuj%Xra1a0%h*Qe*Ef>S@r*gK@~z;-){byo zJ>wp&XUgeA`~Oeeh`zW1y)hR3F$O&{nwWuH&Y*hC8Pt>a{5Q`$d>6-Wy!SJ+}tz$F1q7(>>u^zbw}51 zwU#Y^ow?VZlG3iOnZO>e^v4tEkME;D{+<75e-gd(9A(^(ZmC57XpiWd>=FGJZTT|k zZ~1%M|4o}7ApL;%2^F&_$hm6U2YFm zZ0%!J>>TTDS1Y`%Y(X)yPZ1jXEC_YrhBw>ikEC_REuT z6~DoDQMs~R6xX5tBz+LET>{*BvzRvSwtQE+?R49<4FA7ma^Yr?qj@l{N{T}ak=XV`_|9S`g=_guy9}zlWEm%l)itLj@)-yMjN_wFPK6Lhb&agwogXhx)nq%1#wam9< zyq~9P=z-6(Kd+mMKf)d4w~%PA+8>f{V|I&GI(EmTy6G;bFcF0_QwV7 z?-=_-x<-1TBYiM|_Ddh=tomZ=c5#z*wrhX5F-6C29gEzLLHfn+|w<`D<-D;PEycaELQ$x1$5@M+bZj z9q@Jd_8YQ2c>gNwju+7Z+A|)mJ!9&XKDb8u039HGa4q^kyz%%?A7B0)_Qo&K1IN(= z#@=B6cx#oX^aOf9>#X#EaKJU_f&1cmvB%pBJfp2{^ zl&Q3^!u=0bjd%boQrtO|^Dy%A1p46VOF}t6V*OlyUs_L%*qnMT`vTXoCXUE?yZEvbX|%~LE4m!FjE1?_h8v1|wUyB@ zcLaM2?PWAj#xLqdRR}xPz?bSLeDM4l$yb$USG{9f)V|04VzhrUIOpGJpIhfL+W(7u z;)&zqX=A6H`_RF!^ZziC&E4P}{Z`-ir=7A#q(35iD3Sh4hsb7d+b)|y>}0m_>uNKl&eT@wy^8svEh8!c0ug8RP4B}{NAx6X4AhZ zQG3K4=?&JgGxWxI_E7zf-k5+c(OnC%dZN(T2hNMy2cE##O>*?amDhwam9D4XgTW}1 zk$2*PGk3C|E8OAcbq1@7{bA{hgS1z53A^as@r*@wPNbX)%HgiR*1BvfQ+vYF9ZKtc zcb_HqBKKL;Cs!8;gLI@jRIhZ0ViBcZTz}lz^u&k%x)I&sL5IX)J9yh{2fG8?!QDey z=iIlUy_5ySxVpAPtj^G$ul7wYL}w&nH^lCj9AjVPdGy2cl=Hr?oBf~Yhc`%n;OlPx zBl_h8`lS{9@*(==xPwuIPnMxCo=0E&3g44-MiX*>A^O6_3a-9@myEtBVJv6DE3>f~ zz86_S5!uuPl+;IpOW;Qd`kL{2VS`cIz1AeKc~Pi_P$|u@^4HXT1l%j`k{=oPE}w?5{8{rT>bZAl>j-6s~Y}!A$H2 zoslHIw6yWMxUlSqMt;9L%Ny2soy|w>;)%uBDmoi^B|N*Hx)g^zn7jX1QD0x4_w{@1 zA5!mB(*6BjJ7RbA#O_Eic8B&-zk!~RJ@MX#Wy`zblm62B?=Sxbdl^PI;Q!f89L6Ey zg5UM_EY}^;TaRL2ut(?G7s45N#=eO7khSl5Tzj9+o~QJN_B{__o5-e+zL=66%A9?D zZsxpy8=d*U%rVB+n8{o(V!f5G;vsw!-$94`6dm$I_6^tHI40*k(tFV-Oyv&$<4{BSZ-IF>!b=*T z9FO(<6QmilUAi~>gc(PTr&WIV;nCsxmWXbuWA9Wtw92tnV#h)Di1vNuliHlxIj0e9 zbszI4zq)it@sW1BMS4m$OuOCEKAzH3tKfh2IbyerX|r2Yhu+ctZ9`ODZXNeEx9h1c zq@|~}bdAHVi|DC;rl0%RW8ZQfU6p0xjN&;b(ypsq8^!f!UyPkn@7O8QM?-Q$nO9zu zYit!i{g?ulnan;3cMD|h81Bie_UXSLgXPja(a)vx8u(5=rQgvH*)764vRhpJCEa3l zmsLD6k-7HA760AM8y;Q0)k?Ibv!-6;-baA)Ts@S99+F)$L*>QjF4tzMpnv*rq7$@F zdg`MG&V3&IynCyh58e~Nf6f0|>n`HRo^YqK@xFk~d5}`YJIS_NNPFb7S$64z7sDjb@b0f&LEdE91^Idk61&<@a9%++&EIo(a5`rI0-VeK!0RbL&Rq6HKU~A- zTkP@ufzMph@W{M<60Hq-Hbm*9NP4P|bXp|+uF~Bi z>HGAXUXk=N(!ZL_e&*+_4R1~NgjL4#Jp0Y;xbSZWN9=cHI!dmb_3G=0RKiyTig!vM-t>E{nPiyeI#JVK& z`&;1&`C+-!qjdqFr}?PNeT+{Y^Sh66FNBVwzj{`pG<{XNf;4uNiQCyqzu~D}?s>o> z&TgsQTs~U95FWWL-e>OoZlZ4WDWLu^wn6%_n09Ede^z3W?MDaZ)8<9Uh1yZRYSHq= z^t+Jn3ad+&2l=h|_-@*%JyZQ=J#8t3N7vKVJm~fj3)ZMDZp_{~=+hgjXXh-XjIp7+oNkE088 zr|5m1BmWC0eqGF5=~*LorF4LJ@d$G)9&Un%UB2BGJn=%451HY6#jiG#E+H*GJ48B~ zzp{|YbLj$a^y#qXSCLp#xHxXX$`cetR|@Ao+hUM*cO9YJ=pziFTA2U2}H%PeuoQ68U#^ zw3#dTEX1CctE-!(ci=NW&sdv{PCP`PrLXi%I^e{lO<)-hdlzwSdl%#JNy_gLz23We z(yaGRj7Adn`q|d|CGi)w-kbFU-Tp2%*LuB_8v;`4RICZRW8KtCSgZp;_Z#i_*0 zN^bOQh|703Nov33Nj#M9wOSryJs95e#0w>qB^z`i-?=*OHl@*RdOk*Jbd%CIDGh#5`gWzk z4@ws(?c6W>_oROZ&er^{Wqv;Zo9^LU#k-wCo8sa^m7P0>Hf2LU51q&V7fg!}Z8{0v z2UWYGb<+d!{^CU%>P9ly+T}-o@Hb$@7&wR~hf3ztVd|zQ0fJUm8g- zQ#w78ex7s>_DE#!;#;VEH`MpCpT`sH!I_V(q_^S^(0Pw%NWX$l;J2LhSV;QiKE%mp 
zSd~*rZy7*52!7Skq&MSd_%&xd`jY0H)Fuo17H2%z7d7#u!YwDfJ<5e$im)}bN0Hv& z+7QIfRsC&sJN3GCy7jqrx%H@y1C-G-9UYr)wH&~fvn?yNYQ~)5#ADi%J?U?J+;T1M z2-`*37ybU+&Y7Dp=DoR{GJkdeQ`J;3FZmu^|`0R}K&W+2IKe_GsTu-L0{9G&Z zJJ1rdh6T+U#(vp`-S0-$F!5U>Y5iwRf30EgS{3=rv8TmrHO_Zett1bhtt3CRiZpyy zPP&Nqp4elRj--wcum!)5o%lZcN8M9=!!{tNOOZ+C*MV&wMK&MB2MyS>lZ>5%JrQTM zWL7U)zBF*+g+<7yVs;mSD{a;<;YS-=$hALYb4lKsxQir--`;{=cov;-q;IFv%aOhQ z$le&}XefI#x_^JuL(pQV>e+?7WU^KWr}wj8TT{3hZoeYebL{+ z55G_ze$d@zOW=ocd^h76Q`x!covjU%@nIBVlWSaNl3vI68uOAVJY(GTOt$ma*@II) z;H=r@np?`#8t>{^>EN(G(avK2X8ix`oeO+a)wTc6naN}FWFi3q;^c+E070k^u+?S~ z5EWuVB`Vt9CLtm!CStLztw<8U2M#Dv+gj-@prTG-6e}q7Y8ycD5k$Elw)grkL|zHu z6?sNM{@-=xoZ)2h1cG4i%;$4H`@CkKz1Lok-(Gv|y~$tj*vthppA-Eo`yGp)!06A? z=a>D_-?@Y|iB#U5O}mWb+p}paS;z2WXjO-AQDQeW+CyVme@N$??m?#uhWeGhc1eH6K4-(r z9`O}W2bo7Ad~D$Lzf)%6WBjP)M!keDa@hY%%IH#+cSb@7`Nkf6wepSG@QYDr`M>n% zWuD)wJR6ChK>mBfwwzH8H zySin1Zbdpc42>ZfQ=%@{Axo*G!sY)=i$XSSzu&#fH|(jL`|ar&*s~VxCH?vr zXlu=0o<(~t^IQG)6kcD*e8z=?Ld%~(_sqZZfuZZLtChN4-2(qsA}@j$D&<_a+wzVrhwsB8lEaZRh%DMi(7v)_3hwYxCzq_4#@9FQpC+G4mdixjp z^Bvq6XhQIOJdIr{VF(Cv*X)N_Spr`iamc^?wN3{*z_d1XTrCdZD)UAGtL$J zo-g-IcvtLvq}(&%-ma9xZ0iqW>;Hc`pX6Bz2$7vnX(ZK5{;NWXXFecxWGEc1yv4cCc^>{3!PZ%Q<*m&I^fCx?jiZ|9~!q z&{lN!=d^Do=c4~N1<0o0vJxGy%YTgbjdw+cKQqbiPW*uf*gxk4yf1kDzmT2D;C0HN z;q^aaGymtV$wRM<>fuhZCT?mePvHT<>rI}U@Vft*j@Ox+s^N9B53)pL6)Y}3NTJCt z&dol^NaC2KFDUVRW}GhmzvgiI^|tRnPA>?+=`wfH!097Re)YfWa(T9KdUJ6;Ey-e? zE{o0iNHtl!udyr^F{UQ680afCg%iE(o-tQXa#G%DaXQ-K5)4d2ZOy z&vhFraw#^{toKANOOZvh{uBA^$Gt#37|Fd^{C`0UHdK7EBA5S;HqH7^rbr~9yjvXj~JJU#M|8&Vs%F}lx(PFeTE|DHs7UM-A=A; ztfNuWNA-L_Y|kY82lugl=U!{CiaWsu;h`ZccUXF(O%18KlV{gO+uT;x#99h=dWH2Z zl!YUXha*4Y^In(C_>@Z_-pf|OoYMM_BbD6iSdT%|^Z1Usu#V)-HoIHazqkedsihx1 zmh@J}S!;=_yN&aoG6qx2yyl;Aet`LdwfG|z+rr(0_(l%)_ci!EhrOa+V{M=(t)$4r zli{Sy#kyCnk)eC#nzey)vB+Vx)Ab06*cnynLbExplh38|Vi)jroA!?-Z9i}!u?(1ryvpZ3KO$CIg7 zPx7~biJph{7S?5Y9@-QCvf_DY|GZp7d+}vH5ACPPHMEyL&-2iJoLobD8~oxGyv-rw z>6gsy*vi^sJK%|H zEAy65fL7PS6BBe=Wh!xQV%aoW$y&yO4{KOc?lSaP1T+vnbQAL(32o}&u_*cxyP-!B zH1I+XFMK$V^L6lg@zmJ4u`#jkb@2MV&|?YoP+l5wd_QHs0D8nxKYYk5;;7&2(1W<3 z6|X~&`{WvWNPV?>x-|SO>tdBloXR@Y?#`p0F6#Lc>M3J`>0m^O4W0kM;A1XQ*_)>t zy!|D7k-92xc`Efxg%?w)=V0cpW`-!Q4~HM5k0rRj2HuGe^Tq2_5o0KAzl(ZJq%ISw z*CJ$U0Ozx)*F96C=SIiShoxRc)azBsiJyAT7UWp$U?=J!GS-QDh>S@+7Q*+!(=tz7 ze6X{aGhwvVCDdUeWj+CRk@%tc4_tEW-fvI5Df@lyXHDsP=wHp++v=*s*WYZ=;bIRm9f6egk;AxwiL%go+aBZcBouS|0#ug1#c%iroElQ zeI=~~Jvj>6N8YSNzieSt|tnWlREVSc&`?nDzQS3qEkh;_KCI) zc@VqBKL#7 zqOV60hc9#=0Il~x>tg7;4%vLw7VEx*^L@zYlT$m*y(FfSdmr*S2U^ErvxL^7r<0-G z0%(oDV#NYzEj~Er2x$9PFM!rUOR-4{(bGah#=pwf6^MR{(`e{|h6RS6raYpjQ=nlA zv9=k|@cC%9L-{xZ8m24QJ!n`B?Id=7E4H9FG#mj9dqcw!&@frCy9;$YAoG314ha3; zxB2V>pB%jj^oCTb_b_k=TY5>`OBIWw&6XppV!WnM>Uf_Qi~=N40^g*J4)$Th}r- z=B_Exntj=C4XKj#UhlxpWPwe<&vOJnPu}+14Te1-hG_jPwX@s6&}rzM>ug~@9YkH2 zTlL?9pKk&m-;A9R3@uoCtd5~?<@`2m$*bfqv1i{RgMz1}zcz@r4aCqg#wQs1653iY zbPo1GFtmo5(LI;QHM&Q`%;=s@a-F1O=3Y8x#-8lMUI>QPFf;b#6S>BoyhfV{hJH=1 zb$cTG)Es_Zreo-5zz6>d7p$a~?}YE>=@_~Ud(t1=q+w|G_o98Zv2ZIiIY#{SH^|D9 zVCZ{SCoX@Ia^Cx>vK$#H|C3;7tFI}ZLAU9al;N7eOD zGV~DrlMFpX|EO{ey;Zq}-UaeL^e&M1p|{|#!O%rw%M9Esa;V|SGM^m~Ijn>(8OWhV zAJO&TXM;W#%Dq*yhcOis&?E1oM<$?0-bassCDFy5*sIxcl0M9h9Ely3{Q`X$I-){o zGv-Jqa5TPm=w8Q|v(RH={X-ixT12Ch;pa;D*-pJqDQmGX?tka9$5l;l zd$ZAT(O}xe)GwMkEvA0^_%7q<$F=8_@#I{U)_vfu_2utk%tCl|E0{LQglVN8AKeV5 z-A%t<^v*r#8?8@&sp=6-yMy{>!>gj-&AMhy67vvDx+VslX6~pFahiA$|T7`XbWjPl4Xj z=TCv&(&yKF0<@pz6VP>6GWuKemtfU}r9ON@ zBR8PC4xqa>P>uuWF4664*gt-=>eKDRptqGc+yZdf3(@S|-GTj!0&rO#dQs*q$l42r zZ=t%s%d?9;Vk6>IO)vJ>aaVu(nzN>;a|NUB4vnsgW<0S78VJ1&--||X+Q=?)fiAI& 
zT%b$r=*l=~EB*w#@N$5zoJHSxUx80w&PNY_YSNdCi`GAm{*iUa67eex;9STT%fC(zHEP5N2Z9FIjmYxW1)iv5u_o-#VPz6+?Y zlwl9HQv6Nr!kDeB`=;yqhEa^UXnh|1wtL{0V(h@Z&`Eqx!uZJ z5gt;-dJ0uCc34Zyu|_BS5)-kj+wlz@pkDx9sF!(6ce4+Ww1=#LBl`W9d!}sIq;#p8 z$$HC2tPxd5!PSySAM&~n9vrOV(}D*FhmuB;Mv%sm#*kh>8b{idvlU_{PkJL#zkTi|-2c&~ZuOPjg^oOKZlKz@9|!{%b_*BbGKRN{?*pQ{l!i3 zbAM**>6ZH`-0uMmetO`c4fk0tbYDe%f2>~U{=%xP7)+hlhbt>?GOC~FUar?R^^Ui zTaZbKf80ge39Y3awfz#2mt=eo1#!xd3g|zojPFWYp|_*?4*Moo?c<%j*#3RsF5!nv z?0q@>kc4k?8|_?Adv6c7RgH+TRl$())6tcw7JLWj?9?z)8)+nIG-)iUo%90Ic+#$< z38Xzpdy+b!!!*_t03T|26Mg~*)#c+ueN?s8$^4`fV9nj=>k}g5a*a(A`loh@c6aU( zJ-5VS)#yLHi)C&qx_JY8hRD2Exi9qZOug>M7EM>;-7>y@1#S2l!LiaSvw-?%90!`_O{J&~`MsWf1Z) z8ofOT`54V!1%r@}(d<<)2>Ec5kI-$TUtdRSxbjmq8a!#!@U7_UkKhNvx1z7_G3o2T z{I}@%*2Y>jQfB(Onr;woivfX=N#)9D;E_ zLBF2Rb*S7EjGKcFy@dMZP|r)KpI}_U*z$Y;yewGTr}J8$%h}p|Ohe~IU2sz86`}9M zPMUR|*n`&e-MVJ<-N)$Bw)I`?_d?%QA#?53cOQbI41M>uN#9wxf2#U!FuL)#mN<7t zlq+p8KCzC(3+Tr*(WN87uq)xE)6u00elqbN2I|tS$TItO4$0`I4Ef5WOWCJRlj*%Y z{}?^G4n4XLS@~4grNU=tPmdC_jtuV8^{5Nqrs&Z{v~7FzsI;r0N8d2%QEAuq>d_Or z9^DF_cv=A~(>F=AbgVC4u(IgryP2aa^H4-bA38-HZM18kj+S;Us6VNr>%f(ojuyXW zsIBUC{A)MBr^VDye46`dTj5)2S37)=i7rb)r)8qEQ_yLd=^y9R_$BbAtY?gLS>QJobTIBre%yz%4LgvbW9%qs zq2WeoF$-GEffiYeH_EuztI$I1gYfzw?iu*nunRJ#Cw9u7DfNxg>TB4BdTc{FzFqNu z$UVW=KfyK#Hv9?n6l^H?x}*COD)p4_Ee2mmzoK2X#%g6<?2{|>)G@nAH&{U56$|iwyK$=14(7Coa>qQSj!sm_d&njV36yrpo`q-L*ebJrdh#;BkqID8%0)Hdy+?g3G@IukF5s zHUA}6f_2VD5(>>zb#cQ-((4M3$`O(Vmb9UOO4zS_(yC{kd%~I^>ygyIyYiXm zBwb0`$qMEN^A%eYsy^p9agh%zCH>}-V@b$fveL~h?K>Sg?ba#5edKG#eEYLrzS@no zB)hwxP#DXHUhhsxn7cbw=o#V`o}Z_7uXDoJyZgw!Zf+SPk^6gDABs2t`jydjTX`PM zzN2%{VZFeaj5*O)>dKl>Ju0drOT1ezShaX%M7no%+_c5`Y3mEXM(IjOT|VHW8kMCzN*fj;-9R}|fC~8$IaX`tc)jf=38@k|O_<25Y z9y{O};mK=R-)0Q!1N7iKxybfXMOl%Hu3SnVI+yaZHu#EM%D$Ac=dz#HQv5%;?2ok+ z|4%OaV=cx1lgs{COY#4Rzh@-6S8QJLeDO0|+_E+qaV_hmA5n?Enui|!edQC+J%gOj z0}KCQmG8ViGM}JES1se1M=a}N+Iap0mmSL|Jw#jl9XeZS>p$XGx$>Km4PUF>-9L=z z!TMXYJ#`+75A;FKAGCIJOP{M2jN6^HKd+4F?jFLuqe=pEaJy-3E4;YrtB0O?i{lv! 
z&xku}VBH-xs>EyUylQcBRJu2{^R&gp_-JkWcT2arBzW<6@a{8w>t;3H{W*Q(<@7(g zMJX#b(Le6mNm(JWKw960dP&>L`WZswlYNd*&96SnWX~qjM;Ci$Zd1lVs-C1x@ljUo zr%mmAQ^q)@@0UTFrf6+yac8O)_g30;25nkOn{KB~xADCZ%-fQ7mGRIqv?+7=R*a!d zpQcU6(56q*rekQ+r)kp}E@?Bnug&uL1~MytwuO3|mC|NU(^m1cO9?jN(CTNNlj~8m z%Tl>VA7(e_r4L+sY(D9)zJ25Ijr7Il>FxFV&}jE2Wb<>it9vN(-j`9AA;hFKAiq~y zySUS>30hkn=J^#7UFi#Uac|<;W=oekEB(b^9(&>OWtLF+-&4Qmm~82aELvD!O!dp6 zN_$ypFX{7%EY7oZt#i>{#A2_X%=d1>=lv$_^#`T1`!IcB)^J~Oh`#V-Xo|1l^$`B^ zH`+>QXV}s+$S||4t7=;}hi9Nd>yLKJ+?ON9K9pO1)wQB2qleTI1a0s+*_l8(JV)wT7v3ksQ zPw;}-<~3*Uq`qz0JF#_pSVPpXb#Cfu*txyfxpmmKz1Y9QKgu9B=cJw6i=7*&qL;CO z$E>kcd$D!@YQffJW9tTD>$0(R1F?14*t&t(x@>ISKx|z$wr(J{E*o1n5L=gxts97~ z%f{9X#MWhF>jq-$vaxjov31$lx`EibY;4^?Y+W|CZXmWU8(TLJTbGTk8;GsT#?}pN zhpm$}Zp79VG-K;NHQ73`aiaJ3!dtgszpBCNLW7R%gRP7Jt7Cia!H>2MyDH=6iO9`r zbZabjbRBl(2pHuE_g65V{|Gh}9a}X6nVbPG`W$=u4fa&@?1NV3U08kgv;liM(8_#K zFp=2NZP-)!&paho)42ncF7B1s(-361HG9hVfhNO=jQzTpJ7Cz;(b!WJ8SaHWEkl3V zk>NS$uRweHpDozax;Wbq=9vx|uAcO>xxrw8y7yG=smRGWv8DJ3#g_iA1zRdMRP-`B zbHiA4t4e?VP4-FeP+p=xFE)HJy!0lv;4r#*GrW`vFLmeoS!{KJW~&o)Tg`ghl;>8> zR(Ee?s}r!@Ln9L0-85U>-EXUxVXH;=f6mdGtu}Q3X0g?>e$*-2>K9wE)f4xg?0brz zK9FBd)m96?oE19_KeS}0#YRV?cg622*kvVpvQh`6L`_v&GoybUl`bbHgG{;KfDy2q4mRQFQXqWF;-`z zuPt*jeSPekwT7AQC0;@1K1$yqlfIaYvpT^Hv+3(3;#2Amz3t#*i2;}WX(euI@HFtL zVkwu{b?IAdrVky%d-D`~o$Q6)=k9xsUdlMq{n;+h<(3%tn?HAXma(2L>n|LC`BTO7 zCTmpBk5`Vr@iVJueqZJIvH@JPj<(pGe(20T{O_`xGdx$||M`5J%k$6MGCV&dzn@Na zd44`I!_$vEGh$qxQO*p{%Rf^*|HVFvZ{%A%Z%11_OK%VH{B1y}XW7^=&tLn7dp?84 zQvPd+_1#5m#R#yL)I-*_9m9F0VjCj+_FPJyz4=BTzL{@jt{i1arY^f!KddUmu|?4q&ob&M^YidY zl&|5K%(={6uv<$0lFz!|l1DZDCGA^$uYh&SjBgRM)F97gowa`4ujBcr9E=qd$#-^B z4*Bj#Xf5B6d!>ppMErn}#6DKC&aLFJlDtQ;M%ygPBQZhpjYWJzo=FUnjc3<$ywXeY zEQDT}_87M>-jccG`Z{J4DW{EZMbKBmN2Klhs_}6WW82gf9$udgAE(ha^AuZM2KmZ& z68Xk1>M-(wJC8Ev*KiW=UyDqQK(?+y#)czn!@&EQ;C+jZ_uD6fjIC>PY1<=%VvAam zzqCZ3{EY%5wMYKacu(X{r9ayz)0HNCdnjmuIUzGi@k*-ffJ_6Au5Y zQNz;eiP^YdRgxnql(c34P&{wJvoDkHryN_725I>&Oz7=+nOO3j#FVcg|1ho> z+Il-=%_)gHSd(zE<9)8>`9hwDaxKqZ;@-l9(6l#@wejTl2*>=S-ddi8#H^1ehF#*+ zXOOo!hF$WUk&xrlpeh<`;@*Iqhzy=b6qiSYQiHTbg_3I@Yz_r(+Ayi^*Uwv!3nP z@+xU!9}Jy+cXi7;8=r@!vuDQYI$NJZWbEU6=!jCTWlaW^ zd#<;WwD~TKMWwZFpE~*N6L!ZJBS0U_uusy4VsA6CO%h*awohKlE%s?8_AJB3*dXQZ zs5v?d7#9?KHxl3ONaSDa-AT;ZG4`&C7*<(3qCNHw%(+DTbS>GuB5c=bW6pJ8##mYN zHQN0q*l`Fp?+Djo^Iv9<0S#}C4bkxC(%dl5TkQR2V9vC2fH^M=(AQhon^N?(;I4jQ zzH(QvPk`Vqxi2{8bTQ@@9b@`2kmN@UKudV?IbAo^vZr>3;z{;kk@i@u*GuG9#x?_G zzb!l&tI2+hyHv-MWzftGZ{_P)Qu3Ys9pK14XAwt^N@~AcwT2^OSW8u8>U44BkWDU6 zALQs#{6XLv3{wx+Xh$}`*-Jb5;7q4#rv|$>XkW+Qoa%RxWtcGivLi?`Nemrm@wS?;B7PZ6j}FzN#t9r`HqqAVD^2oZgJZZX*&#PmyL>-eQtCd%$+_mc}`L4tv%eN%Yd@z^1 zQ^tSsyUlUwm`nQ~_mcSEe5Lei&2Q(TeVH3*UT0qNxr_2f;~z>h`HN_^lNZfb!a_w>+l z6Jt&-^JBE1AHDR04FB8Uk>Y>r@nCehHH5~nCEc` zgSBTr;NI)kDQRzjn>yB~7n=4GvLk*s!DE$4eYA4Uz(4m0ezZ0C!NiXz{yOo&$-42{ zJR*E^FL5o;XYgEnbMowA?!6cvnf3y5`0`oAO(HWVWje;aJ;5j2&B^qvW@P$@hD-oX953+p6Xb95vt*fwscN@jZf#CO^lPa{iF`-4XxD}U5DZGYEFlVT(+jaWWN~A zXCjyspVvUJ%*XhBR-Of)m*~ZgZ5MEW!BZ-g&#|>ilO@rKi0vV#=3tIKBPcQ62<)6-PS38nK&qFu37)% zk7^y0Fs9!&COK&X%$OvZ{d;}==1%T$>;o>odD+ibFu?-$7k)Wf(R@usvj zOVmm7_=vb6DgOen!y@>)x}!0R?nm8}N{f8w!R|QU+^SV+~`BpMoLmU>+HZlrcvcLzKO11Z(VMEO9^A@_Y@?Wo%K~ z*M@s9^$bm0h>UftzvAqU9kwX@&Gbi_!!AEIWM9WFG6r~Y4_8_*L;j;P(pJ)@9qV6H z(q5$fM6b#Ch0wb+>0-xl(-`b{u%8(x7BaSa2jeh;5i@CD8Ecg>nPJ?MHO5|eJJs=g zbSP_=m0sN*-Lz$=-ha}^`BJH`=%-$ky5|R7`}GU>(X4!XJL_GAF_-b($6Q&De34T8@)xPaYc{@| zy@c}_oQH4fmA!-iuX#+#D#Q=LUh?&2*g%QD+{%AtPD&~BNv-IsQ1X*Jt%|j3A@eJE zw>*q_2k$;pk~YI8eavLXjG@GjP_}KAiEqC*$8^8%#g1)?@{ZurLTHr1H-zUD=5SP6 
z^ES0aw|+X^2Ane86pe1;13Nc#Q=8B&^(@h?3OP-OriQ<1ChbxIFEO4}eh=v$*&rd%KH7&qTgMzMFFu|E+DaP_=XxUZ z>Vzi?7>_l#UE5_)JGcx=lQNwDa@Qw&4l?&5vKPMT#g3Ib@JFAnYXZy>{!K(bg<@Mozho+`qla!OMc;_Mo5p{&xdhmpINsTf{@R0`d~s&; zw(QZ3gR)n%&GL-ot;=v0)1OY8G?9W0aVxY}Utmw~H(75$OIhdT`Cq zKG-htfnvYlhc$6Z+ALl7Eg&CRQ$_64DD1G*Gm2E|;%u`ntmB2v78}%Yby59wNjuHD z_}hcJ^v6#pb*iII5*HlFAvhNl!6HWI7<8gnRR9a$LzkT_)8iuE`JpTx2zA?3jb zHTvQxZCy7@tSfCev>dNvfkMtR#n!-YAeO-UGs5aG7)ZCn*5=^{_IdeJM{TUlej7kx45Zz;P@2UTF;}@5`JPTj;e~p7|Fo%Fei`BDsW~0?aTUq5yvu~CWg2CZi!)_EECI=a_OI9 zm+JRmZ+Al%iHVGY7GnQPLtWl!F&W+`7+-(4ONn>QMJ}&$bbfI%M`?)Sb&(cm=c>1q zbb)qm@m7*nYv)$)4CYeN7sBpEy8q61?!mTXGCwH)Sojr2{w?J>gZBhO6tHH6#EDB> z>2>tArHm74rzqkBWc)^}5A|||xV%}6u}_O>xtx+`rV{Bk<^Cfl|JP=UM~G(re<}IPd-6_cNZC~dGS7!P zEF!;1^1GAv5c{5_x{fFRrtlc=4TQ!6dG{WtYV*eo7)DH!Gd1KKwT0>Bph>u8B%R1$!@y1mg%l)%FkZ+{K!- zL%=%E5`$nP7NMq4X?To!)#ND+w;q|g;YWw3Zuk>8a`*c-&w)#slh54yhC(azkSv~f zX;;Rc%Q>tZeK_8aRy^yIR8RdVSHqyI6ld&ZigVTg#W`}gvV8Zo%%Ni4i8b&-%|utj zvPhe@R)@&O>;X#dD-XP}d_k8W(aew9=LP) zE9}2n^HZhamB=`E%`Hkp9ecSSdvLhtmB=3MLoY;l(kR!4o{DqbaMk(QWfteBcUqn4 z)(h*BdMdfAuL^P2STj~^N(y!UBRs@-z1KiP?op6f$M&su_S`YG!#v+lvZ{v$mjNe^<=LeINM2fjDenYOURxhGLw zQJcz<=evLTvZ>BXxWBIt_xf`b`tA>^)$d=*y&tJ73e#QA-}iPoYpv>v4_D7y{+`vf zVtZ%GKp&>|QKeyQ7oM?3X6*^;%KDhK;pz&l4@2zsfq54?_rBlLdFZW+ocrq>&|Iyn zM28I?p-%qIUg`(mJ@ z*IlMM*AB2aS4~pNKY~YpFJ)m~@+;U<(Ydw#Gd#OkkM?uQQOETwu~F{pu?-dI>-n*f z?r3bc%v~O-cCN}}t$vAj5L>b953GZBJY$2Gwc!^$>{|X`=x5eQE$=iK{U7dX=!v~K zc5i1-4EcW6)8bs+*XlgPobFoaD>h<2w1{RMc`x=#*00_t_8eOw`(f14m%bl;S&y## zn)6yP-c9Jsz7Hghv7s;bf`cx_2Jb{?jh) ze`a;Y4_BPObSTal7c0(TeHEvJx@_d$yS-HHzq00#@vgil@5sK4pJAU?_w~Ia`!*Qw z+&@Bb4)3KnW$hv3n+9#yi{t40ihQYn0{E9yRb&Ep1dA2+W-vG1>N?u1X$;geBd z{Q>ZY>`hh#e+Vv*9l5`S`x0*}@2>=3$a{iy<@|HfN|)+XDX;ipnQx4d-?EXQuO95<>;2|WR6UZLJnnPL3ZZG!ffT>xNPO~tZ_Jhs@S0+ta)#!c^Wj4yc2YyWExs=JA@0r|_a*JO~${izh zZYFx4f``4#p;#var!5}iOQ{g$KIPU4%u{H@im(!7iZGeGY*c=&OCT~c800V3+K;xZblOQ8)aI9&}b#&z<1CGSwnqy zDq-#~qg~!@@fqIz!!x~KWexM5>UUorZY#c{Cw;M`369mu^ygjb00&l_C_ zJf9Hod_utUYXhEN8}J<3tq--izLNRM!WTlzK;9D?NZqB~E=N{dYa9B@CwUp0(ORBo z5z6@OWxIn4k8Wwzxmr* zz0vVIWe*2*hm0rnB~`emgd`Ma_Dm_x99rPW;5^x-7S9vjgzq+!u0)rr*n)IuwuN%r z;dgsjLh)eaU@+;pS(~%AaSUHODf{-d_1V{~y)Sz_=P%Bhm-P$oALPhgH!1u6b@kad zt-CM#mz*!Q>{=L6@Ki2cWJ+iQmFSZUH7j#6~Mj7l(d((R62$XH6k?TqVOiL(mxa_O)F6Mvp_5Ur@{x@F#-}pxVTcH2%dHsLE?*-`@{+pO&{j9m8m*P5pTDl^{b^LU6g zHY)v{{ZIVunG+{^{*L4K9Dm^WN95&>qZd4r_fOKVBQJAY+5d;E?QE@(wk^V6BYm_o zqmRaVhj)^v^w(r;N7`N5nz(*HhE-^5f4|JsHyHlKq~bfixUYD|#vR$$klw-d!i{%i zzpMAna{tBif8AI7%%&aLH<8}K^|DQOWN*~^XS#nz?_)AQl>Uw@%Q^@9_w1^?!lC0G zy*Ll$d`92#j_H)EP;X=DhZ}8d#={a<&~_d(=&!Z)osN4)+OI%w@7Y?PuI+Eje*)`h z?n6uYGruS(&IG%T({aD}D>F=e1)+rfP^;-I};%89N(ZU1y)NKLBeA>EzL!L`{ zWiD2eJ~=#kH{$?{I4bZ>Pvp2Kg!S$?WQ{%KyGj`>*igK>lm|uOI&(#s4#B zmlS1UHwtY9%u^_ISjrNQ4a1l0zX!Ipuor~nBm1BVe-)6I#MUSww(Mk%Ob#il_~Z&X z@5TBK<2fGrBENWsD}?zk1;y`g+?M?={{2UXClHrEu6UsiR(JC+Ooc; zk5aVj618ae5KGbCbZgQ6YeR~@%3*EmTf#ht^Vm;ng3a^wy%C;cV^|xQx_i<2BHKlz zhD_Dr!xw#F)-&R(6B;Gc*RI8W>|(F_Jsf-S^NC!oddQVE3>{>lO;WJO!{~?KL>u1B zab@C_#aI5Q&@m+3mi++NGkW$ao&k?&dfa6#&g5Is`2FUoK0T5)dvjI}M^af?!)xn` zviGek&i<6c{z*~xYqRHN4d&Sx4j0egz|VJJ-7m5?a>RdfU-lb3XKYQ=Bhlg;fabj( z+MIO_$0P92HQk{CX-Cls_;%|v80*Y{AEnLCW*xZtn~ z4Zur{Y0&h0@?Ikvu-{Wn9~V570S{%uL&M;q;qcHk@DOV`Ge&=O0(-aIdp$g)@f<#I zpP%$}d2kOgGs1&jQsF_Ne@i^L86H&qJQ&Kj`Gi03;@HQrpW{mogO|jIJPJM$e#(GX z?)tbWd*;V`v+w@6IQwzVf5QDsIg)vH$-2qeIqME)4_bGB_Gr#Ex*&JX&+g40`Prmw z=RYS+9m$arYAenRg#)Jo)X$I>-F&@A4hdZPE`Eo!FQ*EtgOJ(NX*KF9qKK3WjpGo+2j`m?5Yv*om+wt`B z&OGn?AIg3iOSP48|NjkVervZ1nb-Oy^D4Gl6STRqGN1J*c^n`wnYVWM7S*$bzDe~h 
[GIT binary patch payload omitted: base85-encoded binary data, not human-readable or editable as text]

n4(gZbDVN7t}7r)7K0M~}y#bNXRRK1=?k`S+N3`dwtQG=IPf zJvV5_kNNi5zrXqT5qI0bO3NO37JKA4<9FZ3-VlaPqiycPt{4q|%08DowFfIlv7g^j z`d-KA+nomgWRJXks_gMkutz=tH{ao!!+XbRn>u8Q9$i^7G9_C?{jpETPqw?p5f>m+ z@(q24{Q6J4yZqL?e&x^bU*$YZUrzSO3S>$&Yp_S$rcBjX_6T-~srzkvM09S-9&wIg zBwJ$`>DCy=VrZ61JIGG4_c1ti|2gau&$;(%+=)NP66PMYSBdF&f88 zWE*2d8+mWO5nodZ)^*P>DDTTS^G~wbCLbQ@&ihy2n(ph5pXb(be&45~(|z9}o!h4ReLpNn_ob3nX{_HD?3eC) z^#_LU-7uH$S7Y726)_&)n-ju(zv&w8d+U1{V@59H>R~AFLOWeLUDYxF)3vc>Z zce&qS?IqWE;=!wpU}=mi*oSep-N=;s!13>(mj9mX$HYm`_XhhXK2Ke0J^s4QtV2v>Y)*S@c&S??>kQ>TIh;1_Nt^0yNR1(= zjeLxwsZGVh*vRG@!&&g{e8&2VDM#wW{PLxwzZAWj$ej1%;z!mU_Qno8$i9YU>;?4N zAeT=*SIVnK`M@7ae*ybZYwf7+^}wfc75fS(4b@Nk4P?UCD$CMdhOdUckC{HtQn){i zYZ3L_vXk5-|}fKe3~m)9lwAthOjTkO~{+-dV?izT1(J^^jeX=>NVEm?XPkOGgQ}2 zRy~vB{JulfQ)9lW(`zM$kNwf=>-5r%9-qFG4902gGxB#i;UU7;!i?ZcS+1bQy4PAV zc)(To%dzYqsBe-Tc_kxbJIT|T&r9T^_kPD-f~0Nw{9Z*^%X_a43d!T6)bk*Cc(6rC z9y75Ko03QAg|o@y2-?`>Wy&P|&{F1aa)Fb*wAU8za@LP; zDP=07O|&MeDLHN9kkcf|nMDKk2zhUxC95vVnUs*!8M%IM3HjA9CaL!xKa0#Bbm&4` zW>sHdk9@o2Pmdm{m)psf+!m8&y3tTSGaux4IP{e~+vEA?++Xxu^6Q~5?9_wEvivV? zJt#lPUgxO`W3itvS{H6Yr^We~_mg}fTLaKF$N2v`b$xY^+xM&Qh56(=E*W}fPzzHR zOg>*-*v;=-+*lWOwRB<1S9ReHtizH%wDq8L;w{MN*>s`W%NG$UZ;9hi+ZTtqeQ*81=GWAB@`3nA z>ARj^Q{Sb2U48dI=p*E#4e~ObeH5QTH?>1wtYN>VPq^P6{jrO^FKoc$A)+t=t>gT9OX7twdN4vK@%q}T1bu$H#IE~nv3$rp_WO?h!0&r4 zCM0L2Ss^*I^_(ql3yW9}@6dOWUl+R7&O_-={#*09ZKkE$?)s*68#0%QY`y5vZA%&Z zaq6~>7pU9f9lC9SC5NTdSGvuq)0&sZkp1~h>M~);h3c}SqkhA9w9DLQG8&&0+uoF} zGVu+)Re-LVgrC3q<~o=DPHfM5d(+SOpw(}S#aB+cswXyy*>A)Cw)<`JPgkF<4>Dx; z+h&B7K@98!4)LZPs_~tbsJtaTc zB=(j{&fhzzOa6gDm*>|ES`cQGy%^&v(>fC6KLp%V{zde+)LvJ(>3gvkjBsDPDcjc0 z!_)<8KhmsVuNS4&hy8XI!qe(UtvVj+N2T+QzR~-!^yl^d0``)5k37@EEt`ybIdz2g z3F!sxGZJA4`>pe05IJRI=dnYRo zrRR6(0rg$%zR6_DiXNz2WEgd3p9g)YK2L?E0|NBPl*UB#SQPyp=@8jaw*8c0m}g;Z z9z&m8cwqN^Y(2A)GG#$S>9a$W$6!3@a85dN8>`Jd5S_eLfE{x#-jWTcae=0IYcjkk z-daLm-|Wl4TXtVYyrq7O*6G*xWy~=FXJ01D(U*w}Y{6a+^<|<19})k!abL!PhvGlG zAM++W_*N6x?t%~8@S-RGi$P)VVyV|Cdnw*k_9=8xd2fOjRbKI8TWH)0UK~rGUzliP z@)d5*ctG#icFsb$YZ zJNgcib(>F}eTSy_)b7it!xQQ|2wPvH@8HDO*P6gr#+>Tl>*HnD7(w~%*!){`4*YD6 zkLjnH=MGdh;$yon&=em}cF0)czJTyE6Mhzd+kJs%`I-6IbMf=puyQg!Y2xRC^Tf)H z7FJeJ{_gBe*B5)_YItZ4ds6meUg$0Un^?v^bn36Z#vI0*>>>FY<6Pi{iC4qVf|=TX zuQ6tdPlcJ{Q{kQX)Xw{(F#5aD>D5O0={@z`go{n#==Ng`<55>okM35vUm=}WDYw4Y z`XU=wHIMX-;wrWP{KuGD$X3?a(}m({{y!L3e|I6c%DmaR`0%W_3Lk$hT%`?L(1xwy zlW}0zRQRzC{mh54ndfj{V>>T9ZDy;TZMzv>ZNS!EXWGmfBf1b=?O@^Rb_-Wa;a6c{ zgjGgiYhTJ&kFQOXMK~+|Z4PIXESz;&X}2Qn2{AfchmiHKQOU;a>KoWsrxjsOSI~O*6M9F_wSLxoHqcnsNzbl!JY#*U`K+I@th1h7<#>kQhWV_I zvCOS!y&TW*FEF2_8p~XImg0EU+j{0RmWAtCSI4uS*0XNLGDFX>F;y3{ocJV|X(k)X zV)U$&;~BmP=CjL;WwCme;CP0Qi21C8u`Eu{+Blx!FJeAxYbo?!>qKVw|Vd=_aeYoTYfpZ)B_tNx(*jCFjiOCOuNYrIjX z^?+vmZU}wXM&Dq&P2}2z{m_H3E!UAP#_}WBMW1jT-DxaePWTJOcN@#w@!rvG27Zo& ze>Rq1LwKv=TUm4OA^gx-{tLo;;F)9h7M&=yWx))Lo#iuGxdGGmX{@mU8gcMRQ z#rx9ap{L@T6o>ALmnlyBC_Ynh+EH;sY0}<`FC=~sWm(7ENg@1^ z$Gv1^?6)Z{ALp2x|1-45(-z(t>Gl=EdwS=u(cIg~y$K#)A-t-4FX-NG?j^Bz+%_}K z<>K+`KNBAt?$bN>>z%FqzlwK0B>ZE9Z!0#8_9$iViOn_l`0K`S9>65<^|ObE2lqUk ziJT1!b_Zj%My@!9H5aYD16=Km0hP#}sf8@7x!tWvWh3>J^}yfjD|S&XYS1yQ^k{W zpM>`gaKB|#%RqPf%VTKo1Er77eU$RXvv+&^>e+K!@lJdA>q~g)Ph8@w=eX|R>dU45 zBfbmY{Pa!n0Q1?Tq4)vLsZ%~bg~po{r!0z>DNcD6pQ$)?QoKNM>aDns_|{2=PxSfI z1eY(LdpZZ>kDc7UEzF6mrJt<6*Iwqa(=;#7dSLv9W>*|)Z`$YutTz$;vz+vYHllxk zdAwNYzX&{4nN0daAJP9Y?k@pfRc@31)ZP9@zWl0NK`HcvJ~JcZ0-|{=?>qqw?&OlM zek-mUpvTlVRln9IJrplcoOGv}^&?K+cD*(+2K#&FKi7iWjC)Y87K>MsMiw@-#ygVt zw>0aeF=5q9=MIUlmZ%)$FJ9<{4~X`C$+OBiqjbjHnLPJ#Uv;7XHM%x z4_^t@$=>&L!_~zpnnTsP#6sSgiLO*1zSQc&7tr^aVdZb1do=6BDRp&o9){uM8vScNLk{cHv~+Pb 
z`qz902G&1Awxx@!(ZA+13j@vifdkUT+Ar381`gJL2VO`QYmYzk8S+v83|x^eu15cw z&ye~0XULm$aW(qad}hhKnICc~U3}stf6#me|JHv8PfHh9qkr{GI$7tM>brl6E`Hge zi;36k;@>)SG4Xm`OuSwf6R+3B#Orl2@p@fMyj~X*uh+%IzoLt|cZM$J|5tP|_r9Ww zxpz)o%sc-`U2N-N>112~K4$Tw^saP`7as7Te*^HMt*dQa+!Osf6F#){uC0rEp?_z= zgSOtab#W^C_X+sV*1NVYz7qZWB)n(qU0WCTM*ltv-|5UWotq+DcIx7*@D)67UHq~` z7Zb17#n8B37Zb17#l-7%G4Xm`O#Caln0ptai*5Q#C)@h>F-txlLzYAz(Lad(4L~1T z@7lU}33}}b=p%YQf$kN3oVs`^c2Tps_+^JKCSI?L&H7oo*s9ls>SDWInba$idZbWJ zTPIIK_o|%UQfxNbwV*j&OquF+F>%rN{B?1|*~-HyLx6mm)x~B$7pIF)+_3^3?Lt?( z(b=BR91e3ZI_FvT?J(xod*F-Lvk!I#ZI}uj;%G0kKZ<;y`)g-mm)ZT%+1L?J(*C9# zV23=7F1(997T6)4Q8hA-vjTKv`JKPkJc68qm3##p87{r_cl+2NeQv~vdSosLcRTXTM; zx%OAviw7>I{nh4o(q2!&2Xp zG~52jKz;iYuW$d~I@+K3#kBwb7A$D0{f~}%&z!qGb-z1!3SVRKeGTK1hgnCHrZFbs zhww@ECM9*&yg^&q_HC~1oH?PjqmTc}H)kro&B&4Iqa3pTGe}c&cxy}^^RnJl^VyNi zVHrvI#4)d?xuPXp`$%U$b6T3O(|&iX@d?aiJ~PeLre*>1m}u=I5XZL(ymRG);q!W< zD-ODhz>VJcKo09tHWU9bXPHc)AH2;KU!!+-^ttK8!<6>``#`?oiVGO*_41S%8fBY` z!KdWVUN2wpJkiA-Ka3N9$Q<<>6Yng4YGUtl`9&rgapwL3I;+za7M6WJWj2`4b(UE^ zx))OBHI6d7?xS%;);X*Fs5b`5f%oWW& zojo&RT0!|NzGM7?Jr>x9mwk0|$xCIEZ-HI5V)7LB9L}jQF{inDN{%zBtM=}hMfe(Z zex3M1@{*s2*RD79GWRUSA3#1QX8pmVVIJe^2=M0v!wB5K`obaJh(K%b=V{I}ox}L$ zeD<7Lebp^2D>X)b4n4GX_7Joh_e5d6Ebo|^X6`q&+h8vn_&%BX=v~oHI3(IdQ2$pS zh%oyMUGBTxlMdY^=l8{*|3V?a%mRXpWe5mR_x?psu|L zMdKyM3%B4mGJCQsJfQP(U&BA=b$lo8;`(uzC!qEBo)A51C$r~JntUg)4L^rghwx$C z4ZS`xVgnyjo==P@*0)3kjvMiTI`&DZ!bdh4EUKW)RrrXRaW`>%zZGZf>Afoaz7;Q4 zoH;i0oiO6ev6=A*;>-b>?<>w6p!q&=m&ba?OT2{@k0Kszy{~vn>wV(M_WSrDC)@AG z5x3u0+1D}+^C$GUH+A~DHLt4uj zkG!-(M@1l`-C4^ipANMZbDZxbGq|EncNJQ>nAdHpY`K&(-7Yg_ zNVm$IOI&MFhI4si!{KY=jPIS_B;kW1x-CYZ&LGdKF!pTX{dDS#m5e#+Mia(_;wyk*j zjq|V-!M#Y?5wf=!gDK#9TNe$-E}V)lKq9tb5_oUhS=!?umNwtVUNrl`(HCbmVC{>u z(#p!P_hwr5p593VXSKh;EO>bwdwI%c8ke8CLRhFh-L zpPY4Hb)I$iooC%`+vCFP?rOH~X^y(fj$jVDwkLW+^{v6Lx+y!NkY`Ri!qlzkRPB@0 z*pAS;3hB`ze5^0Vj<^#$LiT=hc7%A)W%*4?&-K7|slleT`A2w>f&8cpW91779}K6h z_rbI3gXo;fCmBz8gX<}5j@LO)YbN`z%3cvq=v;X9EySB<`m9kM4h})1*^{XAh#Tow zkdA!xEIVSec(#ly9^D}S!fmQQ<0U#HBAYcvUFq+ZP}bEGCzyP=dScfVSF)aS8)b`v z$7?-(7I$?U_@Kruo{z1t6TVYibT1|z&f5E*!84m(oPA~58d0XLv1?9YSqc4ygXC6$%ruiO3uafb4@wu(ayilsrN!P7tcNzo21rkQ za_ase)a`oecpY^e%s5m=aL?uGlc9k*H>f?$w3lNFzBjB7eOhAz`PdrL0|nr?ZIAA! 
zKO?!-deUfQem}Tg6z(sZ9-ChF1oHOVwuNPDFY}if#}`aV<|@J$+fSHl#$9Dagr#QO zUABs_%8Yx;*gwdetCb$rTQf}nkBp^U)@~%2Xw~CrO+fa_;rMX7@fIW<5$lo#& zOj4ee=%pB){6@K_sott~v)4tZ*C_Kgd^ zWiWOUP9N5__{sPCA}hV{ z`kX_l*la{c_a2$En9pXt!su=LWEE%{2DtP@lCL z*&gU}SFV$8y5Fd~k8kF2<+H!r_OyuLt~+(6TX^uq<(}Y|_=lN$@ZILGdm-A5?bEjM z7}r2g+sfgbRXB&e2ogMP1LN6)1zfJ(h+o)a(XPOE&`VdccXWUD6%%du-WEESxNc{Q z;IS><;Gqv9gTjGod|-ZtpUlotMxExIJKTF``Sz8AgGW3G^zGWPPZj3@-S4lPAzzwD z@mY!v3sgUB)E$O)pC5`2UIstBaJ%7qe4NWSGu!Q}PE223+{NSTitfZ`X8Bf+v3#@J zSpNB;OzP#Y+q*j}_*isQ;5+CTrF*6P)TWzzThEr=slJT*FzhkE*&wd+_2-@O#81Qf zIw$Un0Q&|`_1E3SUUaV2#m7?${ZCgH;@@9kl&+bUm4^MT=wHhBGqVk!?iF7VO2gko8rP9VPnCx>2B;k5 zt?y@>-#4_g?#(pIUoXnOMRlSc^d**yAJ)2=_mO@AlKahdvkE)ecTZVFWNc+l6;&nT3Al{);-{v#+z#dwgx2FvW z?i}SuZmtg=>F%%lvL+)q_VX6~N6@LK&$$O0-O8mh80fTjm`f{UOsiMU+=(f9Ts+^FrU~{SVFeT|VunUYz4c$V<_!2ImU6FW4+ouN zp*=OcT*C78^uJ>$TaZipZm+@zdnobATxHO05^2eP(B5EK*!IKtj}1Kgg_dFFIbLeF z8IeZ+$H+r_5UrcGqn3PD-{r3HXZg!rlv8_G#6RSJTK&^8 z^vA|R^M|>wbhi_Kk$eV2!$kJ{OoFEAtan$wTCb)u6j^0hz`H6#EB?*6W{BbPl(Uvp4w$0?@Dj?fSxJjokm{R93vki9whG>yst6JMZ`lk-6-_y zt?1eBqHjl{cW=Sp>*gSO*tF?}-eMdyYHv8@#eVb_d(rL2sbSnpI&#A)W1#C)Vv^_M zq&nh1c8B7ph?j+(iZrI*9LYXyk@vcnNA3w*9%&Tc9GM%rTsl6{NUVuPZpIL9q>nL% zzD6?m9L}1@rQJy^?w6OzT4bt87*9hRU{|Hfj95p4(;H?z!WCR7Ul~)tA?ph3F)^Fv-9MqujC& zGWkD@a`(U%WjyPY%9oVHl{L2WaBMF5`=wj;wbN1>N-JQcrF42WO2*^?)c*J)nC-uxu?dx>ls^4#aKLT@`Zs^rNk7SM3yooipB!xR$9CpLN*GJ;5>v6X zYnzIt;)C&AC0q-+G7gW=E;;-o!U@^w4&E5Qpk%Jb5tEGRbta!+kKc~{sI%MFLc`sh zLA@h}c{?k;>%zQKuD{MZ<#xh56ylp?B;C<@jq&hY#_~5CNw+)VKXSw;nDO03#^$r7 z7fR#&)1%It!@d5$#ZrIqj%eWIN7YaDN~XPgBje4{G8Vb0=WBSpxwO8i`zGI`hs5{j z=4SJ=%f6kxJ;nRmJ&9(hIi^JMz9jjzO?en+%0piIjJfp1 zW=k$K7ga_3%dX!}-%I(W@SPpE^DMCP>}lnx(9W}QezwfWf7zLj+T$C~2f1oopR?s7 zJaPHK3}mm2HW&6zRtR0wO`Nl2i}vZ#%b%63zN;k%wp>YWGCo4KKKfC%@zFT03E671 zVvP^{@veu&n;T7BvoJR$gvZ4e{)iuKzT0o{fyuwTSC55fi7T&M@)93u{kQWQvm91F zs^dm;?sRPDBEtFljyi55OeS=JBWb*Y?i`uhYg^VE)c30RRXI0t{WsUsTyON6pEcGf zu3Nx`ZmO*yUzcPL*~vw3ya)z&@%j^Aq#vy3x#SJM_7ESHP*35B3w@@1q&sp;ejD5H z&Cg;l^uO|u?r>SUqw)8R%ODz*VK=IMHP$E^q!Zfo-%dLvg;fk1%NRj2VJ@LbU+N+H zZbTnTKaQn-x?k$3pW0D%w9``kda#B%nSL^B#_Gnizv6}dR~}B?*!X+9Y&MO>&k;61 zTlG}is-9Jjda8}PEZvqhfh*%L6Y$6Q(UcJHj?Wf9kF|7xu)ySDVH9=mLfyx6`HjJe z%G=4q&bo__yMV>xXs(|Y1<>4$vp3})SngcmvK{C{AK)<3B<41 zFd@79h96~Lv2=b`JaXC!IlXGbj_f`gc4ptjy(Dm0cr5u)y@XNPZzQ8rmnj+7-V--LfQ_6q+#!$!09+kW!W`@(&FQ)(Ee77`z4-OJ$Ki`KpI*1hrE zd&#;t!Maz%y_cA!>d?ULcm3D3}UA>p=pp8HF_&X^{V>?$+Mz2EnWH>G$JeWV!T>|#yXv- z0P{iSJDJc*c@{%YBl65V2g6Is(?w|KnH1*Dw)0h<8K#dwh=()q5wP=5r+#Msr0dL| zd4^Mlm;OIv7j`*vdDlld@QZshku#gH&rQ2tb=MiwDtCZ7WN}3qQI)=!NcqK>GQs#L z>1doKMspfw|GFJ%EpUg@swRJ%j?JaTd3C2G7aqd#lr00D>~55<_)cRq!d7Q`CVlW5 z9Rr;znES;?cZ<*P;&6!~13=b7RU#GJRuGh=AeV#dV(FJmN)%aKeTDn}JQR?U@T5#Kt?;ie2K z%jHd!Wx_L>`yR*K_qZvq{2%54(GR=1NE4Vk5D`ms&*?#W(Hr$cDi2FUb zm&MgV|^srQE-gdpB}jwPA|+exD6@X1~Gxe{}lHh1OQVlQHrBzs)?A5vrHk z`oV2kgSjrJUN1d3KdUS8>$$pNcP!+;`5o;M1CH*Oz5K|w*ujtZV=s@%NF4mg{8&8~ zW(d3K+t-$|r_vS3yxQNUt4-4w`hB9O`bB4^Gd=b~|C{AP4>nz{>0d1uxF9`U-Q5rG zq{BNI@Xla(=Q?=jdU$6DymJG*GZfw#2Je`>W{&kc8i{d-cM0~dmUQcJ7 zw~D^n4z8VCySPGh;;d(j?^aM!?vz_o$*v3iOsXV($&o}-! 
z^nBxghn{bonjN<3C)r>3dy~%4E}3zSWX3p>8QXC3P?Cd((vb<9e~PhL^=&e|k<1u~ z+DZ0GsGYaPriTqq)bnDV@4!B_`N!=|Pux*5JvJF^yc|B#c!B8DK${Q`nf*(X7uT9; zh0;BHT>0f%yxdTh&^Ko*iwisALixtF2~V%_;0NQKd=fl5aYqz$3DL|c#4xuI%iMh& zzA&EIN5Gn+%*~%<4rfPePjDx5_FjBo#MeIt&Jas>_2tJ*p^xab_|?O^_HC+T%X58G)S`Op*8nzPJ~oX=sjsocx_ z;HbL7Ekj)$=dm8Sa?hpyx|CSv-3J(TL#yuH!aV5gYWxzu&$&1$u^j`Ew6!m`O{I^y zrJ>1@^Q!wAjGf@0;jGUy>2-Tphuw|&)jd7^b=?VllwpqtKL_ZbJ@mIT-&)1h;1`s@ zSx;>iUtyK?L~noH=)ugz58!=I2J5IJ%`%17niD7A!_2vk;T#+Gu^Bld!>C2))Op=S zH_!a#jJfTMj+Hvw^dsW89eQw!_S|^);SR^azmaE@_4)L=BhX-2VcU6osrxOl?0N4f z^OuJP2lq@hg4@kA>zw6nO?lgL{@bNynfI{=k7#^G+R!2R@b{1Z$P*cOm(ZlaS<+J7 zcayi$*+>0Pkmfepswa5uv|R$V_|)i}BFcMU&Go^u*p`7WsKXZz#?32>jSD<+qHxOr z=vJ$>68Kb9aYfTc)3M=)v#))h419Fc@ps@{7THT)u6WM!7=wSS_V4z@vo?PAPXYYTMN`U&>(J?d%^_$zx;wRN`$ynCxZ*vS(Xc;tSA zuytVGD1{M$NA5I&;hg{Sn~_FvJnQgAdvtbaOyK<}f8B2Esi~Bygs{+vs(Bcl;o;o~ zzH3byVWiiNw0Dz_=%~7c&IWT)2E(kI^M7B;uQq)B9th)9Q>Rl9OoEqycy0$E~ zeZU}}DichJY-EY>%}shiH~J`&S9@HX|C{=MQwtMj@Y|0&nhD9!_GMfP7<)$BBhyv1rTo)&Mu*x|_VB6uv%!@S;7b-blL_7oXU=jM zzGy@7MYGP)@`Qm$k>F7j&!c%ShWBIn4lKIAW$+a536G9}M~A?pUC6>wY@!{PhA`<% zFzEzz36igHDFy#)6PI3%2!0Mv373+;&D{?Xd6$yj%Ch16vZ{(LJ=I ze7}ULb?|#i%XWdz+)v@EW?!pncN;} zWIAh*(u}%DFd}Z*V{`L3|1*($ZHOnLcY0!HO3n_vIye~RDY|)mx2So)L(VeEYbG*v zkh1K@cVj#2cE3Q*l&2SY(wub}=fiCxJcvAO$DcyJ;v6W!exC{K+aDb$c6%!?N1h6> zQ^R7~1;P{j!AIGL&>hz%5Z=)U4(Vis)<7d~!o>0Tdre@DOlK5*hMYw(ZuLPzdhl2H zpWL1854Lx^1G_uAf?N52B-`M>C$RfgBUr*b+bbK=f*J7mLj1tSbBRB7){H?K;{BKT zCY}3UxKkn`nxD9L+jt@uYjFuiF-*Dn7$X*@#p$?v} zzC(E4wvqnn2%fjA&o3#2BPLl#!Ze@-|dyX80-A^+WP zLBr9cBOVz`xt4=xL+QhrHi=0i*(71eZHIdKKT13ixt0IxA}~hh%W+26mJZO~{PwZ* zx+1s6E%G`y_5t1rU1X2gw2@8`eMn=o&LWzM9#YyH&;cR5JiFiSX;1TK`<%kJ_XKs9 zji$QFPV+L~P~Fm~>)`eOT;n({gI#a)h+yWW8>gY=mCb*R$*gTL-+Zfb~UT8!8PY-|3jGP*m5P1IN={Kv8 zhvB5jH|hzl#))-L6BgRQ=s*sN`Wz=4*p>{jR zIWiYGMo?rFF@|1L*UGs4f7IX5I;Fj|>u~zXjG@$Se4c)sV~$`SygrMy39oT|$n_c5 zL3keEmgaSVeKlG4kUgp@k#wECf&?CxAM-EYbz#==Kc@4AJMsD=~V9D z&ws=HNL?ZK?%^Gy%OiD9#c#-&6TdO%x%f>vFT{VCqkNZfZ#n;!|64qJkNe9158VHR z|6{9)>i)#@zjFT&_l|K-W%`nPr@0p&PnqM_n`KLgr=0Pjvi0HqK>jP=emuXH`zq(n z+`pCoD%)tD|B(ArxpxouRQCJ1SIE8pl=a_-uELnf?Hc&~KRodt{{4620rXn!q`g{u z8JY+FcPjgT%KGmuYqtKib+7cTt$Q2mU0dgVfc<6jYLlUhu~yE){*PVpAG_lJmA!T` ze68~<4>HzY%XLY{*v^Lt4|BC8-k)m}*Kz)LAp92BZCoe$e;MI-xW;e|l|KS~mL}!G z!uNW)uxwY;-X<{i*T^>!=vtZlbSR%N@|Hmht{=@OUvm+qoEqf1ep z`W8Lhcl^)vbmFY#G%opFPBIs370PB78?&`eGK2r)xu(a4?!CynFSqrVm2ht%*UN1U zbG`p7to>Ze|J_`#To$^wns-+z-PWYbwTg5%k?v}x%m49Q%)FZScJS^lrOUmAT)Rm3 z0PlXTbosxV>vPi8J^3V5%w;cj`ZQ^g(6kS0=3Ni{Z6jk{YNJ-r`f4t<%{5$VlR;c+ zi~9Zz{hKTK8(a=Ff_s&Ug?syf$HdPMZYv8P?|{aP=eJiF@ith|U-K zjoR!&zxhu->r7r35AWkVGEi_aXR`u}ag%f*-|~;>k1kq?vcl;J;?xIiGa?mC9na zCvzI&i;ax+sE+F2pHCU}?FO3{9t`oqj0Y_~NQV!&Cq7upJ(CBxw@&Y={kdnhKlk?O zJ+(de%(mxV8um^q=V;mO?Zls1jFUUtR&j&z!}u=7Y`fg?tybk+&87Be?pbs9&;AYc z`6uNi`exzZp|+8~joCJ5$y@Jhy}2oy_;W0J#6LpvIpYz7xG`cnay2uVc`3p&!VSE$ zk9X8Y8+d0Q@0e}EGqW8CZ@_P=#Aspqe(k&cG-HGK*dUXW^YJs1OnzJY8-&Kr7B(?v zw$U0xaM}QUk=w>LfU|v5<=fLpj%~e?2d@;Q8?)UGJtKW0T`s*X8cF9!*WAi=57*cE z*5m~%uYY#lU#Hw(=iC36brugVWxhr{KMOsjvW#^V*L|J(m~vs^R8guw%eL2S8?7kS z!YszGU*@@OquF-Z%RDz_hUc=OmhNe=p+;ODvY}?-57gL(a`JR6^97B0+WFt+ExVr5 zPvUEvmrdPc@qTmfh*sZ3I^RSZ-&9%DcD)?>$>xXiDWkrv*H5xNz`66aRcza11h%nk zkBzinQ@Us}x~RS^jckH5Xwc|cbKjiN&gc2fH=U2@HVM6B>KkkUo93qNh#qRL>}Ffp z`bTHRF5R_CFsOXA%DQ}<#Q&AKwAbLC&(c#djT6S`A4(^&sHX+2BpSZp1~JY4~L zzp;K*z3O%Bzreeq@xNP{)#e-U-Ig5fhc7O)4knMCryi4@nuH#;`}+H}&nkWWSlZgw zQ%)Y(--JzlhJLd6!lt>cpG1d_s{l?DRUR`-Noc(%Amu(K3Cp)zuvxXMxWSv zW3u)mL~oQa&)+1r*>!Yae0?1p_s*b|<9<``o>BgDyn7+(ntW$roBD>zM?7p|TNCv# z>1tzJ6M8_HFaL+ebScBvvc6t6R{Dn~Hg)fe@}J|~roM0L-Wl{eN1CeRQfMMNjIjD< 
z&3%6cJ)oDj%kab*tbduF9_o+b|L}(k^+lE+kKy=T=o*Ug_(WWZzrr^8HJ~#-V;{Iz z3{T(#u4=AGmxp~-JnVbq2~6Ue&K1l1D|vr1{qhB26-(pUE20g1MO;HTh;TJ~RNTmV zi;>qQOqoXb0O2n7teDO7XL!Dx@IAta%PN*`=K04wuOmD~n9{9c=_TRbDebQJPWcw$ zcL;~4RxG_aJYmYM*C$N5kMJQvd?c1W>n@{x0E4!Y8h+Sh|JhTY3H^;c3FBdKp>T zlWvX`K4*n5SmD2W`H_{{szawo#^RTxHjzGAz?z*3E`0A#dLHqg?8-7auxM5 zVoHke)3ZXQA>VoS{Hb-JE7By>!YzX_VdS$hPq>{tfPE$!dTG-r+O!wpm4xDfp|t66 z+H?}(6hiU9V>}P=yo~U5Lh-%gGKj8sF&geATY^D{yaUFg#5#A{shrnNi-y!@% zR>jiqMI=laJp}$De2DPy$cm-U^L!r9-y>W}*zW(USo$f?|IG7Kgr^A`^MFZ1@ZY9k zY7;cHX{L9z?jcQUX3&*sMxnWO)TYsDWTsv|@SzfIUW68+PbBs2LD-Wpi*N+t6v8|I zfA-!yKC1fO|35R6WwJ~XWM3pfXbA!WUzKQuG6`72CRAy0yB82!17WIu)oU$Fg4ioC zP>tAYFY$Z9YOgtwsoY2HZTo8Ck&Bjx7*g< z{PB5w&N*|==X{p;@_H|yGt@hnn->RFZduJ7tYHqysh_3ZO}&TuIQ9AF#_BtJ7@WRq zACI&C#c@VJxtP}as?P>83R_xzb^2)CZ}+h{ZsnE-Qde$q?swV@*=-iH_fx+;Nn3xa ztol>rQ*UI{7`_=hmVJHW&J;a*VtMbn3AN$V!UdX0RZoXS} zp4fzN%sY&|E7`Uly8a>MI5}K9T;7sPX{(5~^j!!C1643rqU{QN0SEB|h^F6R-JZV0 znDJ9Ld3VUKWaL$ueLe~~bqR85i+w)(YGm2W$}L;%^LxHlx#eG=(dX^+AAF;7%f>+E zmY>__zq+?_%deMLZuymczL#&`^PTromF(LQe~xB7)u$u$sh&QapignVE4O^{!pbej z?DKs3L>z2OgMGe^KK+k=b%9I>A%9DjrIuasvf!|dzIFRVmnWg=$+7cjSsL%h^^Z7%9q zi@1)p2*6X$TAZ)`bJwS1eu?v0^BbL)Pn}=QpZKV&`O`dEzjrZDncBxJzYy)L2m3hl za0FQrrA2qg_{G2H8@F@K8NWl94xNfVov~>zrp6||*8Q=xW6?s)Sd0WCq&S|n*1B$w zAuGWMXg^4%WD5MDKC7=CJj1*>eTuD}{#6jmJh&1cwEiQ;rfEyA(y#S)zv{V1@gMcc zzBYl_>qNzE)F0~_{b5f+o4z6Vr^=)l_+W-)S8j-QZvYN3ZEQ&A;|`YcHwv+h}yZ=6*CZ>5NPItz$gr zp~cP6Q)gW#c_}O~6+O0?x{%t@@p`YYhtuCZo%$O~J34my@8~w`d0V%IY~AMcZ}fTE zQvamq^qllq$9thUr+w);=l8z^P5rs_m4W^niS8SP?kilLQM!&&h~8UAi6<_T4t=$= z*68LA+2LS!$l^^GqdGmR$ zkmtIRdBvlRRK_v)n3k3XXAFUIq%y0(b=asn!I=Uitc^qJNq;Emh-($OZOM@UFSJQ z&zjXAM|0j&CU@tib?3Z)zV{L+=iP}{ca{NGz6iM@+ON^QC!%|=$=^E-$miId|K7d- z=x?#F>rcrO?#_Sj-dFeczF+ZAq3gdy{`=}=Z1NaiWM^!m^S?&^yOZ;7JW5_V1)D0G z?@k;mkMo6;*nIZ~IsXyG$#wrB_r1vZyOh{mcVh$XQ7UN9%5ztHoG+xr=D9z}`Hv`0 zj{6a{$N9SyC&&G6`GaEe^kvTv*?2Y%K69kvviQL8KRR*nPs@3CY#rG>9rliVI>f-7 zT+`T?_^0K(ci;c%djkI0`4Prx#-A+yXu*E}Q=Zp8bj3z3asONB0wr5g#HJAsXwPuYNDzDmkwmExN+@)-jHl z7{2lVom};Q^xv&@CFf7;8i-xjF6P6!KV}`3bLOm}a?hRhQ_i`wcDgQqkiOHSe1!F# z^7|aVzn5<~e1iA!_ZoJ7fU_1(zWL|nx;1C`doA6<9Cy`k-8Twfmv}^TDn4=Y$u+Om zd~$6!`Quy2AD6$2I918lti18wR^IsTC1>--XWkag8*dqJ=di^k1kdMl^0C)ZyyNT} z>ndlw!zTq#o#T^|UsT`fLpNIb@yF49NlyM?=lnJ&x6R47{bO^-yPu2Buag({m!40H zUisFTMX&g=9i98fuBS!MwyY36J6OiSFZiivIJm{ZD$4&}hi_ZhLU>|E_$;1SpB2Ru zK3mqu?xC}MrZ!zlo^+NYA!NNnpUya(aXIto(A7Ws@7DUp%KXpBb+_)1k@;Qe?#{Z| z$G88an|+<-y|YfXZaxR=A@4ipzmpF7yj-{DD@NXT)o+VGYPzMIIqT`<)pe1}{kOW$njhj)k}p1Cj`Q?a_c%G*opqt3_hR$3TSlGD(>|Y$ z3)%Y_V|Cm)aTc&gXPu_}T}Nib){fk8@*2C+Z{7EuUr$-<&v!Gm{vqKZ2QS$9tzFHh zp4(!}q4VjISiPZrO-??x&(6noaHsT!wf6R!pHE+m{*)YTXWgBCJNo1Ea^0H$So!4S z72E6Jj7Rr5cr?~#`-Ggg^RH!3b;u}ZZqFyn`;r)$<6#mcDj^jG&dGRl$P zu`+w+`R3z%Hcsq(I5NA7T5;_vdCK6a5& z`+YlC`1^LQ@cH8M3u5B(K73`0%eVZ2art|Qw_3JNcP#!7`re)sr;POv{Uvi1EL;5V zBo=Sc;04g29dnTnH8wsUvg7mXyNJ)4{o=q3?VbMZNZ03)N z$6ITc%-Y#Lx(@ka$LaTUird^nthr;Yy5sWJciy^+%Ug4sz}(vYzK*%=6tABV8?VQ= zG*92L$ocem{mefwUfW-#?xpYN=%CHp(;?Dclser9L+AFHdo;UtF230Vq!t(=uyRf9C~+RDgS`+`cLRXXMWI!v2py-qdz^4ukV$1&Q((^H&)lp z>>_UO#PA(l?!?l%iZ^%P_t%f#e?qU=ab!oYjEaft@3FpB&9@9}IpxIlWA#Z3a>`kE zr*_ueSzjmK{du`=>4!fve*XzQ;LLMOZuK5`t@C%)&au~GQ9X0 ze?q1^bL7}Zapa1LbpYO22JH_ui@2OuI6T1)Dd+PUe$L>2~I)BO7 zdPn>@Cid``Y~S)n{2j;coqhEIJ8mE7DsJ!C&)xgzt^c&M)-^U}!+#c7|IrxZ1B=aL zuYqt)E%?`?3lggwNuRA*Xe(&h*nRe`cbZ0#fBU|(h?T$6*q@O-7*R45=je$9H+-W?zFLn=W=lnSB z*X)>6N1xifAbq+gHf|p~hyR=6_L38hPPFGn-|{&pZXdJ%zO()dbg}n7HfAQ=;^;X? 
zZa8touJl{?eSf3)yz&|yylPAKfw4Y`xweAr4>C`M=Gy z&;EZRKJV+|8~B}dh3}i)-@n)Y{$0-Z&nITo%=*^E$|d@~*Vj4M|M1GAIPeRur6JAU82|K7=?=$6ZWo?PyZ-{1Zp5Ae--=R1GJ ze}~_{?;NMkjfvB@TpZo=w*C){)8A|F-S7O}+U{@P8zu7p{3T=Xe{783@>Bes#Oxja zPfOqE-i*;*#OLp|{UQ!s$)D7*cN?Ek#~j4?fnwv+|LDJ4Yx-A;*UKjr=oF7}*5$n4 z@a>M%e`=h*b3A@V;OsZ=0}G>j=zX6Oj}QET@%T^s-u>wA_$DQIc4*gp9$K+}` zaTJodk96t{8ov+^^wcd{iZ8lTbs(pcY|cGk((&#~*&9hZ;Jr?aPCdNVe@)j{KS zEc%?e`(F}|?;Lk;As@Ke6JP!#BO%y1meRt1vYcLMm0jrr>%YM5|LTd=3(m2#?#}xDZ;Zb?a=gWkv(Jo) zvvTPN>-cc@{$c5?at zNvX!E`k===oNh$qbGo=SZ-0Yf1RGL}NXYGvcr#WEPBV;NWhD>%B58R_Y143jWSTE2 z*hITY%X^kSmd0^Ut|3?IGyM_g9ec8kIp+EYevut-)OuaU+D!8Nv-{_Lr>UnAxtJWo z(<%N)W|nbkdRzOP22av-?{fbJ-RG?wQIpJjo?Uh0neX_LgW;k6NNJik*pTRtq|%R; zk^V^Yq_3DwTrXIjTpIQym*14`ty!9$8a(YuFK@VtKGT==RU4}&N$U@l@Uqd zf2hjL+(;Jhd6Ka-t}sd`rzZs)SGcS(O<_#Ng=N>i?HO49j;Ck2;hJ=)n)#AU^R4tp z%=LM{7|F9I+$l8$jHx>HOirp%{0jOr5~97P<-XG6%*`(Pd?Lq)9A^yIrl$qdxjxI6 z9@Loj@mrH8rKagpBXTOyS7R*qKiUM{Pa9&?=JI>qtn6UHspb2djN(TpySz2WJl>kO zJw~~gac8*Wf+u`Ef@N_=?YelQWSe1}dfVeIUyl!BTfA{s3jYI?K!3{LU-4*fBeiBV z?d3BseO#_!L0oF-T*DhQzVnUN`_totWgo71^kc>uPrEP186|Id`UUF@U%0-tea_xx zJxgDr4?_xXZ*_SW3_ko_qjgB(@YZe2HMV$-VgY)++jSk@wcYw_b7U;O+El zuQeBGVHe}y!@bSS@BP%P^Nf;)zD7iJc9l_lo#ybgXR;}J5)Cc=ebIi=XOSlM|*}Z&V=}hQ5mFuH?slg=9 zL;5W-r6wI3s&=Oby{>^Z4bWW!^yGz}?ke(@TD;&&51Nc2+XoLpcLnb)-`@y*jex!y z#~6_#&{#uXc*fT|cntp0JcV5D;5uJga1FEuUzcox){c3S%6%U)Pd2T^)8ArftuezC5wf;&&Ic(m2i zzx*icoWT2j1Pv}U`URhW4!0R8VFNnc3LPdemfXT`T6DM)I?OHnV(TX8FbIv*^^DSC z2K^XB8~^Ff2>xpt7}m%LK9ZIJEha#Vvn^WGJ)*^T;H4*B8Ns0$XK8T(_kZCBMIE#_ z^pQ!Sj0cR87j0TR$++vF5z*oy=t^z5=+_SyEofa;xS)d;A1EBsnn7;jCL_b5#TxEC z?MW+_O#BANlBH!OdA~@CH@-RoIntEwkBo37ubqTk^dc7*>wk=()3&A8BFASU&u1Xl zrz78|nXTkoH4e=WAos0&tAWd1*Ee`rGZ%6ic`6w#*;r7NQrZaJj9{%Bk4hurjS z47}3y~Fs%e3DX8tV6;vHX_FZd%@TCfG)30;{~n&9>YAL5?T=<4~40gioxZ}U$c{A1YsGtU?p&SHIM!aq+TGb0%{;6C%wDn4xf7+pM$#EZj5&s;e z-BoFx;CjOsTu&eB3{Uun#WOAb(LLgyqsWIlUA~~_g0uXS!Ts?Y&+?Dw(Mh2YevE}S z|Fn1pm$w^(JNQR!ZKYqgF1i!`xif};#V zLsIz@=*gSXlY<|^ucs4?Q+wVPb~u+7i{aUwV1~~0c$O9ibkL%BU<*7H|K_{7&d{1hRb`zZKh9 zL9%=ZbSOD~)RR%3$aqg6zxVq32Za-gpu3}tu?3tE$9oq;I|=S6K3L7T6QR@WDXFD> zzy}4;SpA}c*0~;Uu$ucmhS!7<9<(vSCeapnZ9TO0Bi8Wn;%`{ARS9h!Ui^jDMK(sL zi=i!HgsVOA!9CE)%yj6=9Unf@iLTxTBYXf|JrzS&2R-n?fU_9ksS6B?u71=(SKjhE z<19uHU8zq;7JUPZ@Qn_-+Ftkti>`{mHx5P+1}lcH+9$_>qm#kWDahGWOT?}1C@A31DKl1Oqr}3#NX8v+->FI~RVlGVe2JcQw4xV-=OYWR%;lI_S z3yy+e+M(@3^#2exKoA*Q4V@+-XASttMPHA2`dXOfDDp^czYPw4$J47k3oLvZY@z!b zphYizNQSl>GL1;4@Bs2Ah4~j7%U^$hHQs?+9y1{gLNKXxER}`s;F1#thOlE z%_ih-KA1)JnPjFH{ViKAAIy@{*goe*u!z>EVa(NLHu~}`4fYS_c=`rKN7-PHNnno% z`YP})*n@T^+1NugdHUf5Q#!u@T3i6_)T1XIT6Abo_L$mMd+@_n*=6Df>3L!2FmzeS zv)M&`OP9a{4Xo{Y>eV*AWuvo2ZwBY9MQ^JT&4;14yP!Ap@uQQWwP7~B)!X#84tkL6 zlm1>v%{jn-Q&>PVPRMF{k!NCPYk_v#>xh?HiRPEP4rgp8Pq{0eas%?XV1*ZXjJ{jZtF#e$Y~f|zRq!zw8G3vVIvp*)1n(|#M=`P^ zpCj+(nP++;+n`~|=V?*-Y|*0^dQ3((p1D2GY^7b%>UYH(yfYj5+%Mf5d;&cC3uxd< z4>|xklZ>88EVyy95fP6dpSPY*J|AZNkH8nQDUW*kl}F|Cpbq)`3uLO~vt)CUWb>q` zY#xbh#;$l}3-4Twzov;kO+yZ^Mkl4vp91j*d@&k%{A2M3wU;`b-^XGT+z*dDzR)PS z2;N9yEQRQ4$qQ_q+AFc+Ps0nDMpF0;Sgitn@bIpI_!lIvUl?MPisnywvdTpZTK8u9 z?&ZA+*roHKjp5ja|K@Q8&ww{>0>gd`{WPG5MCal;uTAGgMo$aZE`ZL*(N^W+yJB$d zxK{N+bbbmym$0qmY7gkUB)vy)h>;XrnGT&BJ;1cNAL*X6bnY!b#k?NFrWpF@S-IMo z&c{6vrSk`EIzI(AYB%~>a&-u}?AG(p`8e#sNv<9pxI=5!1mAZhPeXr=)= zjnYhCi)JG3{+2v#fKDyB37xJ-28$NkZ8{YVNtQY?QZ(sBuGTZBooVQ8=34q)vQ>SP zZQHP-f2r1I1h(TEFiSGD@QY`!FvE;-3CE+6qao^+75KZLfpq8tn^;i^-6FqNu&AWjAFIW!D4BJ$gchBc0GCwh*>td=~at*3eK+t9dtx5BH= zv(hInWa7okkcr^n6viqaR?Y9*=LoOLZfN`^*aeKb7rB^?ZRJGx4ryf$kJ|k8Q}}UVmQnJ=Q2fpAs8wSSf62%8Zyw{a zTzK*@*kY6072b)T;jj@s*Y9;6SJ>gNcRU{ST2lFw$b(p1`$7z^t+H|Li$?$85L^EK 
z=c*gdWV$je{#vjip;Yqy$o=26_-m-)ZM(kDxQY4^584zB-lG!@>cg$-8);&h`5?uz^F9N1)MD z;2HVDV==+iF*G?DniM9;gC-qJa0Cp|ZVa$!@~#!`(ps=b-u<^mX_EM*MU!r$P54QC zKgs3p@O3%3*TK3?iIOen^ACR&eJ~rnFbn-~EqY=mcy|VPcX|N*(cvGqWIS@zk?|Jp z#Xgf4O5WaWk~Nj&^RsKidF*NEgU=A(<_H zIB=u!$Yb~|n$Z_Qcw=>p@51rHo#l_HFHU;k2=wpRhtSbvT-ajyvUOgUQqSO#!Op=yWz(W{1QUmhTywlv=ds9WXbS&{I8PXKI*a9gdt@3 ze;D<|QV!ERsjX?SIiyDxs~erMhXv$P;rgrP-i z!fTJoW|gf-A7`h#gPXxyuEkMXaV7lXTAYQgIMCu3@!EgDFOL}R;Oz7a_$9-V;bZM{ z{l1NB4!_iUeE32<<-bCPdtxwjd}=uE?s@uFeHK&z1{tLd-&FjKK3n@8rG@1y@mL$BhcXPM7w;BV>qBLalCCKJ)wN^k|im5({oEV#$ z5p?0#l3bW))0)Sfk~{@k`w>{=9q8;`bhqg21kb#STz-+#?&@R7<>~3^!3^}{0(||7 zCA*-r1&hYEsy~v;A44~*ptBlm!Rhe*B`5#SqYF|=bl^Ncu)&I(ryF0DyTK`yVb=xibRC6ddxx{NmY@d}x*BG2Fs zx!mrF2b252prRDe*@~MN==x1xK zQ=!j?(ajC4ix)qJd}#GoA)kfW(~U?2{W7%GBgmdAf=asC*`h!ziY1uIhV6{CN?6LgjCi^mJ@XEj zT5Go#zB$1>R^v+)pXPyk>WI<2BOFL++S#UtIy_6;B_Ar+0#@=A{$wK_}c}xRP^;v!5nT_#@9;@RiZ)UF7JG;7|E% z+Tk@}r>D{qgICi31-89>SUifoT!+q8oc(>nxGdA<2~TC6CX?&&6j&=0pXYYuSbzGs z5P!i@u5G{n{wP1$*qhj5o9x?#Mjyr(wQ=jg-122d;K!rb)5<|v80%Y%v8jh*Q_GKi zrDss^tqz;oRXz{fp#mLtgtkO?T5pFA^Bq66=-QRkQ~^f6ae3|r*&~FAby%qXB2_I&V zQ*s#k{UP*w6ko7tw~$<-b;KqM?7WZ+{K(PxB7S{nSH36FE_U!McR{-^x{_9rHG{`@CT!zsB>TUHM%^yYj;? z1@nt`<);ztZcd8}UX9Ln6-48Z7LFE`7#8g+7BnASP|vkn zZC&88X&2ki(gg;zTSV+yaYKi8MYHwz#NURWWBuZt{Xz1JzXk0oUMYXj$N07C@E1C? zYoN2+mR^q@xek5uRrJbi^vf);{Iy{DnE~{Uh2_VckLl(#^bVboTMMBq3;0lq#7GQ`d|z+u$Q^)tP_6y z+!dyDf)9N#8(omSd~j(Qd?UG{EWcsC*fO1&*YKXAQ`lX!bC)3x%S_vnRF z*s{WpC%_rvrEF|y#WRdRpTtkX)O*3X!fctu z{)*wJvFS#zH+0}ye6NMqqISXMt&^BT>4LY>1d$M`|#83`Tyw8qv0 z4y`DDB;GjjOTUTiwBnadmR?AmuT9;N>N&$3T<074b9&y zISNgSo?nHYo1m>lGcWF%p5cx6p=Ifg50K@Ke#y6N=ef2mF8Uo{>6Z_tV~-Lqorf&_ z2wG^VkLnllsjOVBZLEc8*`Z0tzpt@OElTR3>1dvm=(=~5u95w3v;K~b5sfv>iqfx_ zwUZ2So>xvv6TeMEZ;kMgvzj=dW+e10TSxfqIBO++u+G&hsNY`}wt|L3oY#SCkK;EH z{b~%-2eLc%VQ<`pKB-{Nu!_jZU(-W~N$ytSdg}3fqky!fE)ZW1d?vLh}K4sJI53xaooqp;D%eWHH(Fcl; zegG|=LFRg5;sc%Ox8JHL{W@{dcZs!5a>sShuP{myv?n?_j9g#$vGP+AYg_N=i(cr5 ze&~;$7=XSQh}|*BoRr1fU_Z!DA^&@DnlWc&>*W2bmw8GhkMmRGEE&9-etVa54a~mU zYab)eSEt&?*e$Cw?Bl*1_cBV>^s~=6Gs*TkflU#1(OwsSWt~DIHIQIW8f#YNFiCQ%Op7aIi7wy%g{mbCK^tkIiFE;;HYTPNFWN_EA4f-Gh1)b#Llt zsrys^g!)42pCOY*`_1rNBQlcb!iCg47hX!ubKxL0&xK2=c`p16HP3~gGa~=t@kfdi z{cS<|5xyQ8VNSxgQ!_{5`>2_-@I%zhVfb-seaucZ6iE_=xzxb*Sdz<@e`pkGyP;DWyXwzlK`KP^(e z)IsV8sqdp+O}&u%i5_Odv(%e8FQtB(`XTBv>bt35>unx*p5vczTtWQ|^$zOCsjI11 zQ9szMi>33B?DQf{JLN$E#9O}ywu!&~wo zJa;!ZM6x%;KBY}Ye7KOk!wu6}WAY%fq2G0^lW3`sTC_2f+FAQcICj=Qo#R*w$4OPuAo>=>u5z(AgtxF44-B~GM?AGOhZ>luK5!(awNO_K5vfkM&kH2@cGdVUorLD9%OgwYwdG}F>f1~ zj}?>|l+lzl$_eH~ZIWYsYAEe#UT>vbL%EDHfYO%WZ`1Ejg1hlwU)Ps{y?xyPNRvcf7@m5zRYdzoxptBZBJ(Z<~eigjSsvuJ0bAKJ&A!A4R=YQWYajm zIsNin^OA{vbL`c*W-kZ%c17N)b=o zM6LL{ctd*jG+0qQ@*+A&Yh=8aK0be3W!cEfE6ct|e>J{4s5R%asWtE8s5SR)YR&&) zjfFL+9%V46E_2@$x4Ave!yKPGh9@S|4~qErCh99G;^ThQr?H*I&zkG{>csKV10yJF zd58F@Gfg1Z%w6z>vyLwsu97RECDj^-XzPBraar9m!?f1j3qL&K3e>*o4*X`2-s39S z=rS&!%RN=8M#!d($O{$jz(M=>AMktQj`qtkSo=3VZtrNTF3V-UIKv%y?M@H0;VS9- zar@=3@w?MbU6$K?<%Z~QjoZ1t&InZ*#S^V}^n-Wo_A}`3f)sz-6v{YCU&`;3p=EL% zpMutH zeAtI`e44qOkKo?O0DqfemTE_B=1|mz_A-p49bJ#Tb_M4nx$e3kW4!Q3+13X)2Dg59 zW8)ZP0J?nn&Ee2URy_0c)m*bT$L{a?1AWl>HD1Gf z37uFw-)&a(Lw7Le6&Z1+Xk&l2u%JIui9UJ#mIO1%I1b#9XcnRa))5m>Twnx6`8lG; zP!FR7pM7mRSgQqmwapbDR!&41TWkqfc{=YMXZOQx_hBErfb-g{@*=10_t!0pGxzas zy|->zf~n{0{E4Rga~fYtod0*l%z@fp%zOMU@Ax2mcvawfJmboviEUQ6QkW_8% zncy~S2YF1$5?u1TVyDxK@dIo!8P9tkIl(n+P?2XdD z?tih^D5=JtIENQg;X$tB^C?+rcypSd8S-v&ZsvZ6me$75(o~yf6w8`s(M)nM(+kaj zQc=$b zQgNV7tZ{(x&tnayv$n@QDdk(J)0mq*lGAa;6Lsw*Yo_)q3}d|RO%@MvkLG@7*2*%C z=SN^BH}RxvwAh_vg6**jEEB 
z`$~4zNbIVMW9%x~Q&X|6EPH2oocV{^RM!1Hv8S*Z*4Va!;y#N16k=P2r<0q6O;!I~ zqA6Qi`)}j1tFobshD7rBHzb&cFEt|ik3HE|$2`}VpJiJ)c2yHNL3R~%b}BSEYF9N~ zgU_3_aqKF|uBM@~rLgxiWlK>Hlr2TwSGE*&Pk*G3>U4jkx9UXMR?sZ=&aOBEA7@;- za`hh>Vmx2xi4T_#0V{z;D!v%SO2#GLocX^`98a>5WrH;h$LAp1$+p36*0F7a-K1mN z2D?$mwheZJj%^$4>pHe=u&?RZp4Yh?t4~4XnQSoi>pz&c?92V;#YyB~Vgp@_JZ-`T zy0`~5Hqgb`P)*n~7h^*;VM|?%4b_BAw#?&?%m)vAYhR#7uY zO%GEuXHD2pQ@B45%rxye?w=)F2Hzw1hVP)}-f$5$_sVX&jC+SY><@eqT{!a6JhK_w zXB77|7gBRi^HL+?;+|er{y>jSxq(#j6Hd%EB5xe>2VUKk8`zJoEBl>4@as2n13$;c z3je1+&~!XE(Drd&z|HejV3?P_JlWhz{a@Ht_0&z&P1hTd>N^c{$848bnTma7c+5Rn z*e17QKV0E8Et_aQ=LYJ&z z_BJhBs)F-}Ip0BDL|si?NWGhSF7+U6rcKoKoIg%|j5UnIdpJRXh3jO&Sw$$~!6I&_;TdM9&qpiS_t)H}^4q-$6l;YS@ z%1i37rN}i9A53pXR$)&#w$xp$sc32%wPZGbhEaM^-T<$cQNk46vl82>Cwhe3Nw8!T|H+P$uSqiT&2JlKbqwBam>jjqJmlkB z^R_xJuw!3*;El}*0nwLmljQEdq#2jrCwbBXc``cJTtffUm%vrI<}8l2rn8ZevMHQ( zP2|{GS9raWF<3mD=5PBU$JUy1e3WBrJvpu(8`bMC#bI}(ddwYJpT3T}F6(+7YpzH& z_u1jyv$?uqeK}!nU#MywLs*SK!q% z?!c}=nhRISz1UBuUEsb9{0hB{(EfUN;J5Z~(zpAut>hd30NY8wzm)F|99j}>_daYY z-Sd+wetXOay@37n`m*S6{peRNw$e~a2IU|&)2}FvluYtis-So2i5`?!p^slvnkf3s zp=a4o4!uYH1x7FDT(*y{DYwlJ&n03zy>fxS?Y}9%r(_O@@|1MS&naz`%)!PfwfTzy z*djdFPAywXZK|!yDQZtP(`ee%_1J4UoDb)EtSz+${43lz105}F6I2b}5k6Y0V=$|Z z6+;_=EhW2Z4cJ7sR0_VSRBWjLw$%4{rWw6{5&gL%*Kgj1-E;%=;0NF5-FVh+>V=+{ zJvD4S{z;Bsp3i>GLD=)GUqvr;Nk4Q6>sRZIGb^*vAEV@ROE4d%?+0cinkSI^g~WQM zQ55qTL6IC-hyA4dKgAXT;HXWu{d9sjfb6F`c;{@pKRu{r!(C+eW4QVOZkH`3I*RRw z>?x-ovZvHu-JnGCE#^_<^@~ zY^fU7Cf1gceUnZbY4Uf02lvW;g3qsIz4xRVkv*(8xize_?-MrEdd4feXe8?!YeS7u zI};33@GU-lTd9fI{b$-sjNxMX zoI-oDm%QNc&bHDX-s#v%P1r{v`u!n&i`qyxW9!_6&2uBR&kfi>bFq=WPVT|i0^yN! z$UlhMO09SNfBfd+9yj-TcqR_}DBhF}^>&>RnFP*0+A|UVUNjDsGToRnu9LkaepWs} zhpjY}V<-NU$uZb|jg9G*D=ZsnEm&Ub*w~jC6ZTePPhw6nwvyFmI^)J>Iy^K%F<#4N zGO(HQ^}fVtp61b>*I2$s$;eaa{DwN{e+c)|cXD`7t-l%{Dc3SxapA?xe-p=gUU|`1 zK~MR%Y?`2BTQ*&(V`P*1kfUQ{fsU`xaqsA{_C4qKj~;(n#}`JAl^55GwQFGQ8`T%+ zv~i^DF6!a_h`xoP{IUM3Gi6^n!@C_~4tM%-wMkHI$-b}4$@21wX*gIbc<_3mt$O~lazL$+i!^M8H zX|54zN(PVJL7hhZkP&%ja9-dfx~56CN|g~AFxPJ;(}wJpWZvI|eU!}mWqs2BdgqyM&!W%$iJLE^=J~fvjmX;@{E-*nx3}-3j-$Sr8lTh~H@NX% z;d_4u{s!Avw$3o>yE)IMo=bfo-h62_`S3sUM`|}Gnhj5JZ3@2ysq?Atpq@c}J@xEl zv&u*@8@6#gpKCv%zK!}J>Lt{L)Y)lfr9a(lsNuMnYrmv^ka`vMBh>S#w_IR0?Dt1@ zZ|-R}yu|s_TwhA9IM(gdKc}8W{VVFqkfsXfx3#i znff{Elhn^rzfZl1`V9D}0R6fe`MwUlT87QF1wFeJJ8cI#w;DTbH#XQlqs_7pTj6DF zt^EfKD{iG&m10(kRmrweylO9TthcBY&*~hvS`Ri5A8#_e;Zb}eMDd#%Vpj#v`OPIi z%Qf$)$}_)_>5r6na?O<~d8X)f73U63eu-m;?%sv&L|47ICfYj4+8^RrJ}UW|DvFCI zR1~kIJTRedv&Y;uOxN6@ib2H_Yo>V3U94qA@fv<#JE3lCidk`6bS`$WZWTsiXir>* z>C8*rvYzIyWf!1>dYQ3vw5zp`Sy$25>^xVRpX%$3t-Wrnns{JI@x&KW2bpgSy3jl@ zXD~UT#n&Cm&NR0o8RKsq>NgwUtAf6{ruel)=Rbp& zQ~VZRT0H(6Ba#K*S-cB>e*xTi^aAcn=AK`&#%a|1jY!Yy{brw`w1X^p13g$dgyU^| z&*W0Sxlh-B0{;`g9&=%?xgFb7F`@khZc8Tb%7R}+Ls_&3PgEa>GqJbKJzL=sXst#( zB3hfm@1kAth-mz^B(t(2*?jpi_(L?y{EEgcz7Q|m$@wDqCm1+dIBsWMx_K(#o>YlPb$jextc*~0SaIfMZx$vysr}xN)$fhLntkVZw*EO}R`}cL`z2`mVz8K!i z#%_Cgtk>KJom3lqE6d@*TZ{;CuN>uA_vfCsmKf&te%O4=+-BXf=v>{$?-{(insrm$ zD|xhG#wQtV89pPDK)#CMa)oaEI5Fom_;kk%!>k%a+$}>f5Bku{o(o|O=0d!uxp?wd z?Q?e0=gLQnz`kl%U_bUo2k)j9Pt^Rq+1B1yvPLniRp2$rn;gb0Jgj+e=0!293ix`Z zk&)9lhCQu4ygA_LUcwNQz%Hw|uGx6;e?4f$sYVHJfJbCQO||i`Y^dqr7saNO8$1pC zqL{-v`ERkUMq*PW+i|K{Jfr#Eg`DvtXB5X7%)64&jRWt>H81k{&0&jj&D&@C&3nF< zYyR6m`OO~?zuM&`K6L}}DSV3iZ}FI-jo&Cfwdxx4!2Edg&4L7TE#o+NTcSD5!r6SI z#+4i_q^zTKj!T80N6D{kd^2Sfd(oPREh&DH{DR-?=_6j7L?5X45f80f<}!Erqw85a zn;31c;)(mc(f7SXJn9&8r*R}J9>xD79Jks{3kU3&R5CWyCB&re#D;nR8*112Y^XS6 zykk2phWCOL$H!pJ4|Lv@4F%}#H4j^H1n)}?Ay<>^n2Fu@-NT}NpN9>l^?j1qeN9%J 
zDS9zj1C8g!VpoZBH?5ef{hVwh-EZMqaHw$YlZoE(nzwltU989JpJLO_my;!FINfp2U8~1i;Q&lC@zWs94raE#dc>a3w6dhY?4EouQlPJHh!1G?-Q z#`jQyDVw=gvJC7~3HGTJ_HlH;2FX3_bNF5Q+On@02RW0cX3hkcaZUDBGBKhijxGBN z{S#|neMvIwJoc4kQ`z>_c->>$SC-vn+gBrX9s7#!dX>u#ldUG3=wfI=v7hTNHh%gl z^WK0SX;lAYJB_hzu91p$aX#F(xrW-dR;FTITt|0WHds&apU%NZveDH3Nbu#m(~)8C=jlCmYREnhb-F*jAYqfU@Ob^s-fncvbpln>9U{l z^?Z<8&p$=2=bxjdFT!j2%%QMYK7ACP%h&Vsz+1!pX5-!9x4yZ7i_-G~!@#m-#qrkT(j|ZBhq*QzpbL?p2jU4%M6dJG<{ zE#O*pabj(6>JRXZRnJeXP1H4iVr?(#_vay#vl44Px@IKS;)B66&wfc z*H#hf8w;q@pDdlakcSQdvSViTlIW? z+vK#x@$L3;7RL*t$Fe`0ugBgqGR`z_qb{cYna+EhXn%Mw!XA)f_ZTBL<`$jKmBEM*JG0`o26+# zJP96<%@>=WsMwlpKjkpW7Ib23vi+2+=u3&pmO8#Qxr0d}1+$!}H97 z%*mE*k{k4!ldE#gnNQ_m2OE(&nc(sm{c(DST5HJ~)wNz~#;#8t>)v^d>R99Rty3Lq`+ejka;^5=>Hf%jIh1|v&r6PL zu1AASB=05rD!duv7slF?@c1m-p0wl^a?O%k$c}ND_*R&67d{woN>p|!p0NX2QgIQs zBJ1-0*Zt`Jb6bayb1OGZ{PbF4`+1u3ELN zo&O9;=KJ!ARBX$e;jsq3!E=2u&&{T(?M2hT6*)=2R zLoENvE*wgq93IrUVo%PqdPaUz=iF&$IMyCn zQ|VdRm%`M#9(yf^^H1QNGpq~tX61wKP$j;eME0)lo!~V^3p=qlE3r2#<@31(U5?G) z00uo8gN+Vid-`$>)7{?{xS)s8Cc83oxyQ`x&0aa~f0w;k;ZAC1e^E{VqF6pvF}PBA&f=sL&c_F!wBU`(4_>6ZPdSeop|-t?mv z{kZ=B`OVq9+v&qnjt|;))%F=td+dPy{yN@Yk4#hyE)5$p3wyc6YTFlHZ`+WmF?o%* zb`gi0PaICR%ruH@oI;BDeY!)^pq z%+Xjz*fd=eV;@98U6YCVdQjN(?T*7&Y(nTt3UD zYytDjrc^t!D_>^JPW&wkn=)+On-ZRZOmOT-%U@>4;2fJ^1oYg+j{1H{GV3S1@nXj61ST}&f)F&+Cd13R+^_T~lHoqUgCX)khc*;|HvawNxy98M?S>`0BF@lr52 z-@kBpxp9mUad^3LB*%8ly>U3lto;(~tpIkGWit-t8g^D1cH&d|_*eLTM11f{_Rcns;k)yyNBScZR1cTkNIlda`Lb$o*l^VY{gE8iJ>e^A*=OWC z&N-&M)=B=z*4Fmu{*8VHJ~@1^yj%H`JB>9PN7;MRM!42-9NnAtMUG#G-%Wf5HzMOq z<)AXIn{-UfR>!yK7~5ONH|rSNTgU&bV{C67U!!B_;C#i!|G3OS1J_vKrm z^i%%FRDAfFt1I~){Y{i7C`&1CQQkH9j}vz)@-DR_{_z<5b~b^9wck>)-V}IYG~bKM z^7z~8=NfG}t12d3v8r;ym8-T-7{y%8M2Gvy)2Y9nT$>aA&@Jf|6aSy5Vq!5lC-t+8 zw*TPXV($GO_b%Z2|K<9vTwBPs1zfA)TJ6Bx!0w#f&~c+;;(K%0-<9ESJ661X!m**n z6Hg8TD}p~7UhxOo|1&qx)W_fUZd%2}u@n8FVZC!hui`&=4gbOG_z&J_c8A`4-xGQZ zz4_77X#vqmWR|~8ag)li#S^y|ub)s={JjY^{^E(dFG~vTon1WfE%xT`#_#*aF!J=! zyL-{KuYkWQml=`T)zMhf{xb6ah&3G|*0g(bJm2bOe6He&FID(LFF!*5*|Oq^`*!y= z$wh2CP>miW4z-gwR5SZlcM)IN%(~BDp4MOGH}6dJo0^0DU{;O$%oM-rjQPV(qX`AW8W%!6Qu*XrG&*T4>Y$g@0%pZYb%{+cjDMT%Jo{?u3w zF5&w8#1J16H=QvlPIQns(Q8wBnX%(Ls5sGUpB#(Ev74On2l3(UDE5UaSEq$EzKS`S zW&z{dgFF-u6uUEWiW9v#6-N`t&w~CM8B-=0?5@L!g<9pqUZSjOOwtXl}~m{?J_LZ51^1@672# z-17+ctmmGkT+=)+aYsK2+Z`#fr?qm+N?mHfHhhAOh32FUbV*d6qe|w;V z-#{~(zc-eULUhTC*i0X!`rF#m zqSu6zwH8_f;Z}VoLV4oO`sli@3HOUv@z+@SUWzw}KZS#(^L4-T{8b!(qMtuMmxjL# zp50B}q3C!w__u?WS&PT5cuQ*eK<;@1Ojk?1O9wUAtjQxCyH|IIVES*!fHMlYGU*FxR>r3=a6lAsVE=tOh z+rqfz-~+`=*4ptB<)VeqmC|+V`Hq9|uJ-sm%rlCei1$|Ue*TZh`rd;V`)~wh8YM(= zu<8kW4^9YOW#y0|ziW~IKJdq2zD;GUte6nz19yQLzC@1Ikoby;dF%tZ5uBID^{0FL zLtg&FVFYCwB}8%PZpVYHRT0zf-1ZJ)DYZPWaruOUp*<@X4Lsac>_a(Rk0pEAPZgc(Ma=bL<~f_W%V(ZP z*z;`ZL~^!LzyQLE0ek<8@L>6M#!1DZ(wJMz&&%3*bD2|$aPu_QXPv#*Wi)tPd0N6+ zEzr1&FhX=jvDk(mbX#K|hTPkU98~CP?`O$u)sYCpx)~BwOnAO(k z`Ur<_1HUieo}I*ev{nul|1X};=DFC|hjPXa#O$vrNQ<}jrOB_oi0^Hk-CvV(PMpKS zr$eIOXe-p5+k0vPjHgI>ZOm7Q`AX$|UY{qp8=m@r_Y14){C6BH&e67X4p{SgFz0n( z&#!_(XA|R?Mef+OVAGjk)ER*jSMfc{$ z^3syQE7o^`N4m^4+@l<|qdmRm;YkMH(l&Z2FK-w6YZv-!S8TkbXTDiKop{M4aMJbo z&Tj{^vM%za3k$dWANJ*NO}2Q$_4u%$=LU4Ot|hU?iZ3hv(Ev79e53(vtoTX8aP*Su zp~A}4=uqXJHGqv3YiR%*UrwC~?kq>X{E|L4(dJb8E;~qZj;S^;D&K6XZR0CGGS#*V zl$SOYyFhqWageFl2Rc?h+Em+4P%hh4>;zr!&9UB>!uLXjb2TqXU|fB3%E{|-e7=^C z7eBB3z}i#ciR;|$4_&x4 zH#84S;YWYgFErZn!4*@%6<=jeF6R0$u-wQ?b3?@(ufiw!5c$yG}+`^_251G(j<#=W7hpU>1q$6O6IQ>>{vk$Ns|+vApRv0)*8EM1>xM4a(! 
ztewZNahyZvWgfE@th=ks9ohv(s<^>xYK*&>i(SIH>r%|^VBH=4e1Tor;M`srW>v2q z0qrNLYrVj1ycz$-(>;k*vG4F{`ZT3?;FTGD@M-lm8y9k|f>_f$>K#Vpz?}YpL$?eF zXw2*{VL#F!_HSN@uX8Yd@x=0dahaz6e@(H0{A2CUi-$JxUEmJ?=c>>4e;%Do?2I-- z__f5hn>`uh4=4LWXTF&m8gPd{H0ZwE&_6>Xn&WxUSw3{6SlkG%X}+_@C@J7X`#E@|7QDlZAFN)bi+xh*m?KgsR zPV*l_dXI31@!Q0X`YidxX^EIhj#A7=kEA% zm5(!kl1`B?DUC7)8q>IkQC3m*QSPD0e=Dr2JqzEZ)Kc!HIDTEHEwv&4t#f|1UF9Je zy~LNVQ2vGD?DO8wKJWV|j(_(`+7MmJzpHlC=9egAC?hEH@9KK&wM#hvgnY)n*b_sy zC%~?iw?t{T27KE=zpNGC5X#}dxynTwqKP-Sch~~M9GYabeZgn64ew#JeGwcx3IFb- z-bUouHRSH{%>Db?FVlEySeGPl@4a6&%-eY8n>@3K`xlSEziYR-(emvkmFwASxpzDH zbNjsTek!~V7FIrySO7q8R#W-Ua;BVXM6q3j@n%DIDyjJ?#hiObmSb zEC&7sZEm8E;ITO)X-62?$|2*qU-TyVu(?MVxD31>oA*>k8^Xc6octf=(3*4T zQ*$1h|D%3q6N~7)k3u+3`_7AE@_%A6aqK>GjjxGzlsoJAY}bR0g@c8M<+pWk@hOg@ zxcKW})vtkB=YU4jkeQyM+WcWKS7pWKU&jgiafOVwgVwll!0%`B@zw>&OltYjESp4j*f9qxPkHp^d;e`G1qci=i?3Twh1_ zm^PMwL&r9je^bXcmj8{8Z7lzmj%}aoOFFiFuCMCY#`Zfo78acb_B}@Ya-8iG-p#kM z7x%?yIxr*0$nsiy)O)Z;Jqf!iJXDyP?}(w#zodGgur)PU_A1q2>Mv6d#|Mz(>&c!q zzEdu|3qF0H`O|xZdDFqXVdm#bPYV916#Tj=;M^4ax+(Z|Q}F9Xzt8ar%uC)=#2FH?kk6&5us3zmz(YdZf`7@u+9712H3FXL-EI0L`$Y%ua1_7L2_9`IY(w|ysi@gA`7x7o+NjCd(JKs30J=b%5X6Nx3 zvKb6-=wPRDRT%8Kp3}#4##d3C8mgFIJn;Sa;R?O@Df9Xg zG`F*hd3|L@LMV1l8xzm^hD9&J*#7$T@m=-rv){#Tq#4P%|3ww0^-E2Ugax9s4@lc zVk;FZL1;k_P${%(@F@v{uoDjR`1pSRefAEAm;esF z&wak{=6UwBcJ|rBJFT_;YrX4T?}ZE1zyCFj@ju<0ztwr~0M;G@Y{I`67{M}wF_O+0 zaen^;*f6r~bMQ4Q9>X2?`d@x#cRXkuC?3Q8-yIwBACG5@xqOntx?;s+h+lH>)%tly zOYRZx2+!zwy{iR>p6$w^`3YT*yw;KbAL6=n(nr`gPvgfuNbXH9aq#kENO$zcuNYkM zY@x0v)#1l21uwWU@WQjT`EjM+W}?@$_E)~#rC^cCU?s)8MB>|#VB1Jvu8VJHIKJE+ z$ZX}VwEcuU-|2t(^M^to1)KgX**w1}0^2HnA;@8m74j?X9q7y7%vj%@%@{!ry7<<` z$9k9E5s8JBqiAnBa+5xF^D^mKts{0Zxm#xCX!;B7sXEDztC)ECafKP{!nmzz=V$r- za=uR!zp3i0dSr0F4?Ld)o__-yQ{zD8malUX=PJMAZ#4H12aTt#3yX2ln*A8$bC!+M(!oWtahl+B zE(H@P{#`z2#lOqvEMIde-yY1jD`}@hzL^UC-Pz5-%zk`a9Qt0(xOe$elklf%pNqQu z&wA%zVifFqY1e0I6GQdH&#d`z;a$5;9Ly`f@6(ZMg~4XRO7i=*Oo|TlrEV?3BC4|+ z-=0Vv#An-S*SM^(@3)WY?)ZMy--9S?8_If&vexwd8uZKg3HN|C?*?<;1@^oX4B8)D zdk46-AK0`n7_|?$_I7Y>c>kdbljYM*;(9W_r+{5k!LDgMSC8kf;XU|1A_RoaOCtTLSI zEsRiqa{0PASU%mcj!*X({#o0n3npeiY&*}E#)FNEg<)@T{JHpeYvR~e)j0Oq!dBHd z_StG2JEN0u?6s98$N=??jXxb6`yTz|;#eEgIylzGx(<%D@vVbnZA|OnSR3m)IChW5 zEEw|`SotvdosZ@)P8=+|6ukIsBV)ivgN*aHG%oIMWPoX#GrvntuLCh)xD4a=f-E?;I2?D;85`*s{@^a6_?W*h+FCdm+Cumj3NHSkDEQ$vQSk7sP?c+> zVj957Dx2%?ZArVH^~SS~Gdv%wdD)jZ{?0LhV;sNL#J+d1&x4D7k7k(GNqnxy+vQrv z@VV;xv&=zzedKpkJOi-~27adl)Y}jqeh?}=+!rdG3qCf4hrz{$@GS8U{e+?M>niTy zzLB~44?fa8zb)4~q076S_2=j8$;}57o}Zax2LI43H~*PCyXTjIi+6*Ice7?|8^4WB zHG@MF@ewC>&;L35m5!YhQGKsexH8Gf3Wf2z{VfkuUvp!8wM5_6~@T>!N|ME zgO8si7t(XYl!KACzJPvtiR&xy$*w_uZUAq;&b;A!#`&Y@(v2S)=bvMIq;EI#>w}5Y z_StJ=&-Z%JpYOJt-CkS5?Pa&s_1*I~&keVc+UC&={Ixaev;PY95l*hFPHt|Z+Uga? 
zNO8fM^|P^ZjXJt`S#?!CUA!DGyiESrd!!>fnZW};^apbs9agg)bUlx`i6~^Ha-ay$ zXbt7{96LIKwYqtN;!)i<1RL&3^%j17EZ+FuJ;-gBXX6>?HNQK(@xuN+7_X82@OI;u z``^s9R_+*f`b7FzbCg@J(B_*NyHjpl+P|)Lf0J?j@{Pg%+iDq8!oV(%Ykb+5`oq*} zxl>ymuBKeBKU-s5ZBzDsZ0Em#S0%qXI(T^te$Tz={(WhT{RWKvbjE&T-A^n5xY@?k z;DZmj=YMn0e(u@LJ=kmZJ+r~rkLx~gb9nq1!Oh(BCiiS}?m3L_Rv2IpzF7C3zw@rQ zxNj@>{n@$i?D-4do3iUttX=Npj{G_0j_j`-{N&xy!IAu( z*M*f`e#>$4pt^jQMl|GT-aIm7HU=S+6)s)zyRYRh7QU|OFIJpHKKfSqDl5?uk(gTX5s^OQAUH+y=O@so!b!r^9@e3z z5x=Ydg|C}}le%oR^ZT4P!B4&Z!=FD1|FQC5Dj#Ma4((G}F8_HqVvFhH_ulg6@1ehB zr*FAGI-mXt9-_YPd-RT{!L9By8W-=;&wrtx^_=ka7Ub~WHkaaSxBUC#Vy#`gPxX@@ zxy&j5bpiO#aPYO}N20MI<{?*nj2E};@}ml0s}7O)`p@ZxeXneNyFN}E zMB?db4xVNmZ@zG?`d?U@ed#;fbI|-BPYkeRlRG9X#>AC;+5zyjVmdDMXRW`V zJ7V+FH+A{9gAPs=rcNQ&SD3XAVo>e%e62EU zqs*=kS+Nxce0F}qzk`PdfQ#=1AKwE`z8idf7x?;4@N<7~^c~>qe(diV_8VWA>}6kw zDDI2q{urK%<@q@9bi9>=O?xnwSaq;%CHe2NnV+#^B=GqXTRY3f!{FMP)R%=Ivy#u{ z&Q@H*^-<<;En(n9<{^c*!Q;#CG2S^q`=(+;+@*D+!sPgv6-TxexvJRPd7g>?R19q^ z_;#Mh{O8S{XIX1(lxWPzulqK7@wmNztGA>VHp)?O;iv2?eiXcTMwB-HTojwW0=gE= zd@XwnucxeM`QG&@*9SIG*7S+Kx#=nCfmbMVSI!%zG{nw{s{Gli-xbbT^?c!;RWB4) zta!2TC}ooTOk|$wEc)nk?2AmwyWwHedUdqldg%$)5XJbd;a=ifiDi7KmEZcrFhd_r zH}g*?nW6W_5{pkf(EH=P%nyMH;Pt{`9AS%Xsfj-;TBi4#%zofO`);S!) zuN8(ctasL4?2)s?;l?|`&?(Y`Pt*m;TWzEpiGZ)tA{Uv-y{n3xPVXIa7k{RDGU-D>7`I3+BFJH11 zzEG~QZRFc8XHA9p{rEp)J6V+BS&n=TwZT^A4;6zizW)tl^Z2O?1LpAC&p1Bk_Y3$0 zC1=!bA4MttFf3;d5uezCvWc%^q3Zu#cadl9evhSRBya3`IXbY#$qOBQ+^g?w`6Qf! zTsgQlp__H>QNDcy|J*cWj{NS^kT>^pznk|2%WEtv9!B=8V)GPxFMDJV@~{$l z)93u9Ih2VZdq?^~^Cefio;@nqed)u<{9o>zT4WAom3yQkf1aiOT5}M`q4n(3 zC5JeVm%k&zwpX3sLjGtzPx7)i{3719*RR8W7vs9Z>(_@<@9#Em^6kix-pG?)$d#VR zmmeXAdRWLCCvURkj+g5`evjh5Xzq_e?!<=Wj<1HyK~_}D9OTM$`1m>aH^q@TnnT9- z^xLhHJIJg%sbd;EFN`JmqTJq+E$xsk%I$r#XC|^Gp<1@gU?0h2_yg~jTtR-cAm%|B z^j7HiQj!84*iUv1?RO@*L0}#7WH0ajJo&o7ddeZ$ayA*+LU|-xs+hOY_&T#9A=7$sgx{I~Ukrv9o_r!KLk>!wSvtX{gI^UpM4#^ld zpRQz#%CUcNINzY^I|KQmZz%??2-)%sawU(oi=Xig=7i37`U&zSD$MIU$%mu7@D-*p z3m>E%(_Da@k-SscRIUzCyDUAza%MWbzO;s%>C8QEz{A4DYTtu=rx{d!!(w>nBjio< z1lEl?GN+y`a}rA49bi~{2ZnPD9^yV>oU$q2e6_b^-rdL=SMHoX5tcj5DcCq?q+|{@ z)->B^7|#2z{DOTPpJ8ors@ok*#L9_@$g6j#kg}v5bRZY}tbB5r&C}=Y?PHM$UX448vXtHilV@ENPJv z8(2*{jpJRnrqmCtp)7Ma@08LYum)M8^Uj|E)_H z!${s#V;JTDt_H&d9SoBMh8aQM3&$uH*eWrtAhKmVwd9%cG3H5VwbTI*hPCoL}HgVpI#oj%;FoZPuGrJ)Lw@ou*=~Z z*d-FTNQRVy$H>2&FZm%{^9a~QI$StK|5rrdm(31-`A^A`2>db({PJU-QD09(rj)R! 
z$d&MmWJ@G|0lV0_nC!hEz9D|O!@)289Q>l#E3KI4rPv0K5w2qWF z|6<&t{jXOdOHL=(3oJ&KRB&FAj9fvMNUnTF9LZ{AiOxUgTo~tG2je^@*#X9R96BD1 z^9wMJ+O*@UIfb9DcplvHLg8-Yh;T~)-16oD&ny?aG-nLT{+am`_!S*196$bHa=1vB zWB+`C>=0&X`g=3~Hu(9&DP63s)4E!JFKAR%P7aSUa(HYs8lPL(cv1hI1&yoTGj1fO z#Es|vY-F60lVi6yX@jlxMzgT9Z^hr);t< zR4$cg>$xu0{&QWeolbf7pS#V9CN83!vixtJwegB!tX;%8_1>NDV3!lMU>9MEn%G5{ zC=$C2z5;d$@SVVAu#4&-jMNnBVi(n?CU%L$EwZnKTgnT(`LaF3a)($Y$*PoJ#Z37;8PPx~*=xf(z}LONAi`RE z96RStV|L*lbc*a8oxA>%8Og=~@xJ)}XDJDRpQoe*9#5$kSdGm1G^u`I1K+ppmn3{6 z$Pf8OLP-q+uOdGl;QWiEMz(yIy=r#h8f3#?kqu|T7?KaA_#~@X6DoZyJoF;%(iNY^ zIB!FI8Vy;4(U7>bhSffeboRw7HPDCO(wr%uSVfgCWMoZq@@-Fdkk+&glL_-m)Kjk#KSS+57JLGv~Ro>EY-8 z23FB}Y?Zam*|5)}HG7WF;QNY4aLeF+cU%hjyJ_39&tmQ0HlJ}%$Q$!F)v;aju0zdXWzke}jq?1$d; zeYV!!5NFVuwsZXz@*_s_A3FXyt#Na8yv9d=zArzy>&sTTB7G8SfB8bU(-%SN<;EJ6 zv)(}Rq8&Ki)$j68gx9x4*>MMYPO|q+Gt5KzoEzY1*+|6SK4-_@5{n?dEO&S^VDseV z{Svkv;lv%Zru?<-<8%k*mQGh}tuRKSV{dr*_BMD_dS3cK`u)tE-QmAn_|T6%vDwb0 zcVUwUUxbJMyVpCu&+K2JiY!<(_#30LdGd}P(s!rAy6 z#GiW|-CYXym+n3be@d^3KZT)F;Li#p1%G1-Jem^N%OTw=f1~uPe2#75Yw1?;=Q(uy z9q7M91!dj-4|;Uyb!M<2#>{^j`*|ih^*#Kef5R{OA^E*F883GGyV0m>b3tLZKNrmH zRyL(u{zmlsP9vi#neUGVJGjqQKj-S?s_&zlZ?oKI_Vdhp4knPj@E7`0^_Y%+UctHa z^N!ELzJ={)7`u!I1OLLIG&A^9WQohipMaw)lGx9TzA(?k+P{mJ#hVq>&7J%BS%1?K zKWFhCl~Fl=R7dgj7RN3T@5nY#t{?Z>U!64ppTpB-$eeOymHZ*@`paLbtbBViH17($ zT?ud0#oN+<8dG=h?aSpiIT69nis2VeyD|LoGm1Ag=i$cfOWz)+Y^szAbIQVxC{JR$Yx(|M~F*tnuO?2!DN5@X5 zE&IT~;@e-ND<`G+^A|&3L1)fSVSgQTE%VgDH=TI{+4@_U(_{^Ci1ps*rE}lu5uV!| z`gB>hU%>Nvhn`jJ;AUd&-cQ0$7V*3rJE-U76BI6R;{~OE`+XkPzx_VL|HF9Io{o{_ zt63gb7ZSFf7Rbe8=XP?)_L*rTT%E`9Ud-rczEPh;$+Pg#PdGs z*Shp_GUXHxYo1kn?CRyn{S|A|$LGep247#gPl&K`V*RhDhimHKEshR;)0kSg1^FGu z$5X3ya57lj)wix+u6~4Ht^vNdweYU^So4ZMhIjYDyYIof?}HKdI=owf?dT?|Ngt1ZkDmm04*s>7 zKZNo9EBN^7YyA0dW|+Ybh8foTgFV)VHRcKa3@^L>dFjzN8?&yMvFyG>&wmIHxzF#f z@qA7HzCDLHHrCq7JEV_Y9lQe_+=}vPZRtPK&ER7ZX3B>p6*+Nr`;Ua zBf)ygXjeK>G*nLMKbke8RkxVs_iY zqq?Sd?&rGp6go;d;onZu$y?#QntXhzF220q%=OW`x_HEzd4-qL#q33Xi7x(XZt6eV zc)HG<)ZJruAKqiw`}`dAMO%kjfX%y;%YuAj!@$i)no%E*FEAWi_h>Vp)spkL5Z7CH ztzpF59>JH!@7s^ygV$cCM`DeTWY8k|_Xs}lT5&Q}qR z;7LG91LgaSBYyV?*Ix2LE1duJokPyKzO&ys*LOZ}&iT%Co=xYuMdYIG+6-G8yEy+w z@)J;(9A8}JB=r6?{4pn5543J)uc=aUMmn`oyk?Z|`Wmzp8kRJi=eO#AkZUbjXjr9;t>puwtz|u8 ztOa+)TJxvGTSaRUtd|E;)?MT&7@uOjHZ;`=F*ethU1KeN$p|g%cfGxL_!7q964p8| z>CG{U<2jBs9QWT~6%o@`%v|Ms{9y~g6HCtVKBr!Nu({QC8p}uTq%WhPv!T>Y`J&^g z)6r6BJ?MHU_pY|)7MxQG0>oodV zxnQT!_eZuEA@!5}?MHZLH18e*)wd@=_3Z#u-+msdZ!duA+sk?OHOB6O2QB;g?p)XN zeW7~(LFkM4LLWCmgK7U0^hq&PeWCWZ-<9LMYY6X}NN$8h?4>coZ$0)(uJy@%K`;jaYYGAH>|$e$z@`?zfED zxz-KT^UHgU(3f|bA=b5ozMKq2=YRPK6nXMxcPRb-9I6m6}o zVh@#hW@vd4d#L=z3@zzFnI}N$Z|%#vWMMqxCBf33J?o3ee><0JL-{QL9R__Ix+K|J z@m`9(Us14#eTEiuZ3@3V4^6LUE$DHLwX6;M3T@*5BCfB7E`%9oqb*5aYah;8ZM(0CQdsxSBWi3B3 zn(la?g#M`J zkNqlA$tMr~T8AxWk?XT&xz;U-ves{iwpLDyv0m)Tvud`FMXu-|xuRD{4@AN5j0we2+U?cdX)pa>;k1{|-S*PC z+g>_XdqLBAZV~>Zu8GDhvm&;-y*ywcwb!v01JU)2L5@nwE}2Nrjru(h$@xFCGF|7?h_FK!r{0DtM zvKA93w<3vm4rI@q7G`MW&?svS`{leg7FjSL*4l&2QvLdZhx(-X0`b0rdq%~@&sCl9 zeRpn3y%Kb-dBWUQ=sP9(Ol1t*Lh~$M4n~|M$z}CfAy`ksN(EG**P=;ye>q6?l#6Zyp36 z+u!87zv0k2$`$%%H;?6h>lpWB^V@I^^?889?ej(0dbth2f7C&JC2W`n-@0q{!u`#D zHT19gIw`V`RVJQ~i@@-TG0=D4ppPf<-L}I=R4t`EOVOond1RNX?zhlKr)aC787{B< zYx-Uv+Fxy6V1)OIYDGJjF{f2&w{xQX-oDg9Wm($Bu-xmu_Q(?@#uMC#(_-6e(*a=%qV9r}(RR)u^H zx&HGD-7mi{&1JDavLOcyeF*G7vS$SDZ(#`6hjIUc2;9He!Tt5^-?txZMohPZ`{l=| z#{Tsk>@S&}6_!E5{&!$o2>U-M><=$gWB+Cm*uM#5Ss4EYGxYDG*PEdqi=vnAb1;8n z`s*-#RoRra$CB5y;}pi}gv((4onZR4UeBz(9A`O%?O$rfoEq3Zn2D~XtnT zO261xexAp(PnyAA|p3K6~<%_^q;4>{r^ITyb1KK7`npk4)?Mul?4kW9%_?8+I+% 
zzqk+jycv2G-v44MGztnXel-r-7mBR=VlFfj8h|#3j)P`H?}feqj$0FBomfnM)HSiz ziP_9Q^gupNg!YCGf>wb`i?AD3o1qP|8x}Bzh8p&Mr#7CO!2i>dt=AV~H@snnmdb8e z&b2ArGY^_n&sy2$8mlA&S z*F)ceJ_6kVeGn@9;ZCURhwe~vVAys9ra-TEnp?9!6$Vb}+3IjH?PqR=od5(|U*Tsd2TEeojjtl2M#u zhBU@D&SZaE&NbFP{sXzFIbUI5qdWcpmv6<-i{Rao_R-cW;KKRou~szvy$D_oz8-IF zn4VxQ9w@$t-{I>OQ7P6M_BnCOEBm4qa&E?5re*W(F8ES>%NQQzGcB7Jx4>(uT-!%G zRfKH~*&(tm#K$)OU~kC&5PyqTwqR?#McuStmFla8qGb{8pE+Q2FF;hXNLC%U1IVbjfoym#@mu8Y>uJq>q6iBDVy>60oMle+dk+} zsK(JSsK(wXsK&vt6zd!^ae13mt7bn>0Y_->(7J4nos;k_vj@yxXD=Ap7%$1jIEjtn z_C2^iQePaGPl&z`(C=!Kw~;M&U&n;|co$UtdkSjzW3By5UpoC8q<@zaAK`v4K{icu zd=rk1u@oEQWAKc=^-hg%ZF0VK5Gwm(X}aetZBWy`m;!#8a2dN|C-`e^E^WebmP2;L zOV|;avLl=}ksaZ-Nu(`NQ=UfFpGokLJHFlbMDkmoKZbSAa9{0qg!}9P$BxLlCa$xm z5&N!y4P-}*WW0xQW$ku^@XO`wh;d@VdUPdwfyu1t>k9vC2AIgvqZe?SiL%rA@K5GSQ5mw_zTsbVp+JGN%>lN&c7PZ(LW)${@U8dy9a`37A3c~++)Z4W? zytJR0J~*R@dImSZ)6~8lhJ_*|+Bk>W{v_~!(E4A1o`HYX_ylb8?Jmu6_X+uBFpo~w`X6|?h zMmb}FIS<|6#FJF{EOuracv@p<7xv0h?6V+#0p&leX{XeuuVkmN2XQCa=}kO|l?SmI z)IYLSBHN>sZ@Kr>X|t&QvRPDrXa2D=!e&vuWoPGNvn;A%w*;xD{NIBd8_bVBbnB_> zP1yTWHdr5Uqih<*4Jv-%8{03NVt0qI0bKi~(#b6{oOz3*Ee!VG@dcFsLX-c@Rr{5+o_06f+ zi7i-<#Py@iSdS#yg#BtoZy*P===El(lPLPLDOB?mhtHyq9*MDTz;;P&ZVb>|h31*N zVzb!uVc0L@uuJA}xOPe|*JP(mz)sN|#2wf>6Ma#Y#mKU3e04`YCEjtW-^%8D>a%RV zFF#N=Pp37cBo;#d}tWGz6)7WYWA7G~D7_0kfff%V$pYppNQ^@q=* z(^v3LFoTWhq@y?U|1M~khSvJ=jjSUlIX}R)O6XzeA*k@0^!Rb8^mv;L>Icol*2Gq> zb4~*sxHPkgrFIDtvyhE`I5LqopnYYRWP{mck7UyqvLCYPQ(+pr&8V~8X0(ahW^c)^ z8AMyju9*OpUGqFt_-Hv)cFiWJ>>Ap}Zd=;kZd=;^h@S5b)$=7vU9h+=~`nG4ji?McJTCR2YHY3!7Ivwr>Wn3K|VuTJ)G(!vF z`NM;t$RX{Yx6r`Xmg%*M+ptgk+;BhJa~REyFy0>(ZGBn)C)R>)F_zUH{Ii;SOn!SF z+5`GHv^VrV=qWHSb7R7aC9+$Vb8RTU&4w~okBozkg5C?oUS8?i%LQE9#I-5VQs^}3 zZ=lnmW1!DL?}W~Q=0FEx*J!L3@&6v^Lg*XNCD6strO*I$1#~2I4YV(G1N1iN>(EMQ z8FW8%Gjtd9E$C~|9nb~Pcc4?D??NAez6b3E{SbN*dH`AhJq#6p9f59#9*3@no`TMU zo`Ft=RzZhA&(ZddZBu}+b#PK#=YscQJN*hdB>tI%43}JKmCE|C_V^+gv+}PlGYoh) zin)zAYgun2RMI{Pos?|z+zDh+W;0~b9(X9x49g#$mu%Xd$@MMphIl|cA|5zN?$NwK3H+WK$U5<1^xgnGAO zFUf&^tEh)?hsS!AoU^hKEbSkue#+!KTc8J`tcAxJSIDX03g*yg!&1$msV{nP&ps&i zv)d9|xcK!1YuP~lXB^sghT488|Emv&K{XaeLDdIip}U}uK{r7MrdnOGE#53(4G70L zY=}7=u04^h>+&VWn)cdZOKsjf%5G!DcxJaSt!>ovct_J34gWsZ zll?tnz?&mX>pt4k#`oZa6^7SZfUnub^(&*8yTNy)_UH<(f77Hq9OJ<5b2wZqpUX92 z`3Ye816yM4^5jt-SN5uW$y>tmbt^LN7{^Gj8Jf-!G;vQ#9B?r5u6|4&lk3Cgg^fy z&B1@gr=NI^B`b7HTyVCwsi1nQ0_~83S;Mux-@M>$7+0XlgRqeK- ztgR=6eedr2k_(%@q3_+su7-_W-ME`llfKYCe!{QKBJgXA2>i;v?lyi+4dd6NiO>t( zXA8ex;(t%Lq%E8M;^WdNVXYw7z`r zP~>@cXihT+zv8bH)e3ljMLG30&%&)|ic%Zwr7$5vn zu8nPMo|mo=&MgJ!E|iRyPB7A}R|kT1CGW?Ab-M`bCZI$3UAmwQDjl*JDt!~x(3&rt zE4^`mYtkEsq0-q$pwbu8)zTODXIM+o&$5-g_yv6O2Vg_b$N#ecf1d3Vz=!9-=U0qx z-h-d7c#Y^I8O8YKJ^1d6@y)AE-~N>RGd>et4`xP>$Uhw?oeY+XV=M^!#nHFIi*dBI zbZi`LEt?>Yww7+r_Cg0i;YAxqCqU=&zjX3y=rvGudiHft`YfCNux$_8Pqv4RpSwdH z{A=s}36Ac69xD9194h>~3F^!X*!Y<~`e?j^4em3-*x+%fo|z5RGo?^H^9EFQ3;j3} zefWg1z`bAxm2VtW<@*g(Lmz~WkG7U~1p8{ve;)svT%QZ=0i6Kt4ZRO~B;G1o0rp*Q zhOzHzt_|h4=b;+Ek3)q~?iEe3G4+Zr_=30de*xFGK&L>LL#IJ!L#IQ>L7#)(37rGY zfkxuref+zklUprz2I(BD8;K*vDWK>I>BKyQP-4y}ZiLH9#9L-#=6g1!OW0bLAz z2O5CB3mpl4584a*A@n5l0JH*n82SP92y_?pIP^8>Dd+;|8R%4K74#A4IdaH?M?$x- zP9IG2Vsc#PSN6ts`Ykfd#$V_LS0*K-V!I=ETK^k+k|Sf@2D?h0xIPW_m+i-cPlbzZ zKOX#QW8-1^4PPF7tM>{cH*_$v&5!Vj&5w-3tfpa%oSk5X`6vZ?l6`T;;!AfhvW=T7 z8Ata!ys@472us7iCy{$kfRUfl_=kUFmjR3;j>|2$I)QqSE9hkX<>{k zTm35-`Bapp_E~4+sf1iT4WP6>Yorh9B^)u~oC5)`toetpQ+Hmra2#owb z7({KEg>JU{C^pjrVSWc@i3S8CYiqwrk781 zI=FeB=HS8I!Zy`DZ04idCzHDc46N~fGxmN9Phvo8nJXA$w}Q)trd+JgvNUJh%deXr zRqel4eA41B&bY^iEzFcmPKikWwTCwM+5YRq+WpszP5H0oyFN&MBHMqRbgBPZF*Ms8 zY*<^&ObC3PMO)Rx*Dn$?qcZs!Kk|d99mmj2^a=?Y94lZvQ 
zYs|WwpE=*b=HjzRAG69Xd&2cEYt4=AU%moHKY81Fa`1V`#pflLwU1oZU(LOzSa|uF zTeH7(OE7u@{^ZB-EhiKEk{DZX&&Vee=Z-r3#GW8LSTCmFo{xPKR?YPK0$SVq$dL(q z?u3qoM;}eEH}_*?n)V;8j~zCLd{A}_5Exm$Ug7kXijikOJ^73i$$@+=eSVg`?LP-| z%l9sS@Y<%#zp+=e)`eUO1$*~4>y?@9Lqf)us_M8ea+xFave{&!3?c7jL?RDQC3lA!4FpV z#@9<;*jM@`Tl16Rf~%i1LJP@z{3>(zFE>sKt{BEW=;xKaQ-Z6<7yRJ0eoa^p!#E;W zbs0ISHHfBiX}1jyvo`Ky z@<+RM??}BnQSU3&W3RUX>+~B0RNp1$(qFz!b%=I=l*?SwCsqe;0=l&3vBj=i&5qL=#MtKs+E`=y&0zuLdAnDXB} zd~8)ubW)kIzgtiAQ91jOD^7DcMrE*J;sB8taZ4!0ri%iqHN4+yho9LYnu@|iSy!Q9f zTv1EnBDF48_vLY)>npR%;ykBk7QyrK_u0Ob&%?fyzBPO)t`3)f#+`@Ob8*bawWR#6 z{(d_`{b-d_L(fZ-6;m5k^yUO_^ouT)soK2i^%J1R&HS#H^o+&a4ez3@- zoE(zN<&M83;K*h99TJFJl)uI9zszvlVlH_xU7wuvrRz6QjH14$HA?-!H}d;vUtWVb zzuNK;9&Oeg8JvqO_LC2F>!>o{puJy5HYc(^$Cb_F>X6O-PdvG2402g}e~e{(-VJ@2 zF)DdHt`1pz2l@6Tiz~_1LB5E;e2y&sjP?wV;ka3Iki~0}#cuw-8QeF|Xi%92&MNkr z=hu5<1LDt*z*g&#amtBtI6>$20{`GVlwgKF24Ag5)>P!0!PEFC)+2lV+20I)aSeG? z@lmWt9)9+K8I)cWt~{Gi_Jb;X7VD9Rp%G^AOaA||kr_IdVV*C>cd#MT(dpPG3y|Tq zErM;bNOF9be#3`=ZL-*m3wnt8lV4#~KV*9-Y^-%YXJEou`G% z{GU2c4V-_%3|?re^OV5RR%WpB-*lcBIPrZmc+%?+GS?Ygkmd<4O7{jAo4(-6-ciBT zcSQ%+-X9ZOKR7n{%CNZLYh&Yse;l6>d?Pb4Xk{k_OWP+0gI!XB%Z=3F^Ncz5+mGRK z#gD4*?sw$;zrpJpD4WWo-n?LD9NF2jdYM0+o!yHvg7P_P<`bTY{{U%-gqk2dms2;MvBI}{+b?G7c z)sCItUPBMLHi8?65~+u-WFz!NA4xyCHp0I`AN7sUN3s>P&*DhNbQ1Q$iP$Gsjp~>* zHy@ipIw>Z);GPO>hB4@*yWoLG8UGJAj+uKUJE5(kn_4G@?F3<1p>>D?qed9JXHbSeL8=(cyd4r3{ z3ne?^W%ff}%zntP4CJ>_MrcKkSlgc1P!u)m8XvOqC9dN$Ts@HcN8MnpJ{QKKCF~Vb zhK%~EEm1GRmPm`RB~q~^Ol*m;UD5dQiP#ddA5yR-Ol%2VPYNWhe=3;r#>8q|=i;0G&Md#1I8lJ9-0ZJN}JHVwGJwrT$G)v^2cT67Me+qP+n zm}AyE)ov6D{jayQ+Jop4$Er_9k$2X!Cs?9DsfIa$4L z3GdT;MmX&F^p}~$mnnbx;@tj>|Vx;w) z%KERgiMIV0*+e6={xvocoB!E1Q6S~&Y@)W@C!46;v5DrRdlsO37NUFB!?PQZi_xr` zU@dhp4gIqL-BOSJzOF$I)<+JeBL`nWes%Q=RJ2cd$T93}E@?uZDq~CvG+f1*bw>;?+Y?98@7|=Zsg?A} zD*EKrfx2g$ZBwlo5F4E5=((aAcFcESSD8m*J2j{N`o<*3E{v?N#&2YO)2NqGOT9GK zj2iW_%Z)6qS#DQ9xwee$_sUbM)Xr z=0p9&Xl-!x;J?B?%SA`9ZZdQ?daxPeq&Yc7<)^!eand10`Y};@vD!WpziKUkJ-^DF z(?ay4&9m5OE8FP$Mf)t_JFw4`=Scp)Oy-NEw`|>wz2bND!A;|{$OK-trv_^TeEJ(vGulr zOX{-qgbAcag$ZQqNxw$gdnLTKm!J)HorV3Un61&+ck+kJ7jDo02oEJC z+Vg#J_`$(10~8}EzdHM0{G|e))5i|Z)I7QT_8rIzS`o$k59`WL*glYYw%>fuu(4I| zl0)=&_n6jRBPp=&At*kN<^2q6?Lg*M-|kIO9f53a5oM2k{w+h6%j?**AUxE*>%q`##R|~0(tm!K7vk$x^h!<^@?wmkG_L<;;KyY1nZe{ zp0VGH{Xc{E+waA9ZoiM%DaEyp;Q9&ZET{z@4EFF?FHoP;#2f4;ADMC#1R{L*$Ec&5 zlj2Hw3S6IxFlS_r0`Y>zsNy?o<|p`sZ_9^LsqZjGa}CxM=A0k-^9^YPopN%^Z0hRQ+Ftm~10QV9IqiErM)m{A?YYj%7m@nxuZ1-Dv=goXWbsMbs2+C3n?`2V6?Hi!F z?j*Kt2(rZWms+gtTM$vN;=6p7o)bT2IlQkn1@Oa(kCeoTWOP4Wur8R7*6P9U;^`9j`53(34&SKsWP7e%`QEedewlHo`Q@w9li8-#x~87YcJySM?@mung6Fmp zXR9(>lv!nzeBR3UM-m5k5E=FRpO~x#F|6YIJyr>8G9-sw{V1N)8jzZL($9PKPP@+J z`f&APCA=rSDZN;V-h9c?i;p^bu{1(29+YlE7Ppqqn?7{sv}@zNAmdiNr}`)^OLN6v zT_ccJ=NbWht51@rI)6_xeAX)2^R?Cq9-p~ij| zde{Ai-h0PC>(+bPpGJCb?02pA;JvK|Ul*-kUxCxa@u~D))Zsz#|#71d+YRqmy zC%E6$wIJ8@osE3MwZ}S;liMBd2hN7EpP&2moXtP@M)i#DU&nX-(GHf2&?k{|*Z+US zNR`{Uo82+jnz*9NjX8}sVZPCk_D{rkYs;9m?H}g0Qt8*zjJH#c{iE@Al32Vn##;yG zi*poHYWoyYZTrWAT#CV;uD;%gO%TJ_5ysz){S(9Z`7`Hxuzw=`c9Al_W?aSUh`5Ri zU|`97PXh6wd}9-Pgy{W$CfB&$y~V6Ou0nOtS~k@|WAi%3W<8!Sz=xoBN_MNpP#KM zS2cDeS2cF^PTMAQ%A>hr$;p~!(0%w{E-Py@9a&qh`*`2Y)X#mF@(}4A$HEMDq${HD;Iznat7sjV<1FwHIKK0VE**d~*txex+TwYf8I(BN`Zzy}c z^c^v4p;+Xb_BTDun22S3{=-?LQ=hT+a70X-=1kPTwau9i8*Wbt4}7JDMzUypW)e6Qjt^sZz0b6wdi zJM}x4&5Z4DGe=Tz1sq)0xc{%28wrp93!60e5smLQ1|M!LzT7x`y7A0?Bv?7bM1M5Z zuoAIJwU@oczG%1OV{FCxnAQi4S(PpD-Iha}L5rbHpp&3CK=Yu?nY8vp@dLHag5vv$ z#qS)&SelZ_x&Zliz&v)|T=uIu#eTe}ebFVqH=?)CZXDh}wLbf&&IRvOvL@^?&kce7 z8OF+S8|M_xNe}Py)PTL=E1Q|2pXs+o#>zPxe^>as2E=P?znJ7}1C_)EJg(oaGghwM 
z_U~}yqe?5E0SF#RfynegJSb28i3xzMR&y4nBOlL27`Q3k^-%^Z~>6>0Gd=VeI z_GWCD+%QndT*njoEx}mXbyHzsA$8JTjg68U1!NaJso%WD=?50PRQM8Y);PIw;5M$! z-88rGoG-TIDQvpF98Ebq9RC2nFwbk}*~o|P1e=XvUyuo@srGwx{(#Q;-k5ji6?WA* z<mHZyQI)ZO`8nL|<_|xBtjqCis=s)#YBL4Gd=__47&iFXO z-g_D&0orjS`|<_wfhH%%2bM4v1|=IShm}t&e1>*x$#2hay_B&rDBf7vvOG{2pbgz; zmN8ZadC2EiJhN~n@0*^S5|~~-qcE9!X5z0M!_l6j9!D-?C~wWY!s+;e^o-6&>YVrH zQO}k-=i4JV*FGydyBXFy_>?urhD_hB*seEtM?ZAD>M7f830Si^byU6de>S=CRNw8` zDyr{Mr>>#{R zVPS-P;JjKsY>to*oL9>S&a34E=hgCo^RLJUu6@_?!Ice?6Ru2nhIVvifp|2o{Fy=v znGm22U0LADhf?@zChezvcxNIHYRZSD<GkcITiUh8{I$FQ$LV{ zzB^g|T;X$!jWl>YEjcYPRoBspX&a{&PUW52hdq{k*w^Sf`nRG8oWM27g>V zhjqvtd?}~NT{Mq(NQZ1kUwRyUp}7+2i`!}c-j0lR$HBi!UudqsxQ4FK_g!7l=UeEC z+V%~stN$<62VbxM)#qdAujd#CZeLgTKi8*HN4Kxt{$I@XIg9~U54ioWes{-!aATeQ z4-ZuLKj+o`zd54+IsYd8ufA7%^!+c82etSAMOo12TgZai`v3F${s?~XU@v&B^OM=% z5u8+s?RLiJDaqkH!1<@_hY*)JIHR2NYw6o{90z>$OAh+3ExFLX<(zTe>jLo6>AY%8 z!u|+%`oScdZ2xH7nWuiMIdjG98<}|-#rS^YD=y_Y&U;RP;WT&5y82l`{AtsPFU;{Y zs4RjXlbr3LOxN)7Dh{89Fi9L-*^`#ouuy7!|F zcYiUN@}Bg?m8|e!_u~I~&Zb6i=OpA)J>#^W@_xeaV6MtcZ0udk53CqIrs}!jw^Z$< zEKQ7*O4aFaVVThXYRYU>U0P<@-Cw87nf3HSA|LQ9GIs04mb55Q8?1|`S z&$sMnPp$o&^9}nsvdowE^Oei|_50aVSyw-+{BA#ME*qCU=}AaPpx*+fF>5JvV?p}eIBbj&KVo~#8i~G_t>47ssWjl*;U1%- z_S!s2EX;vPIl+_44FE3{GtN$O9c)x-V)uA?UjF|SBX#@oCsXJC20p#uAr}&H#Eeb* ze#PKOE+}?Rav=}fNNd-KeB2D&tX^1nvQH}VkFu;sj_K3pK0UuskQ>XsOquYH(#~9?# z7Q+biN{S8iOo|EANB+!CLQk=Fd7d}6a%1~@zwl9)k7 zu2JnH|0d%;i?-9fYCFjxwOtJLU;Jc@y^np`Xj?Q|E6$~kxDzn*nR$2+OVb$ zSN~Tw4)=d4R5CO`j9#2#Uy0#q4K1mfxJPZ;&l4S(j_e514$GJqRa+?L)n4DodfzuW z_l4W!bUyL38uO*#!wTB#4E6eyHv5eD(9fyMS;{q@bsxWA9mo@`56NRLCJT9VkaFiS zzhj?!IVT1|=jA%bC#rKW&v|?);d`PuXRV2Se+=iWGqLa2IWY_N{hVjH_s4PW-k-p^ zd%w=z`#JZ!_Y)uOckfT(+`V7t?){wG`C!3^6-IJE_;3fZU3GknI{u70{+v2ydEz?X zMEeXNM(KB4U&rx0I$G_bHY&&8T30(va@&XcPIB9adKJ6ZsaLUko${Bv*C~IwdtK!u zJ}5FzYz6Uyc23Cx$eY=s+T4#g-F{ zm1T^LBh*oGLz+WRLvPoEzhlrL6rF61r zHxEX|b)Llie(IJ1q}+N=?ZZ3H`<*tK zZX^UIsjN=BNG{s{J8k3spG*IPsdi7{_Z-@1I`V6%CoZ6J<$alIm#OBv)mOQ+(Udgq z`69Nn6?hTZ7cVS?X9u=Oir*v@rV14}5@Ral~c`MPg{CB}dy zg!${j5*mXZCniN$qE#027T{WU{Gju8=fXcK!(h!tGYW|OlG`@`f?B(7Zf5;Tdx^ehWdmL^Y-uzs<7~N-7CNRcN`#O{~_Zr0b;NPdu z4>9g^u6CcqdGu#b{r1<4ncbe4Kp1Z%*my&+LsPIF7I6&yea@x$16)&qd|1T$iuuMo z@I(>$z=l3qZ|>pbsKC?6otwzF;~VkQb3>3nlaW8NExaRcI!FA;tSMlxCPux=a-Q8n zn?zzSjhk%BmKY!Q3vCB4eET+oU$3goJP~c?`ES=|Id!zzg-xE_N1J)_zj(xpPVk`{ zqOhT(vCCty%RRPT-kSQmGIu+AS$27Lj*xw zxEcP?1s{7ZW3PJ{yIv0C#wGm^VS6_RGe`5k@Mf&8dvf-=_Iwt7t9W?jtXR)G$8&V$ zy*VD^>XXcGo|AW5kN}BR6PsDq}@4B8mIO$>k0PoW{IX|H%b^8%@>_J_- zqyKW*AK9Nz8`$ekt|q5c_TgXtHtv_k+-Jeu*+-tT&!5rx@rm~NG|r_rlox6{IRTTH z^K46g55)m!ZP!o1wl>y9^qPy9ihvwkc1 z#pv+)xu2WC|MOJ%{K(B_@L0R>`SDI>@RJ|4yLgYzv(KZ01<+#qJSJEMJ;&GO}(IMH#0gm*vAt^4z@Uzqgye@Mvg-q zQL*LSZsO?4@d$@oj?9R1WJZ)DGol=sl%u7`8(wS3xXU8%UxAnP+Z-D?4sk^Jkj)%D zIUeCK%)E>&4(#)M6PjnAd-4mQ#rC;3zYKcNKKJFvvCnE>WOi9nS|^R?0CJ@b{4oRG zm`R*rD&t=Iuz;L;Pbd5Fn~~d&XQIey5tBiFMEWDGC;1`Cho$odjO*(d*KR$_q8WFz zsoJ|NnsEnJdzVEs?x1S#vS`K~?Wp!HQ@cRb-eqbRsM@U%pY%UxEK56nNObFbF_85^*i*F{9Z>Aeb$mVPaqB~`N*fg z+;i~yXzR?$80$d&DC@I39N{?1rj-qdD)|f z<4xMULyE!P9_YAhjRDP4Jb|t$-ar@MwSmsQ*gz*={lGk}sl^UBbu0UZj?demob%b_ zoU`{KNVy@PeYDPfnQLXIGze%Y(wwcm1A0IC%h`FE z1)<@_wSlax0&@M6r(5mMCthB7&yx@q2qr+wxtIX+)y0sQxdw_#-_SgR%NcLT>4 zyi4u+hBvv=L~d>!WmtcsP2cb|tUSQ~@AM!h$M9LLXkYy5p$5<+E&5n-^kdfW?5a;_ zZ+*8p=YQ5Y-@TRdcC^*Dy9{e*1Ky=~qbJ>WxzFo;dWZYG`>e%S+|F8-&Hcj99-!Pc zp9T1ZIy%p`6Xm-%K?`}-t%KWFD!a<9vZ{>AowV&PFJ)t|Ag*Iyu|F#NVsm`itCw}A zvl&wVKFhn@`=)R{l0F(?#0G}bFHt9AJ08KJxRhT>}!@1s4&b>2JK z=)8jOv}6ni;1^wAGX9kfZ5gXe(FfwO1W&{5#3h{Wi+$4jynoL!c{mgTMb9TayP2iyujDr^@ 
zxKbg%J0>_o_Muec=itkKBKyAx;mdyPLn-*rzqjPcIlsV8WZ}yh@+XDiuKy62bOcP= zd3TUMt$D4x`Om%oW`0z26L2mTGk ze}GT@SI-ZM|GTHm$LMId)A4-lJeo>-WBt`;c-j@DT@jXc1!-51b_HomTfDWkxA_{f zWb>up@cUGJDG=gIi{VRgw1L9bG$F*7Y(Jyj9ySkpUwn!9=$55KH}a;HePaBAW0`wS z=1qcQ6DY^e@6ZDiZuVJzb=yB?r!81E-@>w>e@xok#xwDcy+az}9}^6O7v(oh3i`*S z3}VNW{4M{OrUF~r;U9zV@4dn4{}K6aBL?FZ&fDQiF(IzhERKb^QaFyCiYv)n@KejK zonV}bD+y+0_0`QQd-*KB)CDXnz)n?&ovN~rGS~Ff@V8-?OvN{Zj0l#5-xG|Kdy=2< zC7UbBJW;rkjdSvz%oTUA*Gy*Al9f2_Kl33m}YxZq4n{7*~# zkHjst#Qz-kX|=@v5IYnWhxjdx|5?%!|I-rxBb?@26#p~J<~G6sz9sQLR-Ibne_G;y zTH=3N;(uD=f4;WyKeJlme_G;yz9sQLjXX-$wM)EU#aAEy6I|Q=#o~XgdbPy=w8a0k z#Q(I!|1=dV-4g%Pbe!~oA+I;C0f}dyZi!u#bsc_}(^$lw?e_Zl0NMYl z>~r%}sw43HZO*{cX|BKvtOa{Uj}E+85EGDZK}or9;Ts{>voDd9J)gaaDwO-$bn394 zv{sU~l{5WpFL$3q*0b6 zN8pb->kx?Vu=l}`HKYrOt?TY6dR`rjvGS314pu&_A)P{gRz4mrAnCtF9CRhuhjV=k z=c$~D1r0n+yZx3r4P$=_S$Aa+XDEBlx>VWpbYes_d`=!M&X~fwbE%i~m#i0;yav#g zl6J9D*Cx`2ajl$&tTT{zCGJos#?7D%!zcs)2dp%?uDCAiNF}cYv`tQm)B0}Ld`EEq zn=zD0_FRo6{&hO<>%4Etq|UcPs26+vtnE$NyV3S@mAlqQn)6g~uMG0tNtu23*Yupv z|HornDuZjL&T#LEF&&QU%ud!&jYe+SE^elI-U*vLo>iG11?t9X^cPU6uin}j8iT}5|$EPRd3`-XqRZ{zV(p$xix9)Fu z-c99H0KT&Q4R(UXXfs`a<_z>DxOOq^}^4 zX#QW|zB_$|9!z^h>Rst8`CslY;C>YU%e_BxZ2@b8Ub-#Xe7PXTyo+=n=9rgxce4h3OCRzuYh6zLWpup6qG+bW)uEg&X6|j-=Oz zqabsHm1b7z@br%EVSIzGAU%h)9sHl=9-f{PN;4;QSb8r1%l$0wYy2RMG=c8kF*75tyKm1BJS%Y<++f(_=M`-3>#yZT`?suNht{rK^ z5wz7*+D!Id4euvu&%U&s^idXl6yBc0UK^G^`88?R<1+9?akOA-5JyYEK(N?9I$GL9 z+Cx9jWu`q`n)%Xg4rJG9O1Vc@H;>Qtd?#TKYv6_?1Tngb^*uI5?`3U%+^0dD77RHR zMz12yUNE|nG1b{h_a}hIZ+}lWD`VMD;JoUO@-MzxJI`(A^b76};JZ#ujU~$?#u8~4 z8Bb(gU|g#tzWEazPqHMAp6{Ct?5>**>MT00XNoQdb=O53%6C)bdy`YZ1kv+CbT})u zApH__e*yOtvRKUjSzOn+roUE@F7n7YbCq=bZWBGL7UG=9;}CJmjBJAzkTPE|6w}{ zwmEe{wq~9M{}bDj@W0{^|C6>6?k7GzNz+P?YZsFH?LH6OZxNi$)`xKJXYY?H+;1H7 zC2L>9Q*b|jdL;KtJoS7jg8TimvyZ)Wa@dO}*L)AXSim~;zp}RKW%lvepc0H_>>cyo z9ju#zb6g1S_qWcj6L{bG;C!!+bi*fO&of66&gWq~_MR|qy>wfG#Z%7*_nX-)_e;1S z+%LBgZxXEeXf=Co2o}s|{1@)`R=&^Te&>7aFVA--c%C_&=X*X=c-)2Ia2KE-84m`ZC5Owh#)KrJn)Pvl|C{JT z;cyp%zcoL;e0{mw;B(+^y=4zx1W ztl_I>3|@`x=iYGJ9(EwxwijSsu+80)u)Azyt)zU5Rd}t~^^&mbb$57;71;JRV%x*7 z4X;hIYA!eI_@1iUOB9;`x4iQD7TP}4}`mA#j*eLMLyn1 zG{kPUU69wZp-zDt=xSHf?aka2=$u+?xyBu#&hLw~dGOzCJ-5~b5B zYt!&$Ud}w$8I{aoFRgvZfVXwf3T3ME;V_C}c*D#8SiHCu_VhDcWI997dfrJUpGHV z_3_(l92vp>KVyC7mZThiXXHNRN4nYOKYV7);1g|eTI~6;#l3d_`R8M=jBsPyIsbg@ z#e9$X=Q~g2V+#kJA3i2JCw$D-+s^b$6t=h+Y;m3GmsZ%~lCi}}pR~pn*H&zCr@;@g z#Rc)h*@z!M4B$)?Dp`!Dr#81n1 zh7Csg58K%l;NbaT@Mu$=|i5A*!;Q>SI)5#H5^@#2RG$+Otbr0x~6|3HXef#ny3 zU!7{KxG`iG!{=m;71{KUj1{(R%(jb#j}u>pZLDd2b?W^(W!#C!CKk@G;u&x7mz8>i zW5qo1{Mf5KWoN^$W`^0u1ZRY!Y-6vraDFA+N;uX@{AxNp30s(qKU-6R9BG@_oWysE zk8s|s6Md(mU*bDOcTdBwJjTHf9t<3=KF%4=hF`S{;bV1(Ux}|YD^%9ylryq?QZBI} z*yTDCzEzDqOx_djW2bGWE7#w`HfE=1>%@F?!scF8UyyrwL)=Svlf+d?-r^e-T@c@B zqfUnLF7b`pyi06h@~o6!_C*rz<%2u%e?YE}{t~#?TVH^CbUal!)ez+~{d~vV8jMeaeo#+RBWyJaM zgT9y_4Ocqh4d^ZVnld&0q1 z4$3jt=jWbhd!q9H`A*GzWrPE6eExl&3kOA;FXqRXQm?bcL$QU)d*bJ$5B#0m`^-!8 za?GoLoO_;mhw?w0s+kK1IVh{sD(mP?$NSyJ+UN+*wg%4jowL0o{iw^?V!|IUBh zo3(av-nqxU%{m+I*An-3{?o$Y#E1a-&Kt2-p(XC^-1qbNzZ&;8kTo*0E_49josORrRW^`4&OEHe z7|gyPE{P)v?dRr2)*e++Rgdu<*&*IUV|*)@u~Ad|_EgTA^Qh~{%d>b#zCC-RH_0ev zJz#f!-{fs&ObLB^HrMMkGFS_0u-0HZ>qV>a9czw?UL80Ju{TLtFH}yOv&WlkyfA;- zoTP+gV`Hp(POf#A-`?QwLQk?$L0tV()-uRGa_mR9*4{&IENQPN#&im6|L!BdD|1xo z#whh%@#hWs-PuP?(woljLhcn-s8T6UIsdQZoh%oz!~8F603M;918I-xq~oDYW>UAn zBzKwL>oo?j7u+DP$C%E#%-)o_eE#$~Gij4C+#6FlV~*W+1tH(KJo7))VT?Dy_#f&u zfU|u2SoS2d*K95(=00)_rIbPX#31bfd?$WjlFN{?j3IAHb3mmrC!TT*qMvl~l4~=` zX8`3O{hpG(ZBv);Z7H5RFeJFIS$N#zm{9*q-%EbY^?$ce|4ZB6OW)6;T+-JY$ya3H 
za%5mU?V3cJOWR9(hqrUpDf&IRa;DYq$@6D6_PboO`(1vYuHQG%hv70P{a!)ehRdS# z|7fm7%H#m9$@jz~WkC90+GP;!BKPb&C7rn89ZAVXu*IM43IIP1q1$XVL)@=Ywa3}g>Q-zjAL%S+813K z&J66`(e%14QTnmA4!zD1tslFFa~9{HE4{A3r5}^DUu3M? z<4rMMWK2y;NHL1y)N^vJyZlZF{x0;S7(a`KbFhx^1EuO@f45~Wb?kk&suLUHQja>e zAxYKA{%(~i>X_mj3+E}iNIkbJ^p5?!{j5Cq9^U}3NK?mze+c*3?@TFgfIAF2+K~TO z#*qmLs+2P9b2IM#Ua6#ijGEBny7=}+rZZ!Ml%uIM)mC-yPToD~KHnUda>=*G8}B`! zneWQ~4=8gL=WxDL6s?}CW}TyqztSc<`5itE$Fcv!yiyqf{9 zm;v7ro^yjI*67N(H;Qpj#`;I*KQc$gpaI+q=O8!IXFmGu3eK6Fuz zKSeEmv^BJW)#miYJM@LrFNt=S@p(q*ekJ$hJ3lgx&*R$VntEvs^_6Eu_9P8?M%qo% zsGu)p-(2B5pE|weMdUS>ab0-TFj+f8UAwXm-%RB62I@MUI%Ip}i~;Z}3-_qw>y$aW za{8RNGF7R~twbJ%lw=%{e05D7v-aa4kKOENEP2dCPV73%`;Q`n*~0a>HvnEI?+*S~ z!^=Z8Rkx3J5Av{F^U$q1=8>U3^XNUxt-WTO)5Y45E;fbDmM-3OqAvPbi!8br$1*oYqk!`Z(9fBPoYm<+E{dMEI!;b)zeey;Q!Ljr_;d# z`jdHtg$26&4q-v2{1z-guC|kolxG;%MAig1eDYhKMK5LlM7cklYfI%mdRZmckW-mc zg!56+%VKmfoR5lrcHvqiAC2SM4Bnp~($Rq-9TokIgOAGGMD(%?@_Pe%Y3u7)bW-## zd;SbdM`f;XIvut7qL+7U9bFWrgQ8n0qKD|_u4Z)CBlkmknitYjc~91&h@M()O8Z#4 zPP;axtCuyQt1~QJbs19UpspUD56unfs_YvukhzWYS3LQxCO^RzxvzPG{kB5qyUXb8 zE^uKedN~+byd0d$0;l9TFLNoWJA07ti$`ZAjn8QRv(?qpj+?_y)YX=8vt`_D88`J~ zucL!s^0>KX^?fJmV#~PMkl!+Hwv3w>a@-USJ|_ZBYBS@u0Kh0}11;qV+OdVD6| zO>nr}c^x^%v4`9qr>lG~``(@ETnCG z-nu(!l&cy|E4IE=m$ znat+@F`CDc$zj;K!~5>^Wzf!Z8B}d#8NU8{cTe6dbnY2+FJ{Bm^Q(90SGe9;x=24i z(__T9K=00`-Of_)zCgR3Ugn$8JJGR_-dTD`KMn6O{L9t5b1uWldM9=d!JjJT&JOk- zRD5qo_V?=ByUD!k;T z|6$sFAvn0A$32W)@@*Z)FXUlGE9IXN!oK;W^C9Duj86j@hozqJoTV<=r>%>soyV9h zW6=4li`H0|Zs%B+#{Qr#U9r1JoocC*e1|2DLwL%d0#)j#Z~ljN_><-^_GGD2i8Yh& zWYJe^XK2Dt5{#kn6U82fH(nB7cB3cBa3tvdKD6asAudqJ?;IIxkT+MH@?)bY9ZG+^ zMStvdCbHkG2cF?BcRX6;@APCCUw}urCTG-N@_5)a`o`r@$HLR^#53V}bcojKlVtxh z>LlZX_{#12N*_vnJAa}2B1>moU+pWa?^mmvC*t-w>1OA9EWNb!6Pftxbn{H=avI${ zgSyzV>JG{31=CG*N%Ydv9eChb>89vmNH@cI#tFK~GoqW}d3>e1*=&8!KsPTyeH-}h zUTs&+zoVnWfAk(Fei9e{k|^`QkZAMR#2E9~GR=zJ5#QATr&ca?=N z>Z+Zs%QbM`!IAkLGf(@@vw2&b$>p7xi^&`>nj?loX}vd0aI`UI<6qPm2W9{Gmr3(} zj@wE1I;X3&v+P40d%cxr!FLwr`My)0x5?S2yaQ=qhAcXZ70*LL;!X{S<|A0E2(Z?uhOj%I9fb7a>%_;@z+;77!Fx2x#!k^G*uR$I3!KfpYAJMCQU(gT}4zQ94gGxH4Jz5HXkFYpp`^dcz8nP;;C&a7ymK6a9%E!;7W!7m+X=j}>=TP?nVc#~GflkaQ(9-N@+Hc!#at?{ZZzPrj#ru?0Grsi5z_axuPo#t{GvGDbL6;rht zF8#>Ad{6uqy()KW#sD}WF?gjr7P-t(Nt)4-|9?ll-F$DL=3Z4d4SlN_!$W(}`dWv15q(ok9W{;)98+lLWo{LyR9c|ar_3vQ zDARkHGN(^fNA@}|E`J0cmc%C{CA2jPRjM%oUD(}4!dE;Oxs4L79pBz+Z%ozF zjJa{xM=nwMTi$V-HNEvZ^Zt>qxDy;kO`)oDC$uqYhN`++zNzr9m3>TiLTh91({W~3 z%Jat6%3PYJnIG|e!`1BjInTi1_lTnLjy_{mYD7{MyhX zN3r(|cx>pAolmQ}>`9ZJNOQy*qcy#5=R#Gtvrwp}DK1x4nkm$o){j0s-*!H4#>Hs+3dgV!#9??Lklp4)S2TXRZpzAO1%-Q4k8@>)9g zl`9?kt9yHEuVy1RJIYku?SpkQ=~A6NH$An*#AbZ*a)P;eZld}5M@iMT%OnY7DbbN@C({n+IS*o zY2(N~DbnjYBR4fuIJ-DU_R+8D3%PG_miv)?_895P{UXv1E+;STaWW zEg7TymW)weOU5YgJ>YL-TUuq!BpoShC(g9To1HmJSuf)(WxbNKl=W)PQq~@vrK~+U zN49rl`$o2BSo>XRx79i3e~r9P{%^|rw1-uG^0&%QdsyWsf2;hohgE*^x5`g@Smmd$ zEP0{+mb_4ZOFpQ-B_Gt^k_YN<$piJb`k(qs|BIfAzKX8e`kPEUf6;Os<2$J}*ODjt z9}kYjfn%}Y*oz;;TqCxSk;By5otF+aMQ5}5JsNxM0M5Jb(d))A=8fU^CyD*d?LW}# z_7BwS4zKGA&gO!%Ip*vQG1q+VOe$~X)6IMN?!(}2y4mHDpnhuW8}irc%6zAXX0E!- zVJ@#$rEejd&q#Ta^}1)_7J{EU7y8WDi`2FMpe(ig@4hJBC`K(@ zW@s^0bFs%%!N~;kcfG9YrXTaITkMWEiXYNn@q)EO7()_+E?&iu)Y=Tk)050%wVkHdxL9bFN+3=cYwFm60h_#FmNYzTVr8hLivxt zK==I_x46N;&#?#HqPdNg;NVl7-=sZV=7Xm5p(6ACJ9Kke<=j__HT_jLxOMxTx>=;P zsx4qlAL?vXtHW1zQ--d5Q@E#JuqnB=f@@XxEAwOaLN9W(s?EgK^8w?;hj%*7t8GkQXOz~gZ zdD`tGc}YHEM_aGcXMB(Pd!*ShN|}4%ub<>1o3vs5wYQpo;(hy>a(>4lH-q@Dk(Y1C z4$KxAXjQAoH;YOaJzqpxV*HmU!x>}r- zF86%Ic1F4M&sNcnS$*`^vMMsM?H<2BjQcV-%jBMtd%Ppxm(PJWW)?5?+SgWvl|l9x z&f;EpT4(#elx-7yUdpzc_KJL8?%QRnc3zq;Wt7+{@Oxi8`AfT0QSVHp|F4u$Y|R7t 
z{|3t4h3|IxH2r_2e3_XXgEG^FBe}sCX^-%DHre>u-~1m-+g$$FYou`{p)!@_|BQXPhHkV zx%JOv9hb<2)Gb`jq%Km2Ec)>Z^s2cw@*?lH&iIkt=F&P}*DZMlcSN4MAwSKfYnQ#6 zd5XwkwKKyyyXoT$>QxnzZG*GOqMYTPE$6m;cV+Hq%ecdm^A47rkMyyI3#=7+&rtET zo9JugzO)*Qmi#*Np8ea-b4)1DuAw~TZ08x7pREfci60KnNBZNu=YzgP*5_3D2u^4^ zSRmL*pNmd;ry zV3duYorBm~62cOZL0i^#hx*=WI9lV_F=j6Jbru559^x6$3 z>zwCuFN^-!$XWVF1Jio*jFf*E{qyv*`k;kNl5)aa}k=rmM1N0c#@dHnYZU_^6D{1=P#FZr4lXPU>dM zp46+0(qGL|4r8=ZuNEqoakl?k{TMbbN4_uj!`oKIz=D_~FH60Ir%1hqadtnTKjA+3 z;NuyLxr?ZS_$9kA294p6cZS6lb<1s4)Gf>15g$gU^sLSu(uYU+>T@}AKJIwA{Ga3U z{6x+oZ`rM69jm)`AhMN#?@@5^JK%o?`HdmJX|#i2m6f*00!NlR_->(c8(DAy;Wk;w zMI!GBzR9za)@bA{R=KO>8=Re09Bmh8`1QBaLskcV`q~4X3 z!|n^=-yPs&g&cR?KP;~gN8$a$5^^}Eetg%s9AZdv){g*phFiE(5E`2W2h<1SAC$fq zdFepk7s^pVd&uD~q&v4hX(2Km`QG`uZplg5c-LII@|<1HYG+Hk zolCo^X8y9$vc{>T%hRJ7^DeQ*J=Xc8$he^P>2j8~Z$%6txLPFm18?kvEEQ*7k}hN5 z)C%Fpozi!LJsnazAzK~Nog5XNJEoVAp4izZr)H(=Nt1`Ry!Z8JmqPk<(>`K38T-01 z_ML0LNIF?yO=^fkZ^8GNk?DLd{OAORo{Ox=_#^3Sf@Kw1>6z|KYyKvfxf@%sa(Tvy zYz_HfKcqCW08?vl5k%Dao- zmOhb>;H8)Oa!y)@aXCG*(tW(&p;-40pQU?qGaZ-r`8f0P5pQQDbSj?f9WiUJSKiaY zID(C#;qTj6zT3jnrqY^BT2sAE=kvwxN%@9_@O3b4a;iKc>muzO2=T0WA)Xb^uL`U- z_KZ`m6pplU!{Aw2A7?TqW%65i)?|xg`Dh>ESc8RQS>tJD`mhkcDhX>l;UJ$dpRxIs za8r3+_(v9fKVPZ;<$LSK@X)m!u00jHRv5Zg$hD_K*G7b{P377%p=%bO3h^!B;*I4g zp6Zo*k>x4ix{UXgVda!|5gC!OdOl^`C~eHy=AG4~TR>Yp#W9a|7|vnk8)}DcjK>9( zZ9d05+QDv%VchS69-YK9k(<%*u|*u^@Ubx*3z%<9yPU7diSSUFcQ%)esB0x0XjY%nTS=fRU_T_^ELIV8ylr?sQHv(pzts9ETKJB?1_2i z(jSti#@Ws@)8)#u^OZbvW)=lRRtrUDXCBYrN7`2Yq#K^U!WN|5*t|6LzjisK&$20p zic&9Tq_GAX8Mo^$@pDpcgE|c2h*NP@+3r{)OKeKWB;SN3op>XgIBH_yESpjr(wgT8 zrd31!r%S5?X^9>BFt|%%VNB#yv%QN_==g zVgu<@k(r_NY02S+{QrktFtQw3Fo}AnI$E4 zmoRQUW6mA)U9R=#==}C~^K`CfzkO-mvs^#h=dq1jI`2u+KbvtZ$I4f*tHUoh4$9@| zNWM@1k{H_1oW&EGqjo6S=*=zBy&XM{jNFpBUi-e_5w;ww55J2X%UEdF*RG?7xwF(y z=9nj@GueB#|6An;%bPCOt8L;f>=PD0_<@<;t7a@6&HW7K)h&*H=51X(xZRXg*v6Um(PEn0=P(eFglD%=z z4IJ~|&bH)W>Dyhbe&6zT*Sv0AcdfcC?ueO?jP=RRaio-aXPJLuXt-bu*9b|q~r<7KdqHhO(7XNLXUX84ia_YSv~vAKA% z*M~lLLb7-Nbq(}?bz4=Z)4zv1@=TmE+{b#CCi6_^AE`CMpWP++ zQv2$4)ot}UiKE+0tlk!9TPrqC;^*Z0Vba=9nsbT$ICLvMCeqvVey;iKY0}&5Oe3w< zC!}@o9pWj7n;A$7(aNd+mLvm{+|%rLmX61GBF** z&2^5&CoSVgY)X~bSo&whmepLv+H_>#g!=pk{>Kcx?h@@{V@FrL?o!U#j7>Y7#JtcB zJBVYbW`B`Q91gLqSTTdCg2ec6ke_%gpZ-ZYQb+#%$q{SHjjMiUm z?T9h{`eQvi&FL~8ny5G@8)di3Iof#WUX>l?Y;XK6Uu74vf5{zAkFkPv8E?etuWrv& zr4uNV_??PWT12X+C-gl!y@|f;K%Z`j5B90> zvttwUz?H$^%OG%OAb2wXpY9Fjj*k8P;E**AH8@@1Q7m{A$NhMo^YFZv_rRiwiNp#A z@#rvkv>!a$iY^?2Uu?cOh)JK*E=Oq>lY9l2IurX~;nKora~m=xxReQo96?6h?BOGr zl+qL?Su)=?gh{E5IC6wE4xo!g$mU`2sG7QOVqAST?$l{*#&;i%M+a|-HjB{9fo)ZO z8twk(wh-QccW)9~l8TJ(pdUMfHG-*i$b0AbHby$vJ9E@zfCuQ`=VKgZEHd#2!3)~_ zI&k=J&fAatle5Gl%ynK|+XWj}tWWLR(OcEUf)NSNJu+vcGp#CvYbpE|+jCduBBEz| z7Ur7Mwc?xqeo0)}Tj<$9@|uB8?WHWco$2UQyYf%bGs)A9K4ru216r!Fn)6=tp_+9r z8R$|uV}BxXime>+Mv22+)e(If1K)SK+Za))diJkf`m2t(6eFs=%I=q@vL&W89(@x` zEUfoEF#><)7C84O=vg#&l9j2x>=#&@^Yc7CyRE}vY;Uh+zt8^%^A!I(jqN{H+2RxZ z$!{xiH2#rsa)S;BKm`9DQd*UFwJ&Nz=D?t#ERo-gB8~JLi#7;#Srv95B(nu4h#Yh27(s@z>6D* zm3Ia4VwnbBXsj!6@QjmZUAz+oRi;2rXP~$7%wL`bV^U*O>ES~Uu4_lzTkqba*A+Wt z9^E-Ta{a*;+NBeHVfU?!DbhZqv39njtucXdNYZ|rF(8PSC-2$6qa*7CST7-ST^0H} z+j(3ZHtTqf1dc=w;jp#Tb0_UGfO!)0#CBRrt`Tr*F|A_nj`2S}9zn%O4!TqtJbROsaI*v`D^q%C|_c@%Q z^ycvVM;w=g()(}T-NKH!!FnJ{NB%Dm+$JN#Y2A1VVCdE{65TKm#;_a zj3K||u*>%set*u9=_sk|nYwt$Bo3Y5f8*T1QJ|HuR)ls02a}Sf4Y`Smqb)}dj&>a1=eU@oC&za$f>PIq6Z@DBF`ebyICVUFIyj^E}A|h>mp-F&P9(7DZJ>hA@j1; zpnw)_Jj1=I+*`oC72Mm-y=OxAtoM?6FPZm}c`rHQJ?<^w-U{w*=bpSbgE^jsZ}96q zp<^%SR𝔢}BbH)`t!Gf_J^W*!TyrW=fm%*~~1(n%*zgwn5CyeIf%4^|zsoP2FQ* 
zCT9yXlcpv1N-`5^=fvxh9!X@ikjKJKet(Z^Y2541GdFV-@%zVIo5;O8IDWzJpKxsg z_gd0zX@@UE9wcsb*N%q#{^D~L%(y`EEcsjpN3yVjJiv-Xup)7usXYG!Mg-s+PjMaW zNCZ0)pW^y*{s%V_!HvY_{J)ClR`c8nu7e|q;7H;MuFsS7l1VSQiS&|5FF7ndo&!&; z^cvIR8L-7lYetD`#1;!bV!@9tf*%il=D+ATe%P37W3i3FC*TA2)zh_SY6LE{Jbxm7 ze$n=^WmzQw+Kw(TqLk7X`84HdkA&&l2$mTW~aPGd{xtrdN?8(ERDDTVR-cN}RPC&&`M-Nv&2lCdTW ze$F@{?f%YEr9{+~fR_l9eB!`5Q=&k6kk2d&1>xd)q%-Tq0fiW9Hq_@-R9+6mu?H3rbG z&6Qi8y9^(U$o@2ZZO@kKUaMV86u*^M+EUu@ZSF1Rp0x4X+*{1O8MKdZwy7${@+-eR zy&+%bu0zp*-QOpk3msU+I#Fx>>WVgG?i!r064wgX65plG-E8a)?}t_BhuCoJ1Mce? zL*G@Sk2Wt8-X;1XJnY+k|0G>GO?x!z${FCC;Foah8Q`joW2=b86CCpsH`<(@G`2Zq z3CFK*%JoP+X|CM1o=l^j&EbxXD-yFK?G-M|etbTSX@{^yFy-rX&1wtcGXyidg2lFc zg_T#H8FUJ~5KNfraj@3`H~_{5@bOvtPaILC{_hUSmCTF7+e+5b*g7A69!WXIkbb28 ze^aiXBx}vor%~1-^w!qVv4@W9sI0*f9hLaGCS}gfBTPRV+v{s^?If9NqP(`uh3lTJ zYp1O@>B%^rq`Na3>-{yj)@;4Q%GOwKuDLtJWlSU9O7shx^t)#pXCQWJ+5e~hUI+I3 zruY6iij7`k3B)EZ@dR$-uxk=kT|2k_xI2?LGI){r?W@q+i=_BVQ2_Qu>fkJL_F zwO z9(+5=ED+GCh5z2*_6pi*`!=v?(L+kvOZYKHjun? zysR}O&Mb%hPln4ms<_vHlHm6+ipa}DUU$T&vZgq;yzW@Ti#ha_#L4CKRj=XaUn|dy zmGY8*(3f!XzC-o*=9u=LLk+poM^5_4g&!pfKT0%y6!vDC=r+YSwY4NppcX;pCfGImPOp~zqq`j0Kk+%Ue>}>SJw%JjzSSR{JprGBtf?mkc&+a{ zG1;%<^Vvn*%w1ZtF_kgmyN+bm$|oB+lJ69siQn&~b+p~}mPXH5~IfrvHET-UU3W^4$AgYbJ-uOcFu?IjA`RN(KzbrGmEDOoAw9JazZ5du<5u z5E4OsZFfb51c-_ZC=u)-@!Nn%VS>R!r9vAw1S*0NEcM*_H30+yLx2>7L_xmaf6ZE9 zjDTp}*W2%Xlj~a7TA5k%9Pj&o|DXH0pXV83_~*x%{?Y(*`RrGux#0th6*ltw2KL@L zh#XB}FM)m3yAN#VP}eZQaYJtO+ovWDsHzRa~RB8I_5U(UATEIr|8NUOn`TS};$L>lZpG{lIF zoM)F!ndc&-(gWw61j<=Yxsi{uQIyd)%vi(ocIVxQEDuLEZ$L(`M^=X+v)3^%I5dO| zTk~S`nx;Kj$=(XHng8ueJS}@f)M`(o$Laq=#7?BgPOs{jR9w}Y{gy6ezoiXH*;VV= zYiZNPrnTqOpSqMj2d8YRp*(MQSX$ldJ6b%=JlhdqIWOI4JKzH6$2qh<43t<}`9PO7#`BI@bHjcg@=Y{o@{YUcGWY9M%5EB@ZkX;|L>kBG9lf9p=cqGt#v^3ECF4yn5uDHnvUG-x#v+S@9K>?b-nK%K3*C;O4NmwYAH>?1X#guMhx>Gx>4&Lh2%)U~h4*)OCug8C#7Yb{y& z!oE_>C7-z4M4rApmz>GDm)WcrN_cTbx$@o32Y$QMh>HHSKi z{(qBe40}mk&mL1?f3o6v5AoTJlvjH^#LS;HK6d`n@o~SNHU3$b@#-a9?q4q*e+Tzj zT&2vlF7pthMBS|Y8d#U~B6xWPIQlcLG|J&a-nvnS7UC{GMRtx^^WAQ2X>4U@@;LsT z3xaN9&||n_x#GC;i5Yfchd1%OG;YR_-dwkHJ;L?gcbK2WKK-PVvHcq2O48h3Yrl^= zmn%F6zpHHUN6dLY?z!koX@BnR9y|e_Jz85gX1+V{>h=de8}gSiIazx@8aO4Nxa2-h z&L3B5-Jcw%IORT1E*Q6oyKJ<4sN;!OF_GuvUSCjWK&l?c!86q>5bGJ#%>GqBHZG{m zw&&o>Q>^+8pne0W-vH`2;2^g2^(}Qtf5s->G%6>n)1!eCdq29OKlvx$`Q-c#RuS;~FNgqdH`y5OdN&k$p`|1GC z^Uz(z8Re(MTWzfVd4_e&du31Q7si;|HFu%$mZk5M^OS+k(XPq#&E4dwzF9hTO7+K6 z4-T0%f7UqKWGv+^D>joYS<3gY-yiM2nEek6I45BQ`O9_&No%BVh&l&7QH8F5vf0z15~mN{thHiCNUX#lqd;q?cx=x1Tya2G(jFFzMdilXbvDxpG;n zto6v$DY_l+*7~-^tW%2E@Gr-F7@+* z2g84DTo7ylYx;n*m%9V64lF!2fd zvp)mN$a4T(_C;~jE|D?}I0l!C#@geIjrP7#mvk6>*gi5R3x4sHSi9OT!#Y>zu+HZa z7m7}^EzY{Z)@RY_*SPj@-C}gA)mg&uA?vJ0;ccg;Q?ICB;NMpJmEM1J$R!W_tQBw7 z&c)>C;B8If7x8ulaZc*reoh%oJ#%B7FWj_$+aUpCbOrLsWYIID5L&AOgk#Aj^Jwy_!>+5T@N>Ii?8haRi?PgQ>HJxlHY4QDQs1l z!jw%s>@KiQdR@$!ZNgRUA+P@rk_UMWcne(Jh_3$$Tpe}>Ooe|^<_?wX*t1Z1NdNV_ zwwOA~@YU+vV))$lFdS95zLhH-TqrV|Ux9O3;a z+Imafm{}k7!k+aStFjlH?3)1Y@1u>O@*Y75w_*G-!>FjimB{#V0kE8)E`bEds-!TDh3(Q#jW&Lrio_1j#vugz73 zm1;lnQlHH+ifG#+{uf7euyTxzmC?MkH#qs4jgxzH=YJ0}UCMY^+*EC@_O6GUt^z;b zVtjv?%j!3?PE#1^@X%i1ql1mJ_*{5-3vIqPu~TqwiLt!| ze018XA8QcDQLPr?T;LB08DFwTv|# zrSQ@nQTXY#@w1k86epGb_oFW~mvO-DubfNl`&|B4zhu$o^3yrEn%_F^^oGGKs~`NQ zPQ9YDcq8rNu5{DZA4g%R!<+NrPzPz}_2B0|ur8B!)mnkR5zMo&Q|&*q4F{DkM7%qn zaZg(g+CX_dls6wN6^4#bx{>t$X$*yPo^{-RCOqAPFEo4(Ox0TGtz0?ut%I$HSi^AG z7hl^HA748boP8VL&!Z{vwH9}b;-w#huMcyHm;RRfMlOvtgt;1b2ydP7o7OVz#a}7> zodEv63?@Itc_6>%68;u}zuoEg9`tcD?R^}%eN1k*74qjQhb|tzAj=rC4blWE>cs-e>p$?u4J-0xvGN57U-yD*!q|Fn?|_H1Y$(@wjq9*AdsugK1kS0l<{@LZ z!B20;b{%1k=8-dS)J^yx^)VZvQX^W>0<~Vrc@KWJwhK;Mj 
zE@5kD?c-|OzP@1V2>L{NMu9ADIU0N&LtiA*7f;}u6|P2OsyL?BG!?_M4cej*%vQT+ z(>7t+?=YC(1P>od8~3Ko9d4?+3l|G1LnG}g?q347j-p-l+m+N&TwHb38BmXdpA%_A zaeilfPoG<9jKXi?)Yajr`x&pK(_TtTegfI<*^GO&Uj=F2t^K{C$Mue{z)t6DjqA^6 zz3~xyz0Sw1Sz5}v)iA#6F02_|&AAoKJ-xlRml67acFs;@jd9Q&+QD3*_QIDRy1_7l z4a^5txtM25`F^T=Vk+zo47_mghIv`!TZy*zi~GwNN>nq4Q4t(nHH zp^Hn_Vqdm@he4jfiGgyG!6vBUpGGD|x+1lOA=X$7j ziYq2~D4}C;1#=b}AFsntzMFB{JmT-x#TxheknK3iQ^_72U5v!q*`zm|cDcYbg9R~( zETi=VjR$V8p6ZJYuK35aCqJj2^3QH!jo{zi-GX&SlC_udwud@ZyiOT1iten|9u(}E zT%A$Wr&@D;;*XkJoygphu=qlLf2TV>xQcU4rcl>YuK4hVnPWf2?{0nH6W!fQ|6IiP zvpNN;hB8jP?xWsQK5{1qAHFb9we+2(WB>Hdvtws z1IW^28;f|+eyp46)%ui0+=v}vuO9Rw%k9=ThY~(5d55X z7Eisc`n$fE;FcrPo;<-CK>0((4VEE;I*;RNXK7*hb4zIki(p! zOREl105ES=3tZC1y&nSITKw z%;Wt_eqG!ui@oM&SEbBkp4*m1*^@eCM6#&46s2WSu4g9YRnIEQYn8>BMH$uD7}_eu zlEqryJ&qj}Cf?4ouyjQRb7rx|O@44>KQRn`(>q78lpb)g5StP4>3;S4?hB`xw=}rv zGZUN!PleIK#)4vB#XjW5kB#03J{J@VpFKg9q2N@`f@0=oKBLXIYOf3OFLrzO2ax#{ z=mD6y9gN<;n03x8z&h3--aCqCM~{wy@!v&{8sd>N;FbrG`sl(#Fob%BM~wX}t>ddu`46|=zT zEz>V-#puXB3XX1d`GRh}>FQBC9DN#&KFL=z2R#YX9-5yxfF1>>^{5E?eCW}I6(iB3 zME)WgFE1a z(xD>cUpO-ouBGvY<~?^H^OMmb7iAlO?lhu1&zcFfjp)vpR}w7UN%Gjla+ane2L&plE4b9uEmbvAZLST6lJ zMq5tO9odr{Wp{+*HL!02^3~u@v^hh<9QL266*sA*kA8|Ql_T>l$m#L;K#RDGBcF}^ zhJ_B)T^~Bgn(lqbZNanz_(MYF)7Z(M_`Jbm=qvuJrmefh9lj`wCFtxHI73@`+|i2j zJz8Z^I`AAi6y!JCY`#zcUpP<~7ZS#cH*7%$#id3F$HC<{DE}PFki_Txkr~xz9N+E3 zo(ZQ@!1*o6CHtmqAINtlHdhdqXCQ~d@mRfBBpD7>V4lFbq7qGU@PVVFBMxD`1W<3p}ovEiL<O>qZZH zoaP8izhhW(wX9XHJe76EX^tSdQkiy6x*d*id#hY+D7wUwt72QO#1X{7O1PgIs(muN z?33XmULlbL5V?xH6W}+2mS2<4zNOcNhI5|XarQ!x-{(HBH+Y=$ zRIK!g1D7q(enY2_?F&fjJ)K<##%@MV6Tn&H;h~duYL5i%qXQNxU9GR*gD+6~7#xSs z>fHw9&_n)S>mce*2+uA+Z^q@M)SB_HwpX zS&FL*+@(vf&Ycu4VcfB?xO2rsWFlkI#8&;tnAEdc<BkzyAHze`M-2mpg)*^~r{%AEaIm{;P;!(B?@*jc3$|+BUU&!TWZS$$ z@_77-xRC690dkm!>}YQkM;09!)L2YuE4^r4kc}5_4kMRE{5G?=YsFp2e*=9xkNa#} z-ZHV*k~f3@XG`8@#fP3i-tI--u*FXfMb>)T@>Xxl+g#*9xF_44&z*MKeh9ubkh$gY z__ijnPCwi24rgwhRo-9n=1vY@vhnfqVq4xc7L%_v1(_Kvd3z|KViofC0qGt@-Zpbz zWh5YP$+o=tD_01TIH0y)#C$qy=-+}#3u=z5vhiBl*H-gpU!0fSL_ZTpIG%<{$*xO{}WU$?eW9)>CI{df+EH0St z0gJJ9(>qt}1B)%LOkM??Q^-7h(-Gt}OFjwmE_Oxuv4hPmM{+~WoxnC^SlB!=3Y)bL zn-^?szB4y;f^;RT4@)=5GZSo<-|}g=>{?{t8aMcloC%-Dum)h?P@_dU;$U+y8k^xd zJLrGua_c^4q`jNAcJO&Em@0f0HYW(1*`vt8X2ukMd5%11P+2Q6ZUca;H~H7S!(th_&#LZyaON3CKnt#)ZFho55zd#!uj{pHrs> z?2zPKI_I(Fyx8buaoRlOd?0Dn6y6)fX$MxT43cxjXNuPfS34r#W%#hQ7yk?&a&B~l z&*prpcTUT>2VY@#_U*Z_ve%QRakZ_S4`e(jIWM*4ya`@(%IId{Y6hHa;u+*zF_>R!oD_2qX9X!a8q~+21^!C*>Wlw5|%m` zDVg+utM#;LTN!fXN7$-#HLh)#-n~No<7eFX7<|QxEUaC5btp_7@8UTN9M#-j<8*vo z$Uq0=qYU{-x8+0o)T*EH$$*u_s!KjxU6BuOXD4=$PrjwOL-$>3TC@C_dNV8_r&@Jv44-JLjF&)uzg zmk-|!d~_$V3iWU?ttV)3bGP}ae7hgA=dS#_JK~L&t(Qf(X#suGAgqO>DyC~Vb~6{d zoDM`FAbfF8|AK>u}a8{KO4#1Lp0Uc4B+QUnjF)(F%U+K_=vPYw#hf zSKxo^#N7=xl2?Oon8j1~A#WD$>U(URWElTL(-`~>4ZVyO`D~qfw3TOvuUc40I?s~l zZsc1&TX9wAcd{jB)lh$t@DTpn_o2W7`L4q3?<%gU@xs18!du{{+rY$3##D;G50Tyg zaY5vK-=p!NNqn~N2BW2?{)ap43&w@^;V;8~7Hk0Ln;2_4oK<>|L_K$a?K|PT4L7jA zBJW8j#H$?M%9+T-t>No3ec=ffZ}q`jgXQz#5oyF_#BT_e<1e^(9&4NxGbY~ZL2rcB z<)kzGp~Q;AU`eXW6Fdyp_UMk!*TcMjXN$XDR#`uFa<%xYY}(wBasWw-j5J53hc@6TVfKH(Y}5?vU>jS3`f6YD;toFR=FRlv3*3& zXM^8gl=IHanQ#Uu_`QfeIVzsSIgs5yfc%R8 zO1_UF-$RgV`Lsr2|24NP`#%>SmCj|)#`c!jwqLT=gsgRFIaqI_u12*~;`=Zw)mtBmG-cP~%3ij{(9GR>KOC3I7Ad`PbCW~4A zvlu;YV!Y&{ei?-knal(4GYWfFt4xwf?O*#8GC7d3Rg<@)#Q`Lf_mZFf*Z1q-1&&Pa zL`Iw7GV+5*^MPSeGC35P6d%Y%CLKPo6aH|@=wZp^z0+M4+uXCW%21 zcaq*`GoS7FxE#*w@ZA0J!j?1ng>S+Z+=xvWhiw>(jTi&h9SzqVH5uDsab5X^Eqw2F z8s9C>%h*h~Cd>_i>+%cl1D7of2G=LLUBPJo@I3j4DTm`9j>dXnbV0GpN>6hN3BpDA z<|ME|IC&NSCkQ9sj|*9L<|f-7U;{oS&jZ*6aY@;N0(kKOWLtB}Kfy2CfPTmhG~%-m 
zW=kJBBj*e8R~*1rh!@Xxd=?fjj`)FhwdzO22PZqQ6Zv<>hx(0yp8K!?!yochC_mYp z;cy-1mY&?TE;n=&GCqi_9vv2jYyVe`1sl8o<}f#OuUw^1(F^dv=aze7zWa zEauGD$s5bcF9*Ym-6`R`jq}UXYz%jykJ{tD3cRgC?|PF?)${}l!(;Kg3d57QU(R^2 z3Jm`(7~V*%PCaA6K5!xFRU>-!12A0UCF#mxbS2f5Q$|-yU+%%Ds`;9oQzuyZ z;m+LyORA(+DA53 zb7JyKb>)8t*ME-AXl|~dk0+EV8Aiq(4zn3-KP|Tx1UG}}b@b6OadI2e>!L7SvTO60 z)-p$U`33OuGt00rUNWq?&&^=UY|1Mi%m~Ji2l}yg5Br~myh@i=(w>SlkbQ$^So>i5 zkluR|~e`3ccYN84sACZ@k}YZmbs;_B*W?R{&d1txjS4z)T* zyAl3Czc*!N;YUFJ8sNLhoWGHB(uKdNTl18JJ(IB=-Er&5iviC-Z^p_xqk?^VxMy`bQ!uSU(G4>OuT8SH$Hd`<~U6QxD&k54#0j+{HLraVPoFKEvz*n zQL#prZ4h2ajvphPOyqYs@@x6HNwbJ~;Je`IjmYnF%v%>t>t4}F9#vYK&gbIxiF~H- z8sodyCQE+NLHYAmY`i~&&$-qEpO^e*(*O4$zbVM{3&`(bbTEZDk{!tJGsv&j(@S=X zh#yicc7YuWl7jy@GN#0{?k3Y)yJFV;A)W2u@o0XrLvY0`WcONc zg~#v&Crr(^WVasKeE`|5!@lfBc3(CO?+5%|vMYa!WLN(82jTmYUHNAuyNkUs!C}~I zvmi3}XmOZ=OR9^%AiIVoyX(=j#rV2U8HOdhngg1E9jNECiMAbZ+p^2J&$0ssvRll2 zb+qhCX6x~V??#`a{o!r>K=OzG9of|!rTjpjvsf9Z|`?nG&<2J3hCfSC~$oFLMWHbC+2eJh>Vh1v(rB)HDP}yjTmT+JtCNyY+>f+ z0iFuSt{n6kEzR!a+Qtsp7Ia(q*@CVgMsFP(GaMVU5?w5!EGzl`8s^In3?31Z{gaKc zcytPJ)#zg*HfU6|jgd}bTPpeeP|CW4xP+bbscee;pS$q`JG%Ja@Tuswwe%6p=_}hp zyRSeeHRpMVc~=kqqvuF-Hf1`%cXRna(b7L}?Ulqqe}-LXVjL^(cnFRmUCLzqta;1_ ziFe9&F=nQ3lxL&nl&}T0$PTjdmkh?yJBc6IOkB?@>bIFbbDKSaEB=T+P4ZRz30qJ< z^mWvIRF17kWA4f@1!cEs#FlL*E~UFYa{oDT=>G=0-1|j&1cx z`_v@E=Ps;_9yctDiX&M{9ErvatJ#y^u?3&w{~T|`wd&JjwjFR}Me|0|jYEG7guqT~ z-csk&q?ukTuIanj1@V9%M9VbtC7FIhGHvmsh^&u5wk?@P=KozdicCwM-$$PJBU_V3 z^gX*gqZ>z&W!aZcz;egF47bM3KeWf;lHVSdefeY*V^QWw$AhJxA`6Z65&J?6l@*`0 zj=qpAH{5C*me0rW>#Hs!iW6F8IuaWtx$Y8?Yp}nqTsMr1$ghXK6NWgyR~*WIz8i_% z@+T0d8sDR~AM&elj=0-F`bzd-uGu-L@81%iMTV>Rzm6C-t$mdIst&RT8gu-avBtgF zll8Qj{Cw#iqt-CG2A2eX5z6?fry_?rni9qy#nf*X^1A_!nw?6lA@*RF;r8}M{v+l0 zwT?A3&JdqDW%jV_K_A8%KY%;$D2T{!23%%G!By3DV1nd#7xJ5pFJAK72m98?7teT- zI923VeCy}a<11FP9$@CwNL=YsTYjHm{3-6Vnzg-kX8afIf#yX&K^B|ATzAyGKwJ6k z#+-oUcdu;^wB{({6!iP7Ul+5HcZM-&_ zF^Be4>B_!CA-&tlXxTy^?gM)}9A18N!gN0CV6@!iiLut$Z^51&K%W+|N7)wRvGTr* z0T!_@+@C56D0heg^i5ldsNId4nf5sCm^R%pb{5epGkrc1-sUftJ18S5xMz z@F%}QnU9m_U}FE)Q08vM?ux6~`)Ms@zlP^)yxoZhjS2t!rKidlzVxf|w_o~o`Q8_P zQ{Ea2#@Zak_i8KOlqOk?ezE;C;JTTGn)*Vx@R0`dL??BithX36knST9d zBkx#sN_1x_?;8%Cx>8uIeD)qZbtPk+mbW~#Z6bboKKozf-4Ks2oHdbQ>NGpv82lsN zS6{Dqp|pI?3lEnMXACWQP5_tW^BKvrN4UiMtl?Q`s^@eSNbY<5o_r5{v7J>=RJJ6Yh%cFF8}K}fii7LH~hpB zv+j)XGj-nVHUjrqCqncO0CVOP?iyH5SHzTLx=u#!=4{bYAgs!k087f-U{Q zmNd>uxYT?cZ1J{YOA>2Ev=7ov!WHnNk7DQ0`75|zMO@ZEe1|X7ZbuV32Y(Nqyhqx{ z5-te7LRo|@$2qTS4R|7KX--glc})1|3k%Edd+C?u%fXa^V2akEgxLdV(_y3R7=5I* z$Gs@uufdaY@Z=%%BM+UJ{@on^4)kQ_+(6boaHJ=n_x@!dYj84{a`?(f{;!0q9go76DR!Bh@e`fLeIWIOQa~b>jL}Slj-dTrU4@HjE);r0kH}`e;sg|Hm zyTG7JyzJ3IySeBa_Spf4yp`{89^89BHU0YSF5UxA*VfE-W&H_ES6Q|}+PK9CuQV1~>Q<=NaT+&tqWENU$dp*&Pn2_oIV;p2d&Sbe>F9H<16DqUjW?sH zr-L!Voz2*lR*a!O1z=1X`2cvu#+DWjdP;Z& z5Bjz4@Sxx54vv*`m&|?g!c*lx1B2dv={MzjUn+0qHrgZStGG=*`r&Y!j~9NK+iXNH zr3VhTdF>l;n<|^zB*JY9kpc0Wf6(ut%7DKL+^n+`~!ZY{+Cf0H;2TS(yyf*<{0ZW7{pCoh&{thhB^D&;qeQvS2&(DM%aGzgrzaQ>1 z749RMy7Hxk zcWEH&gw1Cz{jWgQ_tEEVF+=>%T(oTL#*2PC_T!6|kKN+A*8jedwg%o%{aQ&{^``Y{ z)gKgK(>=zkuOs&#-sSR((|mNl+kZa1rV+gt$G8h#BU}-;5w1LDm(eNT4)jxHQ<=6H zscY&2sm-yhr;g)_=c-$ux<-2&I2>jf&(wSSWShe%pSB#P{tO&ueH#v=y$8<4VS?ln zJO_tS`Nd&+ad$Y3>e9Onhl%Dd8heSqtcSm7Osf79#~BF5oW>k)<%>3tsY!|O7_E^K zkIA%oj4&ny#+-}CNKf1H7>$`az&$R%Cg6XIF_XrMW9ZYWb8whwOlhPKg(*ryxboIR zhW`*{i{>t)>1zi^&ct1Y+c@GoPaKJ-%xB^&8V`x52s;!z;Dsj%H^g&JW5>C9%Za=H z$Qa3FtmI>p%h1~nwvQzdye@&GBr^V)%oueW@1*dV z`P=JHYR)_b{2Pdl-@tgtOC8>(uQe9hWGpCu8{49>ke(eM%RKfb5e^6geooBfe|nRH zzaWO`ci_hn_HAB6Ijk{`2mc9Wk^iK{(>?eH%JdVSKlSvmuwubW3(8*xC*B1oj>9*E 
z6+XB|GyXf-=0wW!8`|g^{2n?>b0ODiuH#&e-$Q3@rs3zC%>EhjO$6!lEcEPl_S&6^ z|Lixpf#!1bmGxX%e~b&X?BVm*;0GI0z=?p_{8z>a8&hJk{??iERFRoY_&mf7^jrO= zg?OD$y%A2K-&mZ&{*5@rhutE-`z!t#t;ton-#ixalhkt7brNNn0H1Km^AzP-NO_zx zyh|D0A-(%p=O`VkKN@en`x^U5YAwPddn}>yEMoj2ABf6z1?}RZeb|rdW~~V;VLun= z-RN>E&0GEv8Fwf~|47rRD>tSXSvB|bViJEF!M@^_3->;N*7$(Td-zaMSu_&wwk zh}PA%TtM>U_%`I%kbgt@MEf?j+P1%zzN(@=_%&Jva52sZ30wN39}W-Dm?N@BMoeXi z%>{(f8e=F9P`m&h(6YJIXx48%$h84CSU?J(3Y6=rTH7Mc+(8kEOfv zW88?|N`KY%uhZWV-F>wDb=p0myN|Z&ZWU`h#+eCg5h1^6tazwbGA9^Kuv9$yFgD;p^Nl^?Mi`n#9-*ZYW{TF9kX zO~<$3_$`yzYfLs+`r8Z-7=zxl1Om;UVsGK}Sr4MSGm-m;u~|=}Kbz3w4~&b~;Nxii z>w5egi9@pfR+8GhA?5p7;shVzPe>wv{nq)N^PBa7i}5!^ezU$L^@Q^q`5-od4H{Qy zuXxpgvF+*?d8S_54}22wMTC#CJ`FiYN3Zu7>#tQF_fbCmwkFY)^>(6xZyC9#F9PVZ z-r0mcFH)M+O?oQ(s^3YcwLD$Hr^Fi7n1TpB=mVGIm$v{-f-xxEkD8 z&GRSBC;c9~YxyEMtNn-QelvEv1-tzzcKbMXTfF@~^mrk9yc#?FK6))%to_Emi7no0 z+v2&jr`Dy(_WcGsI`ca@S%2XEN9^g+ zIf$v)%?HqXrJ>*c8La&~Id|qT^m{mK%b)Gvi#uozL%J-x%y~D~{KZ|^<^9NO zz_!az*=18%-i<1YW0Mz0*<|@8|C6%(g0eVzsWP?NX37?An^n&DDChf>Q}1s?)~xc{ zAj4@9licNa}3gHYb?&(!g2h5j*XU{^tWtuGB!FH8?E&e$(D_leHPc&++q@VAR8^c zK5-Z4x7?QF*Z-OiobJL-&k1DRhWtN{KFzuyC+jWlH4}6EA04>b|JRSN@&CQe9Kbfl z&(0jcTY)rUY|DzJPS~+-lS9G#oro`DNFo53=c6 zH+P54)txbRbUvGqjo0ltOT_@)$+IwFBR1Xf>HQ~pHsO02_!YP?stqpqm@6oRH!9vo zdnU`STXO?##?*6}hvXgW%%LpWXpyb|9TKE;oIrcL!~gN|4lg(yL%zHAc!&0QhxT}f)3F!r@eb|r4sBzq9RGwkiZfn06Q9{? z`zPct{TGOLcwxf1;~idTk9TnS+v6R+9LxFt<9LVp?ePxn@eb|r4(;&{?ePwgcyMu- zf8ltC`DciCnBN}n&>rvbEyO!~feSc3RITOs-xTjKzdhbTu}ban4w~C*k9TN~cW~xM z{zc*)=9hdq-eJDk9`Dc|@9;kq@34Q&>9v>b@eW^(KWvY8XpeVjk9Rn|4)Xuncn7!k zcW@D#>?TGzhFIlT))mCDze9Xve+SlB?;?I_CGq}>eVRnUXxe|lF|BrxwwKuT#`HKwy%dGo< z{!N$v6=LY0zctqX{19Vp8v9xlvTuGL-oNtSa{Pl_zTo>41O5+xlH>m_>HPbB0sns# z<@mEmYe`bTAG#{Xzx=0$|Bo@I|92By{^~@x|K;0a{QuoM*1!5kasEGC8Sj6eI2M&# z>m_f{{t?9AT+dx~xP>_C_3R(egSuQnnwOGCAM(_BxhjiE9kjM-YfQkOe{;b9JZogD z5-E47%l|Uv);s%|Y((z#3+;ATGCs~nW<~<<(c&SFRq=>up|&4 zq_eJyEei^AsCoc)~L%N7kgmHP4a$bHx0lnK9Phidn=9 z4KTgI!>mWHVhzu#A*NsJhm}`Y`5+%kf3aZ&t!u>xl&oQdatWm&xZNxK_}f%V?k6MC48J@|=CCHOJbg#;O;-D(zih$z?ddrcV3XC=Tqf-A5_59&w(q_J-Y_t(ZODTV!+yoAj~C8j?8V@^$24 z1M9(bp4Zzq2K*aY3&uH2!FPWY@W1Z~us25FRkK@0>Jnl%m%nNFi8WkncI%nCgm}1@ z+0(=9)+=>Mrt#`4m+AA~sY`|!ub!gaysXQOOAYut-JIj^LO#;dHRxY=^7-!F0soLY zbNs2~vn`%HAI$Op_D6>Q53K82{!5HTM~-(SwXiTkP{5q*4=dL9N3564IJQE_``(nra}x%6=ieJp*U*@ts=({{I9wWUFM`D`Q;#)jxIn z9UCYeyboPmR`Nw#m_xd11En|5wtf4NfvsHkpl>1e8>$MJ{{IJgIDqZD%eH+h(HZTt z?&w<~ds2OE+eh5%bL3rhIkp!$pNITewr^_wn#<9v6zW@rUaca3tz}bQBh_}c?Nhv5 z-vl>%HFT`hdUvIfM}2(QEZHC{%`@tb+p>3a|4Dn-_&;dx`u&skF6*DQcmIVwLO(jA zFEd&H{1mq70_?>`)>*yB=UuTM9}~0g;Ky%?`*v*I@>^qCZQbu~jPw8gYJGRw-X;Bu z*gMAtB_n6D>rP!(Mb+gLG9^9Z9skDB;>7{~v7ZL~s}du6wq$ff&m24F=$rH`+TP7T z4oz&UlZVos{ExHSwz*c<62I0i6ua z+zt0^<(mQa^RmZnNuCJb><1^Y#%=I#XWZtD*#@FR&bUo`L}}bM16k`+Sh(hj!n>QV zMEA7DY9V8`Rp8lj_J;c%>$Fy3r&hCm`sJ%5c4{8|mCl;5jKcjxdKMlU(yK5$WO0m9 z^-Q8!wHrB7{?~(>%D;s1mip^b&X!!y8V=#UWK-PM$-}Y*>Oaz~VU32;8pt}CWwz~D z-y9i7%8nKM4}v;V!bkK5KL zp3?^O?Y7OMbdQajMhwyrVVA! 
zj!+&0?sRlq4!(g6*4pK0*(!p^e3fj8!^tmjCfS;dY-%jClCi!umO-|hv5aI(h6tn6>+C}BP30YKmC5v5XAvvyCp`+wUv@no6q>7#tzbTM^2;14rh{6XDpwC zOla&NY+c3JAsSy-odI97kVWLWx$0^ov_DFIOTGd>&nCx=Q?0RkZ5ugu#sX)OI|=E?e{ADT;PR;S0m4XnOZm77BeNM!c57j@J@2-qNlMY!^u zZ-A?X)bD>1SG6Yfym2+@tL5;VxQZPAQ@BbW#?yzLkdujEm-aU3%s6uvoLPI9$hY%s zG-tN^+2PH|>S=7f?2DXPKBBLKtKDr}t+#P?3Gyl|jI+xqY`uc=wc_g;$|9VV{GJVG z(`}qJ?X)|Rb{zF|(tMFLf14Qb8?iMzJXJ$%Kw<*)Kxw$DAZ&pj{>X`g#I^L*jA>93L9`?~&` z^Rb2Pa}Qsfhi{*Iu;<;|=N`Tsw_@A7_PK}u`MGxa4>TTYpL=MZd(hsN(f;A~xrej+ z%-ZK3+UFh=L)AX_z_~K``lI8P+UFj={yc5_+=Fpz``p8s=XBfW9!|%nw$DABj!pOy z?*3mg_webR)vR|g88^C!lW|AlWQcRo8F)G?W&|;eeOd3&Z=g|j8FBCI$6A(5EJX~y zXvM`4KhZRgy-*cn&U*4&%Z%9CC)khm5c{%s;Jo$)%)8%Xb_!-E8!sNJnO{Di_zayh zm%_R0kLq);@nXt`N6Q~2O`W+C&zUPP>vPWE-Me^U`9ePH%o%c5@Y#Y53(7gawesiq z?{qF=SMJTs%U?rm_XF&Id7rn3^*cT9Z5qbIn-waZ84mE>3{XRBLnwsSa|mSe>STBd2a3hKSlLF&)=l~)%R+TQM8B7z09P~9XYs*{#KuQ zmdv&KzvyiJPnlZ#pJ(lDcK-c;`V7jElp#o2&f5Q0KL33Gx9fkq{uv+c~u2XjeNagrM?8{^@;d&LYV5GQD)vzCh3MaAQ-}FKi>p81UWq)SU-PO6C^wgP?22?xcR~@xp^KAXBK3-|}@lno?cKZ3MHvK&O zJo-8FpXlf8v-YzY)z9WX+0W(~`#Jj?_H%TZzu3=TUFNUf&t~{+{jBmk{d{!Xx{$?P zS?j32E#hwVsT~WOc9GGv?^la@tmC`=T^IH6n_jP%ehU)k{#?U@Z&%UpoY8Q%^btN; zRs`1(&rdVWuz}uAGL0*>$NeGJx7E+ct~z8guYxWuLCy~GIb0|lfX8Z&f5inS8y#w& zf2c#n6X@wF(^z|m^LCL<>wT@O6D}yuTey%1-&L%3C6|xSv@idd`-|4h<9G1nvbkVG zn&Go_Qe{*8wo|ql4*YrDFoHLD;)Bl~_{58Pqt^-hCx zP~gaWjSjR!oYm*=(uQq0y!yZPqDcP-xm$6#oTK5%>%{ibUhf!5!FMUodxkf7 zoc?GwQi3f;$KVduA!dL@HI!LrQ(Mn2o>^b3XT~_M?qF@Lo@eNpc{A&s7@mn^w4UR5 zCeG4&U(du@TJQ60y6ty7JjdJ5UY>pS`+DwRzt3}q^FC`mGo1I6d3N5{v-3XBdy$R% ztn)sSgT>gkH^Ft)@d$N1N*zC;jv1zB@Fld*EqwMf&Io^s>zD9zwTs%QhBdfnYlpn3 z`sPK|cZTyh^_t;)PWhKOpHu!N&gUv;dSngYLe>GU*1BBQk>@gIFQxoBoXruPe{`PF z`9!DvF!HoM!81sG9Y)`^roqxjbDw$=Y+^}27R896nQq)|Aze>%d;adF+6+h_tpQ2_B%ZLIV(nE zXPx1#v9r!W_K~;N4#v_a&YD7%F-V`}CbB+|wlayGJyY58C})mSX3CIbmpPAT#oUeL z@+8F~*TxrqAN`wj;{4TamDtnSR&?r&o&NMJ13}(Xo(I9RJrm#>tL0sZQAOq zT-vBKk#|0gA1rJ+6K@y=wmJMTOkIWH!f)0&z!kyJD6aUQ)WzY7gTv>+6~Vm(@d)v) zT>6;bJGN*fyzoBat<&I!>EOM?vl97D5`BKaodB2gRz5TDG}b;dFT1J=zBkwAdwM4u zoGqa}<{`@y3nyFHI}Uz= zSicF6xEmfJzJE3zAsIC7wP~_*eX-wRIISZ;!i!w=M`f6x^;XEiNNlgph1VEFXLvn< zAMFLM`EZWshZ$?;nh6$P5x>%zxf)wYHm&qcS8yaaL>raeMV-go$hd-Zv<}MV5!)rR zRa`092Cd<$Q~mLmHsSLd%RUah@pqL{)(w+xv*d8Yq~6v2$YEJf028@Y4(<| zZk+U%kjEmp#NtoEhle^=e4G#yoC)q+BHLehdv!jzH4FUFxWZL76co zvu?)CF^r#M*?Wlnhh~}9n7kkLcQCgWyR0#J=0!`!CS0_5>?iiPZ3*(AvH2^v8i$X& zV#*p=XX^Pupyd?n2kIHCo5-$<%eZ*O*j#a1?FAj~};l7}#5r?>UbS>9f}`1H?*%;&Scu4OffT=2c|#inPyZx2A9q}Tx>nh=2;w8vgE~1$|k^J z7YMKCA;-zs%)8;Mhrn}jSItdSnWi;2k)?4Ezx9C&yU~w5@K*8sbk=WWu#T~3;bGR+ z9vU(bTVpgYo|3_}I3~L)z&+1;HmbCqcNtv04tX9;JuYEA+NG`(YhR-stasFYJvSJ> z;2y5mxRm}3*5Rf<5;(D=G;m@A*HSLh?@#*u(_@Xvq~D+Pc{V5eu}`_$ABuIJIiJQ4 zN_Pb;KC}TnT*_qxGEy_R7=L92xaV2VW|jx+*(AQke1+;5r2jQnqp@5fZ6XY2?x9R) z)d$S=*XB%W93s3@JtvrphNN5fl52}Jh65Yx;0Ze0a1N?5O13LXoW z#o^aYeqG-|emWsXosp+5$khetOjm3CTl8H6 z-%c#!RI~D-6l1%eK9mgY{+`jY&u50}I~gszIe%_chH-00wHaffyUZAxE}_#dlBto@AgKSjL;o8H>{4Hs0?C~H7} zuQ6Sy`5V%5*3RhZ_V>m<*0MhG zeYdf8yUSR+=g27P6=>PNcXa46pEvk@?1<9s&$-zPXBuS>_Cm(f8M|_6oWl98+YO#o zzCpZ`&+{o{Kxeof3>q!r``9M|T{1V!XwEDSENCb;T2{dQ%$ga^eT%0rXym&kJeRw@ z!6|NEu!iSmcS>cWl>KehiFz zkM>}fj;Y_^OWXMFbw0bC`wJJb|4gRo&!YTqQubQ6vDUH~mmB`yUT&1(<5(+w+)O=} z1&p#Dl)0gk{`U;lo^X56BmLiyWDH)5e#`|cwBL_D-+X^)O%Pow1XCM$H^EG(&E~Az zBKT0|$(*-$a31&pWMLk9w!Yx5HOb(`Tkzt;?u)QXNtN2iVGU>yf9IN1OgT zDaRkC_FSW~1by9uh3J;{B+>pP!fkxnWy0>Qyx#?G=Xxlxz<6l-f>~}Oc#Lwaq2B9U zzVPy~0rn0|3>Fjy{6Ay<{CATktohIt@Lz*|O9@-ZBEQl5Z&~k~{;k~Cq*FIO`w7<; z`+xDt*~m>2<=(7sp!!bzHao!>^cZ=B>9fuEuUs>i{(`61j;Eh=Caipq!|AK7j5VGg z7O?tA82uc$Qbu~?4Nn*we(Q`_V=Xpm?Ob0-E4lj|nhdXec0 
z2Sx|hn3Pj{0H#b2EYKKqJY%j2wD~MPSGx5)KSMr4X~Q%#F`Q1D=CE&=^3}c{Dnp4~ zhQ++AGIZqsI=)-Mcbuc@*Smk?{}R5_{tP$qd;R7;p3C@5{j!XAwKqp!+LN{3YqQB~ zF>U!6&mr=j&HM60uHacV_vC{=#J>I;cJ>F@+gq`_w}ejIeRBvqZ1J|u>@)n>t;T$R zUVNFcJ#aD|+ntu~e%E-H=WEQ!^M^d&>xw-8zdTpPob(uv+?<)fS*S+o&6!hO+-KgL zxi4mIW?sTt+4(dhEu4hzjOV@;{x=>DmH|e`!vEIsnXoN=Zry03xqfU0mvJ<2tkHbW z^kuvkpnWpdD`i|ddAH-a2e|qVLB^jesJGkGe`$h2V^gB~G z8nenZelXJShiIV7d2fla{aooq(m4P0sPpz%Pv9R()L*irHi(u-)lc=xpuY#8 z=VxonB=n+Hu95MxrS)xnZpl5iNOF&TK5KqX+3VfzO38k`JI%UlE-6j2Z|i`@M3PzE z^_`;+jvn*_+Z$xU(LX&e3yjWS z-0J9oqgT?K+;`BecWxhRyfcyOwz2B78MLGE!9CL@n_DehvoSX_g2yv#{E<95a@T0f zfhE7Z*NTPbcvfC{AQhLv1-34zHOo(ZW#J95v=PtD~yH}kgC z{Ej)3^LZZDoqOi(sii!ZX5Nu{d-vSQ%Xq#ahkNFoshfD-lzC6;ojF4%@8$WX5!^HH zO-=DmNKNVczfyA&jLAbgnv;ihcTK(_$31zZ=_xDX%A>q0vmb7fZF<5*#0kjfS=dQ+ z^3-N`#Ggoi`Y5Y-1@pjfPo}(!%%t!$^o`1X7oRUOz2T+Ea^Fwz%`l%peB{`>@FNr% ziNWW%YsUV@NHaA;D?>h zc)TnIj{J{lZ`bhu2L691M*h)Q{G;y5g?ygp!v2xh#0PFWq5jA(yywJih4-G=jlImE zFLoHl$sGj+Csd9d1(_%EcxPw9#1k2r6ZyaAi5b@aKePY8@#0*ZzM%;*Glpbv&*N_C zG3iK;x6&TsVG~M?6Z(7!pM(2-g~!5+QZ6Sgaj?GZdzOJuhJ9XI@{O3_*Zp64M02pV zzjw;!ltFwa-P`<%RX_UDsvq}EFvCO-3+#I)*b(_Za`!M^^Weu%qs@Z6FBwrk_ECq6;qx5p^9=iQVXk!lE;FfY2KkS+ z?<4Vz7I6s^f?#7O1x^UktK@O#}=7WknXzE>WzGV6Il_iD1DRkjrDn{>ENK`R-9=vdyW|~-KI!3ci33Igu8=H0kA29?|VKM7#;V~ z#3}I~YgekYZnK!m~>7u0DN*9gM?}W(? zUOAW<{d=d3jo6Yir!|wbO2_<9e5dl|+1!7OxaAq^63M@9T`aDOE%l^NNL|cb*t>&v zHo=8#`eXv>sZa96S811Au6(XiuDr_@jh+Bs&9K|YMDE5Qcfvo}w><2PzMB!%cCt0Q z!Dkacgwk&$FTF22qu(rnKP=;UqWzgT2As$taVmkBEZqo3hgykgPliCk^}S6(}i zH?>O^_Im?3lf>X{QuZ{6ZopibKigMy)*0#I|(um zCj)~dP!MigQCm-T5>UoqD?L`*dqao=5QBQ{0HP#;Xwe0gsO>o={tuv1*}*8bT1$H@ zlLHz+DRjEWYbG!gVJK5J1aiN>wf71;Aq)=f>F0mQ=d(U*W$m@z^}f&hd!Fa_KJVl( z{Nm2?acjuK6lA4u79&q1s`EFqz>>b{;B0?084hQESKrLE_;vupnQ%7y`x&HdeV=qg zzi+_N3^#EtIGUXg)ma+(pkMK|q*&&#fO)&@;-|{DNg1Yci2J3%`BH+MkJvxyD25no zaXo!kd|s%P0x}K?U@|pw1u$?j4|Jn z20ux6xSAhFS7vjqaUM(k?K9MFm`J@%dc;{alDZQgo=W`{aQ$P>8n+tSe$kjA@z@IZ zJYzO2%;Zp~#q{_5Ut)h<^O3?gl#d$52wo^3>v%UbA5QY1ybS3kFL%PnOYjS8Iez~C z6;`~1f2iju*?V{P+P(K=zwndJ`71_7%3lrZF}_azLfh{iu$_Yq*a?=D$2FYo*qCgh z-G0ANd)-03+U@lUVFiC>Y{aqKB77%uB7Lp*xP7PYi}JPIYWQV` zrx7ER>@!ZIL*%g%fmgP02eR2UwH;ZNz>zxgr_+6ImYHHsR@24*UeT9b8d0bv(z= z9ka5Ni;MlkcI0KhP_b(2)!duNk-FpVY%llIcHEP_j{76HH|E<$!T*Q$YFd3WR{p+$bnSpYtt{ym*8y zS6imB_(jQ6?Xb@2EcSfb|E+xC2fNSL;`8%`T@c4<814l-8DJ+9>|}wR@nB~H*trSp z+zfUmf}KfV$HJP`kMUV?r*?;Mr#%{kJIO@2s{-r9XJ*t8Q>*2uNwt^?&tPVVd?YuX86u*5#zwdk?@cYjH9r%6cgV|xbzL))Z-?!w9 z>{5u`q|lC2J2DhQ$sriZKqqYc6vJ8dZVK2)q0Ogy5?=|_bCo9}EGtRB7xQ~9oX*CN z)18r2Te8@bf^8fEj>H=zr*pCiV5mjkwXpb(mDc54>wCFDET79u;Qfo`#Q~4_w79YP zbo(|J)9O&EYdhujWr#zIB&K zua>^)I?@YG>-YyQpXN+YA#OUr=vCQ7-}i0)!aWlm1IpZvUX_glz5Y}WV@k#u{)sgU z*bm%U+Q68b+gl#qlj<2@Mp4&ko?exTPfSdSDr@+%L9B-U#?bQoB*WjB_Fp4 zuYWk_G|JQHVhj&*&^~+Bj3ubyI5)1XU->@%zdBg%Q`dO?cVscnWE{`CGJS61`N~tn zmywsQ`R=jdUVo0uXl^1dx-`=`g3s~0okibV_KT$}`xpZ%tGKt9>+c*T_K56$yQF_B z_HRa~y#6f1e;gT1D(qd>MA>ikurFRno=+dm@-;qa_^Mq4y5zSf`R!f4-!;(6^MPyF z?~MBDls0nkExEn*E?1QKTh5jYE|QkYUeC9c&H>7QiZrXJtC862Q0`(9dtF9-w2!9=!P%er(7miZzFe`r%*D>=${hM%DzR^C9bSJlTr(1G>EhT&UyA0&d;*_6#eP30 z9FgY7tY6*R8EO9ZU%b9St}ye_g$C!IX4!2zN0^V^XZXTh1I*veFnoE;BfrDNejCJ~ z-;VbB>)}%mlBW{Rg+_Gq5`2b>XCrv8CuxL{ULVq~=R1<4%Azr|`_R2lhz)f8|I6fG zb^0q$Z}u!Gt*ggV}+-$Y4Dwt*f>+%Lfo*HHqMBX?XQJXww@fvV|$vv zqCAVgUA%7V>|w1x;CL|FD1DB+t|iZf&gkYkY}89|wtb9ESVcOrv7PE3ovxuSHNLVg zCQzs1!)F#|V^^kOU#4PbreJR-(;q*Hv6K@TOX;$FS-UF?dlZE|isttio{Qyq5AR`% z7RLL|@SNepdMT1n#=x>yJ{PhyX%DSHjRd0pJa)2e0cYN$OrertrU z5WSqz*O)N~xo@ux+8gZM_Ja@a=?6v|smD}ojclqPyr;(ZF%!9;%F&R7JwX3X=R18- zVB%TX3uJyhcKCO$+gm^8+>lQFxCShn8W!8f4DaXl{fNB^oN>L(@BxPJra?wv 
z4G;89HZhN}jCa!aTSpsbKYA8H-0Iza8NOdKw(9%YUSD6Q)2tuh@coJZk7pbF?=tKE z#qgETfBf?u>Ap-bzLt1x9*6K53tkP<5cXf-op9cF@y-VR-^%~J9L6|}m2t(zntE4# z4$?!9sqa37!K*IqNfbM|P{3-*H-%O>IvWfLb-U%ip*&cp39mKOFY^96lQTCvxkh8=&x zImrADwo!I`GCt=H`7yp5&UGrf=f^+z!F7Q>erla@S=>IhAz*OwP6zs)h?8T%4>_os` zTw)yh(V(r+*ihLC@s?I>gvOu$1{vN#I>N|I^0g6rHjy@*#Yrp~iIaq(xBWZx{{+`b z=&inVyWmy{O8vc`emOCGgfOQ7B^zTSJ=eZK|E4lwNm+b%F3=1Zq!UJZdsy z30{wlu*PXu(iZGRe$)?r0zFc?lHp0}Z(qcgud7dQEBr$8Y(;Nek-C?c{T%nG2mEzE z-s@Yw3huBOzo0zj5T~r9?)s9h{%?g-xj6sK=xaWNE{q@OtvHRY-St7$v`6Vv$Ye~$ zUF5lB#oN=>cQ~|fM#0T7n{Rw;#aq+${kJ;qXBO-W-rE(Ihy=7^PRkKWG*~Q?#xk8UG7Jws!O$* zESdBD_2jRa`4O6@-a@{f=e?4hRnzk5yV1Lfk>v9Iyx3?{`#~w~&5XBHU#arnjDF4{ zp0q~6xlG@z5F)j*G3{g;@hRnUocN$ zC-#8>jRTkl{=XgXGAA;RK{0j4ThG{ptr|=l>NIloQ{S3@e<$|l25i>=IKTj%BMkoy z*tY@E{I7Ey|6>OST*d!7_u+qR)qnx~uXCc|zkZ2P+K(dvJc`4}AKj~V0C}^&bKq3_ zYx1i%{K(sCeM@J)WpS0@J71y>G)DRGBT;4j;WxL(_BX%AoS*N+4m7_Tn_wPh4)`sc z{~cXlWejXSJ$%)GbXQ`tpYuXj|7h*|cbMx{!F&JN8aa-(QS=JV@(;JiN0|LM6bD#A zJL7-32AZ4Wy;ILR2bynjzGeP9*$>QrH+#i+S4F~gZ)Jbux_7ew_jT`P_niM$_E+Y= zo&EGJ&Wh5Bj*8^#-p*cOl)SXiXXG8-pEL4~{wL1J+s!xv$(ppR zJ;XK7IEsFvYOrmk|ERY$`Z1%m<9Fm5uBpTjBpA^5rW*Gphe)O38FeAQu|6LxL^-%2MFT`nmyN{ibfuRTPX@eacu z@8legEO`H8uDPJg!KHG%XH~ugeSixWSxDSm$AFEbV;ClMu&#_2`^s6N!1g8uV}L>HF@@cU$7U z8|4F9(bHpA%qz}nV|tLKZ0oFu;;+lc4d(m|ZQ|Fl*Zq28I~KFXV2sg2{BjdMt_Xj* zG2Hvq9mJTX#beW5aFJJ6uFUC)?*Q#^M9}pYH=L9Sr)wmbgSSC(`>@#zf6{ z3rsAh9l8x%EoVN9oj3iynz+FYrwjLp3!kRUcj6m;;Aky=#P-$1O*(vI3uX8t*pzR~ ziydHAkha?Hb^H-x8V#!=+Wzp!uAe(U0k?#RymaSD25<468+iV;~U zd>r6)_x#b6Xrt?b5zLR z^9$~43w!8LhtFnCSZSdn-tyVIJOQ8SMVBV>?Ig-#l1>x){y1&X_vuTOAI)U^&=3yW zfAW3He>%aLewQCzZ3O)1rS?6)6qk&-0GF&pCX&rmbf?8_^iUkhs$=wr{$qv;WLxFUJ}BR-?u*{^vD@%De^+X;NTFv9!SE%*)Ti_IH#Pit!ZfHM7& z*oioxAG`8tIAlUxA5-({Bu~jz?by{vBdaP@soa|_QV`p zHhu%Tu$?%Ve7<66ilH8(4c5aWeKC%VB?kZ1vzbr#;n{B(ag}4i0~5q{*HfoQs9VL+ zB!Br7`4%VV`dDHCRy{ITEZAPEf_pY&-};e$|Df#EMp;RW?KH=ow3pi8q>qNQmmUpi zFO8?YbWb4kUaT`U)pV^didn;z=~ud*?4I$313y=#;f_XnY12*{;!)q54<1j zz&rETSa-s0VNLTu8}KPM=GAUiy@UHYOhuki(K z_c0Iqo(S4hk+iMcaMCDZOT956S;N z`p^XDIv5#Q8BN@Gaxru8;aP_nlOp{nO^B?l9unrOBkruRw~8s)JgXL8m5F_5>Pehb z?M67(TF#Q+F8pZ|d}=IvktRg5Zg5|(Uox$ukIaiNc`@iqOms)>;@;TIU!fznk)MA{ zaGC>{lRi1fp(ewjroo%O8|i)OV~@uiF~DQ`E!!AwtzDwJvpHHVzFqC07d@6A- z>5qeNd?}uF3Bl57>QXwC>U5ZoIUMF&@GY&qxIcu)z`;9sOc;FY-ER0+ST}s@E%;V= zH+)O;PKcXXF^B8p6U_tI*l^@|eSBXlPX~yVC{J3aQt^+6(IfGzrErr+L-O@#NWKn` zXN?mRzq+120_lBeW$%GrCoTH zD}+~pzl-py$>_}_^k*V^bTj&N6S0X2KJ+UXSH5(=_)+?x6j$CyZ2fuqR~1+J$qeJe z*ICDe^%(Zl4>5dyCXeaVO||Lr)u3N3BedQ{#-0Y&;%h*^Drkqf6BZ@Lfq`|Ui#J9+ z5>wVRoEScRu}!2K^+?aMXX3a`XOg_?9o;m`YpB?J%B@ze(+%`H=K^ z1pUG%(L2@C6xz29uBa())(g;c?k#DR(R^n;{fQ63S(}G={bh>BQqS^%EhD}D1I%4- z;{NPMqRX1W=pFIy%7yW<#K_{ywI+l7^>W6r&0_w04&`fLOh62EyUv&4Pv$#oC`;oa zqbw_)KIKIYt4!l5lh$!jzwswk0W1wX4@=d^H-M-99eC2WLos!bZ_K8D zWqVxfIN3a5X+2nakGeYmmLBJu2kATB@yNikY2Z7S`(L817sdY-`Ykf1G9Cm+*~e92Z69{f!#?YUL9B(7-y*QqI8J?U(Us~Cp7UaK*#^E9g-y8D(Zg(H z3`E+5=(4`fDDy1OtX^Zs=kzPA<(=E2pT1&5mq>@Yao01hK4 zOGf_P77mNSVMhMdZL7ecIDQpAa~Al}9`u7q!hZ0Wkv%#Bgu#y8TiBjO>f75hXwcF~PpoQuy0 z#PKdY4q@OUj$0dC)EV>DGzLgETDEa^VN6*gxJjdbwGsO~yHNJoZ7L75+cTb4pZ;lN zzEfke_(B#i=5B0s(`x$T*J10DqKpToaBb_+32gig=utx?I0L7O?VrGR z3)8mEzXP4J%1L@TE_d@>xShruv6f3yvZY%CD*HR4o*3sEP}vxwTZ6#zPvZKbTYW5h zU5PAYtKVBRplk^?`n^R1J8X1dypOG0y*ugF4(EW%iRjZr^huoh3G}2HdCbM`9zc)G zM+TIc@vLRT{mJN2EdS?QXYW&~jC*lA23R_@J*Y$39XiC=WpKUD>bGY66*|;{4ppE- zE$GmZ4jt-i)(|6=4&{S?*_p|3L2*sB`)a^^0Xn2{1lOQDE$Ge*j;P8ObZ6QdQI_s_ zh_|gJM%tqBL)S-@MG)iq5zmamSI?YN(Dp0-Kg+vqa4^l6mj2MkL7!wme{zh0)|$7| z(4XhAnOo-OS^ATK{%n~$r0x2k{5qrJNPp@`cS)Sfil;3n4^@V%`MIKLmj37& 
z=}#T@;eO^qx~@8>KMBO&!dITtAJt@9xf$=@ytHBi;n3FiIa^kk&h zFYaRFvt{H2-+`)|d zp$7xbafIiuGOTq+*LL8_U0y|d!R83UmGV?O=Po$HT^+dEnm^3KRU!Pw<_O|oMc}GE zOXIns8P631$Bu<-d*Ix0aBs$PEllv8Vf;m168rcB$8*(Eck|1-YR`22X^#7Ep^CmFMoM<36FajYFp zo7PI7ehc{mjf-su+dn1$f20kdHp+Hz8ik!T&?g6Zt<`uQtvQH2Qra2|@(JU8H0I_c zoL0{_fJHZXhz8pYw6~MQ2k<9~xe3<|FHZ1zNK4}!8{od9Wi#RF;s@90I)ycPm-K6$ zNL{UCOi}{>H_(1;*cRzKl53Ra&eC2;MNdt@*IdmZzjina-|4!&^$_Fb#8>I3C>3V_ zm%HgxHB;ydTBTTsxG>`s*KK}c}rDup^%m~=t1mY(>7+YwRXRRpg;L>-6aF;x|OD273c^oI< z>1uP=;&YN51DmVKQx^HDos);%&Fj#Q#rZ?pISlqU$GQ5N!ci*RBOUJH$6poH!#zkR zJ;*(T$&*h;_~i3vgT-91qj7vT7HtfwJ*Kpk9{R9B?K0_u{JgkxGq}v>-PFRqW%r^3 z4b<&&&P#)MOT}jkZwCJ_72cLa`kn-D4}drP@l#n~ZD9N^6WZyBfbjE%P% zaaa6(JwC-1g14?**#K)P9BR7>Z`F)#xAEp6Ka1hX+d_EqR(P^O{*#RU9e7hYg*Vwf zw*!p9lNGns|26pVs32!fb2z%;%eP>!r(?IXvES3M<5P*1Oko_}dc*&%$Rnf`RK@_yBMw8%;m#jK(a(FCD=? zZ|KfG*HHh`Wy?MjhYQ%}K^^vaBQ{m`S++S!wwdv?vdts0&9p0i{}SI^O1wtnJtt#_ zm*SIR$j@x)2YN9Id;GF=gR`4+Jim{jO|SqR`PV$wdqOwVAE0$PWG`sz?7p6M{7G~n z$%ty!c%;qfgT|o`B)%Yf{dS5`Cd?mkC6@~es&~Z@-F!EKcIiy8F`TyHPhAeP6~1u? z9QG{uX}}K&=h8WM5a)$PPm9;)g7dMYwS~1s6rTw2+OciQgK)03V#RG`S9^f((sSvXI-xVJ~W3_^{ln~w3mr&WsubYkCkLUjrWt$m5pT3oLx3sNFOB`Znc3`0(RDHwE$0pRo)-i|=oq=3Ev( zka0)-n`ae$=ainY@gWZE#}1wbGee#}hmW_*&b4{AJ)K^maZ(?1EF>-=JLqRT)Fva+ z!pGOhk8{q#Hdp?_4!`Tn|5}^+tlFLZJMhuIM0_*`J{pT1^I*^7uxs(yH@N7+9=D{)vvWKhWHJfRQ)Q|a51g3+u*V|>IQLC#drV0d|btN)z{MUr2s#jO<#6{ zY%M%B9yw0LZ)W0mY_3}F`gW^gyybjDFm)i|-YOUEZ5^ zJMX%|gkoy2GVU?q~ z3!Xzct7Q*K>jl2MAB-z@E52&KD}Um^CXQfD7|lb9p{$BwRWdJ6d{ym+#^1tS;HbN> zi>b7&+}J=L>0Ki}2-X{a6zQA8y~djj|6u)}!C^-D8v9ZPo@v0=AEC`@^H%AChqBgS z-)rH$4L36%i07mi;#M|yO@kYZbrSP(#8_+JD)zN*v$=AG`fTYNiri|hByRA)a{8Y% z21wkMb#Jk){aaU(&cugf%MN2t5}j`I@HphBGqGRBADX=^{`#fz>IDUD;;`~-huNR# zx74TF*-5OARKvbS*BS}Y;wKK`U~BLpZOE`Ls9zdGl$Xp}pF@bTIvG2J{?;f43Z7F~ zyGiy}ZNC)OZ&DegUy8B)ls$bi(90UQ#VV(x`90zcHLSO+|Mh#7%h7D>*C+Iw)Y3j( zgFOi4wQq;;+HFBz`;O7SV-2OBFZp(B5_^-0*XAyYD3g7!U2wOhUqcM<8O4VO)AuK? zl!9LoPZGaXTu$XL<80f==wh!w1B`^?)k*!SeV!T?6Rn*BbY`AA1Hk|ZV1#P&nzaGp_#BOJR zSIx=m4(GDlp=D5?%p{&q5Bh!C+U8$Q@F~tAOj~*l4o$eR>4`C?lJA#7eE$J(x{POE z0tfQ_2f(j5u<(5Xe19EWE4HQeu%1<$2m8N_Sk)iFa610CDCqx%wIg6HzJmui%97y$ zwXT$M?}FU6RN@D+-O|~jSi^jQx{P2B^v(F`cGgX(2A92!9?_?;`DM&EI}0YOv8OgC zFu>%;V6u?m#jDZdBea>ElrLp&0F$}Glcz`hZ0N>O9 z`h64kY)saI(IfB~#lu56!GsV@W`Rj@f=n=JbAnnpM7uG-!sG*soMpS=9+?Yf1u)6j zI17`E+Yo<}-lwyMtzr_wq|JM6uGm(vW56E9bsn0(lA;!t}(4Ld!% z&}pTozJ(~+ML6diY=i9NSNT6GgL{99@L9elH|P`a1E=`r0epk_q;zC9{@?&Md>QH7 zL439W{g5wcA$B2~Eq!=~`7qBAw>W^mFy#+IVi$G{uCBum1Y&XW1GV7aZX@a)13izy z7fgIOrcB?H-=Uw(q>t&Tx=oqBZ-DV@IjYfN*>KHQRC}-m-OyfZj1{!ngW`7LI9f;E z%^b5F+SseHkA~;^SB}Ak7rGLfbGNNnnH03)PV`Z0V6ZpXQx)jlQ0!2}qA1IT zhZBF54fk*!Lz}Py8~$@_c#A6y8$P#fIDANY)q-Aq3mdLBlXT@Ux{~PhM4v=gb}~1i z0^L0ZXKBH%NMD3C@ipydm4gkG4ZkJcX|9HEIpzl1ipA)QV{S5S#etT-NZ0-yeObZ0 zv|Hj6(3b>DU&aLg*YBISXX{HfV-+;!^sE@8;xZ9U141;l-$85*8pTlh*b2~P@iaI(W zULLgRRUtNA*sTn3nT|ZWEpFZiZXW2<=!#+4cwtz5pxd!2OUbX|Fq3FU9vC^%cMShO z3cN~})+0}idzOELXIS&L(!gdF^{NO^%kOZsJNj65c}hI% z9^xN!iS>&sIKWx%oH1?6kL>bUaI*xQRniumg5IZtPub;sSKxpBo(>PPadwumTg~`F zt?iyydSNb}U#vdu{@Q2I;^Jo+2a#ihSvbpIlvq~Dn9V+}>Rt&BBYuIOT_IShJKs@auxcVQFS1q<@@)&F%vQ1}<#QM>! 
zO&N?UtDZGi(@G1Qy{^PXm6a%cqPR{0=2Zm4bSw4l|Ue~yG?3&^n>%m*gNUwh<{eqkLzmb@- z<`pbO2a1+2-YA}wLcW_Q*Ch6=X`b%&=h5z%^ax`#D5J(`)Pf0dq5brKg^bg%^hq*) zVtMAqDWq{=3hkiM%#CV)#Pa`A$^CTuIE(L-KI<%1j(#Mz>=5Z{&TtyM{}A76msmvRe-&=FFzQqHE$Q1T{Txi3P~$h& zWy-enZT$pJt-9NV-W)=X+leJgPcz{?RrJw(A|6QJU=G}^8QswQ%l|~rZg6@k^}Tf| zv1NR(vLETLd+~>?yWlBPym~X7S2|sTPQM4Q%894H2cPgg!x5c9Kl@4KwAOVS{EE5x z*wMA03Ob_H7f6TQL_1C`TsxwgHp9_xe+2!azGoPW zbC6#UmcgCIJj|y4EIfk|>C|h~lYG8#oC>$;jN|^+9O>7PWoSHmXB`}g@7eL}Rn$d( z5YxrzyRe*A$T+2LupEeU2e|!(@GKlZPCBXJcOv+;V%?;fPoMC;aP=1O`x1TF`H%E3 zYvCIe@cJ0;i{Hm`PrtQ9_OFZ+e$hk4_10~h?<4kH>4wt_zp2#!Vep#(rZ<4!!{}iG zV@hhk?{nZ+$aYR?HTQc9L z^+?>Y>T8TQ*DV3NH%6Da4YxUSL7s)(YOwnd*sa3H><7EA8AkMMw;P6qUG)Xc#1~X^Z&uJ3 zxPsWF?PvJ{1MC*kcO8mdVYZr>_weLW;}-cM3pPP`Td=YnQ=L+F;4Oi+rf7M_GCM40QrM!z`!nK+0`ff?v?9& z@(CXN!7ccL)JGD_)}c4DtErB}%H85d#Gu^4`)SBj{b%Cs;}(IrzVPNqey8sL4_xX9 zK4{Lgu~z#dU^C+^o4NfOcbV;54#pV%R##kQOFaGsA0zweN7oOdx3-^|h@V-HF6NV$ z_56N4edh~}imQi{dzi@=M ztoY+0c!qQ-l{U2cGZ!-UDV@0`j3p`8H(S)Vq&OAW0V}^xp)Fm@Sc2`0@p+l@ZKuv$ z>;ts!x9HQHn6kI<1=R~4u=MF5`jol|Ul8PM;m)||o6x6v;@k(}PtqrG>Rs?$akeD- z{)*72G4Y1k8yq<1e&6D2fp)=H+tQJte8GNvfyOt<7u>=a(J{0Q3i)5YK>Bn)b$=Mn zSZnK32!{{!k@lgCUG+(80K~!R29}4m9oB}7DS4hTC2Bjo&N>pdFE~paG{=bO(5J_P ze!#|x`j4a=hkonzVLPqXaon}3dJ<|5~kl5rY)`%!1^SxZDAVB ze@Au{ObgF{0?$ogYtFP07sfNXaSSZWzZ}Pw+x}&u)pouu*cKOl2Uz~)_!Qcs^q1yf zOEu@Wr8?kW7*A!5)!IZ|SXd77N;|%$B>cDhhoRo0gJAi9!|911d*?U$u3_-wsk&@(0Tty-fZ7fw&bIuHgSF#6y zj61|1EHPZsL&1L_|1lly4Yf7IXWAX?>ji#?)86_;bE>Kwk)8a3 z`bUq0#a3*tE2Mv*D}FVHTKKj5N$crrO?QTM;8z?a3hW6dHQ4oKXSG-G@9+-}YusX}H26+5oSJ@NfWiorF$I^DlXI#5t$`#X$6|32k&~vu__t~*w)*3ho#5**n_Cv$) ze-LK)-|-@^z?{M(EzDbcg|t3^-)O$*Oy`(ew+d0+nZ4N8CQTGnR>y-Xx_HER&&2YV`v|Uw>os?r2 zzi)9dw^BZy{7fXjl8N+l66G%ET9{Pc-K-xJMcJZ}iEM5RX#hI^NcmCtk?<4j-?!#k zd&u7^BfPnie6jd{`2o)F^SrLLN0EH?vFRE3>#f$j>d^XWW**1I>Sr2xQ|y#|)2uq# z^>xG7wLV@Z?=RC9d4)V5qJCeYZd4yVsE>qTeT@Fak5|V3;z_GMRDQ)NH5Xm|ID5W9 zef(3fKHP?1_3>`3;lGDE*~J=Nhd5Lx%yqHmWy`KFr_4#nd1OeP>>-cGl+G=NuZnb3 zFPbx??-WrldcKl+so>E5W4Xcl7SIl?KJu;l2s3@;HJ~5i z<`Z0hR{dxR*2xa?B%NsHEd98YZkV4_Ho?2T%@|Ny&Cv)w+qj=HC zPpJnQ;~8p;v?jUj8*KX{`Cb#WJI$0!_Q;mMbV9%Pz)uMKvJiG^ZIA5_ zb_Qi!iEJytRt^4ZFzW(taT^t3y#HnAIo!(zX`YqR-W-&dIe%6k zA^UtG-yl5L^`LLY1bqo@#a_0L_$%KJ_=v#%EOvcdj&Ber{{UQdtp^+HwjGk34gUZd)P`NADOpoOxvv%ZPy;a$q5y4VBG z4eH`ue6zT;bxr={r=<@ED67(6O}-G9@3WWu({5qbVGQPU6HMw%n_;^(uE_GS-N1`pJm8@1c#oJF6`#_&*BHf>xJhF z>47Jx2U=G}^0)P%Ge-*G|I_j#>A+_4FCDgds%(+4e-PWh0pDd~+}447bRZP}(g)Q| z0=6nt2PAKs2X&_d(`XY2$CCG6{DiKN!;IdE_zZO54?!LzTN#Nz);*y~p zYNq|AxONPE4Rh#gm`lt|{gtuA)YV>FOB*MXc_JmjeuVE6?86COAL9sE zW8drBFv#$|+Rx!rELv$#XAM!MZLoHzViX_n-0IVj*Xmu(3qDLdLh0%M_h}pNwc*|~4)$MN!x#qI#ePS>S&3_6jWeqx1Lm~7lHv8e%ClGB9WGH#Q%yWt_=-n-pTi;QP$4;Mq3cr3Ur>*-E_a)z#zGC>^ zBfTv_*_t7;6$Ubq>)I}|EeOiCm~Uh-2E$HYGJXOXPiE~_$y_lH#n&Wb$+z-TVsqN( z!6vbrm&n70F%An)%gNVg#ilT%v^G(`mM*y5^9A6t}<=NhOe&t8 zXJQ|PwNTtu&|j|SSJSWbe|UCHP+#-OOBC-#)3*|XjhD_!U&$|H6|H^9cjw6dSb2%E z(vvnab(A!1U6pKXo46i4b(K$tJq(piP*=@v?4e{MTgd+HrfmtXQ`?10ZuLN_Y6^v6I z#XN=|)5g1xxy?z;VW?R1_WZdcZ7}v1N)607P zey%m&dH0~=b!zJ#W(^O;2A9E8Yr3=@mhxTY(;mk-nQ^u2m1oBDvVQ52G}e1l+jS)0 zA%1t{UeZou4n}2((aM}*tL>`2sMIz+HofyWEn5zffxdCMa!}a|DCd)WSL1e7Zd(p2 zzb%VhTb?{$4ljhrf$L8vhjZhdCzD_EVsaquPb-HjcnIjle^)%5xb01A{=u1rPWD=7 zo)EF}X5v2Dw=-Si@3}t8m?t;;b#CFhH+t|Z4(+L_Jp$slT>Z?0%$Z=X18c2&>D-xx znWU*PO53b4^x7ZKLA+dX6C*h{aSgUeeGKb44v>zxr|h}nPxAlEnBShxUbCx-8748e zZWirl&5iHJGov2~FS`c)JIowe?IkxoHs0LL^#iQ&F)uETxf==1dbW1#H%=`fzuHe> z1Nv!1cq>*|=fFDThgg>}0~?`r$gwHbx?>+OFKWY_`&w7bxu#X~`eO# zTuz=1=EQ{NdBCU4^R|#YJL3X-h32uZg!1l;JC}FOSG4n9O&msfe{Rlw?CCT!aq*mM 
ztToe>f0gl1iDK9DSG&~nPc~n=o>Mt(w%ZDSBB%=!sN3((6&T$xDIp~UJ+%fyx7pGTf z{WtblIkE!--|QiD_?myzU)c#wB2D?>#Lw!*7NFWU(InQgh`|25%iPGD+0*SN_K)d_ z{dt;w>z`pQh*jDjVa%;;?`g13Ry1p8c-T8DrhL|q3#~nVrOUO;(kmWhu4BCsXKD@l z6w0G#CA(zSq~`d`mk_Q=%UhHaU9=CCK}NY3T-^Gc>u9pUCOY)1uhc$vNo$>M&F z2e`gF%w@jAwfYIgvkF+lQkY-QI8)7SJ4U&VBD3S{(RPBeoFrfKS*v$}vj=|1T(9BU!QM>PGhtju2CpNyjtM@m>-gaFT&LL2 zyScWXkLKEbUf1^XTxZzN(^r~dKOe`n{k*R2=ebrL;n#N88y<5N{%r@gU1j_pW&A#6 z`~hWTUG8zikk1U(R{II}f5q{y@HELqGTP#DUwa`r6x;Hlyv4SBDAyYMKIK|t-zWcD z?EB<@i+x}D%p{*2uXFs7qaJQ=_16~}r5WsP82bItZ-#y+H0?*h)aEGnwW{l8tKE>f zCmO7*3kx@6z=~xjz{ksAqk%FuQr7#pPQY*XVEv>BeARH~&Zf|3r}}iT?n(-M*1FyT z7E+=D*D2I_iYIXG;JU^LUWaoX7P#JmY--$r>l9>D6B~Sn>$GC#NW%A78{SlZRjk&i z_Jo@wJT6n?)h5%|t-L9ZCUugT;4(|ld56<|ZFhOgC7&5~p2QcU06XqUQ%<~zPP5UYqQT~9P%0gdRXErB;H38lV?BR-D zMoG@-qHJI@`PO@q8)=-$2+Cxc5p5PLuR*!UE?WNwWn=%JN&UmAc9-ybI`UbD{koGm z>3AzoUDH-wWg?@J1fDq+IZpPaJMJ(A`(|@Q@kQBj+3+HZOJGMs_~H*Ji_I68 z{O-~`!WyR`E+NhZp7VL%_D5R#=s~C2fxViOvH7xX;)m+f)BKAAtclQqEq!kJIeYuu z^1yxt?5$~WK0T9;tt~BtGF5eo14D52R4wkzs zhYBtG_{%JN+GUnqIQwj6cU-XS=^i!t$?RxPW@= z$|W>5M833${ck@Dm$(luAPE&SaB;UE0GEbXH ze1LQga|F4BwNG&cxJ_^}$CT%)RDSv}kFX{}HhWAL=wrJ->=NuLF0wmfVZE4rku|V_;^tBf<>ujcAK+?0crLq6EINmgCN! zvTi)*4*mhJsmDIpdo8SjPpqad^3I2Ql(|`1~?MUoT5$ju4qS{^4YnegT=aZ*#B) znWNy;Jx+Xr3*Qh1mkVe8u?X7b4y#>0lJeU&w-Udsc6sWcHQ7;vR%ag%w%yi%2er>{ zn_(P2$y$mHPGV%sGyLr?gY}_%aK0x93x++g{kz z6JYrb(wxt6JLz8U3M)-@X?={Ftu&MW$(LQjdVzyU%b~p`g6Rg*>T9hXYQ+agPdGWV z@FvPOfijM#tXcTKOxDB6C_o0*dN^J8GM_@ZqljHz&R*s%*e3OH$6y0G?`0lmR^!_Z zZ*HRDOlqsfw_DeXliF5;^}v1O%xZkBbq&8bgkFR7j=jwf;4dz;x4DDyHy!EYLk(vz z9k_#=bhO9WX2#!Gd83XrenIh$C$NXwvH|UGy!|lD!HFr&X!hYV+ zw~(o|-+7ky=5!&?FytDJd?S!^Bp7o0W+xlF(^#WH`e^nsW^BP{uAlGwb@}|#AS>!0FxC&K%R3 z@Wve02$2tO;klU{%7Z~Y?KBKux+}7|h->AijdCeZ4{=sp=;M0>`PH-f{p|21d!quCDMS?(?5&{#_SFFX1Y`JQtk^4bmLVVT40*ZSfq$642e z^+)a}zFp2U2l{6CS|Zs`%57ZhL8fuY)RHH1fcTcG?v~+a#D{j1eh%yRRHGO8ZRYz1 z>LreP(U?>DLp@ta9aZp5!p;%qt9(~$@OhVcTOCf;A>6X+*H1XS8><}bdx8$+5N`mR zGq&-Z(w5w|@tq`RoV9PczC%pTzn{Er4t~FgXLP>@-`5&(Jnz#p|5wj&|35kJ=DeAE z(iL56FSa!J^g7AG5!bvL%)ZCf&B^Sz=&NHc_&!RK@ZkY-te}}r#Er02z!ydv}&r+x;i_B!`PdWZM2S=$2#6e zRMr%J)BQ~SzIQOsjG1R7rz9FLr6lSZ2OJ_i%=_1!#9LD4SH<#d+~8!#=#PedCL|uQJu37b?>z zmHz_Y({py2s$Ihqt+CO>kHj0-qZ5*IKIh~x?E2eD3W^3cR>#*&qH zjt`qhTxWO3|3ka{AJ+N*@w^}U{P-^KkMHvS_%82L=H20Lu!{Xvy-Eh%VM*n*a6tLK zhOS*owi-tq9@M7-bn8-S|E+(w@=k1A7%m86_;Six&6)(lS+y(4I;$;_1jeSIcfy*u zh48Dhe(Q?uP)uu%{e^HnJ7eie>~E>?t+=MRv|^9i^H1MTZWJ^YhojSb6JZxT^@#is%gbj}+ zuP$~Bz18y>K|K^6?D}rece~mMp8q`e^sQX{jp|S9ly{d_XnrIc+4y>Vz8l+-i@!+a ztahFe7CSF<(BOHQ*QF$8aGio}UZwPrWfkWX&JM7fP8wdyl7P-6M8tCRn>QAH8q4)q zu4mx~_Hm5gF(&)Y9j|6j*fBQyF0Nl#x@zi7qu9TigS4#mARWp#Ix!1d|NMM!?D%Bo zXxEOxaYJ?tjeCCns#rZI+X>D_sJ_((lnt@-Av-^7{#)I=`&jH3bzgaqomXCE=P#6Q z*St#BEy%U2ti9X|$T>ODD>-vqww#f9_i`Q^l=FuFmU6BDL$W(+>r0jyoNc|U2II+L zTfRP%^)ymA=W1OIp2-DQ79RoE$tB*lnI%S>?ym{KoAg(hv(qXd-DGf@!W!^1=Nqwk zMwt0&|JOHaf;!uxymd*>&YO*S+phJ(wtY2d-=+omSq6Dmc^#bXJj%WeM*r^Q@a`Cz zov~wBw(QzW9bD(`c)_x1HP|$#Wzz;*Hf@O4F)wBBkhm1tH(?#$KN!8tCUG(?X4}Cuu`<@zXXVnqUO=ADk?v9~`_s!2_E5f{QeimCHKA`+%kUwK{N@B7* z*=pyD0~Wxy#jVBjWY>y=K3l#!)Mp!E{dlI|yus_dQ}5)6_-OFF$d881yBdEo9KW6L z(a?E4J{mS}%#P=>$L@GO`|5S8rrytC*=2mWe71*r84RDu;|Tj`@aUmChQx(`G-UL! z9nZxLg-_qbq37lMZGLec$01)APGd1?l*In6f4A}&(k{K!_qvuzecPX$8voDF#s7~R zkGwJ^{msKa{@pX}?Sp>8@l%d}<@j^-R})*WdM4*DoKHo6CGq-^HzaBwHq}{0torei z)u(K=kGzcQpdYc9+DfZEf%+8hQeE10cwU>R3s)ci(U^I6e>8Sp$=;u5PvCqv|F7M9 zb@p3UyC@JRQTtZ)SV29`q7Gdv@LwyQ%gMWLSmIEw!?`X=9hSHREae63Qt#Mx*@=UZ zw@ar{LObbF-}&O6v9KDf*X34Q>eAoVJ6+2d+D=veGe1fhoe9sJ73AcK6=sC+c*#XN z7Nm? 
ze);vOfrE^bw^s!VwD0`o+|T{TV8NsebjtC`frjOUf%f2K;fBy_OIXm76-w=p#J|b%)BEMxEb+Hd@yk41ret8zXqIn``7VSTPZdpw~d7knA zgwB~+f$4d(P7pe+BJ{;c}0wybCWiEG+me>yrx zdFtDu7Jah3c4c`ARE{4|j!rN7r@xO_ zVO#fT%~lz@Cy%~cLaZ>Hy@@$5WsK&qUVks|Bc=D8-jOaCKwrr~*Nnf`(KYBg=^E|f z)MD1xq!Vjy&kN)A&1y&AjOn9q#!1c;&b>p9bj}BqQ~6kqzL`!vT^%F6(}Z1d*VCvz zmFSop;`7E_gpNsO%t*iFQ(jlcxO2bwH8m~dsWmmyFN!Iy2S27<7U@p^{3i6ox6l{g zL~j(MKc=BqZmdI}*m~t6=QIoBb<5|>X{1}2*KDhB_E^4^eNFq$X~yyU+su31xsCNX zbDQ_k;m^m-ZJs|nw@Evze_EK^9Aj?tJ9NxdDM{78r@ih%uUwNdsCqj(W-Zs(rDRoa zN5|-T64yu3SLMz;=r__=%!AemnFl>AM31cp)#pCi_M7#0qk~?0=Hd14J;V10WzyVc z??udQZex77bDKkJKRdVC#kkZsaOXBJe1*A9y)(B-V{UUheL(XY?Vnq>B^anj?>%ti z)WEG^l)d(18hb%c?}5$UKr7$+4O!OdmY_YCYOs<5he&77Ygm`>%xg4vQk@rE#Jon& zX-p?y#M~zJbLKW`vqN!nn}6>!x4}1gc3vZWvXi;mu1tGg^EUG_&241kr9brli_ZLp z`A_5pXMXb|>gmpJIw*4q^Bb*S*ZX$Y@_pX?M$ey5&oH;y7NoChU7vJ;*7JSY`OQp6 z-^_6I%@d&3?%|6(-O9Ywo!eA6dIp>Dsd-H`|9{@RX0xMn78;#%ZXI7K=e8-vl+%4U~+~=p@66QYlF!xcPn)uAx^&f8gE%TXm z>-VEaG@q%~e1@^&&S$QmzdZ2w!N9tIV7y{$D;NhqV{X%heg5~VU_q}lpSkk?2^QSI zcy3Lc68NVPkAHjDh=2a}p%K6PcFcEu;E$a7%)DX83!$7HyQ4WfUaQ2W`>f|)q~EtM zN(g99^X3w7;9}-A?=$W-$5_O?MtVha8|jr5P8r?u9b|l}Y${XC8W}zi9NFEEHFo{^ zOXBYU=TW~-I(H89IM>vB{~gX8M)~xe!*rZy4ucQxQ*)S#FPOtrlTY;p<}fP1<}jl{ zcMhYv%UJZa_G?e$`{xeUWViCnc77SeB=1=pHLS0PkPuN=3gn(ce(xqAE0!_L(e?4emi>N74*a@ z<{Q!#smwLH;hSZf(#3(OC8W}+w9BkFa&6Lw`L7QB&V z_4F|AcjNQW+(2oSrY9lT^GS+5r%)O*r*P6}PSKuir+Xd$%&VkRxtl+>{Um#dWlEj(3U6{P4t?iMukbf(CpDnm0&y9E|(skiVle z&hISyKPJxaET2I9SnWF(Q2%j#8}e(&zoC5MzcpGM+pqb1J@vt_(KC)ezBB5iTeOzn zod@Wf!#;aDaZAXV3rI)n8^g!5nint+=xM65x|KGMer+)~iB?&?TGQX)jN$5w{r+Gq zNbcwPHl$f;U1VyLV>ZP@p-%oA?vb$zN&RbZ(kx z@G|@jcA98tWUre>K8W4u27On+_S__lraX|Ac){-Yn+zKA0yISYq=;AdRFYelbD z9(PebrEN${D0nH&!naJn=UoID&w6Gz<9VZGQa6oL*;l30n0}Ogq%vMbTd98POG#Y6 zg3UURX!W?`x&i&OE6aNB0P-dK2aIJe&uV-nalO;x8!ORz?gi#SJY(XA?jz4zzhHbH zVQjd5&A#JX_EP=oi)`=vabsG%k{Q?7UG^h9EuW>zX|AbL6x}2H?8fH=opF7Hc1R|z z8PlYV;jdI%#7N`%6sCU4CSTZR>G~5~d+Y8$);C5+W!0GOyZ_jK#`rytEjDp z&!2W(|5a>p0pr=VyW6q5`tCpJ%c_1Bz40v9ANdAX{|k24_#%Bns{fX;-;Lew!ES$o z-9ClgmMy-EalDps{493(kBn>C;)P$)7PmOIcok#0%(2A}V@L11WNN{`g8zm+eJmsR z+`Ein*}--nG4ZZ{01o1fJZv)l0oNXjgSZE~`7_47WGL-EbZt9(cfL+PcR39C7nrm5 z>~qpeh4%PVnZacy#KoK2Qr@+Xw#r+K5smKA=dz(P@VPJdG>{D^9OJYA7Gz9 z;2XZLjc!GE%SN|a53Fx-_Nzt8Yfp98=ZlT&cH@zV52jP zjh1~D2Ow@S9ep4ht#RGEC>Z!;{?vf}7a#b`1nl(6V8ML){{xJt`-e;|XagI*H#P9) z(aC|=4;2R9>cauF^8M^`0BymXZjJW_Z16zdwJS?m9V^2V6@h?82tIK0TcQcLLwbgfGww z)BEU!RJelMnK$aZHupR>!wvM|1r!U;GyBC27SKi;o$)__Z*g5S4$H+av|-n!t3JRl z(2sYfJwx> zV_ztKv?vItbCVr2Ivri0y)Y|j%g$-Ur*c;7BGxi22dDSk`_hre`qwRFACWse-o5WX z)vr$Hx%#-@`BY+EC3|C3F7-XqIoj$uOw4ErZQscnip}nQxP~V8n)MTv#C3K~Cr%q# zove{K=FKH+GW1tNJc)ZNmkxPEzS~Z8%p=6gOGj2Q7yS77(z;_)t^KdwXzlMhP*(Rg zoB`a?iF&@Le@@96XFM+EhMJ# zX5m%Ao^J`qurH+W8up14jtln4&(k@H{Ue3nU_VIkuYGCNokhW(ztz2ILC#hKZwmH& zSLjXL+c}SKX!Owz<|mV>LkV-_&GeNLFUY#2O8U-b`co%kbTfTwvykz*S@&6cwOs$R zF0MrX^DSMe|NT5KIr#pYgn6vL(*3dYLE(6w)BTCu7qVuqN$IDsu9meV-zCO(A#HjJ z4pclS>q=_Yf;!{xF7Q2`(L8bj*O~m?#`P%vbav+B=*zcQ$E5jOA^SaU804ur;I)qU z(K#`1;*r#zGv!6sTPK>~F*@f3d+KNh#o_1Ob3@qAy)N$v{2xQN$Pdy)S@XVWh4FLL zyqb_!t@8~Q;zRl))#&Pyh3td_vw+r`j|AJ?VemQ>F8s)V69fw+)XWU+t`YAJq1b zqYL$2a+}q^`n@$)%{-5Fqyl|Z?nyi{hIZP>{svK>%jBfY>$+$IjfrY(oM|uGL+y2x z_GqDvK1fJskB3jU$#L4`Y0CL1b_RmaSP zfz_jCpR40E>i7ipxi`%^v54`x**Bm%fjU}Tzr+3~ipfy_&0BJ1csFx|5>J0q$J?y* z>Kx{os^h<_j+9MxT*CU6$^GrMEnU<}b)C*Px9Yl=`W;P3t$x2x{XU?6cT(1E$QVfd zG{>GAX5RU!K6-@mHdB|? 
zl;=sxtaW55ab;Fn<--(@z7P()l|6`xoIW+4a>^f-lfbw%zGIWYS2#yI=p0Q_p?#qQKnuV&Lll&uf!e6Ua3yMb?lYwY+rk&^6#bmTjT7N z>L7dN)quw9wv zT^#eAvt61iT$JtFxfj%3qwY_|gj1Xd@MhdY}8A5E7M~IE`3i&3=6!I-JP_KvvA3J=GVQUS%Kz6<)&aburz~P=g!J8~gv=cu zq`y7I+;NuvXFh$W{^z^r7~|_H=F@FLzI*-;d+!1tRdN3RpWRKu<_1A-K`BYN2+Kv~ ztAO+?WfK&Ugv1NgmiAW&&}&^URzJm7dDC(iL|j zpVFE4T&S3N&pUdadCxn@ofnWpzfg=Ex=Ar|$cx@2z57h&I+8z65~f>nNP9FiR~aMu z!#YWh&wM9=c~3ohU2kl4?VSnh``oG#U1lgc( zGYqlX-+f@W}19+S3yKyH~{!Y?g1Mlh|_^wdXcuMgM5diKUKjQCE#& z*}x?yv1?Vy=3V27Ub>z&%)RkZRq^DLo#iii6Dw|Zp?6K1STQl0{WR$Rs_){9WRH!j zF`)lUS6pBWb$$pNm*!EjdCRv{^AXK8CBr7NCiv!Ec@+~WD>(7O3i-t9eKziY4CX%O z4$W0EDO1?L)Jr|6m+WMke~jSWDMoA!c6033u{G(uFO%|y@a{zTzbExwOMS~H_N-{| z5y=K?M}oOoU~eWEJc2dltIF!%@PvF@PZOJatohd@ZmXX?Dd>hx)YUK36) zTAU_HdgNg zqi^AUHubk=#=TONr_I5?_JK0lMX*2H_ORR09&PT;)i8)M;oKJ*FNSTCAFTlpLgcY$%}?t8da zn)WaV^Vie1=fQlft5)Xahw9@pURR#k)Vbl?XeXBiP5Kz2o9BbE@$_RK@L%@6Db)KN z>McD8TRe6}?Qe==YdZV+X}>p}HnElfKF|G_dBi95AA5=S&rUE#y#=nQ4~5bAtGqjP z&)pmI+);spp17*dJ>IHA%#~C|56ara7`y=*Z({%c4fNS2_V3>iON@Wd4T;3;-@l*y9Q~6_+s>s8 z7~um)u4u7fj3>F~D#k-?Qj5S7_%c=C=XfN`vtTrSjk;$)qxq=j6o3Eho<}Y{aL*(E zFwPC|?WW+5U5bt1==;!{c}F_(ehANP;aweBhyB~cJ1a&{8f4o1H@lO~vGs%4c7den zJu37Kuci1etc5>z(pRFd`mGe4O^;6+Sl!k;u(YEwkh0qLE=I?Y9K{BcKt1-*CpXdG z`{-}kbGOo$AJIozzv~|BtzrV$b{PG%hJG4GKdqskhS5)J=%-=y(;E6|82!XLz=~n? z(;E6|82z+{ei}wUt)ZXR^Ue&TRRA6_&%s>?rWoMY&t2Y{Oz6BBy>>Tx?Plr~fXoLceTq>C%W4+}}xoa4`Yt8@Rf zJ{79d+srrCL1WRz%lO@iPPYr3b$#ApKySu`Nmu08>Cm;+Y0z~&WuBC7=+DwWo26Up zMs!23`Ipm8x_YgHZt$5+x7?am1A0S8(M|a2q?@oublwk+N>}fKd|3?-c40kvH9WYU z=fqF?M(T)u(wh@ZUHvJrbs>BuU40CGs?ybk?_+p}XvKQM2KeM^{bFCsoAHj zaocq(wRH7-@LAX>`KmcYHe*6KF6{I1F4ZH&h_6Xy?5DWmO`Uxm^(>?g?A^yE<6n_; zXYa7S-kiTlbJ!e@yY0B&yTpa?{d4pct3|_~9*MDvTudBi9wD#VC z#pf9VKc_x6UhDh8&ha6<77wU@wO%Iv?PlS1clz#6^j&xQ?oaexclz#6^j&xQ?oaex zclz#6^j&xQ?oaexclz#6^j&xQ?oSq8_j8BvTI+AZYjAVa{qS@Rc)ilX>y;K>ue9)5 zJSDmh1Fu`0ldz!Ph%xE@bMSf%cs(pOCh!1w-2>d#J<(myi0;Dr>%i(~36~&ei{t#k z_gspN;C5^xQ!Tt+$#bInpP}o;c{f&UoYccy>l{P6@cLQwr5^CYqr}w{r5N5vQ`lDdTOJ;P{i zeh1secAbrvWuj#wSdZ#yx^+`)} zEq|ll_&f`1TfsYhT&=1!-+Z3B=5Ve4m+e@%Kg-<1z}mls{bMccFNQCR@pIG~S~dE) zWWa3db`fp68W|w{UH0_mWI+8;+0k2I!|`IviN&T9hixYwJ91*_WQ6@hlTOK^j z*pxp;EbFnuiM1}f^Y`wu;rQ`?^t7k!7V3XKbGjwmYr#GFD!j-1S#skl{QP(JFzT-I zwg~LxEO_;St=n&B%~rlbyDuD8ruK@5rRS@^Og*17jIOfu{M{GElx5OJ;btE4qz2nd zzALV(=0cW`bXt=$o0~S0?jWBpn8aGxs7u>)G{2oY*_9k9$2Th1;|iqFe}BKbZ^08f+L=LKpxo2lB< zZmID=VKB91NH7gdO-7y_23Lif3yt$?{EX*#UhO~>CNP4uVFa_IxLZ6EuInG1pkuGHGDoJnz7&9>&0H_@1yWyLzL0*s`l7{B$AwxYy;ax|%r2?XB96 z?YfbsQ;f$jiy zaK^@p0i5CV3igcwoW+En`-%aao%Bi)G3Oz@(w3OBj$TP7<{ZUWIvFeEQ{=;UL;7UK z9ApjcXAZDoyy1eLMvb5K*1~g*{nJ*L%>N~0dNbF5wXWrJbgnC*Y7=2I>$~Oj^Y7U| z^RwxFD*Rjz^mH)4C$1K0r4)f_?4_^glm+I`)Af!RgXZ2+8Ou z>eG66M{_=_x$P$DBa`l^sF*mgViUY<&Ua}?Cizy;pQ35QoF;my*{Ay(WMVvX`{w$v zHGNo3ABtbQV9$A){u)moPT)U2OMz~Lru(n8X`eb(|1D+>!3XcFf5uz}ZU4fRje zM-6f|pL3j>>z~sZW6_K+eV1@MFgoC2#>xK@UM621{cwb_lFC@|GgdTCq|e*+((M(GPaIJ3 z3jbB#&smRqCc3}~bb_nU4TiJ6b7dL2LTG*GbaV!M#%!ITxGA0Cfu?l^Q|Cc% z&^qAh>J8`$rd=?NF>C7#yID(N?CvjM4?|dQV68}cgY0P18}^_#1kn}tT#VlEYC7xw zT%$MW43v?qY3@OvIO<8P+Jo*;j_x44)#rT2^Z-07-C>2LJLI4{dD;y2AwYgkN}) z1NDQ9{f8>&%~!mS*kkDq^1BjNy#Np0fbHx&WJPt}bW@K-9}QGb`ft-Ws|fk?3s-Vg z67#hb#?I5|60O=K2M)8Ab(pl@GuJ)L`qp80a@7aSM?ZkKH{G4Q;GgIeA2Mz})R^;} zXX+GRpi|uBX%%>Zb>qFR_JKp_6nbW}yOk+_eqpo=e1uNXo-sAvXdgI?PO%rCl}16#daDZbPRS@6ahq&?$n9ed!deyO}a; z3mCZ)zvEXce{N#qN@Q`x>d>`xik0XR!pH$Gci>mlxiT+gL%1E8SBVWl_oYJ$Pi@@v z(D#ycze3OW0-J?&h-CB(?X{Jj@g%y$lknU^`dfNNM=NDhX1$u^LoAiv8 z=ocsLGwwIrXXqY{`)qy_eKzQ{`z(e&i$424vviMepM`agP@e_1HRv8E(+!=vM=9ei 
z4ZW}ncu-28bw%I59RBW3P@hTv(0L*~&^<1;bdO6Ihn(}i;u3U^QH;Y3hwf2=?x8-C z?y=bLn0>U0aW#+rXva8Mi0*Od4bJAYbdQC|@dRW}e{{*md4Fl%9VVWBpD|gQH}Eyx zH|WRrdG_&tE`R8Ace}t!OCNb0eWVuq-4D?>grU>uo4e2}MuVTy&o!PpfR#6Jp5Gzn zUWb|6)q{W1M_xG=eCPx8kq^MlR|)gTS4-d2F{YT&1=AQ)(nT~M`+)aJcb6S;5o1ce zp^x#t0h4a4xPD^)ipTh`dLCphCw=5K#*}E*ppUrCF;!#fBj_cj?6>t1(YYyo#A(Aw zvuqgYmJMSmH0w)0NKdiPF*r8;6ncs0)Mqtr!yjZBYmYjs#PmDCAEX!KH_UHKkx(kG%Yfi?K7`n(g=X$LHylpZI+*V=u*b&bOTNLtj|y_Pq+8zc4D= zC%=mncT`OtJko>wmt3Fj8-SnZrg47XUq`3=zDqejn(FucBtPBPm$FI|{l2pP>AvTF zZ1{d3 zdkpqo62@~~c8O6|n&2wyhn;OZV@h-2BzNI&{GYcl%dO-Yvs}u~bwdx8OB+;P96o%M zw}SFk{Dp7}*Ocdbk@FLOMO*fJ{B;}HhnRwGPG@X*Xft6}bAme0MOq+dl? zLc6t(B_jcT)43ZCA1H#n^>`o{|!3+ zDfS?qYAngOma$ZYUCtg$8b|gyry57{sa+4gQ5nP6xu=&1dq`>Z}n zvc{42W3ksI^o?%vEi_uyxaeaSXVJvKmw$#IR&%aY48G_aulD=i;7q9)`1buD_5L zrnBGI=Gt^$JKm#lTEY11$a}tflizpQjp@G5yk~t2-Z?$p_v{Z0-|sjd>bW1g8G9ba zUKC?5+P8dEjBmxX7Cx<&tBs$+7sEK0;~K`B+Ipom-n5sX1?9D-e6?$=$2&mv5N4>Y z>#cUCweM>)gGUo(->^2z0ZuluEYn3pN}%ip3ZWW$=1Eb z8iRXXg})ig`GI;S&GD`@#@IIA>3q-Ayhr)o<}5+VHhq3CBHmBF=PwJ5#|LQVKJai~ zi_mz?L`Q6DJW4K{Y&?#nk4;{tPLdCO7<;oBA2XsG`ZI`pHv5uu3Q|-raI%li+9EIC z`tg-hryzZzJyA`K)3%OrnyPVT(SS2TUf*qvRTuS4O-}91xPE;Z@2kNkseBKeWX$e! z$U=L}s=dM<`F6>l9yt;ow*}U?EvC$LqoIA~dlLs%B4M8hmZ_1%jExw^3G$*yIEgS z-pL2z>ymf9zofkD`(@?buaQTLkG71L8Jwf|7_zB7^5PB7Yx;on4#IQzoJ>}RnT z*s|`qQBh%8_uH#ue6L)pdkyj~@jpb~*)pgld?vYWw}rj5;kMMnml|i}@x75Hz1Z(N z^kcv8`Gn9oE6oaxGh5Et<85&f`{5n(PUF{wY_;E^atl6dUbfA$WZTVOwQOU|^<`{5 z<&bUV*nXU{ZQU8lwj_saTV##HQrauo=9Fp8kH?Vy`Blm?Vab`wvO_og0eiH|JZCZ< zpA=i)l&muG4Y`$%teT9UzvkxpF8xjDo?(5{kA2XZwt8RO)ezK{YEtQtH>#{C+doSynS995-C?oh(f-9(fBzpfaa8vIuV!oyRI^WIQi?d*a z`{GUMw)Q(rS)l%-%u3FBQCa;sZ)Y()t$Ea{+R!{IojiI*`4gGXEB_+Sl6jqXrbk;k z8SQe)2%Qts2ij@vz}Cs^`H{~RuXQl>K;06_G=}(plSBZJvSXZ56@*xn}BmJ6X%%Avo49k6Z!|&ErhU{;UhV%zqQhIEPO2)#q8mSk4)e&tvBTFHF1| zaT3hb`Fo8qQ+z7S6rT$3#HaSV--}|t3!R>8^giX&+)cRH6pn5=++dG7pLX=H>isO` zJV(9ty!IE_xTiqyY9U=&wx=`2)x576SKmGpTxH$tRD5_+T!oLn6t2>T zE$G8G@X0u^YbyNMmU-rV=*;s-mu=@6r_OBkv#mG7s}0!N=Xjl2Hlj1Z)s7afZn1E+ z6n+&J##nU}wq8vA!uZ-mU4*mZ-{x>O)xueqRd#F2j-j1)nJ-c1vk4(P&Qj)B=b*E;!H=@7 z=|`EL8v3@C&QM#zUM-zPqcaKO_8W6|4dv{%YGU@N#%&;GFKk>TG5!j1FA?LP5LZf! zKSJDG;@-r?#Q0Li-9p@zIF~qu_!?r)Du_!b=A44Ke#GpDjY~7;?zob3by^emc9nV2 zrFLM${rJAPvg$x=SrmL(f}YHt;Qfi`G2Y~7&05dSF`PMRL|4tc40{!R)Vne{M~80! z%|jON9O}nr)HYB!*m&u@$_MAqyfl2udnfvOZR#@?ps~jGJU%bMt?O&Uo5Lo>6XJA-tzNAI6imjkF0KUm?7zv?rCe zowQWWj@xXOxk5a?P9!EN~^Hm1sb4G0VnLgK&4t?`0 zXU(4lAL{#a34AA|FKa7f=!036v*)via<0yDPvp$j3;v6A`YBlX@cf72KYgR2CFe6= zh>da#eYw8!7xRAs@9ibMRa~n;59Z5b=CR_N^NPM+S_2B*Dg+BOz=-iiyRy}8y&YVAs`##du7Uo}b z`%t$;LN;aJM4f#2N~lftxucxz9}N{q}c{Mp*S2 z!Mjr^%UAi(e9iBB^B%3QPd49^5UAqJFZNWM^ZP!?BhBBBO&^79y$%_BEwc6+WbW1Y zC+6Uv7;X6{e!FF{WbY1SuNT?-AaYqeD7mcf@!7Ii=bW1Nk;%Gm-UHtxi*+8Vc@LQ! 
zz6Tyk7VFGZ^Byued=LDTEY|s|<~?L`_+HaFy>5bZZ3EPlozi@6TV;y*fMG1p;P%yn26a~+n&T!&>b*I`-Abyya29hSvhpCF4# zJ5Cn!{{&e~+6l6lv{TAr@_e1L*p|bR$+rA`(BenQUCA0RJm5qA2H-_oR@<_;H}ZED zd}zyETNd|0{?3L6ZMkd9;=ah=hv7e4?%J~W0_5*6;5}RJ+OqgUvwfB3 zve<4{Chdx)9X+Y1Et4lBdsRNB;mi4e(&NDP)##@77-1jq za;4*^xqZ>R`SYklTfUQgF=>ykesumpbeP-FTMseMk^WkXJ~9Yh@hea>IW7)#X1ay{roT-xYH>U;6P+4R5q{3iNq4t!wub-4dYe*hjZ@sPf@`~Nx8=fMN| zE{*!z?tk^W%>z@}AJJ_8GX}!_&vm%}|LEv{uFt0bzb#nMRR15k;dOKE_SkfH*)e>L z#rHMXB@eQXW{7M|Tpz$E*_)c$S?dPv=-XEaTlnUL_KxP=-hJLwe480ZrjK$+|IeUI zt>L|4@|c(XrdrRAWev+n#V3w+HLVpbBkZP}J*;VIy-w%dvBxJci}lPQuC_Ic7>_B| zIRY(tHkmvZ+!MXvLS)50ml3$yn-s`lU&;oqZ{b@eQ&_h#b{}sAE0k_ zx}u`8Pp8fX>$%Q4%SZQ2>imYI&hD1}V8%bvri>@3?(UWib$2n>vg^Kud9v#M)TEnU z%kZXlo-ygd*X;VIlTW_H&GobTc$wA52R%l(pZhoJ=Y6-Gx}SS~iGCi^Y(Kjk{p|XZ z{p@P8pNE`fKRfGud_SMM&S&1wuA1ihS@pO3`C!h{d)X>dIJ?bY+)i>Cm*`v( zofo{NnEhR$^KSN+!e@I)M=z?;nf-2_*P3-pqgB-lceh&b3&v@^i!<2Rw+C;U`Q`H| zT+kY>a3L2vg!~AW65>hIw`WF9%deQtbJ#C9V}WycIaeo_cd2gjEwJlW%sYiW2XiV- z%xSKj8poNmRcH6iCVrkazrgiA-X%W|uib9iWu94zKY)Br%=UvvS9y#}V!)q28%E$t z_7@KG#su1cKacV~(|Oo07jov*>Wi*pTd6VX6X>D6vj?EnxQ7eFV|nYWA?A5g+YQdL zf$!63kMfFk!XeQvhW0;qcZ@k_=yKQ3%yGRH|5p57=5A-tTncBKsL$VrhK*&o`oF4E zsQjgM>^SX4=!tML&t zuiaeZ`>ktiPp?+SFc`!qMKp^plYZEL}GwebLLJV+Zqr;TYY z@4)WR=Q`4Eb;sg!6dU*@I+N%k8dc&S-W(lr9qr9^w71w!r(MN%I`uEL)2V-{ovwPO zhWt}GyKL^ugf;AO$UtY;T2VURpK`w6`A+A1oMqn+Kdp-O4p3j$Fy6H;Y>r3xWJy$X zAOSuJ!Y9weBRVrgzC+r}nZ$T$jf{$6jP_tJr+hlpSFCZqp2pgut}6|$V?)~Suu5Esa;@WHFSN|tidARnagH6~Sb+)l=nmPkHo_EWyHJU!L{asZ@4lxg8 zB;dOWtz4|@HdVJ=>X~lWnL4Cfb8&nw6XtZ z(EsRD8%nr81p3SZyC%520o5z_i&kcx8keoU(q6if1o9kdF;Ljjl-`gDw%Ph&4Q&;M z3%~KfL01Gr9lGM}w8hpHH%~pa4k1|(D?LK`7B-lCp0{Pu2=v0K_yVM$8>WKyww|Rk z4if3}-JC_U2ONEBb_3QvHG4=fh`u+=()W~S2so?r1!lv`<2cJxI@7qkzOM-jb!MD! zRp;|J*LKO(cxYg^dy&=d#`=@9?Tb#c?Y`4&yRCbidD~shwta}B?b0JygWlg8xuN#f z;8)$09#P0WrygO-R%EKqNouS|XkUfoXc0cvXQM~lgdQP%zd1cZJm|9gCMD;3p}W+e z)7tzayvShus19T03kV;Kps#nsvzmkGJC(n{p70`J4m!sRd{1i@=dVg%5l`s5@S0nQ zH_h_dqdEc{f<|*E)8>&^Gq0c=`RG}C#0K$fkdTCIkbmK3wI6$lz7dhl9;0r|cT1@2 z>PZt!K3qMi+iMrFpK~*Hi-X7ad-^Ty<~Hy_ZMk$Iy23X2PS>J)G1t-Tz5h8pv%$r; zuS{Jd&eSzNnpYSsVcxKqFyWU&j@LcVX|@-!r-AZHdB;L@iA9`gI^pit3qFdC3d{g^ zx=Z%w{q(i*jE(!iAB_=rUia7ZO`{UzU5e4VrjloMUY%3#65sTsZt?O>Bd$VU`0_Mk zyq*@#Tn?JK{^Mviq#2skzv0@jv#YD%$c=7nGaly6QOuvC@lA0%%~a4eCrYo#ogQ z-y8OaJ}R3)9=e9)Kt4Eb>!W*^&uH9gKWRK;eh;`_6zvbrNK6kt%y|1_yTag_@A!j8 z?V>4Zgd%*g{lvNEwJTUeTxwpsgR6+E&1+AP^MlN_TFFtZ)lNoEWZ*L}_GpU%cKOYf zSwcSf>gM7Q1Z=its3l_wCghGvtHD3?ugdP3)_mFeZg<+GyrVpK0W`mWXD2&-)9tdAPkD+;jH9_) zgQl*_sB4_)Lp`x6Q&0cvt~FO!_0{<}c489-v>z<5vqx-b0I!qYkX;(FZZxzntEP`ZDyTEwPU;&r+ufs3w!FX5T zd&s2=I6Ha(=ZcB8yM7e*0NUC+^b#=LSyo*WJunbgtFKwLJ^{kM1bMXAiy5 zRasoytMJ}O_i(S2>-nCzz!Xn>ppxra&XkFIXG}#$?j^8p5WlX__v-zHz73^?)Z4XU z$9;Tnhy4;e9`M&i6K8wy8Jd_^(d$OPZ^S+RM~AqQYf{0S6nAXR7Vvl1_Jp#_&=dYj zzE3=zm;N*0uX~&8|E25{_qjEG&bYB@GseR+VA{h2+`juQ`eZ>H(P+nWIb~k@|KdqK zD|S-GKxnsqKwNaqqpiq83R=h-mT~7n$I`%v(jdnb#hZDkFO7P->5dx4(1Xo z#1C)M|3!{GdZv*755>0%WWlGI@ab^)bdlQ_IE?g<;k!Nb?}0yGRkp;{rpim6!yAY5 z?T`*tnOxUxyOQgUReClL+XH84ZP+qoSlPB4{EV9`%0BPmulu4VqipOaEe4FFe`XQ# z30}e&S8UBHaOQ3L=o4htA@v)&)OzmynKb$Nyx56-llY|drT%NFdzHsnp|!5yMTYO$ zi;TJ0KUVBwJibXgwXfk^>MXy6H^E4qW$+2(NdI>v8Ut7G+$^v{`&5EvGk4lw$N^PVK(Dz)x^maEy0U-(3$t~o!9;7cG`bbk!tWQ zZBNZA_-T2T%XcI7_}!gdDpZ$1e8)f@V@rGKw66}mbpv+pxtkd~>&V{@z0ZBO|54-a zdmp{eV+0OSj|$qo(j8y(T(+MxFA@S1^ZdSBSzG(j|oZflPaqwX{NPaz$LLKGUF^>NK2KcJt>ZhU%-l=6&uhp}zX|hn{_IV(1y=f5*)4 z^65PFid5Q0+GxUC)_>`mGvS*=>irgHIOsbTI#a_-5SG8i{5yfVl@WCA_9}d^hjU#( z2tv2XlqLN^XM<&-+h4_hbl|y9wu&;pTlOIYvFmbs6uTNq*TVmv9}@>25&2UXPspKJs=Ha`!s?y{;`o4x2jNjuFNj 
zWB--MW?p0L^QOlj+l^y8NlX3w%45bL*RhmT&-0S}LGipU_ZtM!l_Y@wZN zHfnFacQN_=&?Sv~O3%>!gX73^^SwrDZfB!Bx3luN(1D_({GY9(uhL5VN7Gh~#lF#S zKbQNeM=JLW-QyXZ+mV6HPc2!UXs4~C9(J7y=&LmDImy*wd$xjxaRS)SBOs12~!R9FX}DbAd~-BQSV;pq1dvHseY-cgsichN1$)X*Du{_ zuU(ePP+0-1ES1x{Q8{KAYa7b2>vhIuIO<`SQ9I@}c+CaRv1U2lQ+DhP^aD@p4YECH zj%3bz+yXcHV`|sfkIi&4;d&!Gl@%v{_Jm*JP8wL@(pT!2D(I_VK$tJ$vyz znE3p}x6jQ-_juVIUuC?O+gW2$IwU&qH7oIRD2Er8-1p1Zp-~33N@rY#*84x1(tY08wzaQnezFr_ZT&{f{N+q zQv9B`lh@>Lcqa{Ambv4*+n9seyq<`iDEjCX=J;F0m@HwPqDz85*sX1P@cd30!HX^* z8vL)U%Y&DUx*|B>$HRh`PPj6dW<<@^7#vS1AuJ}WA#5ik9odrIk8mwvDq$YsWx@x9 z!-V87wqy??WE>oyU2^cJ*=r8on4NU!mh5W}P0gNn=(g++4&9NReE81nRAWY6YAV4` z7(1gbuY zSfF>=*wY`rI~lEm;F66id%&2JmHBntGjRr;KsYp#aeuOR*lpUvS*wy6TRbUzhsFcX zd6$~_1RWJ0%=rUj*;RIIs4@2de$sqb9-j0p{eFG@<73@?6V9|9q{GjleZd-ADZ(=g z*8;@CA;r39W1Nj`y?NfoH@As#<4la3m_BnpbL+XnI;|~MGr!gtv+LKB=j?0yo%z-~ zdt2{RY`?Sd`)nMPKf3ch;*YQVJz!$)gE;8Q6lr4-MVQ}Mc5&P`>MXehAU*1)NFHTrB_>xf> z;LFfpWBj;u!sWs5PQD`87)O#m05d+Antj==!-C(t`^un=E2F`d=>%caybr*W5AMi* zlK-z0J|xr=Y-~y1bZ7SHP1CcV+;mU&hnotsjrVTOPJ8cvvNPVhHGBMfw`befQ}W)l z?8WchmA&S@yR)~ycW<^iHn6)I@8yyg9D5HOd+$b`AIJ62h$mzl|9kVj!j5(5H|97K zwg_i>hIAFlM4d||ys$CB)HN)evGdwv^mPBLZpY~wjmk4IKs;>x?AM0t#$RIuZhHV( z$sU1_tYi<{F3HN?PFWf2kdUBoVQmLXtH5&fNFr0$>HsE;SK^#@

RlOxNZ^Q#mkN0tzb^YgpA<3q#w||E!-pgCd(DP9W46Y z1HR9sC>d7lhwU_5>GWZyrzhSG|XM(*9g@nTw#8u(Y}NHr1}LqCH=> zT(kaQrc3%1n3&5P>?yFRi`Sp>6!v8EKA4e8`%1t>>1!_Lh3aGBVs6PF6C3jXJn>Bb zt9;?2%bJ5Ve%^jRdQLC#V7G4{oh#LG=AQGzt;J86&htV z$eOoZ4YKB$uEvJYeC&7`Gi?^~#nc^#Vvh)|F>UcmMnoecqL2}sU2M*Q;?Y$?-?QNx zG|)E2q^o7Mzv$2LRTZz6r2*lF--To zXD?oVPj)IYK(Zheych1TS$}W#_VtC?Ust*2I5ulMr5*aH^m1Wn7v2<)pivL)9>Lz`Xu^G}QFLafOH#lR# zVS71#tVe0Ae?_|FlX>0e)eki`)VDC|RwbQ?7rVQPPfx^*)8W%SmToEj6rWau?G619 z;#0%o)31wz+!S4>;?&aST)=qx;YKSlk})*Ft;$M4jNM^otc z05&t(;e=WE5RJ0!aW;NxKGYZ|)5%v~?=Gu(!*PsOw2#` zVw=42*D>6C%ee8^;x)dfv$Rlp0cWap10RY~f2y;JnPa*G=55>Jco zgthiOU0B+9uHJZ#o_y$*DL*_kb;|rhw@vx`p*yBv*PAl>@O0w42-BwY7(UIco1ZnV zH&r)B{k+lM?4%DTPU%OumN1ntkMJ_#1HxfK@`pE18T8>TQ$~OIKU1cEICaVs>u=3o zvHrH~zpcML`-}D15v}=s@}|48lmE_ILC{!i`BtBrlmd2lr}g~zb7ByP-Yt&D&U&!(nQgY76z7J}1FU3>GwB&J}$J765-tnDNdyeg_`lWYzD%*?iNf-Q*GnB^F zhb!8DtS57^MYK6JbzEninVFHC+u83OYlg9uRehhkKP zza#D9N!{n|?7Fg`{%*tc>j*HPbI$#1u$4r{$G2mAY+)XfYmJYcTx*O-SKZmQ(&Tqx zUIEx-L?urVo>ntAlx!_^t_w0o)41QI+@BTmk8x94{A2u-30$AQc0zWKwLi^1zkFd< z(my6lY5k9%PPu69*6e<3w`JcbF;gZEKHnGvT&rHbD?k9b3$9zJD<00ce3X6G{lVeHLboWqn3- zX3@vxWjz6%HCOs7c#vz&=ZhV*)13BW=4|$yUizx?OSjcC$gpFJxgKYwWsvril{Vf= z8&BHPR@wwBt%S5^tTb~DZ_V-7Arl+wQ0{}6>G0xa zwUxNRHj!@9N%Etq`T1Dv+ogo@e%7G~xzMYWu*g_=SuR2K(VB&ed3yo!Z!GO8B`l&X zxde0W&Ki{RrBT0LV0Xj2ExQOj*W7boRk~SEcuaN^d~2G0pIuLTtoRdaUzNP5q>kdT zN_cEhg0WodEm~KV-`+OLGsik{?7@2<>3L7of;?!2lbPv*E}wrtd)4HNJ*#!d&M^f4 zYWaZ6PcPPpt(utN)j2n;7smzgIW=t|nb1n_EQYQ|?D6j$N`L8{E@JzgsZri+`@MQ+ zhPe+R#P1pGL$KeUPCuCMr(Eaz@nt+Foyq{)?0V#qSL@J5w6Q#e??SU?WUe)-?QsOv zJ3t$<2ysSSl`kPydvDC~gAW$v$d@BQev{^$PxGop?oe4fc)v}@=E~|qSsFu<&<(c$hZ|WH6REaLYKH!Z$!^_@YPtAjjtBG987k?%?R2=;_oPN9N z9`}OY^c{Qfv$M31fa`~ewYPAbNzYvHBX^4Sb+AT`Z+%Po)tkIMhCVIE@9zJTzF_f( z+NFA=@eb9antfc&)nf_II_u%44ysGnCh9WbG5Obz!@qvqlxP1hzQ)Ls?F6mcEM^Vo zna7T=UokXN{i-={>}Bewd7{gY4+i*|2yc%eUI#C!j-|xW;O9!xgr8l6o%jK3y~$oD z(i)N0hip7H*NMQ-pmccorSuG~50%0{(g!7TY#CEPnerV`p5|het9~_Q9kHEH>2{f0 z@dK}a!@Xfg52Ik$g|33#1JT=tcnbDi8CCEZ{_6*Ch$%ZXrbSsT->yD#Q*7DM>x{Y~ zv}q)^9<$xVHWt;e=4kQ>W3selO`;24ldKM{zIJ@}Gr@&f=aV*^&~xq2vi+p{*4~)Cg!EpdWfA(U%`@}&U7Md>PWlC; zT}`-X?G!VAzqL1IzexJmT|WNS`>Wx}1nk(ej%9?}rM|vrbJkEoSK9UTJqxqCaeW1$ zJ37i@{+rLy9|@NIxy${V6NldKPwbkIkuvoDg^9W^%nK&c49x>f?u5c>KgVQC9}TLcIo+6U5{MouNyrSfB%8x_hfKZ zTdY~99T&@gk89q05Wns*d@Gl;a!1X~F!m#J>b&lvYiIpt=KS`?xmBAm9v}nAEC_6^i^;0+Nrw) z_OowC-$JIod*8UCEST6T@F{Kh^q!Urf{85y4<9MqxEH$Z*Le)=+o&eUJ~M;wHH_ff zdHpij2aHW-FW-KZzT_n&@x9P7?4i{+!aNCW12f!lRb%j-ssi5@yZm)==$b9io8;5@ z5q2S+U+@X~>=C{fbjZ~r@FC}4w{y1${Qi1>Stn0a;Qr|baht$`8x+R`?!U<>i{=}% ze;8$yjpwYN(H_3nVI&0p6z8wojy^S&I+YL?8gVuEAu~MWjp4aAlo3UF?J0XZ?-3o< zmeBXdUDUxa+vfa#G4)rUzLMCE?;oxjd_V0jiZYIkbr%f|CUyuIyeBPzbHx~+(haxr zorYeA>z7BY&=JFaFM7*&qW_7Oh& z3n@n!dtBc$rmN{o?Sb1CAL>)-!^fs)gDWG!mn?856TBILU-VV%cN@-rH|ty7o+$7r z7CegMemwaS$e+k_VA1qe__l`d=rDM606hALv2cj_@7D7|nDhmhbOgGT@m}FlPxg45 zxb$31*(dOna48iGsf9jMC7A7T!aio?q4uTKZ0{0&VkG9bE zYGm`0mM1SuKD16e`uw_>vO>mXW(Q+bC+Pm>Rts;yyEi|*dt)*@x|4qF3DyWx>)`jE zt=b1VliriCgSEjOjK44P-DR=x#FN4cXg&xW{)6X-wI34eOig{qxK~@|Wa*r9GQ_Bh z1tVI%^x*u7o=#OMq_yQb1-a83JyYXs?{h=T;ygvyzSTW$!P|_pOx`t%F}07n>|uY% z7S0CylyRnadKpjh!42{a`t`*77!O<6v!VUyI5Y80kz~FN7#}Ehd#k!Kp7POCqY~N& zqLck)5Adx6cgwbc=yQ#-VV#W7IctnJVd8k!=qKP0x{W!`$BeTWZ18_hPA~f{dt7eI z_Lp^Vy93+Lb(L-6|L55T|2={2*BfQnpEo?ac1T$UJieH{X5$IsPkqnUpbYW;GdvT` z^B$ggk^igt-_~Ue)Sd-T%Y=ZQ)pr~<9+m$+_Wt1mP**~T#_#fS#?w!es4wNZm^&Gy zBa`a%t(rf0I@TUH4#2<%!QWlr_I2R)E^zGz;;=mU#jvub@~5eCzR1rgQ(1QXRren{u)^O2R+wXr@=wHz z9L9Y%<9{SLkOdxOf)|`k-Sw)1diLF#{v>ale6O&4XN48oqbHn@-ck!j$S3}H&~P;6 
zh)2d!uNC0gaOQBPPGZtXI!P4cwqtnwAL2TNaU0JZbqN@wZ`ku4@Qodzz4`2e>2*bJ z*)1k^ZoJQU8+4IAV$()4Mf9PJ4f@{kROFD#UW*I};pNHq2G@I<|9_75MS?Y95F z^fWJX=pC($x{hA|BeLVT7?0A2@)`T*Q5WsO5*Bp&)57@=Zoi9tT1MSK`1hBG3m

~>Dc>`%n4P-VF&nMdT|43Gq0M2ZlZ|}cj+k57XJMDdP-3nvxlh?24wD-E^*n6cNdmrD8?cRo+7Y`=T zHre-L3`c7m~Q(5`cL*vzJTdd=YHuX`Y+NSPz)y0A1F2c z0BdY$bI2d)nqOIK`vY04&D!xjW@7uboT{8jkw;d3M5b0U#*Apv`y<|OUgNBjD^{Wb1r|JE_kbm$Mj$g{x6Dtj~j z&|zYBoACjY@P(JakGeKWY5aAi=de!c9@}4kmwuT;-%5AC!}v}lE&cy?glx($TVng` zdd9imxmPm$`!M<4&wSu9zMpd%-j~1rpUBQa{B`PJ{PoANnSZ!r;kc{g`v=lIgWf({ znc@NY>!15S%wO;P&-T~Zn`->^@HxmOl2v?h%|WV9T1kh`LB_F;Sy)i(`NI8l&Hr`h zr%(0%-|wf-j_}j9chd3G^TOuUU$y1(i}usITjz5&S!}aqvAa1^Ba27Rl*ReP)Fg|M zbES?vE<_$LWUll=WH1GPS2CEgUUR1*8PqjL2975pu0dloQ%a5v|7 z9`X#SydB>lrbiUV9hQ9E>xyc)o&Tozya5kqVr|BEx}Eiu7V9cwkRQ!???@*;<+oVx z<*j5->CvV*t8fbEF&I71+);1Nk^H7NDWLN&{u%x`0FJ%|`Y*&;53sK8R?^=h26KRY z&9{+0#{R(r%tustV*(?2$4%JZ=iv97_*T?&oDHf>ez)?w znqMlv6N6cQ%__km+5Q8Ib-STcu&dKiw4K9@g?^%-zQJNPt&;G#dscOT%Ysy zq+d(Ep5%^dSa5&Rnxq8c!mM4q`!DzHp3T^%J$=Jx>9lJA<-75To}};HoXhkieb4%r znkVV|CzYq~HJAA$egA;+^t~|8ll1*}l&9~#@JqG)ZGMWg|FR_1PsB9KHAkAn_g5#l zthQO`;K45T|1i%olYP_$_>}7w4L)T-KVWaiKF${V2%fmHcIM09qpxm+C%$L*)kG^f zz*;ubS31jBIN)Q>lpBp6i=_|5hXK}n$I&-!@K`)JqMiPjPajm%AJy>TaMC;A^*irR zT$7lP7}x=?-$j2c5U>Bo4WAvM?w8RY$+V9-T~N&?`r~fp=?}Hnw9{|= zXPt{xu5~IqTuFhMw9`*JzeYP18=QcTsI{T%7L7XPhw9rrd-1nFfG;w(&3rk7c4okf z8MJd0dsin$S(_%q55idT`#*+vQldlabsAV>sIhOQT}8B|h<0s2rm{(w(yrqBeQSIP zU|8BUpLRVU*C@I8*v4{lAy9w_V!?y{s6N>nF4U89D4?%>p(_e64m|s`*3K zAqNBOGjep^9q7Dlbez7GDVUT;A9pdB86*4BA5;O~u$#|(7dM#f_mx?v-HwhEoK5k6a>JaV@{dE{<} z^2pr`<&nFQ%BMkzkOGKJcHsg9rYS|I)o*9_S)3{4|(rvtLOc_`oZJxo+hwK~I*VWA}LmB0q`f z318&`^u$QyX94{<($*7mtS+7UTX^pPI#p|heu_?&ZXN3Lj(rwcPqtK^Ik+Z?bkoZQr zGy^$!iZLZ7RPz*Lx?Oq3bi4A5=~K!xrcWu)m_DdHWBQ=-jH%Yb?%?O>Q|VIX?)81_ z@}wtOf9iGzmPnVP3+C9mbcwA?zsne05gQ$-V+{V|5|06EjB}Qte|^kH=zHNGp|AJD z%L^HU-HgFP#$Y#N0A|(qOarHFUEOHw>I3Ksjl+Jvi9C${mA=kr9lrWKo4!6sU*Adp z?m#x5^(F>JkUorjuDrkJnh^;-1Ba2%RrGZ-HcNdiJ)KV9t*5Vha{Z^#ZXR~E8!^jR_e zR7#&!p=WYPSJP)@_b05$Nnm{&eYS!=+esg&&twBe(nr!c>~}LdM|~t4VDvxzQ<_UZ z@y!Wi$LOCKwhb7>yohW-GHaMe5vxp(^XF{xC0)|~NsqH~R{ibti$~}aeLrL`Hpabe z=F5YzFIw9$82geAexm$9Q4i$YWMw5j^j+Hry}Xio-t|-K zWn`rM3Hi_-Pg^?uC%;?Rs2-nO-y@eltu>@Cp!?gb=j{ z!-UH3p+|m$9{C=6nzPb5CPyA@+-s$%? zV$SN1qNDfiHhtm4XU31L#g9y)-HEgV8(Nb{JD!b#y5a*nZC=}}9MfsV95%8j4w)QDS3P$G=s5@^V#=SW}M))FD6}ie0hv4|CnjA?aEfI?jhrTSfbP zw5f{r9p=5n(LXc)Q^&5oD(9X9KNpt2gP4VQbss)$e3(xw9PjJKr)>wh%7n2t8Dv)=$gab=$d$J+u3zZ&&pNkng!^ZRp^=p-RPQ; z;D#Cagf8sh#!KucxUhrb4b87>{igI-D>AwqJ+>EpwmjZD7QZLuDCsy`hZQj&BVQ^3 z9hQyG${C!n<`BH0u}eZW#2?A%FBj{ey2Jc~(HX-0gTW%g{DbLlVgAANw=ln%6JY$z zoPe#n($U}2U-DIdSrYQG4CY@N(rNNj*U``4L9W!_2J;L1hv|05kKB#!I)?7rO+AjG zyQJIS;QR3{u8?k@M1Ol&hdUd;>?t4L?!JWhVm5x+O!T7mEa>b7XKrC{mOt3aH)3Ox zT}Cfv*?w0RShIAoYmI!=_8z_lAMwQb^nv=@nR_w)%@`#~F3=@Ok_&W661p;(zSVp} zl6W~nSC)dE56=$i%XR4C<}iIpT=eL2^pDOV8^pXqHffz-Px)IS-?Q}LB{`R}kM=%1 zAxCSG9sMhQoX5AMWG}L?pVGn0=@a>&l|AAb;)ubjpJdmhn~fd8Pt`n(`bKA=IJ!BH zK9PQ|&JF2jwcF9pE6~p^VftBTjwhm@js2l-Wq)+WQ|={h?|Ry+Ivm7SYQE`$e9Uv4 z`)2F&NWXJcitV6cg3Jc?u1R9UfrK zvFRt~C5o`Ce`apz7`Om`;i&d8&Eq>j8V{X;BmMq^gNt{+ZS`(=nDdrTcw!q);#aGT zp_FwuJUGh5oEAJdss}U<8VgN?CO|KPCPVu`dqev}`#}dnzXTl&O^03q{W5eIG!r@; zngjg`bQE+9^sCTopjSh$hh7IZLnrIZHu-~`y%6#P@%tWkd)u^LcmnHf+cl@k`r&-? 
ze`CINNe15*!%qJ?H6<`$V9J`cp8jnI@F^;8^{n}|w}0UMZ=|fb%{w5V>w~%8pFa53 zvHN!4?Y=xPp7vhvx;*f{$Eq1cn|H-nHMjC!&G!!C{=M{B+K;Y2HGy{wLl3;mm=ECj zfyeIq*^l|%?H(Y^Q=M^HZuKaa(wwDonXCEI*bq5Yu)pabcL2RKgv z|IqlG@DqMe+brf#f9mqKX@Ambe9dHi0NyVqo( zn|Jfg5bb-_b@hKQ+I0`M=s_zbp!oV2#_;#lcMkR4PJI_L_N%cUuey4L`v1p6Po0*3 z3Lm^jABu;!9KG*n$Bx|hGnIkvubz{XTb)5U=xoxiYJHmrY+?HUwAHJ#|J7I0JFQ^I z1I$;*-%sc4{(R{e^*cI7{hmYp4$=1`=u`asX3Yclq;F@STSg)uGtk>3k&hXCt6(JZ zF@tXvj6^;%DM$U5*Qu|4#;hw1Cc^7q*Ot(UWERA+Fm z(XEmN>5DwtV(dEgRDGG(ZPvW5u%$4U^g$l)4(ZT%QWoYu;{PV}=nnMgVPvJ* z)}`XJFHVoLW*r$kZ0k`!bDPqm8yMRQ(W4qyM~}V`rbjid7pg~3+j?{#{=_2|zB1S( z!+mMz;^iw#N6%xAuJ)lwM}KmWI@%f6NFA+loqe=RN4Mcu8Xc{9%^u!{P0X)NgHP|I zeVWre!q|#$HLgkU!9;Y~V079<>T#&8tcsN%%n)8cd*!$^a4`?iVfa5qJ*)W&4;bG`-sJ@jmm3@x~I3xaU`tJ&Skg51u^H_iA$EFVDo_*M; zx48e{BkrJV(^O&-2Qu_rOyIz^uAtU-I{vuUMa*HoMK)MiSbq5j_-pMWIP+g?B{=6S zkM#rcJK9I^Z4df56}hf{{>s`GV0#`i(wv~SPUz=^*S^UdPCd{4wpr|#IK*6~eDwl+XnhMi z9lbIVTwjX5$p+V(xe)k4?QCPrUxd%YZCM-o4q+V|&pP(}0oK^h!fe?F+m?Bw^FNyJ$wT+b=B2OGJhMBXv&mT3vP(Fk7JaoAJ^J(76&0(I^R@WGzt|Q^ zXCd>cE?>hIers9FTE!TzTQvIAEa)eU#jEtQhp~Q~d6ldGwq*BTUHt;r#P;X>EykWU z-@+W|y`=B;^bH7e9l*!!$Jw7($My@1<=ROrl|8t9&De?;-~P*eKYNMaYB&F}?x=%v zcUPZxA7;;r!057P;bTf1q+umv${c0G5ymu$cPhpy z?3c@!4mM-z4oq~p1N#`$xr}KEWBO;t^h4fv1N*i#u8N1=%$Tx=ujXdP^byANX2$dp z#`I>!^by8%nO|d;6dJQxyaSolJlikrF)Lxr9$~Cf7?&m3gip4wu24RoaoMbEV3>B& zC5x^)wGR5jzrV2jEwI>Hd%S+$!xwlP*?h; z%Ku|x`+x;|2j1qt7I*J94|wqhr=D8A#oa^C{p{!b7P|W&i*C*rb9Kt1i}CU>Uc!8m z#kKA}ZGOg!wb;8B^1g2{=lvq%^$V+4;8U)eZ27gToq03u|?KPaq#V zmWMr?id~z6eY=q}eQyY!_IS*B-_Y~gJthRsZ}-N!gR*-c4)p}X?VkLFaGTeiy;FP7 zXYXX|4swR5W9tI6)3I}huyZ@GZHKUbpI)2G+MF&scL+N-+=X7o2A=XHHXOp%{pl>W zZaTJZIJRy&wr)7KZaTJZIJRy&wr)7KZaTJZIJRy&wr)7KZaTJZIJRy&wr)7KZaTJZ zIJRy&wr)7KZaTJZIJRy&wr)7KZaTJZIJRy&wr)7KZaTJZIJRy&wr=+4(!Sa ze3TPhuVFv`32Z7lwqY4ExeUMP9qj4fu&2_q_j=fO;R)H(4(#c05Bo*&iDXAV#GdMz zwN|3hxx=mAfjaDI6f%4+drJJk$nYRyzkc=(IQDb~_SA(855b-;MSmqB!>iC=k@oce zIg34QOZJXspXu1iuC95u06tix?!8!hDmnQdv8Bur%9cKQ7F#MCD!q)(+>gTfrAOqk3J6_1O!z zd4lV%v_9LCO`dbKWpjq1Gl%lr=x^o*$1wl%&UgI5_ioJ%UPF1`TIdgcyC^p}j52c* z{K5Rp+~BWnvx5K0cNAZk>j}_w?qejePvAW_w=#qeu6-CS7ZJ$LhO1S5f8_ykjWuoaJG!9Cb;jEv=j%))3_n&iT4OsQOn{SwYr} zn)kE=G-#pU94H^(EmDTZ5Y?v_`jJSvBHkZY%92t@2r;7#slzw zruGkElN6sY>u)%V{~$heC1W1Xm=nibWijSjyP$FO=7!b?WD?(Cp2VCNsWWcZvL0tW z_3_;?@1*Kc#aa35IImW**Llc-@2mb`A~EotSNnriyd-)7T$+Np=$n@3;k9lBOx zSz|R17{}VjTF$vu8FiGM&lzp32{PYP);l)v4*jP!NM8P%%I}#WR`8ef*TkfRKxn-s zd&%u{%!;U|mv_a2m6#(k-+eWFoJMT>|FHKha8X_7zH84OV0bzxDn2rRn#gEqw0UUS zSO&yL6u}c|W7FOo6h^6Ddv4?szx8SRmnw7H?TYNF90PMbDK6Yoh|HHk5zwwA>9 z+>=wmCl84dpJPz&|68-yu-P*MgMgyl{C@klnYH&`d+l$1kN^7Cx4wmM1rHxc1s|uN zZ`LtWeH!Ye_axvQb@+xUPtU)IIX`y`-k$`Snh4pN02#}Gtc?fWPY2#ND0n|O8N}Q= zUrQT|43aGhB!4Lh9{HOJj1-LgrQkl2KMCX6;pw&%;^TARlVkB|_`jFqe5TEL#GIZo z7JS}+hRqoU{+A-hr!*pFqxa54OX84kT1J3YC*nt08coA3qemoK%n_q4X8b;38f7u3 zC-L|I^T6Sj*oaa5`U%r$OIZAHe(f~w*=%De@BV{1-viHXM!kQO+NqHNxHW&3hhhu%> zZM@Dr#H`On3_HcCm!NKS3_I0X5TPq!g z7#V6y67=#=?4fcVyijdQOZoeBoAc@K*}9mPEaeYhFz5T|cQrjbyT)LHDJ12coX+c5 z+cA%hodvy^1ni~Ovt8R>mO}PH=niL`N&1>_*N9M0yUVdp0O2kwCmeIN7_&;jm|hG-^&tiz5T1Nh(M`43Q@cy?B=%sT z{@AK~7s)NnZE9t|Jv4#~iddheM9;gj~%Xn3nRBtTg9ckges|H{~Ui6Q1l29Qht(wyQDZyO95` zwTZxxnW4^ipi9(xyQ^?yG{$__8b^-T>LnUel3oJ7ARKuYbQ9souB9VO!8~Ux8g?E# z${dS#MsIFca3k4f!j0-a?tJ@Tn?cvrhXd>3Xhjd--^Ca)3VN7uVz*$#Zs(aTYVIYg=h`K~!e><)Yw*yi+XY&h*Iu;JP4KRZ)lQ{Kc{N!mMI;6LtTgwJsy ze2NRYNjqo=jj)?m^ zJpRVV;BSm{%rYFhkL=WJ!}f7cMR>4=zxgOI6mM{Cw#hD9!$|F$2p<>u5sTnIqtK#QG9$Z1(PKxm@G-!K5ob( zb@$PGDGr(5MRn!^bJ0D;cye^@ z+y(wx4*q-|{8=0^KIKmM-g0o{4!Mi_v_=p7rsYof-JXK~?SY6pExqHvX&D{yP0Is{ 
z|LuW+qb)P=o34+>_1O5){Mt8B_Pfa}xGiyzzV;5qxuR;5S__!FBS@(Y0q$wlOXu<#ov6=5EAIBr`2C9ql+4 z?~!ePGQG-=On*zrvT<_=x{6l)GYw<-0#~1DCoZK3Z&go8!lX9#@husEROVNrB zgU`#K4kNj2OMlURG2CZDm=r#*Zve~egWqRIH~73rFLrIe0GBFwMy+FMO{vha+bQNr z!7GXIX>|u)X-i+HDLyRB8S(xR`daw0gbz!ttFMJ0i?E8&{lbrRHoLX%e-l0=EhdS? z`rDK8;@>YE6<7Lcxa95I@mnZ3kf#_z2s? zC``d9JLo>tcbn>)-oQ6e9eWTrMD5=I?C>V|y1ISL^4AVw7R8U`f>-FR7L% zHD+0_U>0ig2s{hi@mInfL<7vzx=9umbR4_zIy!<~_5_1n;IGm0Z-=qVpw3~JQRvq@ z(XV7bj}J~uIR<_q3`6sWC!%a_!Z6)|UEYFRybT_rc|*c5Y{+PiHFjZ+@vp!TW?&wg zi=;V6nnR?$YY1zcz+B>K{HE)>aGmBB`Mx$Ndt-1&$|lHI*ZM2EckVEt?E4vy_`@#W z5wfpf7n%dSb)YR}n2`Udw3Hp_)2{Wev6PMIKhkS7e?jyvO1#yQp_+r83+$)HiFue? zeHimFgb~xxzcklMb28&mMmfe_|6r))wWtu}E-M-rjBctrp^TrDdA_0eUeZs)(3S~G zdxqg1VorF1rad&bI}vlc)Se`?M?JS&jJ#*Bp-s?fDX-%@g{~WcT>8{sD0f+)>(aUy z+oV8W5x(gbYy;ib9oUBE^VGUnjcpjMp@qJs`8vWjq>sA;-?XRKdn(4Exw=3Yht?`# zk1L*MXe7>r7oG6GMBrG6BkJtZ7gr+qw= zc=s{ny9&ix#*bFmGN1cmNWtbWh8FDlY;)FI_`d}IhaDW2bsW#{TEQ~&;D^9o@{Pr? zffRpv1kci%lp?Grm7%XfP#@JPGr3|D)>qKJ^PyOG@X z;e(IYsLGRXwH#&akAzF}K%+FggLsZ%4aaubelQT-MksWnIZXvkg}-Sf`lTGag!!b>M{#bT{pGPX{uI`3>|9-#l8(O0Ltmwj9cnqeBjJM& zu@=)?mW(p&eRK-?Y(Lgux!~iOg}&nR;rP7}>*|OnXJI~8-FNM`LF(c*FrPN`e7PH* zwFWY`5waI{aAViX9pp!ZH5l;=SA&Cr9p#tw(=A|EL17x-aUEK6CX=)Mi8hjOZr zeVPh8Oy3!aGkuG--ETo2FW79dL0$JPlJ{FuuJT*F{ef>82|pcuQ$4GZo zs_P$yd(`hhuG0_97zY!^ARG0W24m3oCU@$4!Al+fUg^4h@AZ|<)_VJ@WHWg z@V!{GaG-vqlwUj4kneBJ(B%(3c`qn`$yu!9CLR4Ibo3dF73U9rJ?s9#uVtNwEgUg; zLssPA^;zeD@lCL|62=0PlM*d7SM&$y?=j2STNez*(i*HqF>OnCH_OajCHF0tpyv%Z z5^r&4Nw;jtI)WVg!!Q=3FY5SsP|skSelGA zEvAr3yw2QP-poq7r97(|c_4>t>G|~| z)AGgI4$5Oeb~hRM|H7Nn)*^kU~f-?E))|P30jc-FAA}_ zmPMzzevbL|kNOt6cHLrgv5V^ql5i9mnahUrEdIabD#CdK|KH%+j&n8tUv`yXEfvN> zuzL}XKjS@*!nUMieNb*w*gc~DKy{YjKEe>Qkh6l~#3`?22gn)oDlp2OSl324vHF*k+lILh%aZeG7<0e*O_mdfc;{r?Q-gK$rSMg( zFdBJ&;P0_M6`x*7Tl)g&UAK_A%dz*u6kr_Ur`nN5=Of5TI|f+i6~rKz5Q|Wg$J{IM zT{U**{{DrfmES(UwDMQLktaVfInUgQHThV3@6MC44$0t*qkhHQb14oP$8a2pEl~yI-KQPs0?$~jKNx~r{}-F1^aK-{D8T)M8rC39%Sx% z?B(9HD8soWVxZ&P>*3B6w5xJ3v+mE3te@Xyu>N(vY)zF1)h7;SIUn9=Dsw~=59ouh7I_}MC6>@1^yj|TSb;w(plEiXq;IHU2&HldiX9+Xr72Dr;bhPd{Au{>QRy+p#`q1I945&olSTzPN@Q znYGRMR^-R54U@LN|A#3#Fv7l-QDjl4L-C%cgshxjoU@j1bKE9XaiE9Xai8|O!S8|O!S3+G3C z3+G3)o%18w{zK?sEsc4*6VVQ8_b{A6o88GcQ@d}+nc6)HXKMFdI8(bxKB(O!A6nXL zX|1KLik71U-LCTe_t5ir|89C7^x*Bs`+56858i&fpSK_M;O)ozdHX>RA>YYLza=aE zW}@HteiQx1_phMe`2H328{a>fe&hQm<2T7P$u{Y-+CCmx4+EV$+pe#!pbq5N*aCeK zi9ALT$Uz&9{ILfQhTTKHfcc}@fx698oh0K{{J#Kpc?!-Ako$$m*|re>AG1$_j<&ha zj<-Qi+n}dC>qYwY1l0=>IUAdob)x(_?*{(Wv+H!3OJx z$+Gnv)^yi`zGNfTgBDT9BkzK}qWsmT$ezPi(0&;87)$>M`mzza@)G{91;+a>^kwqX z2{TO4mtO)0-3c3f0y=ZHjJ3a@rE$|ztL!YaKK+o*x@E_@_c!3033~1OKeSm3F-E8T zxa`P7mIxdb`LxaYt9+Z)F88TN>`P9?)6DwCqmmW*4C)hsx&ATJVEwNj%GS6HX8o~+ zSxatZ*73>AYQeXBhO&=_N&H#LIVA3-`{*9pm+^Dhrw@}o_t3r#;+`K(WY&yf%u2b3 z#5)CT=w0+4@*jPcVZb|NYhTo}+{!rJj(r<_1==+|HpBTAo)`B}yhAwnW(x32w735; zuE%>><4ezE~;2w%dR!@Z7t-VUxf&Q^MIj<*@1Xz+`0Kx8Q#(_#_p4G8I_=cJK%7O_mS- zAY4Z8-fm;ob*vZPyXZrcWUZdStW@uo2;?UO4=e;fh`7-d_$FMinZMnW;;^OJtdzI2 zc$Mw_e?u;-BjMnA^m9J?nR3`9NZ4BfIF{nHY`}0~ux0cf#A4)p6348~l8MuV-pSIK z^)TqR12h-+PIY`c zi2%*_AXYRIeQCh=R-nH<t3 zw3E*V-yecJ3V%jfF8DGr>QE-TSe2E8BOON`4))ostn|i6|PHj@x+nF+IH~Gb=-O=>Te(I-t zM7yixdB3LiGwg*7d(ikM=>(i<+>&Ut<+JU%mLwc3d6q?t1@6apjgUOJ8u{h(>3TZd zmpt2&n3!ux%*?eAPBUQ~B`NfwzRZvF^koX2@s1?qw>W{Zub>mgr`WT_SgxkyYUD@` zX&TS_I|V#Ix%7ra?btdjZpYS989TOqGjqpQ(eKosn{b#fRb`FF@c@peaDW#ImVY+4 zz%Gu zV|U~g40lj&*&Wa&kcZML*adojGM*LxlMIr4Q2Zw8x_qTAX*dgA!S&bef%O;ZJO-U7 zcy|N&UZ9JJHVx3ht@IaUnbS^@P3mLuo_wuN!o96@ND#lN?rNcfaMnJremDPa==c+~ zhXZs6%_k+}%uvRR@dfFFhZLlbon=YG|4BBfU>)%$`0fzSJD^J?*n(8htP1TmgWt`e 
z@dcwH2cvPGz3NcrQ5+e2AJ3Yzw=rwN-Y2r=;{T1S)@444^0PQ{_CKEWqy3Fp-`)R2 z){pW3JGhq1iV-7P>_)D6{v8r%xEeNKHgx>!i*1=1Hba5DcXrm$y>qf&U%W1p?n?qK z!BZosZ%aT=(ix&{)St5#f8^)c=Q6vhoVOLSKy6KdECi}o+g76gYCrU^g0A(@KG$L+ z+T#d{_UHp&+CQPt{mXjq^~P^Kj;}CU6SjA?1E5-=N}2l5yfoQ8&qXB6v9o`Dt?(BQ}T`9Uc9xmUGfE z+UJFw3%MXYC)si>aX9YALA)n$-43*!zQ0(7kJh7&Lm;D%_;4vC4Kf|jGd(MCi;^$vC0yO-XLp!XTj z`*G0w$&S-|Cg=^iUlqMo&-&1!3$C?GZ`C{0^kxD6k_IOgFmr(=3_O1u=X=&T?KZOY ztdY?f{8pWacs}6VLbCtx+YT$SH5Da3Y$<{cDA^87ojAu*2N@d@KL_)Avn?`?s^mON zG3ukak!A6-EH?8_`>l9xz4F{9{@h&Uxw);L+n_x6n)00fKAoOAzpywV4-{WgeFSl_ zG+x3Qxoa&SJiY}wAHG{Y20McJ--JZ@bAQJ$X1jb#DJ0F4@qnw#+#=Lcf?@5cWl0!EM0O!}re3GJi3r zAoh#71*7&J&KkYyOU=!LU&iqZ9KXczkBGZ17kj^C|1X>`Mcie%cjUK_+gUEBzRicfhQ?^c zVvL4-hx1V@HA;{^|Y@kBw%XUDT9;QTOtZ~E-v ztdEqjS$#X5s(XKqDk7*n(`dE#JDXyUXI!Z8JYwtZ>cMtX7ETz9!^D$lf@1|$8 z->4o#Q~T4um{pJt>^fV){p7DqQ;ijfF4UKvdvjaeH%oILe6aa6cAz>osIC#eL_-%4 zKVZzs$LrPL1qm1@71wfs?Q^%-GGD{7e#^Sd**H4-o5r%_XON(yi3f7=Z5wc`M_0Rjm zVB0~@k^e{Vc>;dFNo!Rgr#1c84J3K3#n~ik_34x!T${Zv*bAzsW>$sb@OM57r%jL#d9O@ z{8T)jzPd0!9d;wnGz;q#@+^kpgr@QEC40+&Z4KB9g6g4tP>H{0p)QK8VMbF{5{`5n z)K>Dz<>CKf$nP*0$8%rg7L?eGSpPDs;FHgeW_<+z{_`2}h|8Z{u*nuuunD^VxzCPe zm3%fg>p7Gkh2EKid>Qn;NtoN?ybQi#O3aGz&qafO?ciO?&ub?d<1EJ0Hqb8VcKGL% z`2?*$E(v@n{BirBKe<1S<_Z*FTrsYb4ORQ%8mLb4-z)b8^FP-apcC1M)x!7Y=RJoJ z*VX)?;XuuBmS1;=lz(!JA^*!%Isfz|WBym!$Zh>#sPnuX`$^3+IWIjH?rfTY+`#yD z7j!aC`|rAIa6uCAQ4*&_M?D$q?A% z@fe4H7k&6W9QP*NTX65U^DJY+Oj%Fkcgf&k1ts7SuE%Y1K|00tlzF)mpK~OX5g^l`rGjHo!S3n)@L~4{_;fD+qjOo zHLgdZ$Tt9*4_k65a{`X%z(W&&OXlu*tSUP5J!sEbD(gzH|zXAt9!M^bqi?qQi&nJ2dUqXAebB1b>+=qYBL5nrjJx zubTAIC$`XnW#F~f_Riz=kq$o!dq#3%guj_^>(Rx_xUY`*Zx;A36Z}WMW)J_h{eAhc zc`fhX)#m%J*L|E1Ve@?H)pmR7x%TK)jXX}*npa6r60fENsV`f2H8_3g(Ur3;_dr+v z6uOf1WghgSM`zBqEbAerG-9R zj~q`WPXBmOA6K_E{ttvN zwSIzN@9U}K>gEsS1GPg<`AzUKUYZwyIPgfsgGV7QJQ`~PW1N?l%cbYwH#{$gmN&t7 zcyuV%0se=!9>%(gBXT@nD@*HHFQSe!sEgLE zoqterR$**X{UFxf^=`Ya!}4I%)7M+ixoo3z9oBNx55O495LZ!yy?rAPqhG#!z{P%8 zZ&-u%8|TL`_k}*pO+M~c?Yo@et{rM~--^Ynu^{;=SFUwh(&Khg6#xdMb^uVjXU-{}q zgQ+56JZp@5d*!Qx8WvY3U>tD)F{@vHo4KdLf7Gg7(gb(i-_xD7OZ%41hz)l%{r7n1 z^RlOHUsJmtV@?lUwe9MIcJ;;H>;1fKYgI3OFV=Q^j5UD#J5kU1Qxn^)i@xy$XindG z0c%1N&|me)foVbvZ#WLhdolCW;>yhWMV0lSeFN4Wy?J+JSwnx!`NT##evbXQe~Go9 z_2~OXc|ds;jxVvMCmemT2KxwNT}LUcL$=~DAxBcY9EIE%arJw?YR(xN6XPg0L{y~G zT3OKOx68kOaTE4M+B^+=5QeaKz65W{c>fjDjs54#kE5?oW4+^H@=eII-B6(P?S@_L(*BYhY0 zozQ)gqS7yxp!}c9S?LVSV->Q9dKq&7L-EXQsAClB9ENugEiYi*(U(}`Lf=R2I0_j$ zF2|PBzAozD=BOCQZ!t%n~-<%`Ap{g59H%5!k#$249v}z72OvS!`!aueh%7` zl124*q!MOQ29B+j(=i@l{%3@Ax z`w4v0^pZm7CVW#memCPA@-W6ZZAtR{SF57qUU?(vLtF5FgO|&E-W| z&?VELQ!=4jrb5R|!5WkMowY+J=R@}>`wAMNgTkSMB5*wt_eJ6UXgmk~v#6KzGVT*P z=nUx~$jjkgEjp+QI?1W%AE!LvVlviYaUBFcJPzGKI;a|Y#0EW71v#d@rE7XKu8ZRQ z=%N-q)ZeQ=s_|_XP}f=1NqgCnE@Dc3)z~j_OB?ka#@c_iE~2{5%?NQ)KaoD_hrX#B zp6Z1JTtyGwmbpy8G$(FC$P3U5xIw!LAPZ$9W^S!w7&i|4^9pA}I zcYX_dRi;q+cMZ<>`pV8|?AiRMSqA6-ohv&Vuy6O<-(}7}&B7j?m|uMFE2Tov}Kk;=YWHk?U%)zyfQSTW%7lwMy;rDwul5p)doL`dI7+UXt6!U7w(KodB z0o_B_=stSBAr<=tMMXP)gtb_!A-{QXw1>IPy$NT`Ke~s;G533r-(xuE$1vwl`$Rl2Q8eCxmI!pz5MByi!19!V&7KG%ho~1)4FW0EdpN$vPG|hZe)v&ffqfx zxM38N8MkYjPX`Kg@(} zs+l*z86w3yk|F0c^T^ip^|66R<$>P!B|;}?zlZGSaMZ!SAwV5RKufAew4HQIs!AUv z`qM{Y8BSkalntGd1>G_YIwljkW-4^i6zHP+kq5)*(M27_L(m6WJcQ%~wZ3bP5E4oqJ3Y3FE8QSjwyWJBwM&#wR#V}e}X*NaY-Ij z&iTBZItd?g83nD*LDq@i>messfUi=4t41og>Ji9b9WdHL^a<(e2DIT6+C*|ln1^GF zCCo`WnqoLykh4?J%T=*hLOCo_w8&>r6(Mn`e|Y*+NN5jtyfDC5|Huv0bgN+@u` zcEl`xV!}QHQMSu(BM<4%pwniD+Ac4`ULJWdw##QwHV1kwHJ z817@Zk6j)H9ms5mT{N=G_X5w@=zn0EVbIUd<60tq7vWkNuI1u?!dirRl3+&&dwpak zd}XT7gI|6qo}qlLr*Le>!O%`xFHCLjh4XQI8~witxG*d##BmJYR)z04f$um9`Wy>q 
zZVB(Gj%JrX#=E!Uy`LIH;soKI)%Du zPwTcZ7jy)8#8=OC1LlIxQDd%Z=%Fg$FoC;j2LZ3ZJ`4b!>JQncoJdzNW+Z*L9s0Qn ze4N~;1#{IHJmbC=tTn)kwXOjByJ4#Y&YFdI9VB~ko6(m6>w&C#aF(Z@Gm0EuftBXm+TszDF>$H_zQOLJh0{!=zPNI zzZwr51}x`aupY-_&{KrRdI5t`{U&K} zD@^t#_S+#$R*QCwKs)L~dN~$C)&(B>67Q&nE_n^_eXLI}hY4*V8Q(7rgbYN0*JClZ z@yPfT;JfK;pkp*J4)#H)kRjhA5sNk^l-v|CenHyzCP&OP`>x}aLD!l)Kq9dx%uHlfObBYYjLi^}C z$qw3cz>WJhK_BeGQH^^G!GE_yN2KGu<8jZ&c-Ll}??nAcC?m{v6lE{tO#KbKHYOhQ zJc2w@gzX6D(KWh{o})Sl*PR8f`zh#W!?zvAc>~VKSK&^?H@s$G?qZyOiZjuVbm8m> zk4{6J?&SousTjI97td`6y-ChtFV+z5zliavNdiy6W~_v5tpi^Z*8960?l=z{;Qj&G zBY(bzLEKgfjC2C}pX^2reg<35 zaUI_2#dZ9BzPRoPWb`O_P+&;~+pY2XbQGKK#ddAfLv*LQNG?VDsqM+W`md$_|AEg4 z-&PGd_tk$r8t1kC2GFrZ2deyD!Qnigx~>DxYiWbh=RE}HwX}=;WZj4JTHX^f;a?8t z8KK|iK)(^z8%F;_zX`mTpvDtn?uKPy;ZItA3#{pV!fl;Z8I?g1w&R zTNq+O((!*1{--=no;<>FQ75Am$zUU7qH>I(BGS+n_P%g|OtR-A+%cQw-_H$dN8FRwsHM_G%P5(4I zcFm{Rqs_as4N7_3E4#Drn)zvV-&a1(zB_(*_5$U(@1guBD1Q*;KSlYYO8En*=SkEv z1NA(GdW=dv@oV;EkDXqTJzz~m_Pypk*`Z4LLnwa+>^$dPxPxgH?E3$8S zr6PM={GM!Pc-H;zC|im$JIV^FY{;|jHRiq9Z%yBqU25K!{X6_GzUy!J|0|SNqWld0 z7vK3({NC($X70;=IeuUEyZB#x=Ry2`9_4>W`33whzVj9H{_OXs|0Vl>oBxu%4gcp> z7rFl*&X-X3-zfW<%A{iVuj2P-e=zee+5ad0FWG;@|Nq2s1-}pBcQgJsgBF~=W)FS6 z^g=nO7s}Q2iUUnK-QzuU7xelb$~oOpPIMRaiU;jDea#;F61^Tm8K*7Eh_*zp!Jr$b zYrKc9|Bm0Brua=X)#>Sx3!R=GIy|8B=T*r&eC>_-BeAYb$p@m#zlWc6ed?iuZl84f zq}%)8@e}g0HlF+(d(s8SucYf&UBBx1ALA<>|M%$el-BNR$D=g9)Q&}cFcZd`+A(N* ziwmSXdAWrA+#__qfbJL2{Q~3<)BP+rw%_V{>DgGsf>F+**z{b>H*xgAaTkuh zIPSsGAIDf618{r`#~>W{;kX6IcW@-&n2cO`(TGD<>ucfH6n;!C-W7gKt=~}iF|~di zvNKv9C!e9#@1^x2w&y=2oW23MftaDl&4v`Y$&bCjFz?q69Lv)18~$C^drx-a0~Og) z?~3ds{9m-1Wv+rRdKs>z>IeY z3%`%!+9$Yn0{_3ET-%9fx6^z3;Jq{O-adFQ{@;%46u-NZ-izO}acw-V<>CLG%C*CI z_9(p|*Q}sLT1GSSwXpKk5aa{>4)%GL z3crd@VAywg-me>R#G$|M#u1O>UL1pQjKkse*OQ+gvFoD$MSlk318x6n`%~M0dc7Wd zr_-T(>A=VDwaq!C+fp#+-Cgvi_t5;d&^Zm*+k8FAKJI-dcFf7y zA>($;{n#<~vtw?}4jH#&j>mpWe1#ozXLcW2>-Bo(K6N^DDIMCC@h+mbUm1TkHySc- zhm6~SE9}4(_Vv#uMspd*HQ)w2aDyGV!4BMD->6($PtWzib2IQ9wu%HUX#E_1L&oi} z`*zrU{@kiGRyyhEGnFMQjP_G*JSjKl(7e1IHa_}EWikDsQp|Gi=B z@5CW+KtKFHrVaZq?7qNzS{$gwd0IT6#e70H==FO1r#csLs!$=uWkrn|9>N} zKd^y%%t6?{Eo?x~?IPT-8E@<&`!7kfk9lKMwF>tS_27QW*Q3S#t#Ceav1{c~E8kjq z*6(i}SJPSga6Ucb!Kn?1G?;I3!}66Q{YH7&z!}0@f?nTJQJjS^?s9nzh6Av zQThbrnIP%wmuId5&v0B)vxwsojz=&KS)HQb5Ugd?#syBp9dUo%bVh4w4@wpX`%HF17UF_XpWu!sY};{{Xtp+n$^BdO-RI z_+G#C51`vge>1G(=xDxY&svR+?Gr+N`P}y6%dZ3(U;Zaee0i}S+_VNbjbfiIqYGlE z`-w3}JoCGjy9@fx^b==}YZokI3LeBYpICE>o3`9pFavd}5Y=BhLJPBi_7K{5i`08)dEH&k<|>e-Lx-jX%flL-_5DKL@P@ z?OVm4qg+crj-x?$(I2hi&rvS=qE-AkXewy!jXy`3ps_dp9CQ@4HL{}se|(FH~w6g|56@H1zPI*6zijO`;`5RZlC!2Ki%GA{azdRiR1Y8 z;?H&cs_R$G3(&e+9)G6mS6#n~b=0(;w+D=;w0z0q&sF15Z~VDxEb4=qMEto5lk)iU zHuUFH!Ni|qy`uvNK z_b2LhO}A^hUE_Hxy2`J0z4Y)kjZatU4WX~J;|!s%w0<6Me7efl=#5WT`NepAx@ru^ zpVU^y=DHb+v3wze4y=rt)Elde*zB(J`wZJTDzpz>zPy0>2P&) z;N$yX6lw;iKjtqI_8dA;?ohEj=7T^U!;;dg@Geq}tsoc}e6PnWK9e7Z_sYH^;{Uh(*J73LH6U)X(tBenKluh(Pu zbUIue9RlfwuI4+xy5UBTPglveR-U!;tCd^*{?_X)+M^HW)Ar)i+vpE^zDLBTw_#ts zJQJin*6+vq&^G89kY|FVuV0?I3OsY&;?q^~ptXe7OdCZMr7^InCR@g1GdVn*8T9PDS}oQSObWMx6VTsE6l2M?J0bpVPd> z8pNVc*W^E^`3saULb-^i=K0TQ9^)0npwC3^Zsg|S`Ohg<8fAr4hJ4=Mcxl9%{BF7? 
z|2f5<;5dVFF^=)h_hZiZ-I<#F=QNLu;{wWszuh~JjCtd2(^dJ;X&xEJ*Hk9*pVK@t zjz7*+@}J{>p8p)by?PF`A^LjhWmf2g@~2ev@{S8YW1_oH{5i^r?t)(4c?8gs=-XiSh3f3D+yUB4oqaZk~&-nd7FFSX;*R`KV`Skz|> zs*XQb#-QH#^8os@Q~A%KQ+2<9?iY9qJj?f))BP--eI|52OZJQ&>KC|PdiI*fmn(Qz z%-d=0obY36{Wz`S%N1W@tN3!okLZmrSH@`G_~ZcoL&E8Le7P#V{95EcC;P1Mfwup( zeoSruiP!~8()rjT+AJL@}Exo# zU+?_qD*La+d0KnrjW1U)pRoVJ?h72Lwf}m(9=oU0;p*rRNH=sf-}%)IH$!}R06e6X zU#;9~Ej>Apb|k($K)+BuJu<#rX|I+(T6=h-U+4W8 zOnkXQ4}Sl(h%Z-kqnEw`bl;7BJs|x9e6L^n2hi<3F2200^Pf-0*g@n!N365Zd2z2G zH{48B{&U1N3xD+YaP23!=FNYOYu?!DHHe)?o!4 z{P}bxU#~h|5@kI9xjO!Qrjq|$#Gm6g&wnoB&p|7m{~UDk($MUoAp7K~tXpTs<#}GM@jO$`G&5=N~{vp8s6LpW`>re=g$Bb$WX8pX>DW zipR;dG{NCuFJnCkD;zlJ^6Wc`^0g8Zl9F+NZsCh_K)#{pE!%0n`^lM!&J$y~$ z(^Yyy=qs&XQRpi^Z;1PapT`@Y4x7pMT7(bM8=tQ7i}Cn$RbRB{UnCr_$ET~})2~JT za}^(G`=7_BtNK&hePKT?b10UZ98=u|@ex~=_l=11)mHg)- zJ{`HwdH!<|pN`z;JpZ|fPe<-^p8veP_;kHqPu_2x4xOe$yE3kx!|w#W{mOWNIsa=C zpMJgbpR4qx7Uya0m3Dqv*neU71&-9(f4yFh-P7rCb#w@%8@ihB{OX1qJw9C}-&%Rr z%CA;#_51zgKW~GcoKM?}PjBb>9uc43hJE$&Opx|izaR1GZO}6y&jd+ddfta;t^&_o zxA=6GJZSBsHy+hre7gQzX-Dnf)zXvmXt43=ZS)J()8pdPRqfT%M{5sn^y|DI+lx&2NNuH}P6U1oz+y7q@*t`cS}9UaOJq=tHYbFqd_4qH&BBJ+NK2A3XoypPXm2CFcV0g$akX zX6cL8C8n~KQQ?k}CktPkkN;Poy+o6R;VdT-GH;*a%}loIlBA=Xi$Z3IE-&N$6_2C`++JFbTGiYn6(pY$2&=8*>p)O_3rUxOQNj!c(&po}zcmQh16<9-caz zo#s6M;La_?Tg|fJK+|Ie=cReF^P=5|@(}P^sPpooFz4kDrP6b9Sos-Q0xz&~SES8t zO3bySg|eJvoF$y&S<&CNxeF^-TuEPB7HWHuczW>1>t0=X;+e|Yp_22brxsWK;zHqz zpMJHZ(j@h(s2R?jm*l?X#CP#wtn?}HKuR>rsgehluj`HPMPDQr#+R*+F*&O7zB~SB z+)H$=rkBmVE|N741wCrQY;IdGlFxqS6uVsw8LE+SUG7)Eok8|MBc_tEt+@VBuK|v- zrsj95<^JUZQ74}1zv4>TT54}N+B@Q0;fqtxEv<}_A}S0mtxjyeAd!-ih-me9vjwR349e>Hm>9|28HPx-X)9V~j0x%@?x^e)mOQ!S8Wi zi*ww;#;hM7d@}2=_}z%#U*Y@_&izpS%g>(7dM9RGrWx-@f~`L-N0p~a5%u7Q9CIiu z1rO$f2lLmMaxK3z&9ZF5xddnPx1O++B+s>cjNgs;eFEoQbbag-mR-s7ET>~x{;>g4 z{wd^pMLh=+@I6~EH|N|1IiWhA!~L68_3auvkJon`&us^-NEcmRGzq*h5j-*hypjQ) z8SiX*EZqqnDkWZG%3t$`h6Cv5{JJ}&{F7r0`Cq2W`KKot^S{alZ#@|5JZ}$kUYuug zUV1Ft*))T>q3hhaIAYnuzrF{@F34WJY%H$>-%YukzSb_W#^?a+tImQyPs?(7E%?)l zdsFG&Kf(?s%AW^1sP47LpQ&6Z^>LUDeH`CqaTQyxj9;68{v27%a=c??Wt?MPgLUwP z!`8rGhBZLKuPWAJ@%=vHeb-^}{XS6cT|b0+Xbl$M?*sL;+V6wrb=P1Gmf3gh72odz z<=S;ue7}!)tQ8XbebCyVSFkq)m5Kd6Xnl|w`+b--`+ZPc8_Lh1T#Q@1@on+g?;~Eb z-v`A_q5J~M#r(22Zpw`PKFq59J}7PqWnWX7*zbelrsA>RN4&D%2Y&PYKJeR%>&+he zdg%okaeATrDHXlEez17#_W}C)thYk>Pf;%D<()S-d+6)k?*laF`P4z%R_m?evEK)1 z$oKmon&LOnl<)VU)3as0l}=BcKOy6nIwwW^=Pw-FElM@YG}#5h9uHMRZKYJHXBTWqzyO7Vkv*H_{9BKQ=& z>vID56A7o&`l=CzdB3i}u`I2~&0gG-o%ldSw)A2}c2e&>*+r{a<|_E?(@{Pf<>OJF zhw^1g`D=Kl1lKm=+OxQ}3D-8_nhVeDLiur&e}eK8D1SpK-%0OTi}%dHd)DGTefDH; z$F=?R9+b~U`FND)p?s%Oewf~aYa4OxSzOzMYe#X7<`Z_I{5Z-#LHP-kA6Lp>>Wz6A z1Ku^Hi2L|yjyg%T$4A3=Foz-)el;zDVV+~&uN!g1rRQ4i#u1O>UL1pQjKjfw_H~OY z$zLyg_g=p}eE#|UTKx8+ABA6A>rc|+8?Ap>+i!xO^m;x17@ZE?LkB)?$GR%gX(@8| z@iV=L)>R3;(||cft=?G=8K(6Xesv4Vwfe(*zeY!pE+YN)QUoham2APwx-vhv7V!$L z=I8>+x;AFVjQu~%%Kjg;zKZt$h*$Rip!HR>|A$%G|AW?7(f%Lt?d<dv z4xOSyyRzOz{2V~m?Z6mzU<^AjhJF3BiP4Iz+kqqOz!7%f2>V8*9I|c)R|5-tV#*%CtZ;cBDMilm3-~)jNwKzeG z1GTupyZ%bSgnGT6d3T)--Aji+x}&T4Ie_kHH9jG`{~H=NV634YpU}7=5PqQhwPTZA zgcA@4M`H#WC){4uoI^1^siZT8dBzFRy<2pG&;?>#z;%E!K4>*Ah-$%6f<9U~*UGh) zfA#xYkCQt}AI`5>hef!y0r7iR^E|Doxmvuc#-C~)I*ak+53(9 z{eE$HN9hxghk~RpJ@1Eyg2q8!T!T1j!ZjSvU_8@shk|1euj~)Uoo$c-%|WzTHK>Q=ZQ<~D!Oo9#`-Nj&PQDD z)zX*hq4its>BT_yS#57m*0tE%X#+g=b~W^#@XNL0S$}pGYgM%0p|zVg`*q&0SkFcM znoo94Bd1z=@cV=4KeheJ57a0A?3TJu0_aI^8*kF<0sADt_Xg+_A3M>R^)qsPRb*JI-Kn0RH5S38HKuQAZqY;dkwvuiwnp<1(2Zm#N2PW`EGe zdTc%ZU0-)dc?k3|tUiYA$QbrU9k=T7@18swBCc0E&!xw|v(l@Hf2TN zlg7YveGmbCpv<3>Uf?=GiH%}UvrdO=ONT)EqO18ifWGL~_;>A?fv|!YH&9&sDpg#3 
zgAx}%EkIm+tFeJPhB?SFfj351zn|y*)at*E(uecyb&iAg!JvY_#QIDXFRA0>UshvI zH7^Beo4vHs@Ar$nI!d2_yc8sT>G{sa!~0+x!Apc~G;#7N?ZnA9#QBetS7VpeF`l?Z zZ_KJ5$KERc=#4@b&bvJ%{@uskD(f=DIc2*s89l&mC`$_9J zztOMr{_UnXcpv*E`b)824Uw(aEVQy)>b?n}C%v@4Nv{X&n*iU-@AqA!;4^0KOgy~E z8!yh?5g&iu^T%Ut70(MdU6Vf^YsY#1_?epg@hIo{<5BL7|3u9FlcegAo_XGXN7+)8@%-^rCi2H4=Pu75KV6eQ z9_2h95#?gs#q;wa#^_}n@6OcZk4G8LACGb|&%|@}AtxWtzd2o%KOSX#-jT{g{&?i- z~RyzO;S~bn(*5tk4VPPpRm|aXir-$K9ZBtNigqcN{-Oxu6%H z8zK7QxEFNv&WjUmaV(-T(2LI%5M3$X6KKfu#}iF)6yi6}AFtEX6Mv-Bvy{)Xl057D z*&%rkDt|m^s_TRO+3)N6wUozq==Li68QosB{8Jn7Xno^h%#Y$BYX9V zXS^!L&)<7xclJ+EuKNjeKfyb?-$nPkDEgPjqmW*}aV`8<*Gtb{)BN!&yzAA=@yLq@ z8Vf(7(92rCjyHZ@)hFKk@hU$U&mXTEqqXNxB%H42kH1Fw<5m5y#W!02u(sdC+`ixs z;V;+vt@V06xJReM)zN{E+k=gr?*u>7d;G-F`ozTr%Ku8`Jvx6pa+~u!@6(n1@yH#| z^T*Fr^2Z~0JkK9LUCAGh-0?hr{LFUp$LsZ$Lg(2>>2&Bc9om(3^?ZIO=p8`TedhnW zGG_jU&L6MRt6ChWwQE|OE^vat2Lfkmae`j2$3E(GxH>un(j8sRcL8*V4^9Y_pEeMF z=n?tjRdTMCYpon>vDuNcslgp`dZl4U<1!B}ZEO>Wz){7k95e=RY5RH_(OiaiA z>GApFRqfaEpVn^N=+}9_wih?Aq6fd41L)Bk{d&MY3GlrE z`ozahbS7TDJL2Q7Oa6G`q>2Pi75J+E$ z7y}VgpdH_Ll%E6W3!nI`u8e=zjv2Z=f4oZGweszaNml2<)$jMN7w;&2INx69ICvin zDsp~zMgI6Uu%?$*LE3Wt{+=HX?}KdwFI~_4@hbhK)%V_5Rz3dRlZP0zqP)7-nl4-q z_K^5@AA2kEk@se$(L{&AtjQUVa-K6DH_ySZ^QR{VKgmC6cQ4Tn^33y(-K6yNiH|4U3QRQ`I`L5@ zu8re;(v>KGN~J5gUxRcm^0(a$-P$S+hIB5&;D@}+hx>Wzz7BRDQS*K#+GcpUfQ7>r{aj#fT;`1JGpwfN{oKMEhV7XN7dMOuH5 zw%>$LSFhJ&&viO<4;}cpJ=oZIl|JhzKht~s#L4=^y#>m*N#z~Q5AW3lh#@f>`Z(TJ zV&hXKTi40EdX96tk|Q4Z%t0SJ?5rm)9{)q$?PI4aIpXm@^9cK~_!nd9&BxcJ`ez}tx5P|N#_qt8^9us)8L5PvfTdT5oQIfv%( z?IE_W_WhXU*kS)uCVJ-aHyM5B?xPc1ut9I&fID#rjL;ANj}h1a@giHUj9;5TaRI;v z>Un-4-vS>9JgCJ9S{$g=vpgkTF5lw$QVt6Ihiu3?_BLQ3xzeIWDz9~-FTpK_{dc~UFuTG`g`SK^z@9$#i>>BD)p zz1a9RFfBdb*&Ojcv4sk65e8NB(OG%kug~FVlaGS5&0apz@3$kC!v~9Zls*CZC`kGS z;G>|i5XC`y`AA?J5f=}gGoxk^$2T0)5MSM)U>W9%Wm5WjbXP1ei@-9R=MY~nuuDTs zYwSWe#v7yRFV0;hLt6ZzKj+b9*PAY!hl7oeZ=*k{p3dfn=e$hoSB1S*;;qTv-d^8g zZyVx0_SOYkdK$X1QRqh%ujZRzXP?cDUI-ZxSVU`QZ}#iFZ`+HFSM@2s-#7l+2V+r= zcyC_`ABxgfWWO3BgY1_8deYl3H|h0&{Sx4N1N4iJo#@KA`0kF6cLflG`i9%8xAegS8RkuE)yjvGRIsw`bl!JKuD($98KmfF93{Jbay48$+@34I_QW z1;WRv(_4=_`klVE>tAN=j4tn8EAJfp>+$p6HLZN_9#xF3i2u;zIz4=+jh_>~4{f}% zm}k`Ksjv0C7V8nUe5vI}J$_!5U!K<0uJNo>7VAWLetC-De8sa?SzlA{p_ex9LC{Md z*Lv)Mc78$RbkNSB>g$w|uV1z9=0?S_*Gtdp@$)x#{Jb_+K1~%X@A{4s{}N=Z{90ws zNcio&K6^cOJ{z&W-h3n?9zl$81eVw8Cao^l+CF^^fhTXtwW0$bv)`n#^E^H&SUFeb z<<=^(DecO6fca1Gw;nU^CtgWkJMgI;Id-nw8j5!1yo>mm-s5MDjL-akSH{e1W8=4~ zV&n6FR7tiAdRs5rlfA{i_KRl`CpCIk7m!A6lH%&bJOMc?s z8@>@Z?%fsVKkl8zEP-%|OBDz2jaAj--#xj5&7ORiHws<2&g&uZ?_^K6s^Z*J1}Hfw zG;!`;-t=cj^|<$J-lyFZ_fGbS*6^v<9eDf0pIuV-M*utHm6ew15R3I3l&4sX zlX(t!*0+IjbOQ~E;Cjr;RKZ$WEsj5f*yjlKEIK}SJbp3fY=1xJ^714M_rfh&HhTaP1(=rwkgxRjFsxPet-4{ZD1#k_1lYY*Y&He zU)A~Hb^WUASH<7jQ}ip3yHxQdk8f9vMZNLus&Ob^v!cdKB0szekG41Vd`e<*Vy&g$ zGyGN4us+i}*P-4s4tiDh8SKq|N7YZlZ^GBbsQOCyOnmxGteK5KYMzuZz zzAoWk>~5dd_0q%FH2z(MhlRe<+CibOv_2kh{&-cNkUvrDW90ecRbx0FJKKhDk#PLK zeE#_KmK;^xnm=CE|Jr`n_M^7n1P&1VA>xp?~sT^S2cajNeI zlRuvIu>A2Vy{g55TDzv5dlooB-~)j(wKzeq*JB@bI$Rwc0_lye=DPrT!v`mHWqdr> z8`mm-yh_%!vaOY6tqkk;w_f|v9(_2^wihSgMt{)rJvM)Q8@AcYM?uC}=FyWJqgYy)m=?;_mh5ygAQ<#cfwhPtL=^#?QCWFI3MBkw0Gb4O*IM z?Z%CMo%d~far7#B@cVt^wtX;Gx8#pk*)1>K1L)Bk{d&ND3GlrE`o+gibVWS9$Qv)t z-4$Q&3NpUFA&B^Tk%y+$`g6)3zf7Mu*UqPjahW#0LD(66-dvSWQjfLQW9^l=H7$N^ zm4{DX3leZ`O;GveRTxBHd!w&q(&O*<=<%UFEIw5D!g?B>;$s6n{@%M@R>!Y8e)Y+R zuCEpC@oPo3dR03<)#LB=HTzfeHT!xF1nfoimhPA7HowgE(!+ZE{mmVJ?~{LC6>EP@ z^Ute%P1?8_J@(#{Ur49J^`!$J)8C}A_tz}{ylOrfa&Es%kGZ$&bm&=YFxr*#0Pz4F z?MHf#pEWi<@dI5MbMKRXUKLy4WAe|duz+@cPm2xo_nJ}5&=EedUtJkre@*hwt7Kd&(_&6X&{9i7{r(#-uD;9j&$odiz5ElTo!0N~`SJ8U 
zD*wDncWL#xHwIRZpKq0;_gd41+vgqm zy#v^x8~u8~{s{2B{C?kc3_j!C&cw?%FE<{j9U7V6^jMVh(!6NrMSG0%{DZxmXS2=D z%Zp;2m&Y4R&&na?N59S7uP=|kc=%4{o_evc@*g;AirE^IWGoGDO0TR9lblt9jLyoF ziz`#3SY!Pl<~|X|+|%x2IaXY+W6V+4hq>$I$cnlI=8lqVZj*VbC7Q9g6+c_Kt$vWr z{m2qiS>3m7?kUmXjyk+AywFtkQcSp`w!h6yI-zQ)%^m$smh-}oS8kge!VZ`u_Cf;o zXPZ1A{h8YS%)KC<uK>TPn=M%vs_XiH6k&0RMJZCesi=9IsG(Hd>?+|wtb;^d)8&e}UXZOOlc z`tP*44O0IKyOA~4-oe}p4J>EHgbD8Bn=_n=cyBSjtM*c1<%lJ$>`l~_7aifSE|K^* zX5ih@QT7_CF^%Q}7O_o8A#qR{h(F&INeKNc2qsey_(ndqu_Iou;xZypQO63g^?2 z(G}rKaw}trHcJZEZjs|J)}UW%CNXyn_Kq;6u>%IQQ+|A$7NG_^!etTKZmd}i{w6zW)@vv^>uSj{o?qtg^f!pKU}b|DdUlaO$Jk*@jEYhj%88LkE zIvhA3k_Xn)wIR!=UOIxl%`lp>=HjTA`<7Qp10A8OY?*7mc(~wqU(74Wl?OU{ zW7hP8r?Xx{*&Dd_+`)&lo;Y|s%e;zZKD3;bF2p;NAk&xR@bUyHyuQfYoHO;*;>s#1 z(qW9VDPso>tt4E zL!Z^igDd($UUnM>b6FVyo^e7}${{EBg126Utf=|kq44}U)JOcjvao;I&_0mar3#>-W z9pl&n-k$>A|1)?WvQfV8Y;#Vb!uz3U`(`iiM^tQqEY^bmlaruVly?k}B*%PXL|N^4 z^cCKHMZ$fM0h0MG`;r|DGBCMUxT9YZEB*Xyl7WF-26ibjaEL|LuZIkL3K_6M2INN` zY5HivBLQV#8Dzllh_&g%1=glND>85>k~I!BvYhb3(6U%VxT7>C)=`V|O6c8na%}yO z<@aA2D&so7NAQ2699F(w3fJ&|c)R>>EoP;oBRu?Hi@rZ3_pM(K{x^U}ss7Ooi!0Yl zu|9GzTaI;1P~;#ZfE;8%4qzinN2gPNgmXFg1Id9LU7mq6*_#zVyMJ4F2>UlH=v&td zdR&TlArt%e})LpDyj+%8tDS*&-RQmIo&cK~(~>1?3-Eh8 zAKdQm4vX%|Aak5KcXKjUO7H*pWq}O68wD$c5D*KH)O#c%NiX=n{$jL z1Iq76{9XzEuaUzQ3>fi3EpVqv3ipcviQngg$7+Bhxm-d2*8md|KCOW*t%D8KV#0TO zM^s!uJ8DMa8t^Ic&@}8rO1RCRjjRQ>BwSd#l$Di(KgYwCacsM!wJiCQA^L7%BgrOm ze2DK6_^^iT8f1v@;R*#GHuSf->))K{B-=??w-Mhk0(NmKe!qqHldQQQ_bJiQj{9*g zz&(Tu$$qxr!iZM5kS&QVlb!@FgshQm90FN70$Dn$*v9>m4BKgQyC8Ff3)exGj>?hc z4dqpWN4Ej zLw|-0B|?vuLx!es8CuXZ?2!clW#~07LnE4oJ~E=op|XWCY+*!UNZAK55so?*;V8#B zK^k0tc-a(g2MLo1JJ=+Lmj4y{pY}}>ax-0yaP%f$Rv;YsbCx*;cJNoQyMzPlAUB8P z0rlGzJ4p5a5O(md(qMkOMg|f?pNq#k+XdmIlgU!ng z80X0t=NVzoLSWZIVc)`F=S&#qg=3s&C_S=Ha(<1m-g)Q)x6H~-P3dcEBANT2;Jts! 
[GIT binary patch: base85-encoded payload data omitted]
zSovtvY&$`@Y}2q4biF^vdS43P3l+}Qyd;5f_01_KugCHET0UOJynw;_f${%m$K(qc6x0m2tcZpX7byL;sN9*YNwRT-$S5e&Ds)1%a*L72)99=jI3g zXjma>358~W_=>RJTdecjtn+VfafklvE>EZ-yKHhT_+&fy zWCz%!ZhT@0KYfSjrV30eo%K4pe`gVSQRvKV#o$-`hc&C&C$ToVM`iowxb^3`kWpaY zt@oycc06iC_Ed2HGvrQzZ?)G#7#(Z^-oXnY!qhYr$rUHPt3k&!ug9-103pEyRzd>+_6=Gk%S=`}j4E)9AdyW7dOp zcT~7TJHSX)S9?v3aR+m;Ls)lhin$f6yKSH^upDMQq{Ui&yruGlKHgfVQ85DT^`oRH>nf)c~M;glh&9m`! z4#O{=Sh*)I%hdm0Q*5BWlrKhYE?E(MAZrmiTtFCu8D)WPj-7 zck@GoZ}x|V-kBfzM`%QIJP$f6f{ql6%jKHpJ9}JyNIK`u8(pEdmtgbUj?DvYXigfK zlUnAami6Bbjc87qz?TkhREdHip#O3{XB;%)97xxg?-BpDrF@fjVX`WPKw1;XE?=qUbY+T>_kHFgcE0atzFYY1^L(Wa z{#^prn8CctAEf;RqVMjQ7aUh#3TBWGTK$w?G|Zg(m{T8pPm9ODZDHT2-^khTt-ap5 zz6*b;_J8v~^en$p3)ii^3;3zSR(mNH2A0oKIM_>j+7I%D82H2~4E!Q(ZlaIiu{on@ zM;O@3DYX65bvaRA^d|YRxknhd7`z{w_f$a}!ofS7{2%7fnsew=a~_-jqkiWQi|D?O zLO4$Q&P!wRe_}Cl>^^giuZ4D$JL~vt*MW_NgN29Xw{>vwagL+7_}gIBZ-H6ofL*Tw z!+sN7d?mQ}3b5^LFz)5xV!rKI6UD{d^SExN9m(YYb!SZ+xHyStl6f`-T$~Cn_R(e< zZKv~&4Bo~61X+3(vBRjpSH9kqyeKXfrW3xMX7h40xYogQ&0tyKxf#&wY#YB>`z*k3 zQ^9S@5e$Q6kC9__d1^8|UbtZac({%0eZa&w!|$Ta3pl@l_T}UKC)TSOI&FU5Fn?dvHZI_wtcRz=-BqTzM*3q+i&MsSadqr_b~Cx^KGB-PQH!3I1``gkc>Pd+iUGn z@53JTBKNNQg2?BE^9*E->w2??Ry>=UkvOAc+l8s8k5kNb!H$K@Yi za9O}J?77R2Jv%kfa>+LW+ltF3zcDZ^)Hbgu@ZLCo$15|_L$BwczsBId9ghy10Cv16 zy2txfd`^nU{AS zh|Y)RKx3DWEY?R^HHwR~o08i%##dXCTRJ|06hgW(PB?>4R~gFV-C z2AIzHs>)JBRrAXx?`NDEV^vvNsOpxo$)djoe82M;^VjjuP64OpfKx{@<|&HlBimxf zu4^mV2Q572_z{Jn`cX9IchSEM*gLh@JM7`J+5r2^=^Fd>@d4I9uwzX0TT_pCjSHv2 z2ib}EO#0@Bz5y-9zVANH?}s*H=luW9K1GjJ^08#NLN9;LyuJd>ZSP@TUz?c_ik;Ku z#8bXu(Tgy)zcGDcPyPF%_czmb`Go5xsV(<R_%ik#_XA@_>A+yk-R4{NzT)Qn^%n9M1LssyZODIo7c}PZ$rSb#D<4DrG6i34K5gW3k9bFTM$fCQQ4~FUF-7qSU5~vsf%6-< zE}e7``{o|}xCi+5rkgx?`7xwBitsCjo?Ej~*He4&<30^uaB|>gEZlt~yY!-|=J&wpIQ@ zh+@$S-&cD5dQV^*v|gVBjUWddeCyz2wWT&L7=lk^IH`RDGj$z#I0nev`=eJ;EiVoCo`8ro} zto|$iMzIGkbrLv2zJwZN%TBPp^Ni)oXWpvpc@dxJ8Tk@xZCHS>9Q`2T0`O~9+F zuKxdX@6AMJ1T`Rr1W>{#RLW3kb8bLEK%981KjmMDP#FXGVk;FZL1;xUSSVOE_>_b} zIDyMNIkYA~MFByIb*TIb2$euUprYg={P=yo=bjTT*91`M`@GNp_vU%dvrf)E!`^G} zwbpm7z4zMiDT!~Ufq%DmbuhD^7#D}W7d`J?@ziAEsoLkFw)nH&IgmLD_Py}+S$JZo zp2V3oJ}$g#^Tfftiu*nlxmFl#Cak2mZ;RyEKyT{S0xY6ByYt(VsDtEeD|}7J4#$3b zsqRkfSM5E3vbLtIhbe1x?AM@O&W^tata&$>^DeOGonX+u;MzODwSB;*y}_uxz_qu7 zYs32wotvbXZZg+X_&pWung(`F=efE(e--bk$NTDoU9Yyjbg?UV^Hbuwfv#cvYCRc_ z>ss1xhk9w=g2ldd_BpX#Hbe=Z6Wb->?2OWDj2Vj?vY%C^bG^9{>Ps$P7Y8e*JI0CW z9wt6($8^EO?1ydV*;0S7aj`J$O-?+Q7;iL=ZCQmWsZMvjOQ53Z_(KI4)%F)vG1Ww(>g}X^+?-1>o74_U4ND_Xs?g_j+)QF zTnB@=(_ZRr2oFC96&~&l70v}88^XijVncYA`44@Bp^58i-ot$(@(Lat(ygEk*E(X$ zyPonFH4W|U8~+__{K@^w1<19bV1bcgjJyYoymK7*_z7|$O=nIy82OzS zu`e%keK|4N)#%Ul;O#dUH~h#rdk9;)VYhL1I{hPKt65MFOq~9qy*Bo2j|cq)F5hf^ zZ4UFx=GC>`3O3CS^GLjTB$Ie;RDJebqCUdOwbjYZO;l68!Wbzb7+pUbD@WDQ#mlOz z>gnR;I>O83f4xUGvZEQ?`xAdK*Rf&Ie9-lL#wKFWmCAu4Jfk&~*K%y@0M_d22}(wF z-ynRrOVwNW@zFZQ5AQ*5yE0pcejfeZsg36M?M{D$)q1C))#IyiWFGjY!ku>BvV)A#Gs_cQ4GjdVYA3Bb)ZrUoDE=AQq}J$txkC->m5 z+4sx@Uq7b%z|G&lCwJs8DR*Rh4@{-$Wj^5?%*yjb`; zI$o@K5(U^><*Tg3Mnqz2&5wwT83(~B8b3dZJrzz8ruMK7HJ$mp`d|3E2{@_qJ9d7b zvnKed$N%ydOeB7+JebOd*^5K_RF*4#-j%tm7-yd5*`vmt>U-vzF z$5Y@|_Zjtzk7(z=($0EL_DgVXr^$oPorSNqj z_|L`H8Xt+phnRz2@zG!0vMY`%e62b};_JU;81}vL_3ipNJcz{8Qye_aI^F`|TD8Bh zH2czbvd5tLzYcSNrJLM7VbLcp71IuYuQjLR!kG5eHm1Hwn7RW#FKts>d^_miRAK5= z=K2b=_M$Ij(cW(CTJKOSTk+V#@SWJbJ%{56^;P_OI8^a#FJ+I6C97V-)`zIuGZFYY zJpcFVYK(n5b5cverQzJAS65?f$<>w!e64+RBk}c{-Y~va8MaVnH-@aa6$Wy4cKm;U zhx>tx?*$*<15Umhe0>-A`cCk3UvTst;Ojo@?-`C8pPS@mUx*m)i{<_}o{Q)C1n_hn zD;J-3Up#Zw!M2s;zsq5K#-1ZV%$K>fGi*Ezu6>UBvJhlO%9*^Gn%8h`jQM*@7&wV> zNa1bp_~>!Qdwbz;8a~8bS~n_8PK;Ue$lgJ(YVPeE&xC(z4sA>1c8z(ld{>Ulj 
z<(|{LT=df(+CPc)>s5>wj->sc;oHNV?;mx(y`vd<8^L<=aoz7q$Lw&vD<59p-P1Lk zTUqrw#hRfdC(o_c`%C$b+M^F+!&b}kMbmy%zUYy-@n@5hH>o<$ve!=k-zn>!}C_N*-eHx?rhhaUl zpZST+DVyXf9;)`=aTj^k?)O-FM*7CCmtzB)A3N8<$G!T_=Fh`9$d!X@Beq%B9^u;+ z#OJ1%Awp7UtWD#^I_!AYHpt9-pd~ufIh55-}E|rVGd>Hki94S zpz)H+UC$mludKk)LA`0i$oyaKm|A2GWtDq`qko>I{#tX8z@hc*Q>BMEucLTJgm14p zzm5LUc%JlSPvk|iYp-8N{?5m6T|&OujH>m2mTROEO%@|)`D9E~9pd;0x5(mUv^JE>zj zGB1oJ{i597(k*S#Ez0eEqvttvOJcQdnZ`bnhlvN?Exm&NXwIAmVbGhQKTJ&yv}Zrr z)$s3RO8vlE^vMUj`>T{|0_!M;bjzs}bPMH?ZmDA2M*ZvL@>xY=SN@@BAv&c!I;AoC zbSt{#E80e}iYAopg@;V*rQv>SJaRD*S$N{{unyTb#Vpu|4r#&lroSiOU#c1UkX$3% zn|Z8{zz=2M2g&wEbjrtLY@Kp;6FTMZY0Mx7Y0qgcM9)ay zscb4&d#GKO?qNMM6rrDr;E&zs1waIyHikMA^vDsEVU?0ky8X_m;kF-ParwRKKn z>HGZ*>x2H`9E1D0PZ+0cvbR9|md?8yUE}JV6Gy{(hcN{k=Zuif!N;0n#|*=H|CL{` zmlHFrX`bp<2h+HD+ND?0=pX+batjL2s6Im-Jk#nj^hq6LB$4vBgr7<1Cp%_{PJJuu z!W``H!dt~(wxS2WnOCp}I^i~S!>#Cu?&ykc=#V_}1v)z9GO5ND>jyKFpRBR&^^L1b&z@Cm%Gt3-v-05SAvaU7NSd*myY zfz^~{7UvyP>jze&OLX3e^S$V$eh%JwUU~_<^P(tt=Ot0}RS{G==S%SOqv)DlD_<%) zxU#4k!)zrcUmJ#b&cQGbAs50h!V&wYn+3;R3}kfO)H3Qc`nay;_eW!wF%j5BdqYHGm)2ig9J|cm8?8>%j9tX9{SnyZKooX~#4XYx<=`>$ zFBeFE2-ge&yU2zMr|AEK5%^`3gJ1qrx+DU>3?v|7{36{FiC@4j z(>4EH`FOr7e!0WJFMS;RqPbUEGg-!1pmOnOPLSRmiCw0_gGlTW;Q!jN%OVH6jIec0 z%z3{FIdF6DhzA;*_%_((DBqO6k?*g(JDT6L#R$*A3HwjqAGfrM)HnZT+@k%jSD;Hy zq|^;8M3)@o{9p=t1zjS&@+I>~R-sFD{uSrKIQKdj=TYenFwSGpabTQZfpNss4l8FB zeX;xnaLbEDJJBP;Edg-LTYEh-T-FpSo>2v1^#e z)5Y5ElxNSGTdY{-MU+#P|IM>DUNMX{3pl6VI}04_aj z30wrbs1CwNO`tAzQGKGZOC)ZQefUg#~5?-|xR%vD0qoYy;vr7Il#veCgW!zhdF zeU5`)q-QMj%*F7FCCekA;U`f-hZ&X^izYAg0lPH5d(; zm)4*Py101A3yo8?7{rkOTKR%s!e*M|U?)o$5xw5I@=l%{>(Ryr^we_iR z%%c^1j!)zJnvdX?!Ts*oRPuMj+p;g??cX-0Ph7H?=M^iJU7yMK6({xZyvm_ixaNtw zbA@H!=Wu_0uDjQEIb~Z-jFk0p1#V70J68_hn#uj;5&nbX6u09)^rY=`wC;v^2Cd+o z8?R6tF;f1bZM^zNU%sz6xf{z?xgui{;=f{{TWO0R^>XJLl(XJI`l2m3-nH+FPlVUE z#n|%>^qh3>TV_~>3OF~w(eja)e>>ftf6H71$z{1ClL1>MFCLe${RnW1;-al6e@**1 z-9fo!(>1qN7$eE?H@rNv1sRo{mpzbuKY3?2OjGg9lg!2VGC3viRWddm*^I|exZD`#kd;pt%_QC+`TW4K-HXBgvfZbU zPuVrer!aIX@_EomCEl2djHU)Y;E-)qyixX5F~>H@wQQ^8^9;8A4(#9l!m_Ua2Rl0W z8Z%fJXBIq#|NI;_^&{e^&%x0LliAOVwlGh|+rLYeC7TDSn>+ULi@v5MdCulNDx-4#sE(59 z&5mCp*^zIcTtDu$zd36HzCxzU&^hJkD#b(G^_RaoKdo=F%Ng%zU=K0%BJ_b;~kYeQ-^0IR~qk7j8Ss+4t$WT zOP>7~*x1XEXP$TMr)+9Xc}`ST$+X5eWMfZJE(3aY{B6kQt;lG1WVIVIn@3E*Zz02W zT(Kq_Ta)}o+1TX+;lq`%v3DTHu036iJ(UlCGP!19c(S*)WNr$aRd4Kn;Fw&4RMI|-e+ZV-|HS8+Z_BCR5c$=8`u#fk zlgjf;%H!H(l}j?Lu}hUtN|xV_A+GAkaET+sBQ6ssYpkFS?~{G4%`T@6 zi-^nBCl!GF<8tAuVkB1}2Pk=iI{?;rQMF0K`a{Sa){(`qM z&EO|P4eR599&2~hIKf|#Wj8)AJNi~5))mv2-FN8u-N=yp{GO=iqvQMb7~+_CYdi0d zJ$7yIHf(T9%BQuZ|I9Fhk4BVJ^9IPlQ9XVkd+f&4%ZSD4e%&LP*12nw-7>l|t+Gar z6Y3fF-sp16<{mtLUZ$VsTr#~Of&4FyOi!Q=*_V*%-cd5`=C~dK)>AGw*<{J?yEeu? 
zzwggf%@cRl*0q5za~#=CBUhQuk)eLtKWGOwS$gJk#-%i8w+%9?YkKD%u4_-BLzENw z?I@f44zd?5#~0e-i^t8}7`&LYt2dBEq>>LU535S z&puzQwZA#oyc@YJ$R{=w++5L=`gnYSVc_XQO?_4i&J#jhZ|=2*GT*j>SQ@`?tsn-k zy-q9QjgWNE0@}BN7lJKt5*vD?;yVe@AY%-?un-Lv%mSRr#PRq#n}Wn;{=LmFmxCuN+^4(gt(PM?yRL_A8n~n=0??6xvz2V5iXb6`PHa+R6U* z5Z)QfyGKLy?eS23I{?+UUx4b{^Pu|nGM;^%zB}(h%YMEa*Y$jFsGffi`Vz6w$BfWG z_TGwx3uDh-eU~FoSX{9XlTgJ>h>pJTB^}R;u>pRU5 z>smrzPl96eza9cbpM2d7O1pnO6Pg8m8cI8UJsO$=?F}uCwboRzhsqq*RTr~|%J0n3 zqVAM=Je2mE18Fj6B-LJBiwq{?Ujr?EC^;OXM(0R~B&|K(yS6i!IzQ$7S-QwbF?fr}vZQ{3O z{Ps3<5Of>#KHUrL0ev5O3pD*Yd;gb3d-;De{~v(94y}MrfF6O4g&v2FfS!a7fL1~8 zh0<@M`?42ZEIqLICSu;KA^NlvwE&@-s)%?e7z@AJuZ)A}q&2 z`pdp<4_b!TJe&Gw<|3xDUfp0k`hC;N#>ZBld1{(z&7wc5@5Ievzlt>S$%DVv;)_}2 z`iw5u+C?$ey7jTviYal{D_xDy+75NBW&IMZd5w~+wPh(*$$P2Rswb$|uCRVlxqA`c zmnk|G#P2`;TQ&*5>b; zRtL$>Cw88xvp@Wpx%TLqE?iSxijJDr3r<}!7qWJnx-2#fYk8K(n$O&*70I#I!hUhq zYvb520KVG&AtS*m>0HNJb$_DudNcah4%%ZN<)qte)cBxy%g)!-KprQw9-=Q z7|k17R}+7+bu~GbZC$PZ5Ar;9(%Pnq{haS{&i8OmUF_?hpsV>^Ihz--#;=R-$(6;& z(>l6*%mzDO(7v&z^?9cU$%|#44Wb{)q1&P2!|$MK&-8e5h0~TRYH7yj>HmWczjW^MOXn`XbS{2DGk9(R@uV(E z#tiddd^Nv3U?K7AaP$7ydio$oC1sb+tc2(JP?cMJ(%PfN;!zG|j`0@E#aDBA;mX>U zH^cTsV~Wr4-b(DH-Z_E;`(%|$R-9+QAWyOOzuAYpmRw_W0!!5wPH%-Auzu{Xb(rfo zJjNH$-ZHJgcO#EHv8wxaB=?axeW{ZIYYGBE!>->ud{=E*Hj=AHf7GpD&3 zS}{1rTFrhrua7|&^ozH4p|e!K-r%8L>ApZ6U*SC?6Y9)XorryRYC^pdb*)+A?3Uyp zYekNNVqZeXqjtGcDisqefS-NoXVcLKaq#06<^(P^J=QC4c&U>Sa_?=)y=}m_qp8Q; z4NU9lI~e14>rMRo6>^hn&D#i$UL5Kx!g2|o2`dY|M)f!MfsgHP^4;HYXdUHpeY2~_ za=&$$dvf?~7>C+Cz~Q#}0(`x^`rtq6ptce=%tvnBwR++9X1^NRS8bgf*~Tgp&nHA+ zc+D};ci*IqC-U7k!-iKap*%~lrEYoTm#gkK(MHGNRnQEVSMfD{uNV9m&kK$4UQsRK za~We=l{TM~?DzJj4l2u%)`sO?f7Yqz?N0p`;J@e5j&|LYzk<4z!@I`pmlB|ycHM|W z$zNCeazU0om$rINaRF_lcTb=$e%@Vb7$NstrPQJKxS>_(=a3seKiBQz`_fz#`=c9j z!O;7`{v&RS!2Qh);rcM{pBI7q7dp7Vp8fmQeNCCu?cjdJF{-hDJqPCt=%=FCrTZMr--z})KwDKdVePT>HGG^* zKOKJ&tiK&hzsBpC@d3vv4q^M3n=+;bwhv}uYbom-u>C&PH2em8KNNlSFl)~aA2dU^ zUr$bRaxxy?Z-#o4*SrrH;Rx+Ye(|%iAzQ$N+pwWW(62k|n%ejKZ1GCd`tw@m?tn*D zUSpo!$hBREP3trK((>Q?t#_}%f7ro(>J7=S%WqY0VJDvB{y*ngCo|2nt`D&Au2`$o zh_hBPZ%=({(UdyYl8QuYMgOoJlpj!{eIl_7i>6;?9q`4K&U=~PWRp8wYc1Ptgkm#( zo*9?%aAthQkjw>!akeDETQD!(SMUn)@P$2%v#}XtG8c>#eI#?ibkYCJT(DYnZ03TG zL?6vuaKlFpuP_ICHUBS$%2!xt*!$@STiDO{=DME07pmt6K*9a;In=glOSPTa$bL@` ze%E`3@SaJ;>q6wx!XG$8+rQ0sDsrI>pbtW?hdyS67J?(+Sc=aum;FiZG(w9qxjvfs z!Cdy_DfL@rE7`BK8@b}Req<1HUp_UhBfs@q#}Bi|)Ghe6Tt9su^aV5Y8nS=-X=n@- zT>M%Bv^Nx8cX~E73mSkngN}vfK<|aV2##AFXB}Nge$>_R*3p@aKXgYwPJs4=4uDpH zON;RvR+*vo@*C#ShXxz=ey28`8_)k!Qmi-T<2Sr%hL*^0SjM%<+%pH7T-RFB`YNk5 z2ESn|{}*%pb?AKPV(21h59ktTF0{`zRiw7**k5HdK!2cVe70^l0Bhbg7@*_q=ywa$t`_&q|dZ$S=@}QrE%>;_;fJrYse3gZy`ChhEAE4dE zlXuZAc3a1V+js|5?Ry+*w_}a%OItea8>D@gF+alnUZQ-OX2d2OA7cqV#%JIeee1ob zZ*6qGwGS%)Vo8SQ8$5{iFD8Ru#$Uv**be?$lLt>YPI1VOco{z;OMZmI6ZsJ?Pa=JZ zXnh)4e+I!rZvS@Q6De=K{uH)3!+bUS5$>~l9X}%bs)SCShU~inHjp1Ng8m-Hl{Nbj z!Y>!|BMR_CKBs_YB0A@`n) z{LB*Cd081g9`*EOJd~*z+=@m%%Om+`OnOyoT_2oT($zSt?=)s##VyRA)4q)DZuMJP zeCxWwrqyVu-|E1(ew)euLJ#RYG4T8Y`0)3Tvy}A+uk>SYKm3kmgT443K5IE^5mpgL zTro7xT2CDDolE!|&1>*C%ozL)yG$vS<=|7r6@>ruskiHQc;TO!F)*{3dIr}c)6~sB@NU~5*<*yVKM(sO zBZv`2`y&_hl^Xn!0{X`h-Ze)Uo^qncE+NF-~7#%tQA#_9Rz6i=WvV znN}a#fxof@|13ycK=}`&{gisNmHZU;AnqtXy|E{$avwf}+DE=hBtI7OE%%;UeHPVU zK8xz_j6YUJ_$;co{Omk@mIYCMOOSdh{yo6)!Ti`mx1PG*n7u#cgY^P8%BRu1LCqid z&i>2B_?;nq0M~!1baIOfW8C6UbA$bNd;#VEIK0qD%)u9E!MMet#a?R|KINf$_U~Kw zHTT&2WFCqq-!t{H$1R53X1RJt^ZAwcX>z!(C*CD5^^IxxiOpG$#Pvf>S&t;znEh%+ zuOkPu=(T33qbT;X2~^`02Toy+hQwLd;kzU?Gx}+)LgUO`@LBBfF#MOX_$9MATt6j` zYw}aZJ3``$wJ?0C!em>;KF( zVNZ&{=g}0`f}b0X0S1xZ1g7n-vRC1 
zz*;x1p;d8=^SxZFgdTwIhYGLBjvs-_j5A$27o!OR^eU;!BV@3pv<_ ziV5%l{>m@O0kg>;$)PReKjhG+!ZbF|sI$#8c;fQxZTU3=;FbKE@lg3SFF=KlmOU(ENiUTy*wyB92{dq+ccZvd)$=RtMvIwQ2` zC&Z{8_gfbCX`IL6zC$CR+;`|{DEDc+r~F3KdiOTJwT1f*=aTc8`wsVpa^K-0Q0_ZC z$q4ms$9NZg?Y{Ip>%c8Ws5^B!&;v@pIxxrx9hhK-<|Fe520+n6+COi;L98vyYn8NS zpZM9~cDBbbni*ldKQh+(y57&Md0pcys~z}f75A9@_5!p!^f72p=zY-RU|z|>(Fh`dC>Qu zPeb2_4uO6I?E&2lJqFziJqSGjm3&n|w?dCV*Fld%=Ri+FCqb*AgP>>Nz2n;y5^Eip zoY1N8qxg=$K@UlOCZfZoS6ZgAKCB(F2>Ptzt4j?7*^OarBf(nQ(+HKeOU5Rp*fMt% zU6j=nU9<}sN;1Rxhv%i6c4l#XGqNEWkc>zMj*)wGAlG&6LnAB~l}3m;zTrNjiIS6x^vHmQ0iy%5?{FFjYMl{fBvT*+J1(3 zKbZg3hC`w13nQUwgE7z@&_|&gq5adWF8CI26|x3|V=O+zEDqP7$m5#)iShUod%sBQ zXzO*w65~yK?XV@DH;b`(tT~=J%}r|y^*qwSv_>Jn({E#ck2vt=aMQXEe%kmRoUq*R zTJwlCySRQu3}ZLMj>L~H;QF^r%EK`h>^_Ub#qxPv6P6zjmfyQM-Y!o*<#Bbd%9pY^ ztY68P8WNUszi> zBr>M29Mh^>E3iV#dQMD#uwwcRN)^*@1P`KP`g_?=ROP}?=+`FpyxavTzpl5Tp5HZg zuQ;~eq4v~Vw;+7c|3}r|JhjK@yIkuFOeWWZ|6gG&901owW8rXY-f5>Au<$hcuG(p$ zQ?Dg_JJ`-+*J+v?lmFEq#$*x$8XwbmgvKs3mtEg#DZB;W6qiqdZ^F9+BkE(1Lpk-C z=F}%L7T*Q_PiC&?L?;H{I|9$v7K0bBRc1f$6IQi(MOj;o569l!^(E&ve#h9mja>~J zySnpk7Mrw%_VE*bZ5n}Jn@8YR_I0=MYg!n;9!i3q>o!yP^@8|&(ruQ;;DkT(n7{7g z*J(dC`rWN@j;3blHs$4LY=(M>UT22x6}{FB^%Je9*gF(`-VK`D)WNUB?uB2g6nnoR z9D7%rXs%3qaBEFCc2ER{Rjj=Y*w?l-Lj7>X=I+2tq{1=Y|9WR?; zq+74`2kT1Tj{)m;7S>I~hVZ*=K^aswWD`{OCZ>TkS2$O8V=vcaHx58$vn!yo7qZo| z7x!mcOR&%KmAu3Se2NF)L(e7tGmm(l9TOmi=ON}-LTuhcoUdfH=#b13V)Gti_a(&U z#nX4cApeZd#MXnEu_KC4C&(s)X|p8@>^)f5!l1Wg$3>fJE(kPp(@|+peo-& zsLJ;`7#Iv8j9oa&Z#~1k!oSaOujU#(!@Y+BQ0~>-qP5`PCG@*e?i2nk&!6rUB$J*{PqG={r54bFv`86 zsWzrw-kDhNR{k&K`ex{4=rZUO=uGHT=ve4<=$+76&|GLF{{4{u=W~4{bP;qhbP4o# z(B;t4(ACi1(Dl$;pl?7cp=HoL&`r=?(6^y)LbpK|Lf?Z1pzlLRKtF=^fbNDKgYJbM zgdTu?0#s@10x@gvBaOXHcn2vESx;I zkpA}tcuT(5G59=~@~NHS!=*5?=I*oy7uST72Ss4ykHH|~Wj40iZlm~c`_$APx=)xn z0nFU8En~A_=Fhvb-;I~`$BE>MB3}9!vD9kJ%)aceMPTOQgg9&M;CP!4nk&;DoLm!b z9u$F@f3+!$nT2yyKDS&dOToslUT)zfM;I8nl4B(2(>RuLjIIGUr#iSfo3SU)wMM6R z>7%*z*rUaXW4AbWK8`(txiGE4(3%f(L2Q!o3t@W2G^c`_=V%Nb+%0TV9m8fks$B}X zTfo5T?>FM_H}@n3w3hiGee7m%+2GXk^;w$k^n1m1Gh(Xa*P5TSu(Q+eiD3&frI1r1 zGJfrW=RP}rom6xDn!c&{wPM%%$WLU)uahr~UuzD{76%*FG-oCRzRreM(fIl$=FF%} ze)^B%qr%vVUtbWnrfzP`TK9e*%-ukElXqat?e9g#Vs-r}*Dr>>H9tn!JDYhixk=%* zrNYB4&YhnZb3t3zfW6y**KHh2o$VMjb#6m@&W(scFG+CPlNhwh^=>Wk=L(1C?mU^7 zz~gD~S$OjU8XU1V%r0%Q|xKdC0}*C6~32T-M*ry{EbGiZi!jf9V!r^hDyxj}lu>VeU&( zeBnJKo=BQK^1$P}g2-UqxWaor^NnBmoYxo7+TI}*<9FQ&9fOQMl2Lc|XXrHTKUfbx zY!>;T>^VSSWW{=g(_3hcJp1V>W}HM0-)r5#aV?vUe%LWFL_~K?UQ27O-=}|nr?*V zllS;F#_nHflpI_>lzXtxD|)5|SB)$D@#}pWvmS^;{$qMVH$GlQ8EbiHmMq4|Vh;!P&!NrB`} zW-v9^A1o&4?Obx+&adwczQVeh<=ta~EBnL-SNDqxuI(QmTt7G=_}a)i!8aaF4F36v zq~M}v$-&arDZwQjQiDsorUjSMKg8Sn@Q1XPK>Yn*tn)1c7ibJj{8L#!r0fG@$dO8& z^67i0(bon%6K`*Ux8l`nQM`H`Ug`ce^l6s|2Ur{T8Tq5#x_6-79jW)F>hXcMKI`=B z2NWw><9%}FK=fPj=kXouQ$~G5a!^H?+%~hnOF5&z8~vRR*h4|KVGViLvyQ~u_4D&C zy+`jTs65|(?z2CIXLc+n5gnUmbV9D}SWYGLAL_tM&CeuH)2`l%gC8T40xY5v$?%oqJ@P=)c;(uY~g7J#0+XZP=tTV^7!Hut(+WN3MCA z%P8Lpau5}>c4}D;eUp8@ULp5TF7mIKj>-N6cYD$UO;SZu0?qy~0h$!Jk#$&ZJ={K{ zwN@&(?1;)%2yc_AQ*U&d);;Q-L3pBfKEz(I?(^E;OJhYXm=~#axw=TrF<2RrSy)9rMNa+@fmjsWVd5)AXw6w$dc#75! 
zXAe8vRo_+4AK47OPvvavlv8mJH?C1YIrU7jQTXEpCgtRiUM_dyB>_h-EAEiUyhX)Z z?DoqF&s)qRFQyxllf87~CYqzD?`e%vAMlOhKH8VpV9c+kJcNguc0&i}p^N?GLw#pt znQy=c-$pklu|CJu&0}lP&3%tPv1>GXS$ltsp?}^DeV9HfeLS`nU3>@m_N9v}$<;x= zh`)Y?F8&gJhWl{Bj9KX7HRxhDf8R9jn`6|k%m!zbc+Ioxyzv3a=ciz+b?7+d#5jB_`38vzLY292|MO62R(80)H&tY z2+q68A5>lc+0YCg%+q;X;6yJo_{BXsuWOh8)I&N?5108rb)FVD`?wiA*F@*3fkQ3L zVC4^Vo)kFxBQto+>kl&48Jw5y2`IDBZE;0CKR#MQ)Ne(Vlr~R~93_8CdIzN^@2;yjqGH0&AtMI!_a~HZ&Z1rzrV(@6|-b3nCGMC z=x6pfB5|#*r+jv<`hD(uTG0lw8xz@=y`-+uX`W*@G`C;+HxZjL0^2ddu^R=*WM6EC z`n&Y<=!@74FqdsJ?w)PF?ElrQPmA|NCgW7~1YGbZ9K+amZU7^5*(yE5Q8j ze4KgM6kAqukQLb#+n+&J@Momc<R?s#N3y02;+;QQK_LdU*{ z-zN2o*Mok_x`$_CUSKey)7yLKkhcUVw^opIy#>Pzw+ z@Gkkh?~sixX@dO`FSOsJYkyP^*#p%>{#Rr@biFn^M7!E^=eI`LA=gK6=b=Q}p-cG) zy|G8KPp*&f@32R`BkYlU1?{sqf4EiIpQeB+o9uXOK;bi!Hq8AU?xrY|>rG zz$5hkha1JszLcNP#<5MUlEZ$2FsyQ|xwc7uf_#JzyeR?s3Gd=h$WO>eX3EIVP|m*E z8`xL-BjT7Jd*cI5XPUtq*c1C>bmEQdA=jKe7C+N(v z@$wx~1LOH$xUrag6D8~$H}X2AMpzNAunV<vsRr6W6@IfiYY@!ebbky8{tc&NB9zH_!1_* zMA)xr^w{lWNz{A&EP*%(NXX|9zJWmXgl~G&D+-+)y~`3`=96So9N?~$c@Xp zE9rxjRmOMJUa|VdK6FH6UDTi4xfx?k93)bmdJx@B~2hubER{>ol#ke-uniu~?} zd^h@g>)@6B4eXNp4foxx9lPXV9)use^hd`oB{8<_+NC7cULEm0v2sK^e0=27wN0`~ zTK6RT6qdJd`ZEQNKT}RQP4-H=n>gET*pBj;vLDO0d5icO^Sf=iS&@u9)W@GmF8uLw z)}Co?q4ayNYM&>!FuSy z4D{fu=+6$|?jJtr559^%?Zp0Hu0Pb-u~V;MpZ*v7fjz)GUPISE$ev)bRj=buJi&fm zV2`tN>67!YQS-4;(hc^wDp>fHm%YK|*eTl|0=uuoR;|KTt?#dU@QJ|hYwnN7UwX{; zm)2pks=WA9FYEVT**?>2*ey@Sc-vokZMUwU_mg`4H}{i%kKU-wPx=Y^BhpVY9#T7TU$*7m7Z_lpnCaqL`i zlpk{?{3^2|zGE}$uWw9r{KClks{cmTH=TMJHPlOe&4{X(U2b%7bh%yowBg9zUiMOICgL!sf``{bdvd7LhWIrSQ zb+zH8Y-qkSKkJJ5de}p)KiClA>jh)PYu2qezTOsaNo~HKFoEo-FoAqM+1E&auatLP z%+6kEpHDd8yZU_Rx@lh&ANkK>$Zr@+p2Ilu9mbQ3G$Fh%N<8!FuQftRQhWYZ zz3}`kcmE~rOVAp>&cgrGoUKv#cZ!EA7H*IK2oEJE+2ef)#KFNY{WM2Xadq~;`0GJp zPMhI_?oW8n6XQ**fP9YS zysHIsBwNO?j&-16y`eRGjMM%(*=Ma8N{))0SZi(nIO}!ht9*z)QMomr%$`@qc$4Ot zsScWxra5M1#M%|FdY|`h<~s~r2g-y`DUSzv80KYe4deC`Q%!4GGjc`5Q1;c#ZKdqG z=P-S%j4^?HVmhBfCqZ4ksj+&^Z&i%Gy?4UOEb;{FnR1@7-;4i0jrZH{C3bGVkGWHt z*E*c*N1-#I7I-k&-DAB-eNHgnU?=&=l%pUJ5xYN39o?K1m&#M%##DqkBXbl;7Su;I zzcV^N!RLHiF_cPuhd!cMelcUOTJx@JE!h*;l{eK-IYWCBr>V|UK)l+1Kfh_b`NA<+ z`W&(T8BVOfw-aNjbCLW6kum;KBP_>O)_;93@VxG4F*kzpP$j5 zCmfm5{1eGky7Jf~Q+tr9HrzMINDt4Gz|J-YTYf>m(B4?hiKm?GXIh#g(VRHKSN%<^ zim|E4`9Z7RGc59ohIJ-(YYp>D+_iq|ZMzj->(`imDZACYFL`=?>bE-b9m(v+5BRMg z@tp_9k~eIG-)hTu%E>LWv5Q}8dyz8_a_B`4eaK-9a!6jN`U*yp`%dU2-~DFZc_ZIY-3DrY1Z62f_Ohw3_6<;7w==hG5W2*Tms+gtn-@{9 zlDmACo|8OgJF>VDc^u5S8xPg|nS;orZ*Av$`N&-4cd#`f-A4k= zewsU@eaznDcto;^pSK*@{4I4o?lVKj32H$Ga^TD^KDwJc*JCCjprhsZfRNNe?w zcgb`q@_ZOsZ%b@ccCsDU&dYRoUX1Kyo9|;M-^1P!fB68r^C7bK5jH4;w$b?VD#n*r z_m8nOzATwqukmI2p~jal%TDH)R;y?`nd8{W)>qC>PDJM3VVNv#2iwv&F|t9ROUCfA2+7b}rH*-hES#n{c4 z9lQ95V;2`k*u{OaP3YoQih0w9?wEE>vKOS^O7>JA&CAkQ@i*59?5WP* zlY*SJO!0iHbpm~`+p$%ttaZh1%*BUWi{0vk z9nl!+DXw?MuEgNyySjW~#2DME`_IoAWUh|2&K_esZwocX_TR-8y7s9>T-X-6^|2!A zBl{$oYw;uG4Sf7QGWSc#9CI00BXdj1{Uw=mZJ;ZA(Y8>1KO4L2enaoQ<6pJgJ?&2; zyEo=a+dbs(`_2)JwtMLjcCYvn z31^I8d_e2hrh+dt7f@>_FTkT<4$T3Q?b4iZ+s6a1*mxA2?&47wi?%0j>EhAw+^BRL zpEVyox=Pq>pm$H#o4ttv`9Ndw@s^@D7qVVVbECB0O>?8{^=^*s$|61{toLcMXma4d ztO?M>z-Nul&uwyJPMRB~a^1$f36)E8qvUhk$n$%d8>RlKKD!y4;C@%vf?U&gHt-GC zA8SueZnwYhJr%}&e(uwAw)_wq)ib((E#LLWI#@2ko?8@Kk9GCn2VQAe{0Wpajxc++A)PR+yC*P zm*R-0tF1TS6U5PXgz-1w|HRRM{=)e#{GZ6UU8K&Bo>#FdVqV2LFtBvKCz1J~d}AYf zgy{W$A=kLxz1ggJUWMwQwQQ<``sOwC&AL2aNDM*ml=_V}9RWVh6fUeO>Q!-=k|s!A5#k&&^lAb#TxXi^Ki@dd|g#{ku0< z-tFIsjJZm$Rx<87ymjJiSFfs%UreXAVIH*lcoKd0a&&5Vd^SCt(}ppO=3tkb(Qo<0 zq@`coHN+anT!=5a)ES>$+9#YFRJu#!v(jA`=+*lAAHDi=&EvDtdR2W_dR2W_@3ehF 
zr#u=fmY$3*gYF~#a#3BI<>=aS-N*ZGq<-$Zl!r*~I84qASNATt(z+Kcak=BMrh^Oe zDQo2X)Zr@ozj1!*7V!Ev=ciuSH(N#ctu@(u^~;OuUdK=E{T+3$m$qZhS|}dz3Oc=W%ssP36;b>dz`mZMt}%ql-Ne@2?AABHycd6nfWT;<>JFmY;gXbu)eY`;3tk zUIGW#*6;so#zw;Z|J+86eZ&&GjU$E|Pb@cqm~I`$J`$~5=0tz`v|%OTlWH$}i+#~< zC&t*4^)apX88a%I6T2;kHiecz8$%~TuY=}888d0+hY|;Bl?^5K6HnYZhQ2g8i**5t z?|^yiyt(XGbDaHnPxxX>|6s&!o!Kb7e`-DUPn`|ksbo#qqn_&mdoqm`V>iqynw1gW z=czt>!&f#nL%+~(4UHAEHauVSe0}C?YrmM3s{@tH4R}nyU1O|Rv*CrJ7r^G)JN{bs zj;~}L%sBmam9gT~h8K%oWS<%B#hAff@`}6vO24HVD>62|RP+)tbnVU9Af-W|lCh4* z^;@E`qRYmjq9W>~y&4;)Gz`cudP2W>jS~;dd%5Ukc-APTQQ#J?&E7b>=!`GE^htcW z-W*LhJRJW7zc9{g=h-NLZU>u4KSKYgVDCNkkpO%g!M=O} zVxTE0bpng%3jkUTkzX6TwhGz7*NMp(V{$16o7~BGfU|!13cvOD|xQy zIo>xlB{eX$d|FWo_dG|ub~Hyjj=CIq^r8IKbBd-C3(_+>AE9&Jn@>Gk=$vm4=Un@& zZ0~AV?-5hh7#li$r{;FO$vgUB<5f@jZi~R0&8Va5rT=rtji>r<#aB^%4>@&Rtg=z2 zsnn-+N^&5H`VMuevnR(zIYxz;pzfcKS(D?U%Y^xaCL#JAEXncFJ7eGU0vYn2k8XqiH=3kNGC{NyiA+CjGmA_aP@=qf~zCOZ+!Xk^~3xK{lIy(e%KVD zA2_ep51d!)2hOYY1Lxn+4_v#_`oYx=(i5&ucm_VYxxcOf`hoLm{Xjjd^#kWuN4lrQHgm(wqLb*>UtnXuBF=Fw zvFzm%Y$fe@T?B(Dz+i(d&EIikkKftgZbovjz*4aty?+8w+#CJRC^OWXt z9^m{7_CrX>8kkwm`PH=TT8_QGdZqh(SCcL&_nH85bRxeRldwO+oqjOMMms*5 zaPrCDYs_5p^^L6j%o1WhiWM*BIKq35g5flF%)0s+LE>psnO~Uesb5))JStZ75I$!r z&t`B>+h51dz6srO++zgpN=^^lnVc5b$@wGfBRPt_VE1^^D|PRu?e9K4iSi!vC6q4r z;P(>$nQl`fxP2n}sjhLtPkBG*cQ99F7C!b4#s`)U8(lSh*iBX2DNAD`wNiEZdsrv* zy__-|RTq|7e)qR2^TvoWds!cyw>y8-AP$w?n|i+No`|yV1@EiubBB#Slb0|s^QmDs zopH;rI%=+0ZSAZ!UhK5-N&GjroqrhB&PUTO-Of4Rqn&eWZD&tJJA1xoJ9}zu=iKku z&XHxlu$?bm=5OE5p32(VS><=zSz{5B;l=oNYTFS|yG@-rH;t2x6Ng@y)9_7xJACb& zhAljaiHWpZz%*toVQef&yBi0L(UM1ej~OGd_ww}{dpwl}ay!gpbkJU#$CwMVcVcev zm~sOkOC|KPV_XLtRhsxcUY=L{Kh;Rvy6lOx*}p?h&w0p`j9rhUKW;7BiM?ws^O zKE9FGu$OWqaZUS!-Z5@O)ij>NubMFx-O$)bvSm_b(|SU;Y!msW{GpR~hT|f&)f3y^ zw>Nbi$v37!ms00toFC^~8beEP>rK6a)V)11y!0kc{n1Ca`-~28=%3Ao5$KT|AGj?! zE>I8sGcy@G#oFaL-uTK5?e0D8qb{GpkAsZid;zZ>dTv6sEPpyRH>g~r+DHB^`h7OM z)4k%I^pJQLNBtK*5ohmX-{kR!ZMpu`FwVXuoMfcIhd8^<{{|1EZMfRMvQfDG7el2( z1I*D&(A-z%@U()KR!!I?p7!y?2BxArg79G}T-&5jbq)%uUH52IO{|58H>qAAMK;u`Hb(_=U&d4gP`+r zof8w)Ihf}xv6S#VF`Tp3#J)d{bJm&I_v@TF3-X_|G=yU`8>Bk(U=efR? z;{|NA_#z&a6K}1J4-;MfP~V9zf2dc9d!2ffxYsFvxqF@Rm%GIZ2C+I6n=^GW)QS*j0 zhMtbyu8VxfVXK<6b|9NMb80J(hjw6&p52D@g>3d;(0Mj(p3UA1_C1{MGo15S&SS#o z3bhX%jaOlFAsMP3(L|$h?O! 
zoSMp(PdWW=nJI(cDRVyOE&P0oBcX0=U>|!4M?N3IOc_BWB|#X0yr0N#b)I@flf^4y>fydQSY|9cTRxPo^4)fr%=s!x!mA z`+tWw?*DnTKbUIgWPZ;Cd(*1g4L)?NMVyVc!OYu$4r>Mksi zKs`0TTlKAE9HAyGF@fJAv4m}3>BDxeyC^K7xvs(zg^bsI7c4OvEFsKa8^DZ$R8Jin zvj}5pPYn4N>YMiaJ>G!yLMy(bwa0f1#JAu*n)BdbiJj`RL5_59oxm~f+oSpuGdV#l zqc3~8HzOW0nX+yeHrVcm8-_JI(>6}`8I_6j@e{uGrOmtsb9{*J)8_l>cRCl}CvqP9 z<&(evEq!LECoT}i8;LgF(A=TP_znv=2LB=VLi_=)If#B(!23%0#vJfOG5Np-KT&t~ zfs~lQQ|O%=$hYGg{_`_~&_9#VKk_ZS!*4jl{F51z!CsAxx|QWTyBVHDVlVZZ9Lknd zCma{r3SRjBJVRbDi)Wq)o_YS;@hrC%o}Jt1*?Fj`r{MGuFE+u4ZHU2#j>RvJ!!P&P zet9eE@9NyG*k$?UIgLvC)@fAS_e;ljE1^HgKR@-Par~4wW<~`uvV>{=&^aG_F5|Cz z=(}DHFaJ!|%G5JUAKQ_yF%yKRG-8HtKdOb?i=EyJ7$G*dN(n01xbS zCzq2`D(Aqjf1mJcWA?LP?wpDz?ek}Jeq@4uK817H4dsQ}N>0FJ#ys1Q-$U~Nw6^Q# zU|SpOBD0Sq_yTd{{Af>qU%+nBunQ-qU4tIveXW?M^(Etg z$}@Em-V_pBc#2#>FJPCnH;kJf;S~S7>sxhx@~39-tDpIUI%oY>@bswg`I%pu!T17 zD30qIG@<$S`HzEz&=ULnC&4o4KKnd3Xnf869K~@)(1hmO=bm68w8TF52FsxP>~mj` zo-`_(qc|=m$S&Ta^6m53U?H@`K938QLHBVe=T>oCd{Ft2_JE<6uz&sY)c+66MR6-E-pJkRWN$*j<ID_w%vkCL72nKQ>ID_w%-Hx~FHa0P*b-EZt|c5BIQDbI#Fuxyf#WugAslWw zvLecn6;Y0?h;n36juswoc&#D*E}Oi6gtq&?DaBR?ehuykIZetix7+O20D|%rfb8C5D*Ak8m z9Q!%kx<|Kh3-|^$?^uXmQNpo-V?T#MTV&hqabcS!Of+T;WWKoC1>6$RE}l!ai(3Zq z*@VYo{^v&U--zJ9Cn%n}{2U1u4#3Y6cqkmLe2X^kJYn8F1n=DUn-Tq3^3;nu+P)Qi zr`!nhCjSDSOk7fq4z=#Jb?tT^-UZ*SdaH^md9!h7D(ba>}8K0j`&5IyJ+N_C8~vK5 zdIDWiy@AfYs{@^U@qv!MdVx7wQ;Q#P{ATtI9hbjeIp=f8IcM)fka}G}`)Hl{I?u{U ztsl^ySSQHEe-&leO5W~GeSFpzJ;;;jYe$S&00->1|M%1=eZ-|E}XW&AY_cH@zv9CVKOok%skm zc>1QNLFHclf3G`patxo<68;ib57mcOH1B04(2m)|a;iRu-}-Jd&i|rwzI!w0ZQ<3H zy9{f4ecq*aV<+8rxzFo;dWZYG`>aJ@+{#*(O?|@8?xoz(&jS2H9h_&|it^nXphZ0E z*1>Hnm0jglSye{mPTF#pm$I=}5ZCdq*dLXBu{pl((Zf2~$qcD|pXFWdeUmvKK^qM> z;se8ImzbmR9g}#E&MRoM;Rg7O-}ha6t9ATlP(;1%)~C$tILJJV`Q)t{X|Q*Zhc#my zEjT6^4Js!dI@fO-^PoN$Xw2A>V1(}G8=7CE`99iXT<1Mgj84n>P7C^Q0C~~%MdMyw z--f=r1bZMEOY}6@%DjXVz41?ap7rlqiY)E^jk*3N*)`-#YbZWUHfRTLWft-C_u*>~ zgS!5zbCqOS@^XT{d`FrQSc0$A!p}G-IA=TZ*ccf)N@t+pKkiAN2_jO-BLg5`UQQjtbES;-5&a$C#BLLmZg56xWZbVox-8= zkdLC@2U&(Sz(}bqE5f~@8h@qlzk_^>DkRA!_Vk`twYxT z%Gds3vd3D;wb>kRIsb2hwxYbxvYuUQgjz8czXf~NmhueeP+7}}zo>i`$2+`#DbM@3 z_XvCXK8OGOtlo!wx4#PAKpQ30F*>S!)Gn>1Ln7KlbJEl%i)oWlv`Gr>k;=PvQilPo z^_DCW6AZxz`<*9{^^wE_)CRHxBWMHJf_EA3*bnd9ZEqWBPq{vY_vTcy%Hw7J8GIfE zpXYe$RGQem0mM8^_^dkU8{+xfe6O)LxpFV(`rZucupeGfSNonq=Xzbrp?qH4Z|R;- zbPxA`4BZLkzR(BGxpGamgm+pymFr2aEQW8-h-bWW3io-br@kBZk?zKp+=WfK6Wh`k z8*>Nts*i=uv3;b*M&rs9{Ez|AcZp*Rz}A#d=UC?CzsWV}uB<70`WRKkeX}`?_p|#N zRqaMC;69VGXY-q`=jr$Nn{ZFNQASpFrm-YDQ}=j?b;QP)|9Y49$R4*RnR`>4WO>?V z=QJVCcm6(&T__K57WeS2R8N-C*2v*^-l5#adH7&i<2HQbH`=wjtOb-cn|o^d#+ACR zI_*QIR41(ktnIzJ&#lv5U-L}WP2*vTgDpnh#OD#vtQfP6QI?gN&7t}#=LGt>-|f`5 zrNeqA9LaV-FZIp{_auFcaM6$d(`vPd1q__eU&(q+EqNLsV}LX zc0Ztgw;*em;_V&i@mNQmv<~G`@BVlHZkHXuM}4@!>BE;(*S+XS^|QUc#x_l$FEyqg z_eSp2pTUCt)PF_wTXf%!?9;7?-_+Kxhx(@!5nm-=ykdh%c5G01buZXf-_PM$_gu2& zF|#u5z7KB)sQ)_UDTlg!Ity9L&N2IBgA-kS<@RaySLv=n?>7tUxA&X&6^}F)>@kf| z=)6?pqjpcKZ=~4#iqd1qt?u_ba;W~`w)ateH(Dog|M$73Z{=fe)PB>@MK!%EvOMCA zbo@c))F+@j^05~=P{pl{nB-A;jhc+g>ztkG=R6zTJV)<`mwTYuP!D}Kmv@-dB^{Yb zk4xsr7}XwmYR`Fl&L6@K9N_4_tzF;2+g9|wbzA$sKj(b@lsSC{8WTgs9K7p<;=CS} z8<}_rT|aM}ncO|g+t#wH@d9i^-=g5&1;iuVLoSNn&LCZW%C)H*Uabk4xg9*Kgs7HeMovo zV;AD3AL`28Uix?zzV^VFiCwdy`A}PK=vOu#p5~Q&rq3HN@vcb&{7{wC#T(IllHP5Ct{a4GN~WWq{dfFk^Uc9ycOa)1 z`$lBhuPlETz~5%yoxgX%U-p8KEg{SQC?r@t8W zU3nNJncRP_pAW37ye0qD@}ckNqtA03ziL0@(T%fwf2-+eW|)8aPP!t-{L^>Q6|(;- zpP%v>W!afo30byZFU(f}z801iu9IGy=-6o4>PQ=H#AI+!#;7KX%szuT;&wL~6~DXb zs2i~#&9T$zyPJ)wyZeSw?Y7PB+kV@;zBewK(`O8at%tGQve7BDOB3+JAdZ;bP1-iw z)-*MCchj~vY@3_f4E+3a4&5(X@8XGQTP;7d_IEtRJ0>UpyMMRK7~zLr>U+_3(%ihy 
zPIv#w+_WG3_{6f_xqmwT+@EKiJJ;xW4%VwSVn3ipA0_@Uv+Al@13reHPVA7`rPYrz zKf{kzn*~{`FMntv_C@}X$-LBuc&E;bS&yvt6wXpxx^4IkA1Df&ci-J^)Wf^mkDC1c zD}8T;KFt3M-oLT$Cfg4R$3^4=sU3r~<3qHeZ#wpC`rHA7I^U4ljPqE|C+FOdIf?HN za@tbQxNRAUO^;HxOTS|>e$l1AbLH-_`>NBf(`?`A(%;rI(RGaUOI7~7-PvvPz?Ba< z7+kSI`P`9wQJaPD&AQ;e{HXf|#u#mcec2IwYs~+P8S9YDR50iN|FU;3@KKd#{y#I5z)Ws4$Soiy z0lW}3R@ZuKwG082a+|cImD>GxQ0x{&u&w>KR)hee1rAsuy8pIu7i*zqqN#S(f6Fdx z(YCZUV$0In-T&^VT*X{KiJ%Y!^Z!2YoO5#K%uJY&1Oh$ze9q_OyywihJnwURpZEE_ zkH|?Qd%q`;GlBodK$}_Y$^ESS9oVyFjG?M{|C!9um~_5{Z=R5)eS8*}zvZ0Qs;kO8 z_;io>M-~l-|CjQ=#{b%=i;FyqN0)JL^;_W$ncFgdcR9bG#P7>jRh5;ahiCeS7nR>Q zvdH^N@usoFORYA1MXyKOEccQ8IeCvextFZ{tz~5u3}-86lXsQ))J*=zuAKZb+oS(e za@A7*(*;Z6yWjq4UBR#abaBCVF3y{`4xPK6_Vwu8NB^|GVCkPODR`9jeSf;N;BHMj zDc?Jsyf*dCjQKR;Rq?plSN>V}D<8g!vfl<@MR^~|wX*aJv>SdUG4(&)MLg=6?|D$}`OvG@b(#FHWEeV$ zo?68>*ncDE%;)U!gA z=Z!@tc)KW z;&1QF8_)J=N!C^4iC=yB^-BxZe|v47Tqk`ca!V|;WjHyJW8gWl!#(*O5*v%Z)2ohm zT9VgY?ec9B8%v}?^7EWD7z6I1;j%D%G6p^=hfl`BC*$Cg@$kv5@Ci8;Uzq@(d>TGc zd_~MR9s@jGK08RRvG7?Pd*L&oe>y%BUEj`U+29er^3P3dd)Q1qnSrcMf=7f`#=s-P z*=BDmo4S14Pp96q?W(CO_+QZn9=UEzU|#8;uPZ41ud5e+ZcNU+iTpp2|3_qJ%`4A_ z_rwPf9(rwO{w0fd=HCh*g^I487y85{McKqv*Oz2p_1ls@S1p03o_YPU0=e%Gx$iz~ ziST?DF;nsV_C2sf#iUF=n+Tstyfn^dslQ_`^JeYO-P+-E&(?K{j>t_iojQKsjOfHV zglC!;Mb`;0GT)z(&KurIofp@4BZ{s^-+c>xCpvE?IxepFMiebMgZgd^{T4nAebAH6QO06T(>$qxNSe>J9rd&*U|tpYCLQBzi58Ch6Z#ese0CymA(F+FV1Ym7PMTi4WJ#Z<+gW zqQ^`>?nd;Q-G@7q{@dy5Gx0++{RUT`C5{pD4SlxFq0i#ugZ6n;@!Q&Y%hhGBE<3$3 z>0E=~#Ey0}CKX-w!!Gz{raqH#sj1T*W?XuT`lOdP1-Lyt1|wvjyRhPEZ29p&@u{%zlQoH$?W ze%5zwLkCLE$+64G!wwGC{U=9P_m}evwbe3L=U+_w56i)I4rYC)LA86pjPCUnDx9d` zIgfFV!`w^O8U5gPPiz-sgWcdbHxzVyuRXqD+-IP5pZ6nKFyJ602?Wfv4cR zq|Vqb^6PiA3D#Bcsc~HDkXp&Rzm#j`TaJtzAM=wpeiY0>v1f2|(*W|Z%UK`i3avHZ zUG&)>{LAtm9P|3?i^pm$172MIgA2iM6ywhyC7YC8 zX{;M~;=jknzT=C(+lTUA_i3>>U7hdMpZ6NbdaZ-3cl#YUTdP0hdp{WlixG%_C-?c_ z-CH}nmwe;f&|JRr=*P%qr@xw6M_lqdWgV^@w&`GpCcpd8j%H|o@MFwfT$fvOa4_qE zvkvKF;FA6i7^G(UUe?Fk#r6)^p8$RFLY7uKQR;F*JITA3^*0KAvM%C)=HF8fWn@l) zzSx^nKU#94q0#dXf8p4=Kv4h5lyGTnw)WCH@RpC~f5g4tf|mQ~>qFq~_kgFpWBQoV zn!gX1K2G3ak_`gX1@PBEgCq5_tgL9|-!~Z4!hxFrL&`OtrFPJH7 z3xaFk%6c?%-EDbeezBhRzmC@h-BK4Aq)Y7>%|BQlEj}%Fs3I#|`uh*s zUdrM-1y@~Op;bp-Usx)%dw#N~^v}eXU+ANiewBT2s#YrR`8Mxgv9zjmDeqs-aU1VD zlkv3FE!YP}VlDTU-2DXJFl7}v5d8ICr8!fSo96Du_xHBZ+4-7Vur4X`Gy=D z#rh&j-@tSG(G#L?cB3<7{&g2JdlWuv0Ba_CX8?M~`7P}_XRxI!cJpmVxz`cyS%uCJ zy`vfT+kN|ZbWi&G?E&L%*E@2r_orpYq@SA6J%i|*rpwA>H=}z-?f&)-KRC|@*1sM_ zU9$_}?FRJer;+8xXZytR;Egxw=Z3yxVeG)w zmx#=uOF}3^}_#HVJKmww_aV@u1V{d3;ZpAYnv=7Zz@_dC6% z|N3QL=|R>@dhv5w>A&CU;s1dC^Cv%5Dp;U@eQR_n_hfCQ99{TY*4~`SJ?`e*Z@BMa ze%F`#ywCB^*@koOQua@Hv>OKU?faNh+E3pI#!#-2b8?;h{@`dZb^d()8(?i#Y2l4C z2ZT2s^@caDXV2W*#*qWGjX(El8{c9-w?HfX)*w%5ly_K+e&qUAu4xraT>@{SgI@zr z)^r8+Qkj!%LZ9Ex-m*LJbvnEABj_e}=Ph_Ku9pWZz1)C2_&tN_2hvwl4z^V`&nb`n zZsE|H*9_fXiY+e1rmSFn#R>4*%zVGT7k-$I9clo-n(ZmnC8xgOD`Hm$CfU1vVARa# zm7o)x-y^p2GVYE5Y~qi|C(@@0H~H{2^QUKDifZmDl?Q)Hh*&C!l{e;QLJY@}L^OXF$Ew z<-Lyno?z;vw|%kc=%i5}R)GN?P(Lc**GsYArTGKuONX;&_3%KA_(~Tc4+-BRz5~2@ ziKhr%CH-C0&a==V9nbE?M%#TQrT5+*8+#d=-hodkn9rSHW1CnLV;MeE1G>+Tp40zj z*$-~=4XPiF&-C#Zm;KF0VS!dEIy%1oPoXY&WLcMA zbaNAaQb;S*ThQIV3gjvM95wWF7CLJ}AMK@Vbg%eHyYWr>XxaJ}^51^z4{v-WFTD9h z*6sT?I_<7L;mz}N&~XLf&4+2b8@)E8kGA7?%I@q&=X2hjS$yFXCJc=VtJ~_*IKIKO8$Ee#);y;!F9PXA%n^$!~7u z8Sk?FgiYf;`@)0r&O!G3`8N4~Jw9Mxf42S>-?oeIc$@Fo2YucOXd68|V|Tu``8Pa! 
z3(x)SL!Qz%uz{kJJ>a0d{AR=ZabHSkA~vZnev{niF!$LFJz70k`UoHI-dEdvZLYR) zc#v=7U8GHYEA4l2Jk9vNFwfb_ckSU@*7IHBKknq4#$t!wQ;X8K)?7visAABNyh{l!@K=OeJUqVKk#pLfB>qrfVgK39V`KE`YJ zwT4)})<^jHt=KBl&l1d0E54Utf79LvS&jQy@%tP$ z^xiP~Q2egz?S2=2qWE1y>HC|+uJM~%d`jhiVfQ}3*Zc^bFL>am@aY@y})N*p7O`gQ{s;W@xkQ&e$TK3pX^y;*5Z>j@*bD- z9?jW7eGam2`eX0#jNRyxXL#;?)b8-}E+XS^c!nYaIq-Tv#xZdjpNM}~p$*lq!N;L) zRJ{-RzJffg@%};jt?=}1@NQUSnf=A$gFU2`UWUwxEQ%Zo--;ZrK^|n?h-blg$arEV z{uueJ)iQ2+i*eI!$iiXxc-MgN=JyMchhgXmc=#x7duVH=Z7*$?sp~YZ)3{FKI&JgY z>N<_-?oSS583}k-idE`#;a{y%>G;Kg?^$7@5+hmv>E8WV%}*nx_1V@+XB5s z&c$B%S&zy`E_QS4EwxWa|XR+>K2+vtC>8x6>v z@E87F{V%bBbDHq+?kkTitGciz;DcAOfh)YjlyCQKY`ya9c&6posplp8b$gM~eej^e zxBFv4ya2wpe7g?rBXpO0iCmiRFYi7oS^u@)|A)kf__p21d9wcN)qY;$H$caB9cYVj zW#;E4-Rms)dF}52&-Nleul>CwCf0L)Ui))i``h#Lve0jLqu<2WyIB55znT8tIOCl7 ze0Lk?#P2IXUyIK-0{v|Idy8okUr+qJhiM<)Lw?>a!^ac7yoG0W#n1C{j}eBiH$wim z`Fr96n!er`p3_M`ubrnv&xww}Pe|5tXWh^Hqpa`f`g!NBpC{v<;mDi#dW+!G?)LLU zKkPGnJ<$*QZ2q3;2RSD`pXdiUXZn5O(}~Zwhj%mmz1_5>^7Hm`&6)P|l$8R{T5_!|MS*51S`8zxVoi9j|G7SgoPjhxy9vT3NHroWneZ?=S0U z|G$OWAMYSGxE7y%2IJU`LE>ci&GGq3@xuo)*2*H@-9CPOpZ{g9tDb!N4Y}dg!oJM! zU7%IJNgQej;+oepcRE;x8>4>mOC5k;75W|HJuT*6E3_l{=u;!&uA9X3ig& z{;|1EF8AJkl-TV3-XXd?^Gad|8vgXD*0(*?TED%9E?{tkCWYA^cHyswWTM)8FF3OziO58+X>(fi&NWay~BiDqMMeZ@K`z-BWr+pgj_tSo_(LP}HhRDY&Vv&KXW0C7Z8zS?J z_RrJ)P1sjy-~6us;Q#k%-$DCf z{x`q#iO_SAmn!NaPloCuFY~|o-EZ>$2eki%_M`l7e)so6k41h_@!iPxL*I>T|F3KxalDh`HvSJmFGbT(oTeu2K1;iz zDeXd2lSTuem!fGXPE(UcpQl~Xly;%1N$W!BrDz(8(^P182W^U;v?+Q*yJ65v(K8gM z=a)EEwB*>OWn3OyT58oUUv^%`iMO{Lyc|qcn)M}I9T1l{d+k0&N%S(!muph|&dbZoy zj&VGM{U_MI$aaF`i`ajPEy^~5wb|;(B~H-CjyQnnyO?oEr$1}PA)PkOj6*tOG7{r+ z#seij>Wp8B?Q!}Gr-}oL@4c8cF*NU@R*l+45{tgvds)#Wwk2aYULKyTU9cij@>xCN zxj>H$=l?~^waF_NYt>6QSI+TWoEyuznf$-RIQI;{UB~&Qw5{X(Bb;9^|8u^M-~O0$ zTRGm(x!-c`ZT|nUaqd-qyG5ScpXW~Fx&3)A|8L>^8~pZFc`nCyac(TC{{F zEPQVp`J(Zd`WO{cp9I#HKAt>>`&oUwIx&{+jFlIDZ&~EV>B}QSzPCJbQ{l46un$LFfqxmj{ysvuq!uY#u-o>rMcxUzgh4J_Ak@uxP zo`W~f#QQqw>ZGTWmLEr#6yt|Yfu3iY7drc{f&~!0*AM;maW>I+H?oPIyO~Y&+XM~0 z2Bt0PTO9KlP9AachLbN$U2@QuSNk+P?$jkuBYSnsW2MyV%48Mqd zKG57&DSpSnMEUF%epInH@M zIpj1Cn;k?>gV^jKHaqcm93!VeY;q8rtbVt0j8;8b#BS}aN;Wx%Y(|mID6$zvHlyo|b5G0f`t!SK{H{O0cGY{9 z^tJ7>UIsQXT@1xZFX#BrDd+C>i8*IorH|s@!-mgE-g9T^tLzr*#$>{m9r2!h3^#XV zdFGW6Xnnz+}e94mB|K52O-L;9xVnbYu<#phib^6JXZWn7y7NyEPdGw#f_t|LBV z>LSy>wewZJta0p^8&Li&wI}BA95Z%n&IiQp+y>@yHmH2;xPQALGL2)&8J^|-u2;}S z@oJ0V-$_j7H2X#Fb7iJ>5^b5@=G?`UOX7Z*)T$C5mDsPHH_KQD zA{B2c{zPu<{OR<+W}WBfRsXVvp7`%`#s4|Q_p0kN>_@v#Q&t%FX%12Y&eS9JKAKBC zDciGC+RpR+drJ<)u=LCEf?Iw*A@LAB& zq_2g~qRpbKiO+(5CQVfy7soCw9auM)mXWW!d>J|32^r4_pXKU+YL%A^U0r?Z+9uaF zfx~v~d`h25eEKAOma9Kq{W-?Kx4OQ)>)R8f?hX1=;RV&^JClx-6^#QCokW}lgpg-MlzdP=C$Ng>$?YG z{s=7B_rY`};JLtZ{S%lj6VFwa0*k2TYi<0RfbX)6am^UF!tn4-=J#0mF23m^<9igo ztGnMH-nH;uJ^a2-KhQ~Eh3~S__C^EWwJjaK>kRTrXWzZccrRODICY$pR}|jM#wT6P z`ICIWUWfMzf%mGg;k`oOy((;YFR)(7PCMQUtQT^V9`EJe&!t})eo^!1r^0)6fqzsi zFF9vCWqyQ_o7=nbUSPeD&GsBbuwKY!JKhVd7qZ!o_X6vMY^KM1x%YGFmxf=`%BDRR zrVIR&Qa01%&M4XBTu8mLGmu<}D-Zul>lS^O0_;-oVl%B{G|J4P0%X4}I{_EZ|f&a46$tmYfxpwxS zdwn9F_Ehvy{Cg_=R|ox}>{y2R&}R())q&4! 
zf0r6h*8_A>yqYQgtAppuea;#Dm(AW=m>08-jKg0(*Uzi|Jr(}TMh|s;hW<{s;J<9X ziH+7NZR@#yzJtC<`MqiSBBk$O`UjoHf7$GqMc*`b>Rdmcmi}qJHzobk*x_>t|J7sl zw?vPuCKs&2QGbheRey_i6Qg9#N2~f<_yi{ANY&qxIM-_8JG5K#x~r)b6*B5?RXFNz z(XQ%m(JuK3s{WSb8LS55Rbi{YMVqR>MVmDT%slYVDjfBjQ9pU~8# zofRvj{eId_8dw7xWW)T6ta^ zSGlxQ>wCGhH0p1Oth;=fv5aSo?{alO<>sZBvA`U|l;eHA`(illbf@ z@m;R|boHmy)^&Y**S8ND_^z|8KP}vWG1hdBF%$4z#`w}XuC(x78ICDWtNs?c(;e5_ z>KM9ysH;ca7)iViEIRaT`=Mt;kDhh>mf_DP;kOK%X2v0%vBCuW7S~EVFaf`1!~zrW zTShF7z>7M@7zZ%wz=-x5p_+ve$x`djj@20td@vkZPTW7HPrMsR1mdjdX-Z;Ez$v-Og zcD~`U$d5oa4x2C75za_aA?)_Z)rQnyevgyoC zIZggaDVu3>S_Zu4=H5eZQT3DaYrs4xWtLif<~s?JLTLd*UlU)_xd#WD}(Qx^ilkq3I3{s-`|^IKJppG zU#0PpZTyp%)9YRz#BY|G(Pll&jOfz=|78686#SDihuYwulxswo`PE&*U#0QgEShBK z>$%q(_^UMZOv^JF(pP?;#51SiEB6Zg6?5H^I~@0Kjrv<0cUb3%->AA>?(eG2`IWdI z(EW5#yxJN3Rhs^k`O48G`Zp8&mGQmm`V9S@Zo^+0zKN|bQrgyY z{d@;~k@9=f^hHYF!SoMK3x8$UF^j%w?9{n_J}v#zd~Ztnr?JE568@^k>Tgxp*5Sr> zTR8x$mqotLIjjB_=S(b-np>{0tqV#%fQb)MSS-$2^|v@@&Eu{Hk5vI~$gICbKDLQB zx`T5Ma?Yy1#W@R?Rbi{Y1$Ibs55BazHnNa*tNs@4Y7V%_QGd(CXK}3RZ<+Wk=&I^(*=VYKE!w9E&1pAj zrpCt5Q`O&smL|PaY>{?Vf6GRHg(-rLs{WQiLxsNexpGMwuZIf%8Lf{KrJKv6JxA{!s(a_l zPqY4(t3O@+*-P}N!WG!~QRT|o#+Vj9%Qn7L_$-?rr0`jt7*n2B{VnvTRey`|raSJ} zs^1Jf7x}=}FD6Z#{Ud$Y**;Qp0j|w*ZI)}ZTDv12bvE?uS;cSJbcZ>==!^}T`pOwg zwD4QDc%g;gvc`{8m@$Z#isosyMsCaY@}Rv;LNe<(h7* zzm*QpgqkyGy?~ z-pj@>YX1CGc&{$-kBa4;Pk65iV@-Dx??vq`Re#IGdr?bC)!#DlJJjA%^|#XDz1;h` z^ppBqDflI=Y}#{Sy1+jvWiw6g%$dM@^~(BNHs8cK&T{%H&KwOZ2Rjx1%e`;B?vzW1 zF4IBjiA?cdU7)u-r#Ilg&Pe?&8=ahT?v!h1tk=EX!tYEDEzy$r+eshAzo)`~bu>b{T@Ziz3Tc5{he;Xf7yH!TVJHKt>^mr4*DYH_onHKl)l4h zHWNUdgz{e zk6prc4gX)tb`Agkmh)RVzk_Wn|KCsBV>fHD)dM`S>S5m4qmzBH@)E6jsphG!%htC4 zl4ngOuD++OZE%CnTYIM#x}lMG=+C_Qs#kLFwcPu4^ve%5{|ybkK&^bs#DG@$HSR6{ zH|KNTV^upw_4DbM2l!U#@}oXlW$uxMJLG)cOKX2HCy-aK`*ZcVf$kpfY|5~k}`M_mshekd5&F`>(%u|rFR?BT}=Ks;O$+^dT z+70|BXA=8@Lkk|)Lk0TleA`4%o_-};zCZl2>sNKW6hnM|fa{02IlSeMr zs_$MFo_s5t*X!9H_G^=~a#e?K1bGd7^G=V}{6mdz;ai)1S+xha$3dS~ z8_U+JS3MFwHkEDeO^Y|s*V{)!r+ew+ee|(>_Z8V%r6*gvUeNe&E9J!9v@3Tn#8%)74S zdQDsO*&~rxHf-@l-xPk<;pb8CbKu07HGEUG@bw&MIEQEZxTnb1T)yQ1JXY-2T7;(# zLX$<=;kqUHW9lCB~ z*-bagZ~T0pDHD^SpWIK*bM9u3m%irx$j7YA;U2U2{X~9$1u}LeGNxY*pLqN_ZQG-~ zkNNvioL{DCtBSzthz#reebKD*dvwR*!Lgs z#U7oW75hO+cI-QU-zWAiyiqf`Z%q0()Cb<`6ORiXf>tfa;al8)7qkxYF5|TV{by`f zdkb`Vhp@+geZ!DfHvDArD;qxk`Byd!`qC>KmJQQlPfYiqu82FQ0_J1SpH38VnN>L z(D-odty{*#^sp9NInEPXHNYDa`FiZ;`134zhGzo#KD|8TQ{NzZprLQL_0xX8F87r7 zEt>!OoU&IoTzTCq8&+Jc#q^KI-)9lL;pcrzrGGD8yg~7Y^a1=K?}jYFx34^>_{t`e zGq@k^!f!VVe@)f&X|1#cN@d3P(%R7j@DuZ`~Umkvm;*s%}r- z=(>Faw7UJnJat>?tNY=Tx3!>NCAsQ^UBQ+vz26PbtCdUb)vV+{X7>Q{NeGw*KdVpZ?oMGk37NF=OQiE|1;P% z`gj|BQUagIcg%rLE{6`2`2Ll=S3ciA51KUbyf8W~;wjMgh)zMzF3H#GCh~jHCo4Vq zx~Wqp!5<=Lli*SLo$!S4>|*K1>{o8SldU9s@pFsm%Zc#99JZXY#T#azFVs804_ERI zE5g`Do-aB__#*ts=(>qEeIvF7ouzoA#Ixv@JGtMd`Nn`Jt~*RU-u5f+_J+%}xlJGQ z%sp^}ckVl*eRB`pmNoZ1bjXq0`@}wo_Kh9;ia&PzzCi56G_7@!wy1RmTR-j6qQ7I? z%J++{Yw~$&C!Q={6ZN3;2YAr=*|n=Yee|EQ{ekT*wj;j2wTFDZ+D2?l>B+HcMssc1 zMy*nQzh?h~J3`+6`kJQsJLY)$>z~sG)NlA`?3&^L^0`2(v|>`u`LNZjhbZHl)#Rv$ zl5^8lT~OMccyv`4G?a`*S9L*YxAM(a$8qRGR)a+kC1cQ4T~ONX_;XbkG?a)vS9L*Y zvvB7j)=H;dkojF!jwVk%BbwLFq zK--73sk)$oI}cG8l=@bGl^R!qq2joW|E)QRP@JYF?W{RW+V6*^CXK9^e298s&@Ksg zPW!iPw6<{Pp*T&2c2=G*ZS#emcHDW0x}ea`!ku$m<F2&j0c$UMrVxA)K|`!U;=*Ji2Ehr$Bj5u0)E_x`C0h!H1R(1 zy#+sR;x?CzS=6dMz9LfcSv}%;T#pRDU`1rna&5BYt(4P#7wuzdpGo@?qx~6vvy^k| zIQIzW)^lzh=j!;)R@(Q|{#)ALrv1l8`>XPtH9Th;&soEB`mcy=;oKYY9NO=qeJt%W zX@Avd-y_fA+&a!Z!nyUF+s8SX|Jq9Xe%gOa``fhdH`<>V!Th0@XN_2-;_NaPH~hA? 
zN{N?`4usz;`vUVRo`vsCDKcW_Myy=o=J9y>7!@ynHl*U^3bSqX>*~ZfxrrAyx=)GZt-Oex?2=-(l7Nly51Fe@luOeAnNdzDmG{w~q(ay`A#YmG^b}aZcJ= z_;7=+r>y^J^S`>AFFO0Knc%@Uc+qRj#i6?rd7Rj$@1(htH!M83!55}pIVd?3 z=mNW5c^Vm&{Oy#wgLbEWu&#IL0MRv~pPsltt5$NI9e!`_Cq%BR-pjv8=G!kTYGE5t zUQ%=&TOr#`Y{S@YVN-Ku?>v-xu1w9nCD0`??<_>!&X5fk9*PB-uL(kz;EjR{mpY*Q z4_$&c3(i;SfbzeM&hGtO`kj(r#7-Xo7cTzC!9+Rj7XDE&I0r5qc?}}3LHvLqen1fU z%QNIPh^-G|>x0<(Ao8c$k=G#hJcvCHV$a*JryZLe#AXMv+3I@9y?i1^s~+tMXD*|L zZC*oOM7w=vKHDjy&fLTh^*}>LJy5}iOFht#Q4dt`;ZhH@(5MG0_;9HQT9~FDsN{aS z_jBo&f?v|gXgBd=8X1iuqfuluii}2|UR;uI$Y>N9jUuB_WHh?YXh%k)$Y>N9jUuD% z*VB%SMv>7dGODhZ{M;w{Xw}omiCL=km%KT2&_|V$mlDOsPI*$RjACO~O_zMc?ArBN zsq;~`6}Q{q_prnLvAIXs4`hEm+wGT;$ameK_A!@pd}WTOoPMoy9Od*~ zEPS{zrgHBa&na=~(6e+3nm!-Z`${ zDn2+lfHK~eG4{$`?PKh@H^j%-t1f8Q1Ew!-`rab%lBedh>j(4N<=%|9IO*k-d#9Yc z*LOUI>nwc~|AG%!z9_kqr}KNsJwM&CVbWOdVq>g#D*iIYcvc>y_>J(F?fo+sFZIB=r>Yt*|WZfD`g$v2YRV)u8{RqX|IQM?L%T={fzqSp%Cb83g=J?Fu$5eh=9e5VYKLhV+=TE19W?^ET_;Dk~ ze6F8Y{R@6v`ggAQN2j2Nx;}$`boxD0hQ$4zRYpHaj9cjsv!7D=JM@$7eNx)j^Zk4W z{gaX&Y5FJ0zp(oe;#ZpU4hemd%73u;Ng8`)^~rg9K5d_*`Q9{rl4P&Df-%?9{PhIB zYt_Ka@m&WEeAknVwM{(NoQ25s^2Bw%EWcoN;yksLXF$8u7O*7zt-HlvhU)%Ok3;BV>HL)&Kw=n7P}Z@7q2~Mj8^@(gx3~d z-NkoN&)!{Ez@FQxu=cM0ToHM&LXUhgMy(au0Izucd)J09nU{$rTk@UG)v>12t~iAl2F23usqBDW- zQnG#lvTo#wN^XoD^L5a`d`-y^^YtY76E%P4#E+~9rlDP=WHegLu>MOIy`my2!d5iDO?S?wl%R59~p zJZ4gUOLtU?HI7Se~e?m>hPyK!hp>!Y0rl;b6rzmz^t4W zY;ykK4Y?vN9?l58e z2Y=c4Dx>kAd;R%`14|mOn0#f9S8Tjw$A>*>AD`HHDMNqI;wAU`^9B!=^lpl!0s#8feW+j<7wiFP8Wyd{;qp*Wy-$x5`0*aKWAWzOn+{&4L3F= z4Q|Zt$IZ24UCvYdmxa|y^L?s+dk7zvyvSODRE*+eumjkhM#dTw_WEeU2pdAtHM$Li*|(7{V&?o+M{4^)Viq^jy11oSGXM7&3K{8 zMIpCL<(E}B>VMI$@^oo8adK+?c5q>;j!lKF{ugbk{ugbk{ui~rRQ<0Cqy87is{R+p zY78zkWxK{kI~AJ~nzG$byGbJz!w{NEJ|whDsQ)E2W&4(m)+#4TXv%hzK|59dOX$fq zU)s$2UqVl|TcDk)|0T3!t8!`CUjNIbW#sEFUq+61LdFy7e?dD})*p-f!quCR_gov~ z+Mv*e$j4oK9@(72Hxl1G2_NR_&uZ82)T&h;xvM{2{n<ip|V#i;TJ^s~f3A*?HO+DCXIHb{3+?Xs-B|CX6Mf*0-)(C?xIVq>(|0khbT;(pS;dFh z{8=+!(HY~j^mNG3)BHwaf>!-6Tinm8|7D9)S@pkceWU7sbrA0p-}`*Vho!=Sb!+`E z>DSe9JL`;ttNLHm%u@BgrW^IYsEefPe@#zQ|0^+$-Hx%c#jDLYwyIG@?XT}k{jce^ z`d`$jQuV*4cToS!#*Yd0zieYvGZw9CJjr)R{V%@5s{bY5B6XhlmJ~7guD?6HY1RMg z;rDg=aZcK*`d>D>p0fT|5AsE4-?hv2zihO3>MSR3sQOVHj7Q~%4oUr=G|T>8c9#o72p&0n7i8`cH>Q8Bn= zyh&2-!Z}y}i&|Z(URQ-t|BKpRs{U7nQU42kl&b$V-KhUX?JrgTYkHdcU+(=}`U$2k z1;3<~QF~5G7x*WQj3(u3&JY;0b86v?zU zw6hMz+@7fa)qyW< z!+Uk$>sd6)&^L6iUlzT(1A3pJ+&U7i{jNx@m(D}U+#0x)c>;i0~R*Lj*mKz&a3`C6~4>%z3Td8+)$D~jQf3fvXO8a}RpYNc5Qhslm{z>vL?0!VI)c>;CJB#LN?Af_~K5d_*`Q9{rl4P&D z1JC6w1J)Dxt{$)dCHiKyn-3wnq&H&^zl{paFhBZpm#v5PTgKGDT%|1|QE4f`&>x{L2}*A;jnF}Jn7{+EmI za`9cnIeP;>_H5u1TzuCm*LQN)XNkCL-JCz`2AvH(>f*bS*K-oz`+UZCC9O}{ll8xB zG4$Sw^|I=JCB~=P>wmfUuE@)*?dIaU!jnTwv})%(hI5{xH{rXSYnvvm_udorzijj3 z&?d+j#maMoHo+TRjF*dPv)7C30>7v^?eh%dm9);SX`jvY^fRs1XJ7wzfU4D?Y?_uP zKLA;cBC9S|DGgSMd{UQww)~pT@=ID-?IwOyG4o_RW>S7jcVfMite)%jzifW5GZ(|@ z+qn3z_*|7shcinDr8~}3{8!R=+^l&rLaQz%7SUt%zie{vlyeI^ktW~Rz5d+8e>um3 zJy!p#1ApGeS1I#`;j4u4qI*5{!aAU*(u=8ag&EOT`Ao@q^htTz>DI82{OzRiimAWM z@k$zeSVr}~I`B6w-pJ5@bg%FIc(5dY+T^9PS^uj8JdukJ>m9f`r3ZTnJ}k+fGwT!? 
z{#>`!|FZdS7M3NAzjm&lSN+>V_^>3u$n3A~s{duP-?n~8X`j#a^Bwe4%I{UzXW)ON z^fSzU>Pr1Dn}1=^K8-(duAfiaH)*~%P2VKN)4BsM)*bk;Bfh@%QunRN=c%1|vV2X{ zqqUR`^i&@3W!J7^&BULw{ekT*wnIK&ZKF@CEj>AQ&1kOMwo$88HRF)^4IfcIJD|19 zS(s4YSk3)e>l>|J7P-2@z=wq#^}c9V>xI*9;vCf6cZE^!3x8tX^7#1i^Q$*RzDc{P z_eHyjeJWf{y{`(R-dD&`?~8U-?~8V;-dBZD?<-`h_eGnk_eGmpYqY|^j)WZbzGzqV zzGyesNmc7*Rv7iZLXLW0v@0AA?dJNs3LjQs)cXqA>V46s>V46s>U~uh^}a$z-VVpA z-WSJeUR7v{UAx9cI~9`@n$mth?Iw*>3_@ti{QPy$E&(4VG^PDpHd?ECUqaIm^}Y<+ zsd`^RPuk{7n_2Hm=ozBk7qnCLzJ!(>SGlxoulMEBGV*noFC)i0A>$q5!=R-r`_%t( z^=RZh*Cx3(3ETZ~*S<$Kr|^};H&4Qcx%xA{PJydGUH$3$^wqs(e57yy}v-0{4v{+DZ$T$|KIn{+nx z=vl>w+5A~E?&`E@mYyaqXphBM^}lR!K&$?jEpBDi|FZRss{hqN{7-!E^Bx~|fwuj{ z{SWR4dHd@x8u{m%cTuZ0Vo~cNZBeV_r7ga-tx{s>qXXghrhGxG9PL^7URjYnjy@(H zM-SO@&&~PC)rs+QXM9}M0h?~r0V_1>fK4~*fEA{x1C|)yZm*?fi({MdZG~l;ZqxxQ zwABHdZqxxQjMo9%maSD5m-zmi!j`q;!)#+zGd8U-K=S@l|BLsx>VL^MNc}IqAw^8S z>+ejzSoObb@{>@ntef7~>Bl)~s_K8)=y}TeU-G`ID_IFt3ioO9oT4^uLo9e%IQz=oA&iZ?Om&X`Y|YsZ_|u|5?>{jZQw|Et2lhJ}p! zU(=2HU*N=`OYr9DM*T1TCx0uBzi{stWPU6NUj>Pe#j$lR{gU`a&0n7i8`cGWR57?@ zyh+Bnlaf=ZtyvK)nWhIlYxLmo{wsn{FW2%^t|jg3Xn%zE^|Y^}-O7Ow+e}`Z-8PT* zgx~A3oZ9mMDvTOnA)^LZg@FqLBZmHp-Zal_1B-kR!+oMyR!MCO37D=ioJbOtBiW8 z-m9CgshpMdS;=`Sb;VlcA>U<1Eo|?x^)D|eI>LS+`|H_mXH$7B=*T z+@$hYLUF&#nQP(nT@>!gHm0(2stv#19H%+`W%s@=eY?VEN=Iah|FZeso#i`u&Y764a^GNf-BKFZj?w)|z`Bhz0t_~;4z*rg$_u6$(1 zxT|h9d}A%yH=Z&u**8}FW?Kg(KbUA2E&KRw=IZd<&G>MlA8XVH!&i>`6GnY7jyuGE zRk*)9Q}k3koGJdRgMN|wbhSR1va_-#h&g{^;8et)%Of6T&Y6^58u#bKSCcVaKhux1 z@#`vooL?>Mi8+7b7`L78=T-ln3jbyEpVajk_)C^wB>iRjMMi&_e$l}~=_khJMnB2e z+o@OP(oeQNNNIo1_wya}QA&EG>7yk7!}2qvk4!(q=p*qn4(7J&{SNyjjXksaBdvgh4_`!ef<^%VXqe?VsVuY3dlwZ_odYm7N>(d8x{ta(Agn8(F~u@>f= zV8A}G*x($I9cFuljQs^HtU-IY%Hxe9Fat5&M71hOKk)U$**S zwz+6$tg<)Zzsx!AG6U;%a9I0Xs+n6_NM6ECY{S@YF>(~v81t~E{9E&6F7_+sTsJ)6 zVztg7wolDrpJ&)FmFtkHd>ZrDYm9ulwDOrI2LOI`F-tDSt;aBZY2~w<_)*2jld+je zIV|0Y`Ep{rCLd_Wc1~Ykt1uD?fk*XzN_T8S~%3d=TNeLh=sTz7pHBQf1^;C1TT9-!6W2AR16%k3BF7GwUss;*W5HXE{VOQ^V7B&xGoESxOhgs z9e?Qbw=9gxd5Zs1{o6zMF3ay36320g4`b>RrAN$uO6BjkxUOIaeUsAPP}gVRUkI*C z{D`#uk;;Ft_eUCgWy#8UdOmG`r1{=7{gGs^yMpKHDg0JlVP^QPZU1IpchsogRdouy zR){ruimsX0kGVRrjV5+Ut@+Kl`-`rfH;r>rbIQacsC6mHsd>5Rx_JXx2TAHqnfY_7 zmKW!a7TqxKcFsxdDHE@t>Ul|Ca`8>`hE&A!lTFOR9jjMF9^~Bo;!n=|BIi^cD(@q$ z!&cWuZl>nc=HgrCUBr5{Qj1E~u>BJ43u$jF9z8EgyH%4aME*B?mo$2&Q0GwXXn zSCif*O;xOoc9Yg7%~VVRdYW`L>7`96t^p`%G(Sp!(r_oB_Bt6AR*`k6FU z^}RTDY1z)pE-kfcmoJSq|2rY$9oF}9Wj|h@270=B)U`>jO(Lf5+V_;clKAXN_%2s} zy84qbv&sW^eR|iYXMEWk^rw>_70%E$wzTSd*~XP>eGr=;r0RQh!q1%J(FT~YQ?)0K zw2gHHtho?pp37?1^F)8T<9&C$?~eD~*oo+Zb1e>aHuUUS#edoQ$jsAo#ssaHEiu}! zbG;cuwCaD^;)GWHFI$Yzs{dt+5vuxM9mEI4H$R{8UowyWawhe^#<#jn(wDDKzx_^O;&v{zUw06n|kCM;9aXeSP#Fi(+_mgSk(u!(f39TpHBQZ zYObAae(CJHce!4etuLHBA{J!cEeKtLHx^Pa z3|tXBPlePAqh=U#nz~+?dq0r`}5IwMp3SQq$N zp3|G^hh1sIm34dlFdLnm^mod&bA0SxPrVuD^33?dspzBl_f+_?4*EmcwB(%JNC|bq zJk$vrPMt7n&rUXM+S#ig)`5R*U#7<{jd)F zKZ{No`it)M;MF>yXIehWkiKd7C}V%S*VhkYp1xm&4S&V)G>#>&yhB`?`#Z}%n)5$# zJK6p8R6Lw1zN~|Ok^7u8^}}rbf`yr}%(mGrF^Wn9UwqbWdaN&h_(Y`z6iyrs)mNDOLTjg>HVDGl#_-|2pSeO?$6uV%hQr&3q4M{-9M~tP?qe&M~)( zM{}`#E>6cOIfd@`{){-Ni$C-I*~Oo&jF@v|?)W~wmXC|=?;X0q<;xY3?pg=QDcjDNT~FYAUHq9* zf6TT%mH6i8GyW{8{@D0-oY`D*U8Vlmw$rUYMhwV~0Y4WoXLaDAQjBfe>yO#iP;v2P z?HE26UuLUEW{Z6~b1!=nzRX&y^-R) zYEJw-!9H z)f;htNIS@2|nzM)gQC@XBLLV#fPa{L5d%eZCDbXUhrX--_xD-$87f5 z);B5b@40?n(K6;|~qFd{a+588K-f8@WbNzhU{z&t^Y5F56_SF?USewtg z{ltCV*zvFUV#lIcu@7#~jva~giJhF^H+J$@p6d5~eQOW__C_-m}s zhx_vAUF`)Mt&vVWKFIpp)#7ILlpe+m2Nb8_`VzMR^1 zIpN8x|7XO!pZ(9sdH=$G4f_Fa{%q?1e)I39{*mJrj^AVdYxaX^|382FyQwebteqT! 
z7Q@k#hkSXpqdhsz8gvQu(W+Z~!Pp_yU9Eke)LMV^aN)5%*J`bkj#cgWPqt+XwAFr3R(0US*d2|1J+WOE zX2o_KTCihup4QTQq1O6#U#)e@$F<7R6XiRaG)-^nueCP$a_gIlwbncj{1^IEQ9krt z=6Pge^M&Eo*$ezNO`i<6P7DO}CY~3l^4C0(6VMw6hg-$o?;06y&Hp>Ca>dD|8z;aI zevh_7>b*}GQvP7$V6F9@Lap*>|8Q$@iPkcQICFz9XOe$Gc#T}wbcNRHf93M}MxIm3 zb7~)+e(cjcC*~V+?5P63-Uxr@@h%O;;nt=bc((;PH8I~8j+F-d@oR!P^#^v|7Hj-i z{9WcA=l<7*TfLsa_0cS?rSW50>m2%ZSzv7I{-2DCmGIofd{^V~svVav&}yFLUNZwZ zdg%g>`o?iQ+p|x*PrGQt)VF-YYBkU3H-(;(N13-c+`8=KxQz|GbAbCacmwqZe$+SC zc!SnDitlbX-gZMCvfB7>x5dhMMhuy~d_h6Y?&oif-NQ4kpl{}J{4~!D`U`lbzh)}W z6Z*c({!ngyePF?i9sPth3#!&^@D(0wpkEqp(^?xuwufumy}YOI8_V@k^k<{C=<$m^ z{`$9k{`&pCezks)4NtcIMj)jBE=$|K5*gj8X)XJaoBR*jDu0=+eJvL`?aMp<`N+b@ zH~6L>8=RfT{e`Z1ims!e>kyAuU*^xNS*ZE-QQo}z*B&UIb~*3V_*$u6k{vGl$J`wK z_pG5Dcz@yJA3~#Sp8Ix|Rz2U>UvJa$n|FQKR@wYeVa*(5{a5$Q*)(qUoK0T;$f8U9 z7Z;V^cu7$f`;i>a9CdNgtNDwseD$6yHjSNq#ipI~S3UeujLhGK%n#)~i@A4hRo|K) zDSVprER_g-79r- zcWOhMU*Wrcj_ivZ+>HFon!HEhS&^LsqrbIFe_LhshgCb)`n0C7 z(!Im#2O%%N^A1z8aydK`Lsn{$lbhhJA0R7szSj+&f1mpazb~&ETr)C=%q}$eJU`Co zyM)ITuMe%=e_6Oy_`OBw$a~rO{vgj?)>qp;;eul~i`7a_)B! zS-Wy!eoa|!o}NehmHByk=|X=^9lZTcZf<=L9(oPAyV}#QUiiDzA&3dDJm>*QO7Hyg%kt;^^a;r)^D{yupBEp)K(ew`;vUl}OSH^BQ7 z;r(C2`^ZM^wj*toRR-_(;oW~?@qSMI24t}j{vS0Qy<$9Lh{vPP%F3x}97|vE?2oWl z8;}8!`3Mh?ay2iS<}Yeqqp=@; z>_-!Pcn|(a+-80T`&WS-j9?F^Vi%`iGbhIyM^2iHy;OGd!Udjb4frm8PmbPzom2V~ z`)1m-2JDP~!G$%A@T}O7M(mlt>O$<9UzcaiY8$hrs;}1aA$&9U1h$0tsq*=i3@$@H zmA~W3Y0?-ki7hA&XkWX6|G$Z>9_6_r+YPxF);Ay@ABde0Imf>+ZL<1aCFetHqdtFg z6LXM1P;#Erv1`z8JT`-OQm&Co||)&XRCQdM3}F22_5$nwNM zo<14-KZm`S{l3~= zDbW8pr%3;)R-iwYQ-IAZYL@e_E}EKP&))H6)!yeR()W`;VcN5ZuSkE7<8=1y5bNaq z6?^uEVb4DD4XJJO4r!i&Jime7ypLy#k7~DPlYIsHBSYf$Y^g`vEa#o}Y-zT(`LEbB z`jLJpvgG)K==hlMkjdZ2v17N1d?O3d;aZEnudP!2d-FJ#%bBQr{H*;9!rYPtfve zM&S>B`<}Ei z^o)|B%QuajefcKcW($4T!knt?noT)5dXtu;*Rn763~S!AWTLWz;**$m@Pw~V?H|$q zQj^(~n+ji!egVhn-Oa_!M?m{JFgxaM*ooii4U}F8>&?vo--zP0(m-$ofth*YqzxG;e?maw1Uzk&K2mbl|V2=J2^j2{P zXt|bWG-Jcn{TPR7$iIj8k#W`@!~fii54De2!&^LmKk`w6AG&aXc1XU@d=D9ejK+_v zho5rkk38=2nAji2F57bQ^*0zBPR%XQBaFK$u*27|_2s?hK=(HSLH!!W_951iu41hJ zG~>jz9_?#o_}2Iq|8Xb3c{M**4`+qT?#%V*BSQG@{7!V21}_(DdCj}{R^LMdYwkX> zaL345cW)ZUmTp`ld^}>-#hb>m$@7}{?LJ?K@9S5KtnJwPz=Fqr>*=HKbBrzi!PiIM z>i6ltG5o4u<5$TwPJa3|ewE1BQos0BIn6SzeiOaBd|`k3AfV632Y<*rOrK5srHc5( zVa7EF-^CC14r!XfH=BJDpg+vM*yZbAn{j_YvmEHZ68h^4`qxaN-&aEaZ8>@Rz38&3 z(Eeiv{l#{RzwoZ=2d#Rvr+@Pn=noGm`iHpJHPC+sdRK$~U(4;Mm!MDNcS3)O$rLjt z68d`<4y+j=^uOn>OX`objM(IxHDZ%IPw2mg7}H+#A%4S-rO^M&j2G59=zq5_ zOJC~u=#LomUkd%@8YlgiLVxiS%AtRi*59JP@TwnwR($8(B0q*-Tk7#xK5e@{`)TBS z0`h(va(^rGKOTQ}9RBQB{MmRMXehEJGMvNxmMt8L{QC6`?04T6j?E`NHvxY&ihLax zzld*)`aG(CM;GJ6;{Q&;SGt`(I*e?eK(>zEHzp=Bv0`DMWYmA?SFFu3LUBuW!_@bKehVX^x(RRKV+FXK;$&@ed61g<^LY~vk z7Xgbe9!bF$OFPFG+l4RMJig{T4Y^c&0bQgIKK#R&SUX<~vG^jFIlMypTl!0M_OgYA z^pRhGihk;kKkBEii|MCh^mQGwTvc1pzBG2^KE?*%&fBHU?alWZv{m^+f&zcP{ zG^u{d(Q|l*$;fXRoAgg9V*#)})hh$YqNlLAg#IbucR~8+YEMwt`h?5A>kI0K7!%`j zss4F`{Z3E3f3`0eSX1)B!X0~O&)Af8&y2KkEB&))cF88+JtdpuInqD77(eaid%VbP zIdVIa_|)AFxn1J(>gE2#@u*zmlv^2(4ktdKiQe>htp4drymc0K%8j==;;s3oA8!?U zy7AU-inlWFV8&Y?zH#IkjJIB+;;k=9td*GSqr_TIE8fccr;4{qtW{)N;<3FZ-U`om zCEjZ1dpF+Njqz583@pLM^nAQEJ^!Cpyp?ZA6>oLQfg5j)$6HOC+#~T;4|tNtrPK9^?m0=yfp&q>Hzv;fu2uZxwmYJl?AM?5xIHQ}j=#$Vhc+UJPXuh+tx)j8(hB(H8g z)5|i~DY*8XTrC zXHG@tLQgP1TE?78Bfnqk3#fcAvmX=ZNQW^GmSG>#cQRi+hrW|Juw3#PpJfg#uWDG$ zWaejO4s0HM7op#-W>a(3%!A2%=Np~@`n$}v$Q;!S`mGX3(x!CJ#UTo_xYi^YMp!PYiRCA+~&;=9G37R#>LxhYUCKp2G1|ggN1w(6IX1?@y%$LUH+pmAY zH$>mfv+eVytLPtd-qbGhb;$dKsv$K$CkJU1{L?`0{4|k&+dOKVpK_R|3ow_OW6h(Q zyrX#3%z3cvy2(E>k19GKVIK7b8DmHup8Qs9dk%V_`P#8D(KT~qo*mwq!gpWA_AYrD 
zBJ=31>URVEb(wQp3GdXwJENI(J(1$i2+5KA-Q`cadimq|an-RWL8o zXVTXp@@jW_1}nL*4i=#6gZjt|((CCF;*S+*WyMCHwU7fQ*q`0#{t*vWJ^oCk#q{j%EG@ zv6JNP?!|r|Cht=)P*-Su>qlYJ0}J}p{M;YV{R_M`EB)jFF3?_(*pGiP{f=(EoSeFe z*+&=yA;A8*(aN)5PBeBmRttEGaR#_(a z7hh3*6M1`0zJc|EZ87tuPM)ouZaK)#Bz_+tM@4cTL-f~IPqf|e9N)UqqtzEcL&>L_ zo13qTZ*hSCxAQ))F9_8yJ*s2ZV8?sf--hbhD2o0sbe7x&naR> zpg+E_V9c5rG<|@a<3{pOn8G`aL@8P59?1K4~d_ ziRhhHY;qlI^NscmP2m(x#yhP5x1=yk?4KnM{eUm8{$TEq`ai%!E&nzlcGV|BHHU(s z`WAx+BxidLzRWIU^bPW`+mO+Bk?n)W_!hoPctE3%c7MBg+L+gd>m~5OZgNES;5P~n zi0@i1_DCC|cwldVrq3iVYi0*LAol1kc%ThA6&~<$|9$Y?TiBy@4j!=kGV2{YVE1Lj z4z1@smcj#)&m%Y<;Q`(q4BuzbVV^;lRiM)%=(ef&C{yrJCeKCZDIcW~{n8kuztJzd zf1<@CH@7sue|;lkiW|9y=$iuJqfd^F2_GGOetb;)lY`hFyWAe&8$>4^!^Zwn@|OOq ztumkfDJI`b@Ij5(!qNqWHII=iESNd@U-)P?GA#UN>Y{gk?u*q0waN$4KeM^!HOX~` z2WNW<>-&T6sa+7L`MvO|{El4f7`c6tquhv&oDFXXUgC%^zkW7yFSs|+ZH-?nkCl=; zDLO82ZJ1eD7WGsQ5X;qwRh-`K0j0 zh*S98cad8!W2Kqo(wC!O#*&LPhkSbB*Jt^?=tS|IL=WEG0l$8^1AbMy6JC|^TbswL z_*L>kg+69q$h#|kd;VLo1L!8;S<4U;g9cVfL}gigjl|F!-aKRQtI;*Y=|(ht9@ z8VX+scF?VSF zT6CGn;}rNr_*Zn9_-WsTPo976W6D?iE_{;grwvtna!m$)n%D5t)*8A@@gcsN_-R=V z-mv>;*$&>Y`)7(@;SKTg*LwO{x@-=zFMf^aoxLBm4Q71R9kChZ`}XSCOh4z`i0R9x zjLoR;QR6vesaMBl95KFE;iXQ+X88Sj!$(?&kF*b;-yYXnZ}>>-&|zmiHbWn_&!?O+ zHnUpB)~XM&rK00|MQp~=UrXq(PQ_;Uz1^Q#=jboHKjX$`9PwFG2V@qTQT)>@V>1pO z5?^>kXJRw_e!L?#W0(2y=R#~obb*?S$tX6X`tq#CW*mJs!m#-n$7cAw-S$gt#@=Uk z+kXaPGir^DzuDM~p+`<5He=E4yot>?`1%?6`k72(GyLv1j@ZnX{g)JtV~$DYf_`(Z z#AXbig)%Lh#_D%Jd~;sJW^8;SJmNL{v{S}r_?_K9lh}-% zH|+jd#<3aJ3rM$CKoj#FvzWgbU81$@{f-uU_7=&38M`Bw_-8Zo<41^vPs+>DCo^xj zl655xFfTTRxyI+@f9A`k=4;ij%m2itBgE+5<^PI-TJ>j!5R1QvxkmD2Jj|_q<-kKb z`uoF=U&Zmg?>w~Q0P*c_vhGCnH?`FzKfd)O`xl1y*>Z~)U;Orm{53MKb%6NvR)3x@ zv2^C0TMpi+RSIr>qF<}5F38pokashZd8Px*r(7p{=EOcFd*X&Sv;QRXnxDuC={K=n z%8faLby-VpML;9ZB&YWNlWi}p)v}u>GPfsjvU|0D`YKO&<2Fs(xH3Du@%1ci<8xl4 z&2O~jX&YY$PpqyB8EpgDn{9%HEfwa$;{|ChXe^CA}jQ{^_`F~ja{|ESAXjRYpZ!N@?g+7jq%v^nT2b4O9l(<1bh?~(74@00J8@0IVC?-x4Anv8qE7T?P{uB>f&@;NU& z!*@K_hrQwx_I$(Zec3A>VK475$BIAL^X~V`vEmK(y!T`|R(!#pcfL}N6;H6|ee>iP zet0Fc=$6-W`M*Nj_FnkB@42@e&tNadcXMw!p2c2{@8#Zd{8jdH{0;6c z#}BZVmFJjyI~#mb#hR!OlRI!G_+`81t(ND7C7*Gj)+4;LUGr5} zu|C#FY=AstbWZp1(f0q(-MhzERb6}EYwsPBon+@G0RmD;2vtH7fl9e3$_{W8k|4CE z=ka*_9H8*DN`hjsJ+z_(<+Pfe0IAq&@N)n!=eR?nP@$3@c}+lR&4o*`YFpn1#EK;0 zCa9#v@_v7Fua#`J;bKeAd7k%={TXwvz1CcFjycAd<2T2gb6HWQ+}V7abV!`_SIN{~ zt5@aH0oIAhoUu3Y3aitUa|!ab*NU!O%DRtboOd&%%&Hy2Ik7#-+Dio{}P>s<#lXsbwJoY`}G0XaR$7Z{%RE?F>HvL;wJh8W$jb&9U5`BRd;_%sw<;9HC z%}O(tE8Rpoi?Lkii@rhJYB1|viiznZvff2y&R|W<9-h1Vd)D##=WhCJ1><%@ulgH~ zGBy-4Hf&<-+-q5Nn;1LyMp<>0%6p;8`>eWql~0B$pK8^OQa%l;{6LQY>s9mH7`wVXRr&aaps=TtH#x^=Ev zrOy2wtZ@ov;LuRBXwS4_ORoS)*>tY4|*%$5^*#IB#iC&@_mj)|1ha^y51c|_hMkB>?o zy{wOhcfK=u`=&GQ5?8G(K^9Aq#d2hE8M4?x8wMbY_fUu3)cGFju$wxMP@X!EP@X#9 zgY4|4&i5cYyQ%XC<*D-s<*D;Z)>s_l+_7TLD9PfdHk6|$texB2O5Iz~)3xwNXG$F- zu3uyKVdQ%nqDpVj|3~1j>bA~njnQ*)tX(LfPR-P@m^w94$87Qhw(GLUzd;>$`mM_G zv?YOe?MzJyh*u4tB#r$l*DBzH{vSnp0X$d-4gDOA4s%ygboK4`oGwkou2e9$TS2nuT zbQ^U?m)^KTeeSAMee+HvaW*7pX;H`7b{$PS%dM@ZowWNo{8iSDj~UFlZSBb2qxkj0 z9$EB%ATcgbgO98A7Lt*yO@XR-d|jvCeUxq51RqVCD6f#d%i{i>>7g@!IFD=ZQu<7M zoF%=ZuR2(-*R3(Au1y&sK8IRgxU2UCaKIH0D;G`GDBE@~*a|vAfB0-zDR$ z_{rzutV4e#&JwcUXSve5@*xeIF{&*mEP{P)IH!cq>d62|%>xXQsG|0m)qA6UrPn{)Vk zaa`qK#S7u8Z!(SwSCv@UB6PbX3W{#mxS`m$%Z0Igo{+6Gn99fpd1OPFt2N!~6wa!N zK)1zmU@@+l;00k{(Z_*{zItp9h9g1aj5dAC{Ov*Q0329b@i6FwnR89-Bnj2 zoHZ0YrhFJK^C=H5GdK)fmZLnlEXT%W^;QU%6=rk&PM#kDEwM2f7|mcZFq*+^U^Ii- zz-aZBueKhHX7JfY8=r0F|7&?3yp}T-3SO(X5^L+hYQknY*K_T32*X8Ux15Rmcg9sW z2)kbX*8i+5N)Fe8s}3gPmm!z`-7+a$r}w9`u3VU_S@QIEluu#2wP3tw@J$?yS1wt* 
[GIT binary patch data omitted — unrecoverable base85-encoded binary payload from the vendored coremltools wheel; see the file list in the patch header for the affected paths]
z!^ou0BLbdC4RINp@T1l9Tj)*rYEI%Kl`rFce09I%lFx1f*8^Pdm~?FW z>Dc(wvGu2;Gp3_6rlT{aqcf)C!$?PVrAIPB*O$=)el+Oj7$1=@BON{G!?{a#$S!qM z2>*IX2p=sG!bcYigU0$WAM9$Z;d<_2tlq+O-p>IuNH6S9dLiGeDe)N>B=5xJ`U9GiSGUxw~R}Nhi!Bop3zqgpUjzJYS>(SDT(_c(g@d*M!255v4tJoz_)sSk}`!Gc@Muoy4hGqg#to$`cl@*Y`nw8mIR zg(oRbSW9~0Y0?YNQJ&JDC%vAVKs{fG`8XC|!+p?WeTW_GXjhLlT+cl`R&QZC?~xxz zv8x>wVpn@fh+VBjh+S>5Fxz8&Q|X70UeCioJ?F&y9N6Bi53zsAo;JK2IFOa`U3gsc?YQHa^;K9qsBC2kGFdF;oY0$$vX2W_Oqix>}M|t zv7eO)v7ap#o+D4}Wk=7Gm!6wIJzwxxyApzBA7*l&;kAwmv5&nZ#6DIc#6GrInC-Q` znE_%S8?NVJpq{ah?Mk?&ta=9Nujd)N*h@m}VkJWCVvB_fNRJ)tD0Z>odR_$T8N1l7 zgqSbnr4Y~9ze7M>#w zwyvYtzJ}|$3Dh&TuU!c}%68!sSv;6${374Prgan>*Kj>!;~K7KY+Sn%dX??MFH*gL zZ?I*_wl(}#5Zl&pJ!9M2m5^Sx%khs4znyp3w1#8TI*Lu}C^oHK34O|T;s2P4zvB<# z^}}<>>w?c(i5?@F(S)v2u{-7~xe;H9Vj!F5&ff95G1lwC1Y_+G`i=FZFvVDlgh699 z<%3;~HB8SvjMYP!&igrF2I+KAuh1tfMs`NuhujgT)o^xV+eiibz+LvJa zNu)=po(<@N1AmI`!i#Ny`t21`zb!)Qw@OI;W(%p`NqDPm|I#z{OAu1OlO8a`#Oxl_ zZ;PI(-zp*Xn=Pb%qm`ce_181?>#t|(*Ozt2O7s|f64u_1yc+-%b8IiK{xd88LEpbkpE(6OT|0`%|FrD}C z?7i^#y+U~W79l)-l@K03TbS*!Ml1ag((8E`sOKEg;XCQuJLYpxd7FZ?0sg#q3g2`C zr&5-18fC%1_rmY@3gP!#gz)=SLiqh`;R4E1`h}#|^CD2sOH@`zyDW|I+pH-#&w(-a z6%PRIvi+uca4-B7PvYkgZp6<4Z$E-8I3h$A{F@M2P%K0i+%MdYp99{1554)(#`@utT*2cJKTTsSO5E^HSf7uE}r3k!tjJl167 zz~S>AD_hS^pq?*~4!?@>*L;q(D_GiO+vYo$z6W|bnzFN*_%)CluLzMFKNcc49up!r z?h_(6CLt$YL2lfvXXM7sdPZ)%f?uWY0Q_6nB`#$e>sSZQWxSqsJ~HG-LS)FJLS)Ey zgvgMI$c#hCkel?347o|q$dE($R+Lw)t}$PRd@NPewYoEQL-H;QVG~2XtP>(%<_nQ8 z6ObndkuQVvjC>iSXXML4RS_*wc6 zkM+-T{46s`HZv}ZY;8wmv9t}VqxxSXZxu$Uh>ty*_-0wrTYd!6=Ldd2{WRv_Y zkJ$A|E?o(}i=X9H{4DRV*82&5mRIq!JTb})z18-!^u*8dFHh-zb?h_B{>dXx`SFS9 zd?i2r4?pW2ox9}6&!YQ5y2sC=`+>U0POAF>y2sC=`x|wSpGEgs+`E335Pp_T8Q2x^ zv#h@m^RYl@#ag?bL;lI1Qscp=XwXZA3D|Cher&hG6l}Lb=6YDz6`QTF2R2+`I{w}c zV1|kLxcie{&sn4w4kW!WoAkmVq!$h&y)eh|O@~d)H$9s40p914UO1lg!pWo;P9eQ; zD(QvO9Dnl+6Z1FECOv#oK6l&SJdgBxUO;-`LedKtkzTlj^zgZgLKE{LuOhvki%2hA zOM2mY(hE0`Uic*Gg&Ro^?~<>*Lb!$W!mXqiZX>;LJL!clkY2ch^ult|!-py=Ow9MY zm-KqxPkP}2(hCohUicd6g|Cxd_$KM$GZi()st}$ey|9+_!qcP|o+G{RJn4l^_+0U$ zWa$0^|FiumACK=FOKhBCXH|kX@pZrBb;WG-^J>D zz84+L<9p!(z85a!d*LF!7cS8{${+nCT9B`$dOLYsWvpGFOY1SLzK|BJUV#3qNll};Qi|uFWL0hikZ)xpkImZL*1cH4z&yHITx()RnHuUBaIBq8nYB&6Lp3u*U# z(9$FNJ=J{H=37JGaFdlklVY0pB4tF>Q6zXvLRhx{sM9ly$Nt@{N} zXwp^cEI(N5dcxx`R#PwK{RZ@?{#lps+x;vlj-O@V@8D;-#qqNgKr^!Aj}y}0$wK;j zfRO%n{Vdhc+gR%VGJT#Tq|a{@(&zHC93x(dZ#V}T`!)TYC8WPyKg&;A`B|Q@{VbR1 z=#)c8d7>BS$hA9eA|7l2>zy?hF8f(d(HHWqq*6!uLIw$`yX_;S?h_cxo2Z-oD`ku& z`#u@N)Zgk`X>fciSLAuYg!I0F{#Tj(9pB10^kVr`Y+nlHXHkBsL8M z+4A=suraArewN?1Z{+=WzY2baW9+l2`hR17l{$3CI(#j4PK=IxEOkzdj{GZiPK=KH zD|JqcP96T0IwwY_4*yD>6Qfgyf2EEX9nrq_c-Ij>Vc+9lsr#aHkAJ1^M&};?O5IJI z`}My}as6%1JNztlUt-_U-^6^Cr|`A>t-pI@4gQo<_*&-TOVPfBdVDPrd@A!Bmb}`4 z|75XzkSe#kY5qIz=R9BzXS}!P4tw9ovtPc*Il862@X2ZK$Qr&o)dio+&B@UY(C*1^ zUfws7YVRBAUjH)siF_QpIIGq5aqQN;x_5jW zujt04nT{3kpIg#XMC!g~sZ@XgJ_u7>ywum?J$ zFdZFdE|`H{AVgNxD4#6S>v%jWw^P)F`1K5kw@RPfuIs<U!o(%cC)L*x;N9PUtWp}tRXLp!5 zzVO32yNMadz=xnD2z6LFtQmP8{iYC$4ld>9DO-%;;dJ>x*p5z*bRF$oIlkpw<9IPJHNE zCth^D6F<6v^w>u#7IciI*EnN~sHh;Fa4#tRe?RGk2S_hGOnTvKq!qqSKG;8Y`#Y6o zU}vZ>UMnn205d_q*P0#>6v)9fuQ%;^g) zW?*lq2$Pp^GvLrY!Vd} zYwl6rWWn4En6i!md86LfFG+)d`hR@0n_UFCw$s)a;2lD*^%D~1^ z5hicpXz*4rk8gzIDML7!GK5n|Bb-WJ!fBLo8#se9gtK|CeDX=J=Xs3OA5e_#}A?H&W)8z|G_%+`@b1vz7FE-bQ-i zcG3%9z@PA@=VC?<_Z5^S+zZ|T?kA1#0BMAWNh5rXGKH^`xA0BM{2Q=_GKD93uY77r zujkXG7oH=%@Vw8;#1=i0Ypln^nKZ;gag}i$;yTO4`3uD+H*5_C-{HFlzztj#P3!}? 
z;Im%jzK0KAFPH0&Mi*;rc;(U*#kDlxkJcF;8Tk)7ydSiAKWOuQ(B}Ohw7Pi?*ww)M z!5-rMV7hoeD4rm+c|U0Le$eLqV77QaI7GZ3946im=EQja?f9c}9e?yj&h6>J^&;n- zZs2-=>mAOF8Vm-x&U$*{o9>B^nse>B3b@8{=^VC9F8QP9{`}IEOw)&VefVD=zUjla zeMr-Xw0+2<4|(+=&pzbchcfz5Rv*gjL)m>e!@E!9C**Sy*|-M%SijM+&z}@&M(1lw z9uW7^xDwl52)R&+T#%e?EgSUB)8r*z!#lPe+Ee@%7(!-LA~VJakr{T{R&pb^Gqy(M zkNl`aevABSZ9Utek}6gLXN-Vq&hkhmb9m$d)leWXqV&AhKnS(2*^b z$d>DfRk!m)zU20#UFfSJqUu294gx4S|Q< zoNVL0k#UtmWL%vP85cmdRn}0J@Fee* zPp#A5(=j=hOIfjYrW6P5b|x36sh!9xV$`fL!UPbxR_XUybA%~AYqc=wv!cSTKC4pL z!)MhA(|uL|Iais1eO}m~_sS=W^m-o1Gkq=@n0ve7=nmS7?5Ye?j&L-HjH}G!d*OJ> z6HcZ);S|afP9-1VG|EHPRnDM1;cVV3pM28mc^>J73rL47B-Zbu%9lLMfJBmyFi!T$ytB3|ND`r^zJNnlI`37 z+4fn~Vk>d?hSXtOOJd!r_|#MA`pK+2?R6eHe+uhP#W|lw?@wjjS;l<>`hObh&O_Ym zEHBq*p>wu2;j^epztU&nW$m|svjog?+V97$EWT8;9 zP#8p(3A>t@eYyv-P|uQu9YD!KA+j(mlq?iV77DXTFC1cGzK&s}*E6!vwk?NAAK-a3 z>4kZu7mg>ra5CwIQ%ua)F_rXso<{mqa0cmxv)%Ot>GeF1^uh(C7cMk0U&kWS>v;+3 zJA;L!7p@|`u!!`+wWJrWC%tfkiTOI7B)y(DlKxt7GwFp}NH5$<8{iGKjAieM~>4mSEn6Kk?((Cz6(!xV{ScE!x`W-`a<9(Ag7`!k&;C?C$mWFm@Yk{bAEo z9riS_$GWLHo%}V<{)ABV1#gt+W}|TnQ(s|`KU5v=z_$ra)zkP^V={yP31`z5p+7ZL zy(BSe)Zc~tPq>Qz3FF48qJQBXMt!=OZ-iU;M%cz!>vRzoqYY z%`+E``mKgE!jq&Ceo}w?`-XmqZlyUVzeEM`GV)8L#`z^|KZ7A(`56+(SLkOPg(-}q zFvvIxyPDX3ksgerp6O@V>Eic+D5i6+bT9UQ$ljU;4>&~rt;a{cf_~^(wk>`b#sXo^ z<-Ho(pP_vk+OMH~8?Ai`4d{&WDXew&Y7`LbU%{H!t>3P&=Yr-#>-y~KYA)LQLR!}$ zCoyykno&Qo9$NiZgt| z9aiM*L`U~gciS&OKidCi46w1-_1!7|0Cei7zxLCQSwi{|8oVg~Ku_{ioIA$su4(-5 zP&nqV5xu>#P)YhPv&ynZbAT@hh_(f6lPjJl8iX*w3sr!))4b(;fE32k=33 z@tdLFpd6R}qR`)O^F54f<$G}3ru_-0=vRAQKu4MvHwo#>H0VZqJZ=&~U+x|c?eEY& zk9}e2I&P201jb`J<8r{+E@4H>$o45oIN@1^u z%Kbz3ddL^>A@=t)c=4e^`b@a$|7Y&)t{+hR-WLFp6-C>e2z!1E9*-X)PJwm3a!20uN4hzaBcZr_M{ zOL(a!1SC|H#1E9``&(ykI7UTurk(qbe11Fo?0v1by{>Dm>$k48s59XTcuL`Fkg|mN z=A@%p8+6)DyQH9-n<{umxP^CwTX|Nv4X#tT-G0}6j}e_Mjv$}!)q}3C3)|d4traTN z|8-WVRM=pJRtWc7p{>GK_+HW7ipyvwzkA>%8if|~MDYigiOcv1{y=L8^YD`%fh#y1 z2Y>LR2!Aluj>}MNjq$(38BC3e$(R}ylQA_aCSz(;Ovcoxn2f1WF&R^%Vlt*i#biv4 zipiK76_YWQn2aJ~F{TlNF`XC;lOH&PzxNDbFIhL|{aD@c+J6d;U_LxT0{c#xF(zEV zG$t{ccmmbIQ+)f{U#fADVlswwWaCfKcUX_ly$UW& z`o4xUEjR?yT<_8~Jb~`-<=WZLX&?LJZi6S#xVC}w#j4>6G_Gyr?8Rr`2{f*4N=PYt zmcDR`>*j<$W!vEi6#u-R{j#5fCm6@|0rs)q0#C4EmnPN&Z@wp#o|6ZN@vJ9WcapFlH1UsI-rE(Qm#@-qye}8l3YEM$x z8jpPM->iJtlT!AGN51!h%C+nPSnk31j<5Y!#E9t3(MN^&+}8>5wLd1rr}AqdF&G)_ zi8bmvCkA6k)pX_xx#*x4bWs@Jdl=t)7~gw1DR5&hx~Ro(g>vz=v=s0=KKL*`_^|DR z&qeRH1o$0)N(=q(4k7(7j87$uFFtJh;&airE%cdOd?_vTnLC8^nLBi!J`)b~t`6gq z&!ykC;5W&|Z_K#|_(Sdx;t#n)_wk3^FT@}6xDbCxNQghAMTn0+Z1t(m z9l^B~6UxQ!(GnXIxVm&=Tto-68aVx}W3@-7idahaMN=vj_?CS+of8S#%4%?$F?`ftkn=W^rF~M)SV@ zAItOmlgdZ9@uyGXnd`ad2k|$w6p)s%5Y+wYyd#`NnnL^*At8Q?7U3LZ3rk4z25=s7 zgr(e7DE|E1`9BLvZ|0VQvFJ0Q2h{x}D|Ejw)e1c>OtV5EVY(G+ z5e~CL-9oPw8hkgHi5y`T_a$dE@9Y1uye}Nj`@)I*E}Rtc=l=}*{($YzKg+pV&-1sA zzkB(6mvaoqfvNnRat*~FKNP?GQ2g^lnM(|1PB9d}{ZM@QLm6WZHU9i6I13*>`!DO$ zKZ{@A`19F^pbOtqx*e-=lCf7Tb!K8Gs528gL7kbH3hHdOVk)RJ6Kg@8t=Iq5Sx`uw z9TZY$T|(;2#BNY$CYFOb>ryNRb+$w?9n{%+A$1lMQfFO4yUvy~uHS6OYP8|I>|%_u zim~X$I8g4H7z*;)(J<0exvlnaM{jBfDKGqg|(mv|Bh_5uJyxI6lU4|zx z@dyDs9$~p1r?BFab`#$6^iidgjQp@2M^J0W7u12e-*Cx(>iI}|vE{!v>AB0FQ(5}* z9Cp>j1z=ZAya0B!K=-k$CXN8RT8mu`SP{Dl&ujPyv(6v;YS#H}$aQ-o?I} zxB%?ya{Z5et;N1d2J`K1$%y2qcljeGKfTJ=C_jUF7Q35?y{*OW76`Gs1-g&johQWZ zE*D~Vw+OMjwb)%F+e-`w{zk1e%Djr-u)mqu-CFE#fe`y!p!?Y0c|z>(av}D2ixB%; zE41yeWJU7nDgU0yXI%MR$|rWU)`|&bVvlQMV?qT&52*V|F`;?F)R@q6A$EC-5W8F} z#4aBbdifu_y!jmQ7naq%nQu9ReUuclkcoY*^&=mz2p^G&4XzFI3^up6mNbNQpzb%2 zmT*6539;c@gxK&}AvXM&u$8o~1&<*|*v5UyIT5kpN-I*%r1B4qPdu^w448>+uC?5u zOl)y&tUFX7^nkjb<2XxZXy#KUF+u^Y;tV@X$lKL-Jj08!dav(EGBK?Y-9=NkcO~? 
zv~K|CAxBuseaTtK`}%(|?+fpX*!T?gms%!XA{V|Ofc?WZ8(u=V!saJd+x)~@o1a)` z^AqcBeqxi&PgL0a#1@;M*lP0=+iZSfyUkDRu=$B##MX zzSIfd`Z_+9d~3o86I(H)qmVVhimiz76dV4kJcZ(UI*6@EwfC=5Y{jllw;f+Gq>4k6 z3^(ETrv|+)f5;z54f=)Q)L@Cw>KhCQ{e6QK=J&qAu=%}ju+{wDFDT!$-`_9j6$bhR z{laj+V2RM`9}Jk^`v)t`@BM>e^LzhbtNA@GC||YTpB9v_+8;;@`i0@NV2RKg5Db{# z2Lvn3?*oEi^ZS5ctNDFkQ2uPce_&AlY=2;2Q2uOxcwn$ZXblPm%X(FkpTk9IPu%P_o z{_wD1iO{+#7%;zI6|6A7Ulk0S->(X`n%{>9Ykx?*4Eo*gA0Cvy+#eVol)u~`9v&x;LWKRooDW!mBkw$-_B_$*WZSKdBq(_b4dV@OrsME{?q^RJ_? z-rVUvp#Ry2?yt^m$k%LqrEnW*T$?xp>(jZ7yBN0_ZiBJP82Yv13ZglVJJCTsIgW3= zuKlPY`ta%P8@@2dq1e(g<|s)~`%y*WB!*PwMCI4SMa0_i5*|A?B*~78zhp!ABqCKd_3da+Rz_+hhiF^3Jnen$`CKM~- z#7HP!LU9v4d5uMkkHkAHv|}a2YdG~C_FLx;eM)}et0FNGJ@dG4@sswKA%kUgmQLk0Tw_S zI|?NK-TSCx@fNs<=%f&*N{nMyl#Y9{#79uB#gHX(aI0zF9LAfLG4@0Wf+x!DD7oUiI z5Z|!Cxz9V|AjCN+79xZB_9(+S4EY%6P=xP0!Z{2voI^Z5@Sd>{-{}zxF_&0~o_xa> ziG?uVz2E-s3+(T{Q2qWAhb;a`ZEU`?68k%w$FutGOYQG}p>#l8rL%vOxTcsWuIbU! zeJ1>e#_I4J=V0FHyo0z6?JM;qxQ#k=f8EldKJgp-^=-m$C{9BBhT^I#Ia>qXV;W~`tis1{ zxCG)PR`Gq9YdDWp`1s8=aT2TO6Xu#YiBg6S(GEGuPK~ zjgQ}46DP3>AHTUKPGS{4esfKH#Hz9OHL($^@bT-64Rf{zJV85rM?A3-;ySeNl#3XN zmAVh-Vb0hfR$@5ZpZJZUd#%oVn{%EFzf`H`@I~tz7B_l~bw(e4KN1@uev>m$PdCoD zia-3m)qT|M;w;eE4(g-09Y2Iudr~oOPS>7MtlJ+XoThU%K)8ei;S`;(0p{s^4e%Q} zUjv+=^EJThbiM{SM(1mQtmE$^WF3Es5N;t^2)B?V9OUng5yCAD1I6>Uvv#wJGl!SJ zxkvbz_>Di&HEmVIIcPuDwc%spH{Q+lHrh&OAODE8MG-zGe&c<*rmb}UUR~2xy8gbd zX)9g-P}j7Tt{3Z?w$k-Ix~8pk{R3UoR=QrKYqb^E3%RbK%`EcyUE&Wa;A3>Xl%nh;xghJAb8loV3i(ZUdpMsb z_4G3Kc$W|HHU8uqyi#AHe2AOe_zx+|ZuHsP)+haD?J<;fiO1KIL#o&PfEB9m4R)pa zdU8iov7=@kE;iGw!^JMvFBy!S2xnBO_wj92>V14$l?foet;!S--`3mswY1jO>#_`A zv{KLG&swSH@n@~n^Z2t?>UsQGEBk@?v);y+rS-LXPwQ*vn|!j)Rqx@mny>fpS(?zpQ=3x#81_p3gV|~PXqB& zwc``hy4kmFAC=FPleL+qoS-QuXvzsHeSA?qrH?PlHw45Na}M|wumsujE^$NcYOermM%7-+ zLA4kDrFQI~*1NW2v$Wo|9s8s8uI<U-qmZbcda+y z1NY4L07^bSrFzM403{!vQoZEkQ>vGId`e~=@>}2t{%-{({}|{4@hQE1BH~Y~lza=n zhUCYBlJ5Z}KM9olR8aEMK*`5%RH^*qH>#9;FDUt$pyX$PIpAn~HLPJ>sWklfP?Sai zs5A;er7;~;8nZy9Q4A^#{6j012L7RyN}~i+8uLJMr6qR=T+klOJ?jeJzM?tFHsmZS@Txx~+ach;FNY1sn<<1kr8vZ-MBx z`c@F#R(}jcx7D|S=(hS3AiAx-6GXSwpK*uM(LtZ5zgD9Edh#LlUGC7^=iDLjB91Ow z%lPFm=WO4#I}*cZ_yKtIz1X}#{I-bsLIP)voxmnLHhZe;pMv!ku>$#EEVvBxfHhze z=$ad>Pu1^WntlhFUrnt6nO{xSci{!~U1Wm#F0w#<7o$Oa7h^$v7vn*F7tF1u>bqcW zr8!g)=M%CItT{VLW{kcB> zvYV>?uFaG5yv95w!3)Kek{z}p`n(pDPO1Z?le*CR{xz&mczBZ53*^BsJju9qrQX2? 
ztn3Y916JxCY`{vrgAG_|bR)VnO*%bd3#8Na{$W$TUS=<W6LYR`I3vR zm@m25iusa@t(Y&l*oyf^KY~U-B7e4YzHKwm`FZdP;wX&n51`Xc-vdqG15MupP2U4e z-vg!lu^(@5ai=feqpwWw<(Tkwb zi=feqpwWw<(Tkwbi=Xi;Qs@{B;pwd|ZDjjT8z0$!()hiur zRK3z!4=NpOl(8Y$C}TsgQO1UBCB1FfkZpXIq!VeYNNmJj_GL<03}1(lWuR9Z=((n5A@9!fcoYOL4EVnL4EVHKz;MYpuYLpApD(doUu10pzO^&FbgaN zWp5UOW5LCs?9F}H8`e1}HexS(PAN7*dgcS-B0eB4;>_lv%J&)LKTmAL_22MJ_|ar* z!WjGfj7rYWC}+=ZJ+qeeWIoQOY~|UbtiOxZ|0YI1Zet%^n;8ALjVZb|G5T>ElXY!k z^y4-r>Dt8T$8Aj1wTaP>+ZfMv2K|?XU&Z5y)gMP1eOW{HKD=`(dpxyrj)r&$#Y@a0 zhW_L667H&0=2*)ER(WrH(!0&RKW?~uH(b6OF5iv+%ng6+fh55!q zoF_dG83pLKdB`YW&M^-e1dFz1+ui~{By^N>-H3nHU{Imca&jC}4% z#y3I9me*jCmDYNC8HgbjFX^boCYQ1 z15h%$LCH7|(k@f^)+W2G(A<2hYi_=)YizjK%rjq}=ckBOrE3_N^ zGG(R5H#ff*&&GDm{YNYGq<*^-)NkJd(LGb*lYMi49#{F0ep|+Oq~CrC>No29A@tG| z>f3*H;KsZ3%oCJJ&pZX{8S3*cJwtuoh3=X{edgyUo}2rE6&k7MYd}5!BB68 zeWt``_?EMdQfDd+XP&G6+sPR5hY9$Pu`7zL5I;~fpl_MDf}DQnVLyA5Gwz$_!tcwt zZ(1*qao@Cfka6F%WRUz%>kE?qX#+s=KW#8b{-;s*Max)M%eZbFz97aY#TvIgg-_)9 zmdN=P(qRvy!`7f@>VF=`S@fWZiHO_yw60k=rc%*I@1X~iD9h@6tGpb4u+9!y;OdLM>5Km9i@w4C(U}RgFpYhqx{i!}4|UFYH zo(bP$u8>SUtQ^62pmx`Hpmx`HfPC{EsNJcHBI;P}taC1&p#Dr8ifjV!^z4s$#L4J8 z(3*B}EHm@zn?H9~&KyFv|EQS8}FG zDc@NZb@l-H+DCpD@U1Q2o{zYkg@4I^xr4JX)^Kk+`3#T`#c=41kObOlW>3C|I$F*5 zOdTEgBlWb1a)`HKJ$ZF(oZ6N)@PeM=({<= znLd9eulNrC?)=aG4qy6h_|#|NTc3%K{npBJ_spoo=UyIm&wZ^~<<7T4&E#42IWzQe z9W4&RP2D}K=kA0LQ2l(5y|4JjXXHk7wZl^sxnjzX*=?;h9sp1AEaTatWv=qKxxWS( zGnPfp_)bwgA zv;dtw?Kvwnjdx4Tw;9p(P8^2mJ2mtn-kv2>B@$8GO@I9 z6WPRLm}|I+Y~nG@HQYot>ov_a+(b6<80H#oBAa*&a}76ZM2O=J^~ zVXomOvWdqq*KiZr#ABFixQT4VW56kB-==KhGR!r+L^kmmqlwQj|HDfZ5tE_)n0^Ld zF`Jl-1kTAgo=BXe9fPq-GT<#fdN+aZD{@Z8VaC)Sy<1eNc#L=Od+^C62>MC|*Kw4(qYc=G<@Wkl8OB9Xb1>h!013Kh9oE z$C($@@O%?}Nqegqu7ZB3x-ndZ;VshyVGdaA)FKpgI=W4Vvc52~_ z3a^_uP{vQ*IFRv^HxXp~>cRjhbV4x>W92Z)i?@5@2zLNUTU zxePD!CKLC|_*`+nW)G`6kny=NcE;ZwOw) zve)aD=pDZ+a=tCTsuI0}uc}1v;HxUpJF`IABV5e8a2t9zBNg6N`3kr~t;*MOka>gl zsA^TdR)fqN!fQe14dHbl^M)|~DeY5bWOylq(hs{L=jq~y@+$p0Q0e1`@+y7&P+p~v zAIi)3pna;md=FupbC6tsj+D3fb=AHIVia+K*?PVN-ln+u;k)L3QI12q_E`TM+%PsD?rKJ0{XzMAbf{q z?^{)&G{Wplr8H_mrBMee4SYrwN&}x!h0?%hRG~ER8C56^d`1;YqZL#d$3Ugg2IheH zjKcVg0!jm4Q9x356vkKNve(L*vzze``Ss1=A2R#6;U6;lwc#H!=l|j#GW)IJA2R!= z;U6;lpy3}f`i4BA3xI`-r` z1~9H}HTuJ%uYWv_F@rTwPcpU+JHH$K!gD?N4m~HoZ4GwwCC0ZJ@4N_B=smx!-wSO0 zUda1Bcn__=8_7CEqvr$YM59kZqfbHfsbuCQB?Y~#XY~eds`gr&C+WSC=PwiolunpE z7nHwRQ2MV9G&&SCI+S#yIFJm^cM!)=qIaPsCSzz+4rh8(~SnX@RcL+0#l>`>}g zShv{9*$;3c8Gqrt@fhl72>It+g@>5ub@JWrw>UqW?{GLUi;ka%#ZUT&e;t$@FLfkGI`3c1~r#!_H$MQX*L&t zq}f~ul4kRCkTjcTfuz}t{c0x7=Gh==Her`*_(Bf#rF@*z zu$%HdLt0G%&YmExrsW`MHLUFJElAppb)a-Y11O!aACykO z)~%6Fz}BsiPQcc!A??OikhB|*fu!Bo2Ihb#VnVCOvEOhf=ThuXEMF$h;#^EfXG)A?28;8(>9Y;7AbqyM1JY+3l0e2l4XGewpoTP%F;GJ~i0`xk8#vtydcjOE6U+j$ zz|r7na4a|$91o7i9&K`+uhH2SoK>;xu?xl1DSLyT-zj@T0Z7>!3PH-=Fdd}q4YNSX z-cSru_J-LYWpBV9PN(b*B_L&Qmlt_Y%^cLO%XdV=UqsW6MwBx0wttGFR*L*{kY; zd_DM$0<^u?Vn1v6kpS)QjYZee{$3AA`+Kox0ovb-JqytOUhG+b_V;4X0<^yudltZV z9*dkzFFqz@WM+cM$jkzfk%>)*S92tJ@Nk#!E8HJ!^ zOa~=n7AP6Tpk&MjC1XyMj1uli#yn6mNJXm!9B^?0!qeKP%^fGlCd3>j2)n41VPEz86_jkJ;|sA zC8G|Mj0R9L_Jfk~3Md%|LCJV4N=7U9B;yz;8Ev3soB$=G6O@cIpk#D`l5q~CUB1ou zB9FbLShL!dHw6zuAo>>9vnbjaV!Al!>{qg7WHsRya z^A(_;-va9St)QOY2I_h0TjN_V_3fQwo%iaUFxPsg7Sua+px$W!^$zvx)jQO$7ron!DCjR9Lk8a)Ur|3x%M{_hmy^BGTUO`cE*$0u^{8gY!AqIGCK*R9NDQL<;YG0DMxlX zNI9~HS)mbuh;KdB=1>&B@d*8O3^Gld(w?S$c{T$-wbsSR?vC+V<~)pRGcUETet&}a z4f$8(AC_$KF)zI)K4x%-^kWYCu>swv^}S{2v`X}rkLy*sHhhfRoNX~zF&pqP)$lRe zi)0OZ?CCsloh2?kr!ySHzbwG-{fO-gSImUYm#F4Xd5Au{dj_^_Gj%Bb9BhI+e)dKp8#-9x>Mq23n2H<;(BJM#>6XP%+%%rn%-#b>As^9*%ip2@PG86EY^ zSo@jr_A?XhXC_6S5idsHIG0D-GnGHmovFN$E_&AZqhwJg 
z@hDl8DT};kQKk{$2$%9(xRfmNq_pH;|81=}l*l<3*?dR!>~&Viw;>K?E_0Mow5M~f zxMCrq&lA^~6XH!~@U2A0Zb)|Y{!uyftGU#Tc#~xEoR9Ovv12=)<~?yGK4gd^$>O~c-Uvr>3mnN@e2IySE$gDrwW$A;b1kOg zKUdxro1wmb9B$P^`9@GD(dS!Klb+7E;0%W1Y|5##!Y#vzT>gx3jcu>>N6xjmwjjcF zB z*ul7cM8H-45$!6jV{{=+6L;K?U8dWuqWNwJJ4Hp9EbUa!}d2^3-WGu9VlJc0P=43eh{6W z{R$ijI^Bm(9}#ey_>B?7aVUOcgcUhY{Br!r*(h%07(Brd@gwjEPZP%>eqWG;Y2AOC z;Yh@fEHY*8S4o>i|#?OFATuI;*bS=V-5yrgTpE)MA0u8aM; zw(Fu<*LGbr>DuNR8g*@R4Gp@s>t>&>ZLY!Wn`Lti+BfUMWb1Shx@H}FM0GqpwXz6q zXAYcg2fWT~;Vu5|ZwujdiiPkx-x9*>+#-b6xlIVKGfN1sGgAn!bE^O#$)Kwe|z?)3pu+@zb>q0rAtd4hQkm zwSEP}PuKcY5IyyoS~^ zUuhhM_H@zuWyNmnp&T!wlis$FEuUj7D4(MTl+Q5u zVJ|43VRO_RZO1M{q_pR+$sFO8tezI1qXub zz#-sza5%UL{0dkBeihsTW`kS7Yr$>c*TC)I4d4#&>tGPP3EXLgYQ_d`3@DBLlv`=M z0xFGzpwf5?R2r?I(l`bxjW$qeoB)+ZC#W>efJ&naR2t_LGniVbIF=I0$9GpE`RSnK z4+ACN3rc<_DEV2S$XVcTY_+-_zg;Wyp70!3sMQOWfSKSt zFbgaNM}rH&vEX8GJa`{C5nKXJ0+(Jo16;8%UgdQYsJvEy%Ig+TdEE*suiHT7bvvlM z?f{k7AgH|VR9-p%yY#9`%hahWa+b(3(9|ht>J&6}3QEoyP;$CJ$vLO9QBtcF6VrM% z|7U_eFbm89M}y~i zQ12}P_1;pw7dhu!aV-^kXB((@wu5?S2dH;~px)WZyZFg=c?MR;gqKZHoJv5?Hc7v}l?WW&` zRqwLT^nN?YeUlIP8}|43JpH@NGrW2i=Y98#i;D?AI7u-Un;2IdzTMw_cu$0rKTGVO z#&gcO569r^$MKBL_FhGNMTZ?395IUXbxrLng?Ws38kvOw=$M>Q%m(xbC}{J1H&7YT^?AyY+MpXLl=|8roCqdekRpnKpHo-wK+vJ=oR% z20cCw{p+0Fz4!RfQRXeoj8Dg?~8sv&<3L451ate?*}?T z^!owydtdbX0rYz+@}0B0C0}KUlqI?=QHu>qav(EZqePITygY``h>p>uY(Vp*PKE`4DH=fH9ni((<0k<*M#Xhu#m zHlZ0g&DexyQ{PUDByx!tEgxS8%`hXOP!~y{JZ>b7r?$VmH?CUL$s64evE# zH&*kWb6&R-yYh}ZsjAuJBe|*(d~l^z{UBVCY|2937l*Ns_Zud#qnRgfXruO0cdzZbnPyJ=e%ZrKK zIE`!<^TRR3_P5&e!^6xEW0)VFVty>&G2dWF+=3a0<9{~eaQx3^9FG6ljKlFihp}Da z7R)$Y<1lO&znu?bufo`?F!m~py$bgQu~*>%AoeOe7{p$gwI$fALtf5%FJzpm=cj{u zeio?bi$Og<8@vjf!#Fmbajc$MO!|7}K2Xmr0rkvMjYB<^HEZ!Z>-Tk_eqRsj_f7ge zp>o%3tJ|;pLHx|Rzmxmtk}40Cke`G2_;l|r?s@uD9$K1GxjcZM^KPUhM#%_pPcoK+lCc7mjMboI(2hn1?Pz4I2PI=ul#B}QNyZjX zGPZ(}u?>`r?V##o2dMf8f|9W_N=BG_l2HpvMja>_4WMM~2PNYbP%;jJlJQoQj8^VR z#xYPb+Ca%T0ZK+EC>du!$>;(l;~Yr4w7OHPN|_6rK1+W*%R1^#`%wK| zLanbKMp{jjUmSwUFAhQF7tf&bqZ7~1)IG{PQ}-zIp;W88iSmuH%U6>Wxbf4t5S2wd zNVO9eVEg{Jp(E}-7vVxw_n(!v!*Qto%`?=$$sc(!`6DkEpYhtyWZKV=Kl2RrfAJaG zz{sHuj2zm)JVP7k8F3Z+`6g8_Cy5^^VLU0WLUSQ!FR8TNo(YA_Gmb}b73Q1NH>ht? 
z-yrp8zCr45kN@Lb#c=A|LtE5Q?o#T!j(19_-#XqYr9SI;=a1}7br;`M7VTxxUTT9v zJwv$*^$g_}kMRWDxZ1+Vqr6%Ajq>gZM>vZea2QvSSLHz*MzJ}oJGNs5-%*wMjxKQ+ z7wkBYBKvz1hjE-ZkOjnnOy{1?$;c)@^0gkPeo`n$BV{mWTTljU==odm_cxh+#6JCe z4DmbSH}GQ@XY-drUFq9%WL&HNsfTN+mnQ1rJ$Q{1w1?7r-LfVq=A)kH#b>DA)K4TY zhwo|+{p2Zp*gN1hE?|!qFg`CO=EFG~V?MDU@_R@2sfxsZw6iusXJf=8M{PKY{Cn_q z?xC)h;}6DHd?1gxWit8JnrrP>C4M83eD34<4!Dp@XLnzo=g4Ls7x6LrzBCR{e{3p* z_n^(1ri18+rdc35qNy0fUN_B#4?#yX&Ot}epQUGVXoCv+ZkVxwS$ne>9)x!q?*nL;omf$2(Wh!_jQJM5_xJ^%bDVmw+@uN z_4Ezo?b~F9bcQ$UuFSXToZ;QD1Kxw@_66ZRcy8}bE2Q(g4ez1$M(*5rt6TNF_aM9m z&(yyqUFSjzA}8hpj-$wl z`GDgna$-K*n@C*}i=qsWQ*fa54~Vm{zFikz4aIF2GG<^ztS$cg!Y<0x`s zKHxTri20aCtjBa>JZ3mC9&j9Yqo0q%Z_MT_j%>wyz;8&GABWG-ncfqK`{;OY>B~C1 zTU@it<}x~-c2#!Ijhxfn&0Ogz`n0%=oW+szH|{33LR`kcM7RT+%NU?*o7YLxwax4F z*EQdO#?^7Uwz-VHy0*EDR9)L#Mju_%zw~^Hu5B(OS=Tm~k)&&z%ShDq*vRuSy0*ED z1g@We%NRjEU5T!XaT9|QW6Sc0o7hV~KE^(HXR&){?Q^f=h1fk*u^vOJwBA*FQ+*j8L~913_qK;yJ>7UV z66>)LJ|uzmRh-A0F^tuTjTnmld9$}aG!#AfW&()bd@}_^Z@$?NOa%vmY2Xkr9UKl0 z1HS@#!LNduU^bWqUJH%}zXpy4Zve-GUk4|GH-VEPaUQQl#d+x5*ei+e&^ffu`ent0 zXdSfTJZfpvuqhXIgfeM97N$(ty-=7kX+9RFOpL8UValZWSePA1rxfx|Lb2G|h=4O;Bd@jycp>zwFdnw&QQ0YzwmF_H1=@x@Z zcQ&YW=YUGL1XQ~7K&4v>D&2*k(p?NH-TOeLy989aOD)z7Tjv8xa|LrcrMVhZnrlI& zxeipC>p`Wt2~?UDpwip|D$T8+(%c3r&F!Gl+yN@hAgDBVTFUSFLv_p%m0km=^!9^F z?-fw#9R!u$TcFZw1(n`0Q0cXSO78@y^g2PMcLr2?U7*rCr#Wb9rCE=h>WZvKP6HW7 zi!U*}#xPL427HO(H8R00{)aCyyas%U;WfsBufhq&MiC1NSH}R?ri$Rs)K2T*?0;&v4FYzXpW5sKdXt2a4G@4 zho3Bv1L7w$Jjr%Y@8Ks4=)EAQ_jZy#yh=^%z-kw~N{QaVM^-Wp>;z4DK~rAPlvin` zSJ%L;nD`QWVJ5x=Uzp)g@P!!;1%9O_c1U&2LDp6;=AOwnXz~r3eCwHE)ipNH;?*~X z@5`%i4BwZymYUe%)iqs#8(nZGUq;urs@>sS9%8JMfW6yG>_!KEtN+`$6rI8GKdO@_ zSs~3y9)e4Ozlg-~6@UrUUm=(RP6zvevq01DLDTO+_0Kt=>Gz=N_n_(bpy~IZ>Gz=N z_n_(bpxS9^Bo9y6oXV3LFJq^*AC)+jzqQ`hoPH#GnagkYx76RY`1+J~fHBBsIFG-! 
zF0X}SxnS>|tMoRf;-fU}i#|5(3!3%?zocF_oX1jE=m0#6(mHH-mcCWlrj6a@S>QX& zH@*rQ9wfo)HgPHFeiN6n2_)?U=zhb8p!*FUg6=nb2)f_!A?SX?hoJinAA;^Te8^7X zS**Bn`L4w!Wx%yCw~{}p+r*{3<%-0mUwtck#aQ+ou{X<8Uvlx)~eMjE!hU$2Vgm3`c^EFdPXs!f+(m2*Z(J zBMe7^jW8SuHlm4`m8?U`q^l zf-PylmR$H6dqCsISqA5_3>gc_PXjh(AvQ&ECx$mU17Zgnx?&={$+;Nj_etj63iQvN zzqG3Fgfp2omU%ArWg26Ueb^VnpI~1Me}aA4N9@Rjud{~(?J2J1;(OSded1e4Lwt+j zQkJ@zC#RHOxCxF=^W?!+cRuOllTJ2vXD@aq8@uChF3U}QBvn0VFtI9NFrn%fpw1e9 z**R;R*pU8{JQIG;ep;+2<$Tm39aCAax}E*VBj=72*FpTo5yf}>H^qkJDK*ic<)?F<)Ovs?k@c3!?}0P z3Qfiz;*aPCj4kv;5u*0b!KJ0KB zm=30c_^`vnuyOd%ir$wTPgxAIcF{&L$l664#UN`JZ4`s7U9?dQvUZIHk+o|)h^$=` zL1gWk6yXPq3_tfIqX3i)+RVtH&5Vp$pkx$-k}(^Uj5$#<1;|6;Lt`f|Bu8l#EvHNyafyGTK1N zH~~sVCnyWgEE?eG(qptg6omYe9G;##KSTT*Pte<#;+ z4YFtQZOPw1lQM{Fm`NGLHO!<8;u>aB2KoADQU-A?Gbw}N9VkQdun12g9$Px3xfk~` zLG-~F7$HE|*lU0H3;`S|=ekv!r=#IZ!jiH!bOoX8Nvui&GP zoZF!?DZl9;`bcG>U!EUhmtlXf+I1`6oX~Iu^2wxyPkKrp; zqZ1TAayf2gv-&(f>5JS5dHh>(EZXDc1m9qe&iMEgj-@{w%Pi(c{}ql!@gP^iv8Zhd z`IhQvn@{6d4zuSL&va+eMs-=A4vwMWH?EG zrTbNk_h#rmeM_++;z@#7D<_Ν#kC`EJU8Tl!c5T8rFe~ z1sc|Wl1q_3bGVdgJiBklmGLRr$m_|c=$sGnDSQ`TLKR#Ud`b-GeZ;aCSuggC>doF{ zaqLm%;k=J{tzA2PIL_*Bg^x%asB_FGzq}IPe7iUaIE1PA``h6q*1#dGg*zC<+3h7Z ze|#q#RLZ%@k1k;E(?PNRe^pFfJMk9v`i>K=Zt?0GXSNfwaT{?RikGXWzE0pv-|Twe z&-K>I4cWxcZFc$p{8iVg4M*U0RF@vU?`n9>6Zk}0-7!_S@O`!8Uw@ytJ)LuLMu`9Y zLm_)hy$e21p07_BQ1%D%e3mpPBn&DGlIL{zpot0TWg+sO%XMzT;Icom_uG$t_t=JY zzx(xumw&f@!@I-<>Ku){zU` zcYFI^_-=N-?^B)f_<)E3iSo*OE& zYqHjSy(VjM@0zS5vum>I$6LPr9+z+L#28=AY`0I}dD8oFH&l}TD*hJn_a5bEA9P>i zM3+zcX)S3aoxV`Kj^BR7--rAz|Baiujq2=Ztp9}zDa6&&=Z|^(FI+?3WPjs8wSDs} z%lGPRmrw8P<{49_BzKO|zU9<~@zP;R=iT=%j6Y;iM)X;2lJ$b_{XeAL7U%C?fiCW# zOe^B73A1@#ZM_wqdlh_lA$nNfb3F4NNB>GEY2PyV$s}{$7r(81 zFR)|xR#AQ*&w2QZr5vmIU)P5J_Fg)ZNoB6neuwm#GV)(YUs3<{P{yhBIq`wfeD{3% zrg+!{WI7!7=W*VOMHJ6{0^Do$ zN%|$jis+VW(JdL4bj!o&mV}GCCAmLbB6{UE=@sb7w+L)(4dRFnZyJe?sTP zM*4q~2c3fseb;N1zs~qr{ond?+zrxy%A@i(4&;mlUk!aweQpKmjPCrn?wP7pNUEnT!b&6T5ke4l)@xzRtpJ;Vw}bdT0-6`^~w_;w446^64nG3Qh1qZzE% z-$#2SoV%oVqzn4+t)!r9#$Iph8g!j>jrMSAH0x`Ui8YUm3#0YT8e89t>Y;DONX{hA zy+e+4&U@rj=~%YDnMOGs9V5L{hh1^j)2KY9=$H)R^G02Pj)|wwNWWy0Uq{C{W54J% zHI1aHH8s*NiYcxJKO|ok@16hIE$E4x(HGxDZxo_xOf)f5-fTJZWsR?+V5?chNtbvCXk{pB&rlreCTbIAfa^ zzr@(4+8)~^F}AszZ$RT4?Vnp&<@Z&i_a2^{@4FKWv)5j|`kvo)?$B1ZuZg*SO`3JS z(H|L0)mU-9W4sp`*RU?%9@l8>q%zOHf^m(WQ=g8%g0W5FXN+xBXUC$(Hh=0dw!t@f zaa<#PvWv0W?$pS*<_P04jcsJ(r9brlOZNDN@lWVQdwlal%IS=6TFG-E;~TACS9?2a z`95oWqvt-wY%w4U#aj&Ekz`ewSVZ=MFVb`M|V`6kAt&e*2J)-%|I zkBw`}`2VxUHCt_+v%u(_OY8Wi(hqvB<1;$v3dc1kNmF{KfxK#5(*bH+b7>vle|=0d zDq8P+!7teMbCQ0qF~&m1HPR~@ z+eojhwDaiX?=bySc~hS1t>M8#{^4D{SYy|lzc~I5aUS&>yyuKzp5mHvA6R6MVU$kK zF-+^HjA8KMeQXR<@;PIeGSVr#%os-b*BE9b=!{`hmdqYwnCS6~=3W}Vu>Wa}=A>#r zjfKadV=n2OgsSK4aZF8WWE?Y}yvXin+2a`L7@cQuxp9p8Y0q(tbV?t_9%FO;zGmi3 znk&i&<@mNfZ448wQ=Ic_B}00pd8y?)Mc$&vF4JjiM~{5k*rm|cBZ*h2N8-ryr;S&V zsY{Jhq&u`1d;;Sn=?#r@F6oZTk6X^&^Ph}mT#RXA7~8lRrR*o1fAf3v$NQfCWkK>`<~SbwC*(!` zlWz8h`495+9j-sZ2Pj?f=L?e#$GtIw z@p}wr?}Wju{TqDI?~xv79mv49xJYX~=)+~`&T{dY8@;2LZ3FT)q>-OaXQNFIDUSg=9{`!X4<1f&A*(B%L=zL1%xBeWN zcf3Pf=KH5FjNhGV<<#Iq(6~Xm<8aiRL;jAbQGRFH|4~tXXZZx8`)bdzfW9Bcw;{iV z{2NLqdaluE+kTDTt0@nDjqWl0G0&)!ZqZtPXB?n8N9643*s6d%7LbnC9K*x28W%7Q z=&oCCb?IH=9@binO~T8qb6V41WB1{*E1iGP7bN$yd>hg+PCr)KiR5)3eOYaxKCN+q zY=@SEumX8$VrMdcGc!#d>;JjrDrvUi*26miTc96#uwC_iX= zlpl1k?Faq-?|!x6QG6EazppuVp1!*$jIV?KD;uc(D?eg7{r6of9iL@7eK?){i_m%$fWBQz*(RUx@yZO{byW}_N7ox+ z{2WP>a^4IK@7k4mQ;x<2Z{bf!CVjo@yyv_V_Fsj+A@WW*F#MeJj(iY%&<&bbyi6Oa z49snxe~xR)b>PGY5nn{d8P3AtJMhx4-?73sDvf)|pWdxWiphC7$-=kHcTZdR>Cbv* z5B+(wWKuTuQ`uL&r#}4z-;wfo4RxjR>0659`c-V!p;)Wi>DM*rpWSKJ3x|*|+25xx 
zyLndqD~{`37IUmv>xCB?2l0%FA9|29@BEzp-9g`Q{F*)cx9p|9tIxB&C!_kbcqP-X zvAgU?cwRnB<Y|=^9G^n|$87TDeU^?t z!Lhf_{$o93bd*>1>7M(K{de@=p0+s2j-!jV#i`tL{dxC|U&a>a(4QTeHObdTYNA5cpd%tdF=4(^lRDT z1z*w@H`=y%HGR3rw#AQQM<48$pYtE!e_~IcO!2?)4t-d5u*E}6yyG8$gLpFwn~ZTFiOM#YbL+; zJpSdVK723N@(J$uQ2wL{Kk+Z5w=8Nbt=PMs_Brt~_E~fEgs9j%>BJQJmct`_o~@R> z6Az&|ZxMgdwpzCNH2IT`aQd)pv*n8DFe~-i;n?vPoR~YO4?poG=I8O`TlV#{>~b>s zR3E-voZV;j=g;LD;1kNyJ~__5oNay&j^RD*^LxzUd)nwGbhm7DllAb1W}9o+<9g(x zjn2W(=h)-l)2B86?jPmjOT(}CJbhaIdjQ%>Ig8^>~eZ};E;nwUR396+-_qf7n0 z1{>U$wsvGm&O!Q`BjfXU1eHa0{2<@TKE6%IhF5YeKH-Et7eB$8HR&eJ$7P?z4>)Pc zjyK7Fh^=+>(tB_*j!h3UA9rl}qsZ0uMVoHdfuj$Uf7x}%roYcv{h#fzx--X)PDghA zC7ZK+$KAi``&>&W?8c@$K0TcQcN*WzxX;lG(|YKIM7V;x88_;@Hs?Gx!wvM{1r!U; zGY7;C=1@nQ?f&0~xwx+BhsEL-nz8HBRqx>!=tY}pPf^{IOz{Qc9NNh%I^YJ`^?KT* zr!KhdWA^+Ce08Mb*z@SMA<_JRW6wXiHstbi?90WE=K0}tZi&Q8)YUb^2)9?ZFjyRe2Cle|^{9}CO z0{ZiC;se{@1a6f)ID&j3YbT}%@dM8i64Q8_@EU*j&B9Ua3n{#geIkWp{N3{Nv`t|D zNZ~iw4-)*gC#kG0&)`pxOr#8jjFGqUtrWUJ z)+Lql?QG?HYNL;C<(t|nqPQ5#}BeDBJao&yA=>~X=wz>Z9O6oyz z_*wVg6!dbh-8~Hd$KWdYLF&kB);Fynevb0jVv@>qzQF=~NUzhc&f<^lLJzEG{q~2{ zXIFgcgnyy_blpC1plRa{)@dftwkzC$zuWguo{b)`0bCISCkQsd`=07Iuxth2+H)wCwt$owD>l?c@>CPHn2s z=yuZov{R|wPO4kgnNx?Skgs~yz7MMVr_hC(m)vFbE_-*aRX*2cb(Elwie0fCqo}9N z?2mhb_|~nSn6m!&X9ojZpJD#}0rj$iJhUyjKKMNA)C*k$$~@>Z*|m(p_%o=#!_?n$ zujZ=pRTJYZ6f5dM_rKJsv(@C!N&EbL8_0%Am8J`$yG@ z`Ie}Y6)G!rV#-XNsLc9CE|!u0sxqd=^er1P>rxq~P{yYz&jU%;>4o&qt)4z*F_h8b z`Y8LKC?-SSZ`P8p2KO*VD0KBUWxUHuF3Vt?sWSdaWh8GZ;}X`lOza(5+tN;%RMu(C zxmDJEl5{$qG2@T?B|qNj zN*^)2?H~Nz|4tj!{B+VzJyQV(b`*UPZGTD&K52gAHU40Hb3Tu*Ti7KTm%rVt=-QvOm?}GxXQyeglmC+0Oq5QU;XO^sql$u|K<`?2o=Z z*`GRF7nyqf681;yCcmitVK3?8Z`t^dWWs2kcQjbs`;|2zTn(Bh_C6^;1oqHTAGZ zs-yi;_DFRkd*sy7XW1jw!ROf{m3gvlkLKF;sBmd?nfsVB>o?ipBiN^-*r!5cpGF4n zGJ2gcF#2+GlzlQb7=5fV{{;K=2kg^a@?1|DRrYRP~JbyUMb&WYh2#KWdAa3SE^}?Q?5(4OJjvAvR%8~tjmmt&(Jx-iSQdqaFNNZ z%S@?k<2%zG`XO8=pB*GKgPdUl;Q-?XjQ8 z|2L4=KD5!Z7O^x!zTIbqj2$20dwZ6#<4pa}c={gw&%Eap{p(rA)6GKWJ!5p=%US8d zOrFzy<~?JC%zK*kKJ%Vt?9Q{;p+|(+p?iebArIpwjk~|dTu1h26MsLm?T}(LG*_7` z`vXrhQ}&1N^#b#qB<4LA7}t%$S2vKlKEj@9caXQxk{g1oF=bzWzDM`KjyoAk{RY4F zK*n!>zvSz|3;EXRZ1%G`$T#{S?|cx)o;}!{v((+Une!~A?sQ#0aFA)wX;E{Y4s@pG zJSRZeoRjpohxl$B{VDGIG`}52U+wBQsO;!>+#C0#3@RHQ)ejxrSw~*YSYAkbNWW=* zBYj}zq112m{KnC(nkQ<0sClL4Iv2! z&-=gi{icz|jwROx@1?#xjA8Sv{-(Y(KS*U*6|(rq&E$Dd8D1p_n$?68_4gr z(yZX6H6pj7YiFQyr=xqPp@WOyl&4l+*zJz2ZM{O>Jc@PfqtMIq(aY`VWnYwD&bdOp z+>TE6k;nF@V=ASS=cB6?!y}z+uF=W!(aG)D0p#yfT#xKqPrcmn4~5u`fmI8rU#%-? zryncMLT%b{ji*w2c`)^-wPr)Oul1@!h3MsBLiF-*A$mDQm?8ZP8odk}y$l+?3>v)* z;Yqe*wK5 zJtuHE%3IFONYl6R>?zFSj#J?Wvompx>VS6y2=(!I7WGZ*<-NT{GsD zuFhYQyHk3)iZkY4iPF>0qo-d*M?aQIpB-pTIG{c|&~4iH=_ozDV0J`LF9fBh7cov? 
zf9k)}H{}(Zo_gBRYpu4PR^Pr- zJ)IE~(bH|Lv$!-@h>V-1vyWpN-=^N2IlyBd4nk)~>SNP_t85w*t8971Tp69s_*XW_W$Wx1boOtSM8?0;*%w?c zqqDQ%qb`*#B{Ftq@0YvL-$zt7>Q?&u)l+?Ls3*UskK1~?lrrsOUpVRSHPq3`#N>S` z*p^Q8cchHH%z6^(@dN0^)9A`l^mrS3Tzg4Nk5{wS$kF9~^50XJXQN9#ARp4@4Lx*u z8}e%IUSz)KCrZce?BKamAJgf5s=gf?(dn`^&Uk(WdF@E_>{I@eGkZ zsdjX|>Xf?Gn!`dDI*zgXNv?HIF$~iAO_c2!biR1in!6W7^5e2z&@*eu^QE)VE^iAy zb&U~Y^9AVGMA~sQ`d|LOACT{6@~v?YzIgnKif@V~G@W>U%J0diOmGS4=kh->k9ddn zBbNB|nk1{Z9=)PAl#XV-%KyIi%{Nh9uKm<=k{cf`2E}ag2eCNJ^&7Aa1wYU`-__MqEgD{bo#o9~d1mh<0oKMrf zkH^H7X>XG6(mv^wZ3tz+319ZnH{&+Wb*Fbsr9X6~#+9vSEmM$njvq~TZ~PAH8ugp= zj^?A9Q@r`-pa1K`_kaGcZ&^diIJ>EOj!S4&f9D~_&7_f!z0c#lR?->_5Bo;x{lV|d z&ow^(Low;5Z_SP$P?q|$Yl1q%tBUmt4SXN%w3X^t?e+wEHa{_S!r?T}geL}D6OffQ zY8hh;*-?BjN#x@sZE_Fo-AQ}PpL>M1JVqOdzq>l#vyTa2+Re1n9@^<<+G!8%bTjR= zhjzM|cG^Qb-Ap^d1B7m-o%Ya9H`7jgXs4TLr#-Y&6KPJi`jqjFn0;_Jqf;#O*T1+t z9n+}iLyT)rFs?mBzRIZQL+C!$m%bm>@hte+V->(U=( z-P?7nL)UN4JE~)ijn{Sl{9j+<@7q@YvWnjR>Q`MttNLE{NL$A~Nvh*RR-CEh_o;*8 z(tCri%)c(E?_o3dHRpJVvpP>N868xf_A=jS{Quc|6Zj~rd;kBLnIue>NrLQ4Nx~wG zfXYt=ZS`gnT#$ssTB@z>|3U~VBn0(Z>xxMrR&+p(OBdqrLR6FqiOan%aI1x2YqbG; zxwy3V_Fo8~1VY3V1V%xA@6R*OGdv6lo3_>7%o z+ku>QebQ&_NO;18EBfn1;F@*bM=%_*j>{sgjh zF?1zceF|}^vehNur%;Drg+F0Ebn=p2-W4;)tWYeeiBo*scH3$#TfG$dEZHdisxd?X zJRvzQ+2^M&wIjnwZq0=EGhE50&AyIymeU5-?lUJ7SXp@c$f&*ES-eSSvN@Uga@8Lz zo25_0Ey`fv1gDYo5!9> zj%*(Lf5?%|W5<#so5y~W9N9efpU5rQJchYY`LX1$+i1z=_mIuW$c~McY~EvBro1RQ(C`D7a--n_Wb-waY|h|2$>#lxl`kl{ZN>Lzp1*>)nCeyTKBnCG z8TZepu0KLIuF~60oqe^dPq4Y$>pual71zwJO?HzTjJdV9xJ4lu@jLVIj^t@DGO z{2_TQ8W4WvFBAO^wdD0MaQ7Q(<{xL=CA?si&A%^@Sf zg!@mB*KZ)NFHKAc-iy2*j@;HW!Cl`7?vnN2K~_IaIu|`#nG|Sv$EDn8*v6d5&6d1g z#dm`HZ-MJMCD*UmFmt%+>zu-U$?K=FmxemJ8~8yRXPon@+Nn9CH*J20 zcWr(z+H+Wj2RqP>9l=^M{CKdI3_m6;Eg9Y^ERo?yz!Dk$m4GF3{S4%K6qXLTp0(q! zTwm^x>mMP@Woxusv@2Uf^Ux{4aVT;!&TXt5ihPetFjl4yj>z|1qq}(ywk_jj!$0Ma z@r#l1vf(u!vKV@l4Nu(9`sv7cyUd&!vrRh4nR(``I@efrdZV&Fy{^cLHyTOIvt(@= zv@^<;)}-;~v$VC4YvEsW$CCRC%ry-7{w>)*)sp>{&}AiYj`E>3W1mY0ETnB`)3+dovIDm505Vhy2RJ-poUOvK3-ZuCd5jD4 z&^>vK3-ZuC+IKMz-J^XM^UyuocQFs$qkR|iEZrlz)BMOwr@5*%ugdsf{WYEh^abN# z<}=FKm+{s$k1qO8PrqOW*8{j#9!Gw*2RX?&qSDi^X@#p_QwF+f7Ic}<+>z${g129= zViWdm73&`sBa`rjO+LRzKlAL|S+2gpdSauBJg#6C`1_x``qi{DPo;BTIp;Q5?n-I( zLvO9jVU_aka&NMEeund|NxwIaav zI8Pt*Jh3gvmKyZn0dVxg#DT#JJpF@O!}iU)QfhwX?jQU!bhObkuys2!WB~a3LE?bm z?c7_$vnif_CQkc$x&`}UmxQrT-orNel=->Ou>S(!bn3m?4R^{uA@#*Z5l#=e`sw6)R?pTk$@$Y>s#7M;`C32A_iI;W15oor%--!#XjUv3+Ma><$i_!J+7N zF!MQ2g0Et5IGz8*ECq*>I>ujjbAIZ0_*;q(!4K^VKgCxZ#!r>uK8zps`MGV)LlOLN z{YUG1BshrThqVf1L+^JqCYOWjgrfY?D{2vD2gck_o>& z2A}kXCsw&RpYipdF4F%daPgk*VYBb#`seUTJba?~fEAy=bboJ8@8Cw{k$C0j@hQPD z^6i>L)^7l4NvX#P(4soc}w+A`!6$zp zy~KNA@bD?Tk_oQ_;1%(S?0H-MX-*}8%+^@rW$GI@^T#WGHe>9Hm-(;ueu6(jv1g6& zir{nDeqgU9$-kKK;!gNs0d|DOj2bU0MpU{~W5(F=;v{sPY=s@@QLS?<;QwL&PChn3 z9=5RhN#_uuSj--=Fwy~v|%?iU@NqpgWd2-4t{^Gu^Y4p$|QW6ZP*hB zJiewjY=?Sm2hCf3!a1h9p;_4uD=pih5Zj>;_-RkjLiCpQ1TDmN(4L@$*bdqgv=G}t zdx92XJ7`bPLTraZY==T@heB+JLTraZY==T@heB+JLTraZY==T@heB+JLTraZY==T@ zheB+JLTraZY==UJ?I6F(1;qGDmq1gd?T~@(u-dX6reh~O=;<3gbb-;nxADf&O{W)wd~~@mdDF^CY%JT93ZL zefYBWaqpLmb@$=l+UM@u^d95U_n__f@9JCgIX1!RPQB z?{f7H?!~6iHyhn)rv7=*=oQ?GP0<^kDmHot_hD1)A||={{_=+A`?4E$Rl-lqtsx&w z{gdJD-CDtTOE&+F(A~xKqe^(8{ae9JIp>HfZdEqLzjb0$3&^agrLG%mYBv5dCNlT9I+cZ((SZb9aqhfQ${HpO|^6t`egoQF+u z3pT}h*c7*5Q=ErQaSJxZdDs-UU{e%3Y>FyuiUxRJHU)k+Q)g{LMy?|6_?5<=n6hyd zx_HH!$hB;WRoD`ekz-x%;4f%%V@YHV;m7E_M&=OoTsEZSsVz4>;9k1!7uXq}F=rtg zqAzxa*4oO>cpO{eacFKa_?Df~51ILWs|yPap6Z^}g?({MobkL+2?)bx!4|);lo^q?NNp8Asor}SZa7o9Ib}09t9u0 z;Dg239(!MBZ%)hhSd1P|LFb%@E%`I*uPwRNl&9Z?Cu>W_y{hL1czl;{fA;x{_kZH< 
z6&82o@%x15$qCE_uKY};M|ct;+(_CvgR;ytT~K2U^W^& z$WF2MF*tbVaqJS$@%L)nLOjTFe2?0z#Ed&39^?%8*Jtffk`InT;CDN*gR_YVR4kI( zuJJ%F<6ilWiM=uXj#|gq%G_i+_QZ$SUYZxze9f5LNUVc=+oF?=%n_~Qy~Rd)Yb~;F zcuDDsbC_rTQ9;0e9x+cZ|8v0q67!|R&iP+t|Ip{wy8W*p&!3wd?^oPKhC8mc1R5F6 z`{!Pj;~z_$=ljzF{y$!m16$ zucmnXFHVp1|7vKw|E2FG`2XYFZho!XQM(n#a+&s_MJ8TKu0DK+d79P4Hc2O)Lz$;j z$8hRg?e>mUTU_)(dnB!k3;2t#4ETxh^Eaf>?kczcMcS=*c0ikc)?Kz4ti2=^a~(R@ z2-T*zLSvX`+Xhc*44m#R|F^&=-OO?u`Nk}lvWwinL*>#3m6t>eALXs2yp{h+x`}Jb z^FPo2iNByP?Vdn*J?juNn48lc8(#XB$T~yCPfi4QMTi<#@Mq#dJYmaM3!{Cf0m20rimMUSjIJPSMUWA-&{U@xF&FLe18bEUdkRS)!` z@|Un5wbqX6-WkMHu4Z2Wm7)G=zkxjHT5YKxW%yee`RJLsVyxV51SkC{$HvVzeARdpG6k7`~ zH8C$|^OE?;-se<&q?p>bf%`{R%vDExv<_YrALSuK(}1JalYVUBDBa>Ct;b@mOXQ4h z-dk*>wYtEui@j(Ppv&Jv4{O+0DuGz^4OayGud}CA0(ATC_X2+T_kW!dnIElX4fr!_ z4L@s^SGxKR9#qS^+85ay*wuH)pxP0}Yp;yb{h@z{5`JB@%N%0 z@#zZquOIb%^M-)`!s~PVgQ(~2Zq#{aj{oWJ8UFuZf2e1E=!W+^@Ln9e7w>;@a)N*5 zZQcCxm8*{*K^K>@FUPmxH}&;0i{G@Cpd00Nr+oEms>eH4?U2k+U*ER+nUxyw@1~!c z^Hraotup-Vj~4CIlUI2B`c6EUsg7+4}A9Ldmh z)1D4K%@m(mFkp|6*LGUG>Y|;QeKQBa*RL(7zE*TuR0Fa6Ntw{@U+5I^Hlx-f@w1p}40(9(T7pn-Ly8{xOm2=cKsk`B9bT4!+f0C*DZZBSOHhh&%e;eSdXQ^Lt4619Q zU}xz*&EK7Y-(e?bwP53!JXJ)0bnJBP~RyA;;*IeMt(tkH~Nd} zyMIL=!5=;0m$~et_!zpWH~Qjr_G@~N`+d+KTiN@G#% zd_6m??|feoeP`>SROn24-R=u(X`_8P1YL^Hc*p;GzU<R z=dZE3&ZWNr+cRo!2AChT#%(@g<)o`dVw0HTHtcVE+@|<+joHS)L-x3Bp=pDpw^l-H z!%Iq5oKtec{&V3y?bY-sW433JXU}lP#J{uO)3fNQm)M8##q%S2N^!E8>@Ag5vg5+R zCA%&hQqp?ik~pK`i4<3Z){&_GOOczZzk>0W@O8SIu@`&6NbZX^W!u_yn7Tmtqs&J3 zdQn+p*l%YkG_7&e>iv;%R1WXx8@=yiJg@hcu$Rnh)R_}+*<|#~sUx&c$S7bZ-+^tD z+2bR>D_MRp^W2_w4m;&c#z+m+^DO%$n=}M&@g;=Mnpv_RTQH=(lU0Yx3pn(E#xC~Q zq>Q$r2g1t?BW#X&(1#lHG+H_!$e2uJ%tVhRG47ELkqu?rPq~J97RLH1jL9Vr?6Hrn zXV%fCd|)Vjwwv}C%m?i$%)z&@#@qwZNn54ZF~_4V*>IW{=!mw;piR-%a>n}RSO(g% z$1Q@nO8k9+J;8&xO1&<<-Pv$V~0O7b`PGr;?eX zQ^`Bgsa^Lwag29?(=##k={=3zBo{l9qnq{}o{u`6ehjzT{WRq~L%a38))(1wReq!| zRjy(SK!41sMQmlwJ)NvvE%__U)!&_rT*Ys8JUTq4T!oImkX!|a-N0cF=wuqQ>t^V& zC*#a{*vyY|UvoQ8Ic;VO&$itRtsa)GqmHzhHAi$ZaNs!cTRPPwi-fhj~tWix`PtID{q(*Y$6_Q>cCq5ymmYjHmq-ye!s!rd=NT^y(zCN2&xqSFpN%nIaO>G9 z$1~y!%x9yG7hHNa!tv}Z>zUtpAzsghI-ZTRo((fzF!T%?Q++YpNlb!SW|r|nik=O0 zJR=steD)3F1)rYvbvz?R#C+Dzcp+8KdODsFFJeCHWxSB4XBvCh^$~MoKI?A0kgjL! 
zpBa6|T4M8=&v>D`o?!<^pD{0GK1(!S=%#1j&wlplGl7u#jCFiHYVTXLwb%%2J)qgY z%Oc1}MfzlevGNn_qW4I9w-_r|lK)iK+l-aHd2jDVgE&X>|1nmc zPkxoI-)GIehy3@(%Abx2~6ReJgEw6W>WW^f7~b zS@77eN4Wf)V{ZP>)gDh9cxSTPUk>f*oj)gYZwvRPd;I0ls_s3md)v5|$=-1r%`#Vt z##{b}>#6a6y>qADd7uAh^Um+d|B&E+9~(w{l(P54`qo`Get7K)_EDX{Ug|oZGzdMGLOaPx$-(Zd z4Il9@td9jQGaBbFx(|Bv^DGe?d*n3k13Q0Xa1ndNM!y+#CHH}EY2$)L3!p=tKbOKe zDWmbNOaTWA8W%2N4P~|Va`&-k>zV(D`{1de@qtATKz}-;A(j1^&ti^p3b=f`@xet8 zLVLTopO%ys9L{)o3i#etd+(xqX>U4vx2LbEUeuj;`aoZwK}-Kl5?%eAbUo=DlJ<}I zE_Czb7exd3vnfN@yEvy#_52tZzpZQ9qU#1-(_URK&^3M1b*ZlDx32wMzdy_H3qJon z-Q_Rgp3cGe!$7xx13r;A7$5S&*0-6wH0R^8hfofuvad z?xf3r$IUOQe?3fi=(<$blzX$;Kd!0U?$_JQ!Tu-y&o_|U%zMzUZgs0DBOe=D^Bq}j zX=cAPC#-(y+#%7`ad0nxrMGpu&bubsbW0nh#1S6>)^Rga==K2iZ?-$0xE z#7d}7_P(cU&#fFGAF9?Rmh;X6bfw1dwbmHEl(ElztA6|3qls2KCQ^3>W%(QLU!?K< zNa~S)eU@2IO0bDNzgSakj_*gIk2HQic;{qv>vzzxSE6gbjn2J-_{2iu6XUJ;#D8pE zEZy6J?)9R3??W$(2BnvE9-pm?wa=;f9G$G^<}>7*bg}kBHJ_oAqtB3s(#6^{)qI9d zjy^+vN*8N?Rr47-Ir?mLL>Ft%RrA>h$1{IK7i)i3^Vv|xGfNk@pnuJ0@L{w}OBc7G zf6Zse!00o0Te`Rf{cAq6WT4qUAY?v+exu(()6&H)=wCgPPS&}m`tEO{i=T4n zVy>gQ_}30y%ym>3a~;*iTt{^=*HK-}byOE~9o5BLM|CmRN9khj9ifZ)f0Qof-ch=k zd&kwqyz|%8#kL-nPPX;$eHJ}R?@HHrp#eYoHwZ1-y4u#oBhkMLphH{l+PZiY`gb8T zXzN{D7mr5&J^=mMde_#)XQF=}g!XK`YwO~((7*RWcRDjo=cY(5J9Y8d#0s9UE`G|P zi@A>KVqhHA#au^qG1pOD%ym>3bA6O9=H993VjKR_$+rHz&*IPf;3dIF@DHJXgTTku zyS6S~j$V5J_z0d4pnC-$r!KC?F6vYlKjqNHTt{`W**{AcTm3p&U2OL&kA5Z6j}f%f z*2%Nby=teo7Ml&cmUgC#X;V}eb1nFuxGp|?w(>;U5Tu??b+K8`sp;ZRuYVOC?Lt?( z(b=Ae4+kHN&Uu!7dpZ8~Gl<0-IR-le9F7JKso={TkHR15{`Q5~W%hWq8av`a@Ne<~ zcE}^>!W-FRfjweg*hP%&L(qWEQcfq9^IqLgLbk8LHnn+#b-*v`K5?4cmOQ%XQQFXx zbCS>D-Xm)sS+oZm<`(SMy^M2YzwXB#xd2=74(ySelZaR5J6&&Mj}4W5$2;N~uBrbf zu6J>U;k(2Ey+1W#mk~?3zMXh=#ZSsEYr_6_#W7bYd;cHIF56Q$9z5rQ?;LEhUz`4w z&hRhXiv~^w|HAnV;Oimiz{YhH|J=V98ZhM{xVG{C4EG;}26Qfs@NMH?c(-ZbX4Xe^ zihp<@ihr)7`2V#7|6HF6{{K;AK}Y!Cd-ZFk@AlxG?$AMEjYapZ%uDWJ9Zk08n7H0e zOtLpKbCCQ7y}<3uq)nVTp|zv#n=g3uW@4M+BQr)hV*lq-rhIs>n>6NSy{Y`!iTJRL zOk(2jtI1cioV1g2+VE+~U#I=A^zQrL5=rCeAXM%{X|YE4@|kZXWaPPv_CzUF-w-yelGmKPo{{WrU>57XhIFU9R_;a0YR*de+ zwE1;Mo875_hTK18%`PUX?e5gWZFe!&vfI9iakAR}#LOFB&GlvunmhBXSMB!a@Sb9c zJHxYZyxhX^9*+^l^La6Np8qe$$Mc9UfamN^@$7Qo+4TkS?CKED*{6tSXPb}2^YPn! z@_2T&c7|uQ-^TNv!c`$tcM*px+?JA?yleSsGuWqN`_C5lU&XT>eHQokyONTUz*~?x ziu#s2H#C5ELub`Ow~Bu(Mc-?#B7?o#4ET1Y%Q#p2ifF&!O_i+gitKmOUJIS=;y!j! 
ztM=@7^Syl5sYY7Ub9bfHJP4m2a-L~c^L@p9N-oHUE4fg_JcQy1>PX4l)7dkV zZYy1}knfnkV2=g%;bmW)BI;7x6kA}ot&%z=d-fDInlh)eeu|It=&SbbSxEjYeSVJX zkEu&>9$vfO^vm3{lz0HeoS6Mb9$oG+&P_o6{MImnm$AO^Qg1@A2lD3;&NF?K`OC%Z zIko2O@35@YnEWB|(AwGEz-rn9IoF}OD_nrtm+RNB;C|j`;*oGehtKGyfZUbKL z7{1`UwC6n|iS;ds!Tm;hFw8y)&BVxNA&VMmb2Bkw=Czw^V!w6G+|z5##C_|!QrGy{ z%y;6r#>Zw}Cvc4q(0pIl_yEoKxpsN1cf4G8v#yi4PPX3Hb(-})*ID-a#35(d@27HY zzprcieXh4b8?4h@{{}cJN4Kp-uB(r`>Ej;y_z8W?a(TxM13us3-jCdg#2h6Ce~Qf{ zxClm##D{kVhayLRiyZx}wC~feO8Y+TueI;f{#yIK+L;-NPi612>X%4wu*M-5n_a%5 z9L}F|*6*y-S&y^qd!VP)iQciobu0WXzp%-V(8;p6_+Scj(g2-23yo;c5XBB@EoVCX z(j6U@0FMr5EvI5Sge!cUuVvv|)ODl5bz9BG)eA->|OZxwh#gj%%;=zVM%7 zy~DLX2ie5DqxLq|oTm0bPNr_fwZ?-JJKj}oWE1mXZVIuhz{-VRx1+Wd(as#Z&9ouM zYI70ST8lD~!21RhIL!Zv4&gMAwY~uYK`j$!mE9iejMjdU33(kpx zF|ZZ9TWPnx6Wl1{wHym33!uSDwbg=)WTN@sf{pz@7yM&St*_#FHt<=1?3(WK2Gy>j z&(h2`#g;8xX)Rq<3h#W{ZJcCFM|MLVvdy*+Tj{H0xa2o6IM|BFP=~GfWBOv-inrc) zd>cZ#AW?RN>@DVCO8MT_MH8_LZzdKX1KThYd2icU+T*|n&UdmGO&fCbiG_z{?Gp>L z8yc|p7FhP4-pNMJYJY)+(DF3)@|4Xqtz`78l7-qcPI6WI^LN&F>D6RlVE228)$dsQ z$=UbwPq6R)6YRTfdz^gVU7hwl+tGK~5%{3nN1`{>-&W$PJF+9ndFHeuOx=o3)jmnF zc7)bdNRL(!V|^-i#0}UHviCc)BSeEPD{fMH?hI^~R%}|Eek3n);UBeOs$v15gNfjJ zCp4=uh|Z~ekokn?Ne^LjJjZ!j3)p{E_KIji=fZ1jA=)&{XN~Ga&^4m*|X$0@fG}WxQKOTi48-ZqnhJnL}Sall7b%X?_mONHT4Wt&f&BR55N?N}B%D z>?3UtY?^HqtZAUUTIyJgEwO|>O{d@0y=H4-TyQROXP9(<$+fQ*!yEIEKjIO0$*@;- zrco98F2m^F+Q_%sug+JAv7u*Inzk z4{?=!dcB*u84u&;IL6QM#HJ)LCXejTIFkO`GPeo6tTFkBfwcvR1D6zhVvXBsp$Co4 z-@n~>XrDW-x`h~M?|0e3#AAdvF;;g$yKa*4jb#N}7tcNz|1}IN- za`4Ve>Dwjr@nZTqo_VO;(DorYWx&An4Qfv_?d3Rv*c;Y|KB75+5^N3Wfl}nSZI8Aw zo)O<_J!vvL--cYThz~T(_2o1?0KfgZS9!x5-v~4q`v(2x%=&GmKOPL(N{CD|oAXBKc|o!eM_d$=8y^+iVdxb4hkDBdy= znWQ?K&`T*g%TurehSG0_YxPC0nL93Ye(MEZcKmn>at=FXtlrZ*6`V(2M7(he@KxC( zvN|tY=XPh!EIGh;X9Dvx`F55wHr*~;@9CY2D&s(re9*LYIc-f6d}t?g%Cs}^n(OxB zSB*f{pezz)jq)OkZC|rpmfq7lwr#cAGv&QQo&;jOPJ_03Kw~|jwO-I%Z}_Q?IW{i+ zror4vJY!f_-CgO%dOvHx6~Fiaab4S!U7=0HV|~E+t9e<*h5*sm&JBsO`W z$+^ID2k^KLde)f*jknjWxSu`r+OQ|u&PfeD@>EjI{q*BU;O#N&xpC}Ua~pefXw2FI zZ(rbYH@zv{bf*!%gKug`CG79EDLWyw^#+~k79aX_h$r+J@nPm3d_M?;A5V5;`}Ats zN4mh%t7#%<6+X&d1bscdg2n8?f?RH2M_kx_$*$l(p_k5N@944YD<;_P_(9}c;_#Mk zp?w>?q20et3`q{O5CiiQ;$*g5ZG`1>?sxkQD>khfANs`8m$6+>_Nn4LpgRNM`HH2v zmzbsGxM0gXBfJOLeYiV0^bP3Y@#_r#{nK3j1qE(@OGeJh%E2E0P;@6TGb`Ws7%SJi zjg=qn&ZA#}@Q!Wyq5G1Pg8zh$QMqR-PHnEaw{^AbPK{+WhGCES^#<2!-&o!$=K2t{ zuXEx)3bJqD&4KWZ>_z8VQ@MXcdEk+ja$@$d3tf$s`_Cx9`;j)D)pEValN6loNe(u0 zy`Md0;(kA6ML(XU;5SHKSMGo1o^t>C+H%_MTG=v>^LAJ-(Qf4%$%*k*kwr)cH?%0+Rx)3|zkG%JxXHebe!9e(TT>pWxGu-`K1MG2Q z!)A(yZ;)vZjCK3(vEY*rYy_j0X9`1J@PE$C5i7P+#yDWNoi=3g--g|G;QAJOy+4O+ zsXEi}|Ljt&x$ob)1i5FIX_upN&9baA>~97CTE1UUVEA>f^7Kdvn)0{~Z@G-?eogwegt-Uy&|1GK z`_j;ss{`=OC81A-2g0AV=7y$z*lp}2@UwtaO7fDXxDs1eBWHdGjy^=AMW(@7+Md?c(9?_gF8}qk<3Da6yh3dWCie@L zz*|~Nr*(DMt?QZRuHFdmtmFM&*nRH10*@GX-TlZsj}hEUJ66!|Rqo{0X9@!Bd65#F zQ4;X~2;bVj&z!pAkM4l~0{CkxV{5&man7y4=qi%hV4%}p!IxIfoLmLhYR_%JO>LS$ zu6grkW<=Vncg^QKTSj|z_J_WGW=7;2z5jdjeV1SRsjtYSZ``|v^p^Esw&r~3#z(u~ zVh;zMW1&4Yyd=r;w-|q?(6$gs`);o$273b6Wuyk+HjA=kKWJ~Td~Ey6`Hu};{diiO zd5)LxHb2oAdmnXZ52813TfSle_`-&7!e4EkiLQCc3E*lSN#6$6i(T&4Kz?9_i*{=7iuAh! 
zk7#^4h4I)_U_OugDt8mtPf*WzV3@(4pP9fkhxP8NSL@Z(h6<|HX<701DD!Sb`_!jpvKe_Cn zae?b#MyBW8%rMtCxg*yHxo(I%m}tzsGLe1S5^r~}Oxzx~GSR5KGO;LerF49TkEs1V~jC{u|^j1Ii59#tGFk*mbqYKzOjE(K^Do_Q&eE=zwz#+ycYmASv=GITs_}2 zjdyOm+sG^$WYiZ8(mO8fp!m4JU)F)Itg66)tkqMQ<2d}hi05iYCeI8#;~VYWk&E8X ztXku<@2#U9cALt;R~FBlWu5GQwXGT2P}|zTN$mG}Zntfd=lXwD8#NBsSYC4$f|KmR zOamK?cFR7<KEhXjdDK4nmb1nJwf*c2J6fdb-q*?n8 zwhCkYvC4~-apL9C=k@X4z+ZgyU$i3_IO$RSQ@^sn_gV0GXR!3a7g4%~#yczP%ersU zJ$guVk8bX?KD+Ik*xOUIzsZwf=9*W`5baw$5F{7PD%UfcA8bAtiEVH5g4^VWX(m6+ z$eF*0u~@bELcXYG@GrZ56JsycH-hi%YrD=;tIm;DoyzSxW9zedM)AwedW4TJy&m`~ zwm-+JNAkoKKxV*u4d7g|w@f*3%`xSi#arNK@Tfq3md36YAJ}{)zR7(D-g@WS0^^-& zr0E61S*7L&{&3^nqRn-tT(e~Eh=@F{wB(QI(WbjLiw;cs<-MpZJjS)^DxxmYk=B1Z zzcJfk)uTSHGkvF12UU5ka%W7{AYG+;LhznW_l406bA_-_JFnQ@I5PGycDi@b>3 zgfIONeAl6mr5~r#Ki#i&^iOzHAMLW#zcW}voy9nrHDlpo_E$XF|Ej~O8)Lt>+h)U9 z^qgSRv(-=GR{d;t^iwzUPqwv+ttg!N}rb@T+%E`w47+SU1H@#}{Bz@=VK!!QLx8+$Ia-`$3=2BMG zq`$^9Qzlq8(j-%+bkvtf{jq&9Wxb{EmRR~uGSJp@Qz=h4DUx0XF1e&)QWdGl)N^x9 z9Vhrm$4!!sgEq3jYmuejB>!yLEBW^UHkz&9+NewKOYZBNTEjTFls}T2p0e(l`pnXIl97koQ{~gM*!E1Vs`61sPjL4iisF5~$l|wg6Qj8a6+bY&mbLv|t z&*We7#65REJYr5FD1Ur_I5PQ3IY-sB5wh`p%0H_)^h6`EX-0}yYtUGy zlN7`sWWJLJtW;+u@H7&StaCiHq&i*XcAc4V-U7Q`)tPI?2t;T&mly%N{v7&e)=#<4 z`tdUyG`x)enY*ytQN+7`+Cg00iy55Rgne$>_3FFMm{z-k^dX;=WF$5DQxX*yWAX&^ zqm-k0mK6Ck%<*+^%39)%l+{B0HXJ)Ei}UIZiZ49m#k4IKo$QV&S9GU28p&2?c_w^_ z8=V528u9%Sqr1Uxc!|69wB84=%-~vn^7)*9kObZ)fVa!%xNAm&JL2OD^5x^_`T=sq z{Y^9BS%gd|nyr`%^E^}Hf$({oG&2R9Rx&64|1(Dt+m0;iP&=B5vFfZH%lOvW4mWL3 zTZVMdmg$el_dN~Y_q5qh|6lxo=!b2js&}p{So+TO1y4P8Wcw=MtqjH|lE1Y)6JDHSnlj4Nm_+Y% zmg`&L)zn|)_Iu7UH{+u{^tyX}%W$J?$62njo#WhPA7y*WKE5oj>@WD1_gtM2+B>CN zXg}vleR@M;=)iZ3a5nv%#9WGGo{!v?MXk)Km~;Y9)>ggYb3tp;zd8$AYg*e#DF-fj zWU(uwbt-f_xM5@d#iXHYrxj#YR)&YIEiQPfVR8QH+?zlevG%%x0QdcCuP<1}{WG|i zPa3th#C(7B+R}o0?w`rMD@bRroo&89X6+3H&vXB;T|U0h_GV}@B|Y$$1qX8@{SvO{ zY|I}|8bZIGoU=H8DA$*ehGBOs<-hqI_((yHw#i<;XQOZYJptd4l-!K*_bm45xnzc9 zH)H$uTJ}^r9iA8dZMfPnO<~+8cxqg9WI1zvC;Q)Q7kaSccFjGyUC0IL>6YOEXeS5S z$%S^tLpv8kJC{H^mqI(2K|2$moy(yelh(|+-czMJjXOkl+J8}WCzy!tHbUz;%=0!g zR@+S4LfT4-z=^Y-ExKEceK>6N#{A1kwQFaZbhHL~5}j>Pl zUWwp&qc10Je1@J^@_aM)sZBp_Z%)SMs=2-_WaAL%Nb?1P(_z>|XviF2nzZ^25PiVe<1Afrz7UB}V&$&1weEoxo;M(u&+2qH!G$AXo zrsW)ixf6Uc&i*`>6W-1`>|yvgud)*aRF8H_k zi+UUVn{>A6J6!)@_nZydbK{ru`t3*lO+KQnzsL!H0t_xM?^UydzJJHZp7)M6f3|yk zX#34ZXp?znowL0?Xm2mhe;Z)7c_(}D2*yX04ID!AzQ6xkPh#+w?JQLowGcq5Z|i(yFDpAgLB_*SHu|yr@AXHYVh?58q|}O z!kSHnuX5R%8N>Gfm~?Xz=K<2zrL?)+oz%Jo z8C8$XwiSQ+V#<+>1^3T8dAb=~Dn4yXas;QchY#LafLxh`e91@7)4q0?(TIe9}NgnM(9_>aRZG{*1ViRp15RplrA(K7@ zE+OibTpB_AwJDdLNeF!iO-U|gB185=Bi^+BrcCM?kx56iKSJ|;ESc0dDo6HH#vbHD zIkdSCd9;bXH=~=Er5?Mi&BXpW<n5qq{crg+{l7 zN5y_griP*S5ox`HgSbC})WW`2E%4uGrS4E7H1W9P1u(w=Is7Zn_xJyqT>D+HNS(;@~CS8 zJoPbcX(M)H6YF+Ag3nZ^7k-k@x`FfI-X{MTe%M4jg<{1yP=ftF`?7C;aZEGFJ-IzBR;QS09q^xEuDE;PQBIIv=p=u>CS7?dH} ze~NG7`QF1f&+~r`|9iTOaf&DKq^1P*tU^5n|`9jz}jBFhi+gLE2gkLohY2%~>GD(AD(+>pw$+po5N-ytgk9mook z*C_vJc~J=87r_6MkOTS1gFNH~`$i49yzCG$u^Pu6FOGM&B`Xv^CpjUzWj``P@w@*4 z46mUa(a2QVwGw$YfiawElbA4)O%ey+_KWiWUam9XTgAUFL&oTQInLtn#Xa$kFtkK;r#)$Me$LR1p|Mxc+q{^?jVNB2#@IZ#NH)CVB0Qbe7-mR;#!R2!`@MDs%S9{PklW$J#8~nwALoYVN z4{IosZ~Cb1L1c#JMpVAO^V25zN7SwcaI}NAwxYAuwpQxSCGIzu`I1~$#YNMU$5&jG zOZ>8VFVVW^tGH;Q%N0EXNAbVcr1cH9B8$F%@X(97)*MjoJ;niSsz{75ZPZ?@5xgDe z9GR1xBd9Pcm_x4!cQ>y4SB*EcPH6{toya(uIh6KwzhE5a@Dc2U))%ri;aSq}Ngt3t zhW4>R>--1tq4Le7>yG@!`Mt`Y0Ig(zo8`I8^(8dc&luJ?EB&E23wu8FR$@>u=`% zo%}c4_k_#2cPsB0gYOAHl)kp`(e!nNKTm(V@bUED7pmSDxVMu3s{bXPy~cgj|6kmH zkN;DfE5iTH^FMQcH~02&Pi^{)dxyA}o=%(7-!j|QH=TB-N7^=q`xo$E^`6J`i@2|L 
zUdjEd_^-BI!}EXU{>|LGm3wOYo!l$uURPVcK5&(cDeHZh-n(d`i+;aOG=N@fpS444 zFC%{7uT$H*+WPh8HCzAMx>x$v*1fTM*Vehe#s0EswZq)STq|c`ci9zPcE#VzUON@K z*7=nmGuPivIxTnVpxxwqNWHioOS+o0pa1>Hze)N5=>Y$~LH1?QP4BmHjHaRkmu>ZpEIp?N;%sY**W6l?^Lh;?nriT@f}0SA;dT zIKzGYzs}9!nzfw9X}>GXBC%GXVL_!)t#y*Q{4XZW^+oPI!MjiO3N%!4Zz<`iUWU2e z|7q5KzQO-(q^G|TxwnRQSF79}luKGoxo=bM8kNicViKNL^WJ9O-KuiAx0JM%a(D6W zhbou<+ejZ$uI?!&p>Yv=u`{O0P6Vc7STpas`!DO5>k^K-1M721!p-?4;p9S+a1kBf zFb-fp9?LD{_DBFfO`$3 zdhS2L{RZy8srR8rKj}^Gui?JvdZ*s!UIS?-_eIm9SCdZT@lms0G}~4FmonBxh)#P# zk3T03jHOGh<#&wfo!8O+-VJ>+S83W<(4**5G+X+nwHhb>X=r;iI!>o!vk7H^Ap_RF~kJ zPke`Pqj(z=H^-=3?`yre$(zJ;EW0N#N&Gqg9)oLR(p>mzK^A@~@&@v?yt9*cgrl{* zvy*pBobb%V1NmjdO;s7)%-FA;*BxSRkQf_yvaEzSBk|<7w7)@Q?rdoXb7t$TIRvK- za1MMMYXdm(J6f?lF??+6l^M`VX^v6tcIX-D6X|m4ZNW%7N4n-J(ygQ~@~ue=R$YJV zy1z)fzsR?L&pwNW>+#o!<`<%;)Rw8P%J3KIkI5I7oT?Zd$hYk^+eWJxZOJU=ub<+% zZKK(C*;70>d4}h*q3X9EwxK2siP%sJi3f_cp`0}B!(R|f)6V}kZQ1>leiB{Vv~21g zi}pKvN3i-b%K0+N__Ep}+>LVRCz~Elq>cJEs-I+gAm>ihR@wcaMPYao#zrMrWs?!_)zcxFWBD!x9}N42m6-6=T}tACxaUZ`~}wvNM3SBl<`)vxMTRL6b= z-W80$ZfzFM*AlxeK5Bz5PPPvwjh&z#lb)J|9<|5%ZQ5s*vAz#n+j`1L18p7H)JNzi zi!N-K+xp4mr%s;D=;Z#fzA<_36uvu^x=bE)*w@FaTkl8h>rV8EtvAZFA0c|90Y86- zvd!+JL&it@;J9}LtQ_|{diRL-ALreZDc7VsOSWljsCq=hrfln=A0}LF+17y`kjz*7 zLo8exh_#IN%a)aa2^k&TJEHx^dAFnQJGyrS{Ekzm`dAN41cyo1*sQbfkAMg8@(!Mu zF`xA>b8{l&G2$Qo>tth*6~|*ZekZ$zqL`S7Gl^H&sJI4n#s};J_q5>&{+852N_2VH zSH;7=N1os;(p-{{_gC?L8RPOLagFuq>=n_Iy&}#hzmR+ldsJM(dW*>y_nmzk`Ca5& z*|VaW=a2DxCHZUQ3Eya}U(fS*c^)P|NIqg%WBqCI-r2n`@y`Ax`9G0Q9Nk!dWqjY+ zS6$L~_8sJRlM^FR{|L{2#`9m2uOffoqQ?3SJb$0(pOGITe`u7EuRZA=wep`^`Qujp zb#Fhs(%!uL&}1KRNy3Tr$r9G=G?IwDKj671a9}9u64G^~dr1|ejFhSh;`FRsWhi!@ zJ%8F==!$IdwB(k-oG|Eg40>#v&lsR_whW) z^9J(g$VCHh@w|!WZRESiIiu6W*#awn{$k=}61=nfUkZJZ{}cIt$b0{PjrD)z`Ty|zAo(HkSQ;>4i2S!;KQMZLD8Z)>yy7`v2-JjrG5p(^&ti_5W@5zGt8J_D0t2h(AYz zPqk??ZQ4YecG0G|UXAsqf1|N}tMz{ZZNd*$-(vk=K%1VWO*^gsqTvQ;o^md1Cb{Dq zFC0QLs56~DUCi~hq@R$UA;nquMNj5;$eCu68`(01lzFC+G6vtz#av%Y`U&Y7(opNZ z;tLy*SsBQgveSSq{SYt3;%0CZ7jaQsl)+OrE;`D;D1M^w#m{HLHv*Sq!dLJUA9V&l z!O48y37j&tj#=|Ul(P`q$A-gZWJ!b;-O>Jwe>>{8eb4T{O_w&EiazbWX)UJuCcf7D zPTDbP!O<5Z!Dx^l&&0LvTYbn(Fv_$ZBttR<{!p9M)+o;aH@i*F+-~1$e3|c7G6$_Q zVl0|AB>ayi6eW-(g%M{U8(V2Gx{JgF7UAGTo{7~TQ}IYfvwl{o#3yv-C9d3 z{WcoiFW8TUChdMnzeW4g0WB_po?>;KFB?a=)O_tzS24A zH48|k=)DD`c>FSH(AObwjc$(04%>E@4BY`=Ud(#DGkI?#?@4FG!qxt7%Yy*A;R=T? 
zk<6b6pUOsz$d8U?-c-IT<-5*gUd3o5MZRJ7%x$k_+$7mINplX+nfSvg`y%ajMcdtG z$py(Pwa;!(k%@nFSme2>!=@rT6o(Kiw<_X|vIRL2xwX)$PxaWakv_9w(NQk=1Ss3C zyV9!LmQPcOjhA1_DW57B<1NVg`^cw?UHFQucM8Z1iZF|9K?lJ$ZarY+6^$``3Fdfnwfme|4-3Fylpt714T)t~C)| zYfZlDG@v=huK4$^^+$h?bzP?tC)^eP-nFjotG-_GROtH4h<~4-#F*SM7a41t=>Oj$ z{@spwHy$P~oy3?b67P;5E0_PpBxk&P75|r#>{$0Fc<&AVZzDNl-Hk^mkF=Wd%s6+I z$NyrIGtRw=|4T`BjQeJl$Nz04JI4KH%?CN+^fjK(T6EYCK53-(Pr-Kj7jx}}q#E%v8-u3>m*94p_d}QP7WIkE^(Z=}wRKC|bbr_4W2th@QX zL*{p;yJK~;kG=n-n|-nJ-o}%qn~$*dkoQse$LOH1%Y74G4td{MyG{PEI^m>`cFPz1OjxGV#y8nZkdA>>=A; zu;N=ggQvc$x8zVqy2PnBw64jH$M#wA*tXp%yL@+-uLK< z`{&!Pjriw(gnerJ###%NKt_`|a6&O6UC0ml>;Ij>W$s zzIc-cPlE<6pNr;Do&Na-%Rj%c6aV~=@Ciq0i}{i$>>Tq#UH*9DY&wC5Ned3&%*puU zP244cJ8O&!22!ZrceTJwET*v9zl^PT>B=9Z>t zAB%Cv_Sa83L4SR8j;hN)AC=qA{p)Oh@7FEA@kiMB`bU3m;`guQpO4kolKrtdd!gmGpA;+qow~ZqPjAL8+Ocg%=~5f6 zmM(2`_=1kmqw@XO^ltl7{sH~>|@v0ue1I4_M8@e&o2Lc z*BNqO_p|xB{P$h{&#$XbOFFvCf8RyFe-HY#{js*+SiWN0Zyf70?m9n?b9IRg>pGYA ztAF0_snB)T`uV@@`uXS>JjQ;=~tpS``lUU4njyVbeIzRX%Nbe{9>s^2ce-eLZKn+iNG+9cRtG z(;u)9AE#*5rb`=-wtvN@$A9$aN9h9Fzw59moafEO>VjjfuaEj9tav-ye{bvUNtW+^ zbgUk5$QJFP9mPS6e)_uHH(}=Vf!Q#Q>5txXu3?RhkJEb1C`@gAYVm^f={l$1-U-A1 zO@4dH30o&xu+d&VNBHd>>+fUrUs)$>@0~s~*)6u7v*m{EN9;_$b-nj@^3N--(bgN5 zPgHtir^7ej(3x-E5kukj z*_QJSmTz7<_qaZ2hfIvZ@GEf7gwq%C%?m!Z%(mcT%Iq$`eA&)%xn&<^k1zZ9vWv_9 zQg}&d&()WP_D;DhwEy}Ep-*qPJaphYfiQmf>e#c*w9fvo;h*<)vIl;wuJB#gwg0`& z{X38S&o4@EDx7^$<1Fp(brt`04&OEDzbpJ_?sx5f-?jgJ7k&J`uxp*?-(N;Zk9Ya+ zyZrZE{`;==_jVjbmt6j0a=FWYfBnDCWzTtgpTCN);Qsfqe)=4TpT6yk$eOo>C+MfY z#ag=`yKimR-uFhe=6}9SANEDXdxaEsJ0(SpP{PD4V_cr2zcY5OMmKq7cSYJvT zXR>s(Rz9lNET7pCaKuir{jRpJurvAI_1@p!|1P~S3*BVOnMBUw>cr13yRtKVV4ekT zoz>&i3-+~*cN@R|8~yLL9B;Gy?2{aR_9$#SlHvd0YvLu7JA_4NGTDxO=voIK^|v2- z-LroC5O3Lm8xljGUY``&KP5S|_v)0;oT%b7_jcnyUvkG)dk>7?ux*GD`oqU_He{z5?VEz0P;;6Q)|}HB2XZ&G7)G!q*$6kd z17UCa-GfpMqvx{fxhoUrB-iX19tcnLB?fm;ZsMFCHIJrp-Gh6`l}#CeuwBQx5ys@u z!nrF)#2f3qF5~_T;`~SS%e`Yq4J7GZ3xrc>N889icxU#dp&i_xF(;{})ss|rO`5mq&a{-^ zK2KU*%eQDVZCOZM$TU}lefu)vFAn z{R5A;ZXt6JtKyBDk~s$`f%eqxx%=T>MoQCs%9{XOdb?b~8F48!R~z1-amTd>ZcmF3 zF57$e!=Ka7c*=bv&Zz#gr*Ckh;cMM=;Lzl^@9I(WTiTFSdi?>Hw`frFJ;s5o(%}bI z0mtRou&Yip+NT-D-pd~Z^1V3Gmz)= zdITEA|EYNP#ebSU`%j+K;5`EZ4NtzarQp}^Y%Q4ABXXALwsfPtmO4J7U#r~S)*|Zp z!=XccRF>N0F1h}|>$GdIciNz7=T957hIYMe!XmZRMgQ0F?oQx)JNf)vqq?P!5f+_& z%c!_gFx=-E9}+!@hVJ~Ve1qt-+>_Y)JN6nY_ZT~-Qbvns05CJ|f7d;rDRg&o%`)h4 zJM_~Z`uIg^ub@p+BQ1K0(9(dTXlWDhjnGrSC_SleCry1s9W%N?OR4+ECDBLG(sF3& zEpWFJTDp%qKN?_!*UlMGGYR@m;r=LJN-&ZC4SLosxhV}Adefa2^tuK#wLo_*(32N> zx~be-WAcJ4Ef}H?BYf}>bT?!7oDJKdFF*9P{VXHA85(Qp1JC$+1-HUKf>VRb9bDi` z4c-T>!PnJ`p|!1^#5&(z;AGKSJngN3*0zregonG5nzqAtGoMMSd7iowkqKj6-Gkd- zza-?pB&nv4J2ChVZ zd>%SXpf5S4*O_!!2_5E?o_=64bQpw2Hui|nVLI&?NEuJN(}O=r#fCM~gAb*qLyHN} z;uR(>>K)PIhwxIZD?K2rQT_i(D+xIY!Xw!8;3(M!?rcLbpM zW$*zqb3HP8$C94NNn~9{qH$9m*ES#RMaF-V_O*0_&Y;sI>f1XY5Ef0_GXENQ%IqiQ zP4ReJOBlm$r|v19#2uL?Z}o5L=SsYPoTq=&b_Z__gq9ym?E`Q1Hsy5_u#l{N>+b$F z^N`VR-8~>Gqc?f_*EL6ZtJe{{wbtFgX#)H-0e(sZPxr$ot-zxMxw{iS3eM?Y6HH5n zj|S9CgpX4AUux!ohevo);GzB|54~yeP(hT3Qi2W8^)icxCcr~&@K6Ih)CLc=M|r4E zu-TJh@=z)CFF7;O=L=$US7*aR&Cq-qJmjWrXTdvd@Xk}N#HKcQXVNQ)Chz!CeZi%! 
z)L3k^ zWM*SYvB^I<@K0mOkOLQ3{BsEUmK^ud7V*y(%AJ?$2`)5z!G*M8qv2_NqGFQCKYB;} zvjzEZqstfcoOYOh(s@6A(P94aJe=Loz&u8=#XoJHL3M|WK~eruS`cufW?dUVf>2&1$ThOH+**=D66_%|aS)K(QN{(;wq}O$$zq^p%Z~OWM zWhazFcU$OV8+Jk*^_D<83GRq}FrR*RgHA_WoKn*p`(Oq%wrS>!16O;z!8dvDb9hZQ zLX~ABEEa8HuPubOmV(3Pl50)cYJ|3$ONJeoY1s%H9keAI;Sx`La2<3qDGj=E$G2{d zp{ozD5k7&go^;UFyB_$U|6v>9$Dth8>+AI_Qb3-jd-C?ZKAbjV-+y z+@Zgp_qyV$Hx0r5Juv=}=4Xu1yOtg8!+!Q-KT94b)9)GR^6kigMChz4B@vwVtl5ps zyczwtUGg70wGBIYdzum63Jq*e4urQ%$qnsBR<%H%8PNL~*cqx*c4#toXbE%`c_+X( zf$%S=XZw>EhbGMN*6e%m(ok`VH+XYuQgEL;Nph#XjkDFH3$|dx9D=s5qOA8A0|b$= zZ$hVu$XNq^a?#e!o<63{vITjhvOmBM{?OC2E)!dLAGU?wZ-ExQv>^%FZpkph8L|hE zH_5)k@_ zMMibeG@}|iefSb|&FLiRwU5W6J3Zguzsuzf%3dvnu4OYom+Qx%_p>t5yRM|x)v|?+ zq}FAt?_V@CQD;RbwGMB*f6*$JaZ?%dsDo%9xC|g(LsJ>ujrL-XF*(=c2~JCO2e%lW zU@`3CEspZX+|nTjq(7zrH;o}=LrZQvMc(?zC7~V2-3iz%8lOpK zdePq+%T2&$N#1^F^7pYtgrkzPds%#s!2jyg2 zwufkP--8Ju>HHbcViB~n2|a1kqD_MukEv{x=i~?JdD+gb&}Av#jwtU_GYcMQ0k;du z=UenP0-Y^-Gx$GW^fs?s=t1c1Cg=@){P1{aZKy?Wn=E=;06j?dNq-lU1E=-7u(`$z z241dytyA|Onw;(Nw2pXv-l8)_Z=TfFVXr^1sNAAAjmNym;&fO&FJt%%VzewTpifUS0Jwok=q5x?@7q_Lm zSn_$ILq2I}{nzJ)4p6RW^&asCb&fzj_f7K#AIF|u2@PD}K?gu*lF^g!1#cg3 zgvBGB$>(P9zZt&Jm~xA!Z(T$_4~)v^mB>`dXUXP7$>!{cY#xbhW?b=K_0&0^`I;TH zX(DoXJ~}Cx_RJ7}z!#&D$Ips4$i3ugJU^Q;!R_$K&x(!e!SF^ReJMpxOI|S6S$_fJ z_c|gIPvmurdugH@aif=6tGm4%4}}t{u2;WlS;T;lpw@md?k_jnH|OMd$6in6b2fOp0JJEr;9K#&`xC#DvoH~}xeB9=b?(%zP z4sKE%#2*pt=LUyAI(n}R{W_~-=yT|*tSdvS(Mk6d-}RZUarr|SID#Jh9G>a1;v#=+ zUUkvuDPJ)xJa{2-G>*B%C8P%T37|Aw&+(do6v*tGWisrW1U2oA%O@qM=&EAeJ z{L#Encr*<>nvNdJK%Zrz*Rs%WV9~j4tSI_Saovyb`CswnUO1jc{02;VjQvb&z+%L8 zSJe70VpPR<->;o($9Rt>6W?Wxt?=ng*51}i*Md{Ckh4PUW&w7`#j0z4cU(}6cP;Nw zjMt9u@>{W67q{*N>(+t|x?W|RJ%;TShF!+Im=f;w!xM_V)uzG2;M((Tp+0n@bYa>p zHdd{J-)z0B|FL!AVPX(xgTx$adl(VLZC$M0(2AeqF{`Z)dDbhu_ak^*v0LF)_g>i( z4?3~WJai&BIE}U{hE@4_^JL*w`3-gNf?dFey_*Q;&pvCk03lGBUbx$W-vp82b z(uiEG-vN$Ll2zA%HgHWXy8bLar;A%93mLTaDEj>vxVLts84-4sTnJma*tIv<;5sic zFHc&7X=B$kuxn`Lb6|F;Yik}4HVMPZuAS+yy3@Oir~K@$8N?%ASlwxmk)0}h;vo+9 z5;mlXK0NNo*IUTrjBdvA7kdzI_NF!zBfm!#1BIWv&y2&hGp}w^8zf(fvHj9#3=ANX zN5K}Wy`F|`#2JnnvETZ?+UIF-wjUxW_-GOy>5 zJ24}R(BZ2YlXK|sj1%Z^p1<1B;o|*u;(7FT0Di5Jj+WhT#plx7@oi8}W)#;CgTH0> zrE44BxW!VOLp*KEHGCKX8;3H|LSw1-YjL)}5k6hYwXeYkvi*(luQ0Iq`&0OP2z;&h z@Mti=i}HEU|4WHg9e{_2VSkGq`!8NQ3$JCh-~i96Zs34pzV2(y$EP>t5kHXbmdqBX z8=+Si%OoQqG!i@Atb7!F+0JN_`WZTZm9G<+paWR6eG4Wq;K>i+$@xZ3!y4rHEIyN$ z_H~~fibjpMLBA4PXP8Cy&9SXTdXy zhsR@rTjF?fI6Ns#PykQ5nBW)~qS@$V^W=k1cvn?{JqjMbC&rWHFKwRm8d2dV$^9^o zwe6@IeKMp-TbH3Mpo^lH*(u-iuS?C7o z$s72c65!myB+Irn(;b_D9r%oQHewruCuI+2f{h#D?WH{DPU5q*$cJo!Vi(fcl84EU z8_zvY+@cYC5kfW=#lF`g`-|`1fAseUR`vd4iF#B?0J7Js|=rzA5H#8L=dky+=zOPL~ zuzdN-0qF4gzU+qS<gaGU@)$;5!sy#Da@UjRgr7*Ub$BB2SLyH!=m31eFgpDA z=1i=AQs|V`xkV$FAhT|#>tBhc~M@)Kwe&SFwz?2zS-zt%gY1AR+YCo_V_eg zUVM(cc##*`&1~eQEAdBHhpYXL4u4LuG2vW|t@uj(<-dg9_M^8ei9fVQZ@auJ-p`|d zWK)$JQ*5dOzg@ll2r{D_U2QMFl_wsC$6Xv#fo{LZx9vj}==5sF=u_cxN2gcE>2&dK zWem%-)H#OD+qdL2TJx}Uym(kS&I+%qaZG#=eEtIqd&6@T+8L9sM~P{4M(i z&Zv4Ge&`KvRx_@O5oGZU@lXRi^ddaCp7A}hZ79YnjsXo%nU%)pD;j>D7!7<4FF!)n?xyTp$t^bFCgIqrQ!k4_Z0b6(meLZ9O z<*~T-&HK>TF_i7Zz`h`MJJm?A`K;)PE~_fj)6mOL*nBpF`6bfJ_jrt`;&?^$*L+`> z7QNi;O9Yc=fXOq_wOQ!gY;q~gZ3*>QaTrVjqAB?gfBQ}3$msF#>i?u8#_WLAC*@A{=X zGW?a-VlR~+7Jjeq4}M2yuR{h-;YWNt+-qF}mOl-@jzbq!kaMVgJHeWXPVxtkfgUkG zK)9fGf)V)y-%fbv7=1aR`BOB zJfm@p5m~exOh+5i`MpT}{&{o0jrVzeZ*oT0{xp5nX`cHr@weXK{?oj#j(22neiw5# zvN+ctdWJlE8}cu*O9uayy!x1x=k-_0?v-Fi6*EyDMmcqBVTTKn&#Q?OYW_w^fpklU z3x5MsYwUI*H>c^x3Sx8p5O zWiHPxdG%4t@5xxDcy%e5S2BGRncfDjnwmwv2b=J);YkgUXFpG#@OQo`;435NBzp9B z$fsg9&B&Ut)61F3p~2LDy5lb&m5kyqS7URPXFq2c1G{>B4Wk&R;mq}T8LZWnnCC|H 
GIT binary patch (base85-encoded payload; not human-readable, contents omitted)
zgXIg0mwy0XYwpXQ|C%eoK4-L#tSQ4i4D#9R?FuD3d8qt zyr(B|WyRju3;tqk`Vr5$4T{J7FMOfT;P8>T6CJAlKwf&2k5af*0rgc1uPUIvO5uS8 zwEI$cU;*vE6dqUrmoJ3}7Qp38;eiEk`BHdb0bIT`2bqiPfjl4C3kjDmEkMHMOZy?= z@}&cjaQV_9NVt6IP^8V}H9jvM|D!xtYt@dV>D4>WTD`O`aai@5^t+Z4r#&zZZZyn_ zYZ}XXiBE`CimR8xq4h27TyUiyetiXaQ-EJ@Vj;8#m9vRDf01?`ZOfX#J38y&WahD# zbh8G35oODk*tRT%Hs!*WjifDZbh#SlAem3maDdoPf4FOR+U{`L?l{ZUkjFfrT$d*_ zoZp9F!E3bVh0OE#m^Py}c{%&rT*LkVU3t%F`u1|msu>MdF2{!+P5#*%yk<0cUrydf z^F6KQ#D7NfeXQlge@63ttmVXiM)Q5F<-~s!?T&-wQQCjQ43xz z07oCGy?@?Z`uPI5@NZW~j`QjBnJ!<$3VsWi%es_0UO08&k+I0nsEb$7vxmC=9dVTl z|1o{b-(BZ~E=ugm{#(>NWxkde=j$ok&^!3XEAc4vS-nC z$~Z{F1Jo%o%7*>aX&Ud;7^i%{Lh7`;sZ)2T$mI_0q)uO@PRps&kEqiRdEaHM+fu!1 zJoFmslr?-c*HEVqQK#2Xrw>u5*HEVqQKvJ5so|v`5-9i6*@DYAf z+-K7l-Rv*sYVC_I>dQlY$>&pFT;M*tDM)=W7kl#*-uHcC-Y-&LzqLArKF1ek5BHkS z@P((KQ(_Iz#qpp2Oz9&$E9V^D3U5Q=00{eD3d-`4$KI-w9{l1YWw0i^uPHguE zyTh`3ANKTwqwSvfLbT0m&)%uLC$e|4b$i)E)UkCT%IVm-Pq1?vux+1U|31ICkhwW+ zcJ34GTpt&>j14^ENp1K9Tld#v*t(I}x<1&tk=VLE*t(I}x<1&tk=VLE*t(I}x<1&t zk=VLE*t(I}x<1&tk=VLE*t(I}x<1&tk=VLE*t(I}x<1&tk=VLE*t(I}x<1&tk=VLE z*t(I}x<1&tk=VLECt>Tf$Bk^=xOQyarYKt{8z0u&Ww3&!*q30b5hCqZ_a*hu|oOxL(70{zKSQFt%YPeR3we=pF3oKd`65+1ov= zyYNKpX$$tWkB9Z5a3a~!53#5E&jKseU~V6)bEpn`8b=>Ko;_v!!1Up4#(smW9dPXF zDD0_=KHLL)Is^PlqYuvkzhdp_|91>~+LZ1c%sSJ-BV289wh$aF7JE+h|L9w54-6e-vAt zX>4_-ZL8UjoBaIH*y?k>WUDi=-9r*HLtTumKBv`IufSFd``_VrJX`Hx|2Elb?H_fD zw))9q*y^%R+I&yN>0{f=soH9_mos9gX%EM;)3VV%a943xahE!9WiH(1bhd>Vv<2~l zb+iTXf?xbk;Q_RTQ{e&H@1i{(Ks?UGUNxV%J)e6ooS+)qZqM(QUl{8@JG>y)4=;yj z7(bl)a{O@3u{smJw$@}ueC!i(4SgTd4F~bSMdIKj9pEPM_}B@pdu?g$L6g09$n5pY zJg7;`Td2aGXy4yd`#U4jkuaNB^)hj$TbQq)wU6>0itxoW&gzFV%*NNrCZ?2+zSH2x zngg!y(<-*z|1S8c<*wA+b@>+C@S#(9Zh@87q;H}3oG|h50LGDS8yO6L+no}6@jrv% z73`CeuUO_pvK9870%YRe=>=#&=dLDdQg6zhCQzut^%9F!OKNi+?X1x{5karp_71TxL<{n!BKS z^cF_u2>2P_AWmZTi_{*s3z(0yi2V3&ns8Y8Sjt}c3)!z$W3LnF3%+j$!>NpcZ@e%V zUOLnYSMzN)y{DOc=)J}0TJO-ca?2X5IA9WUA8Xm?R%z6cb_sj5F(=4;Pg(C+!aHr(pI2>RM^_mnf@2xIrTpm{%p^pU_kBoGup{VnS~s841Hc24DU=U z3Jjs0cey4QPN4m}l7|NlFlXbe)j3%?-M^L98(r)!9V_Ectvzzg)XpOD^f z_$|!2z@#~zIqN@R4!h>5&m`^WIqXVvW@cX2ubI=X`{TLq;av0EZ{XS!d}H-#a3|UZ ztNgjli_i1sW{u}RMWpTKJePUzn*XjeH8*}Z=enQE{ph*zi@Y5JPG3Ep7&h}HFzj;j z_9YBsj*Rk>3ohsJ4VBMn3p>1pfpt1+W$C#8SJs3y#Ml&F zmrco|y(h5-A%`(}*^ON4*6I)p$ga34xAGaxQU8tf*EsYzz8C9W=(Bf&1M1sacbuD< zlXX7*{VDEQU_?3R+LOV>wcyJ+X1xnzQGw&zr;e@m3A+=SBY+R)*eBJY>}?UYN%O0s z?Nc@RmVK(jo)vl-8zkSSYmLq##sy{XiizzO)Bk1f+BoOwv3Cv3Vb$IdCu8qU#5pHn zyS_H(+yG}x)t;}u(2H=#!PvY*oXh4v%{K-N-@G=?@Xh6;bDx(jP3g;;*+VS@27;yZ9X{zoi)G#$HHxuzS5ojFW`}T&mfO1$vOFcbvz!K z!d|NCQ(u=y4&E9J_oN>UpkMu@YhmCQBMZX=>05X6-ITdp7oR*6Jn|*_?CIu^E9n2H z*CxUtOXI`K!IEg)K3zOA1)uL<%_E1$;*xwy;S&5pJaQn|Bp!Ks^=Jhc=lqfmzd3jG z7%u)zaI@Ctjk3++jnQ@d&GUzCMz6C!I5rNa*f@OUY37JNa9BL?jBvy=P>;g%tG)(D z%r$nRV`w{Lh)RR~t_?>#6a4VlHs^mfH@xa=aKlfpekD8%o3ezplKOVK!~aZ-k(gr{ zF~u@sOB$n{wEK1^-e~qb&>Yj&cw;$po$YwzRm{yZ+z*_IjdwV)#{SqEC*H_BA`|b3 z#2arR-k2O3=iW9{cItX}?Ilx^B3#4aev9}9pkj@WXO>^`LJaq#Z&xcuvNKIGIrPz^ z(~-W=u3)el4qg2ObDE#ZbXPwe^i*pNqw;qdF)qas%ZVRqoL{j+D~iLt4&RREp6ct> za1y<14exQ%?ay~*oLFG_p@^Rv&C}KsKi7Zsy!MP<)FHyjLii2FzrbIdfKH|J0^(k8 zvL-MG9;7w)@;RPD#~M@jyZH7FF|h?24~9$1=U7C30(OkA+U8{0HYdw<)n3y5rpBRq zujV1^T}pE-oJ-Hl;J?J(<^*ldW&X#t9R9b^D!;_U?Sj-VYXhVAnO8bDk>4cZc5t>i z;%p|LwqI1lH?E4w=a^%}?$*P(O3>{Re3`cI=+&8T4mj;H`Mj^T%QCe~+T%pp<0N#Y z7`Wk0$7qjxyV|^onEJ8Xqp!6+R^tab@wfiPiobOX9VDN-f{MSTgg&-vFZnQ|C@_%r zbvNzv9@^)Oq~U=9#NI~p8{2)LiD~uk`mL-1#NDP4fBRn2fUL7Jzm?TL>04Rfv*T~y z>)Jo-8qRgUKljr!` z!4$_+yiPGV?Hg~_5fPjFJ?FYVllzLz>E5ro_GCs<;0gNS(`S%3sn4|a>6Fk%nUTKT zUZ1XLr%zwx^ywJ>qkdg?2KbL~>h$u94u)Tx2y@2nj$qE2vnKz{TFM>NPp} 
z#wDZsQ#5B;Xmb|jxi|k^!u!NKqyxrjThtdOoFaE|(l{0FQhzesg?LSD`#ojua_%?d zF8!$20o1GP=f~X&10T^|#9=gkxXTx85r;Vw++`X4;sx4}#tp?`tnU2{*I3IK<0d#n z8k|RCks5Q<7^1#iBd)QFvBdqH>;78qYi!Yc*M@7q@75vk6n*UU#w*V3*x@nzemnk1 zd)(zaPT#k=i^c%Y?-~sBaQeTmFi=OGp5FMH6?l^R6Rv6eLi#SxIX`Pe)EMkf;C|6O zaXe$IKV=+79I=S{)mW>>WQKE1dyGBta$eTsz7FhNR({FJVAJ+pw*O?0^W{-q;ZqOt za;cr49=yXD6TUPiKN{P;jImwiCzt$0kL}K2-?PWb6POlwf^s@o*PFfcRbO1M&I0QS zPcyd(fLG!-XN22;eP@E(Xgn_(i=(-Xr8TtRt;XxbZG^{Xg5R77*Z-$-9F5f-i{ogm z65qHo`wZnH#TUcGUy}Hh@$-Fp=YzAE@8yn*8=1#XYx4FnkA6RX`*XIRtFcGTIo5t_ z*)CVpcCbMl$3r|Nk8dpfgx_VZ4x!7~1M5<6htTD$Te*idO?P`eq3Kq9)8qXv|N9ff z(LB8SBlf$BXD#Ea_XbOU^M3c6pMF2@=CyAG(5%n3VnAzo^=PW&Yd2Z>6MR}n>BL?^GC?r zhqc)+zcwf8dav`dKD4Y=;-%x!Q6cY8o3mKMQR^AA^;mlAZR@Szl=WsAy(tFv@6ns< zD|*X2LwakVpAJK(PQ2+p>ZOXd#CTHWMC4w5U!Jw``&hfNdiL}{5p^}5x+)rym-Rti z_RDKni`jau7uWdq(LV5O18cCViSdl1uFQBi=Vh#`Q=1&ecx-gtoj4D!)0_wED|z^z z-|u>koI{^`lD?O)^~uxQ@91CA{m%47>iNkbJ^ml)cc-GaQ|x!&KyN2*^T+6S>SK1l zGyRZyx_(Iae{H|}+IjeDzteXFgg*`VXKvy`Rt_AD<6b$;%|SXiVd6;Oc<(6tR|Y{AJLSmjJcsx1B!mW6%1u-URfr>`uo*0Kb?W?5S6gROadGO6?;nKU(vUsVny56hBF2PeSrT3~JQ*$!1?ScWN*&0$u*Z<^tn6 zE|RUGy~VNqfpLS%G1Nyr^|7D5T(w^*>ocp|_fHS>cqEE1z$4;TXTN;@gHhM0n>e?% zDmwihJQMv6_Bwqz4L?{MLpJKI7>=Rx_WDNUr7fLud0qdu^8S1K=9h8%YxT`uJx#wf z{gXEF@9Uc37Qr}cRwEJc>&-hZ`U_Piw{3(u<>xr2Qmx|3FS()L899Z9cCXe;X&m1Zt!s=a@4`&Z|= z?#U~f)RbR1$ywW>eJo^md$Ipt;*r9Kn8RB+L30?$OW6#oQvNA+>A+s>Z8Lh&oXBK! zA^Tt6Ay|D^N@4Z=jIY1id3yEQ^McjZ;e|Kn@+*(Cs)NXJ=GawTj$CAp-PN_o9p>0m zJ(IOm_(Isdq|ht8XCk(xi1k5Zk0cCo(mz(3GkH!NVjO!`Xr8#{m0p3btvr@dPsz*= z(D;oh59JER1*=OLW4|lq*!fhNMOISCSrar7T@ouFr2n?%W5&{P%zfB{eO28h7DnFL z0AKS(%}HR+L8Zl<02gx!qW|lSnvdXP{eL;>>p4AB9yep?IIZ)c3` zT){7Le>wgL&-FpaeRy`F-{ozcGvK79=kyHevV}(^4si7Dc-GNnu(qc&YkbaTtxp%$ z{A99@?wqjUU{|x>0ei;GgQE;!U7L8~j^Ao;8>{65k1PCPg4MF*Th{0d<~#S|ncb|L zuOwD+Z(N+&ANU7Wr-nEGS@_T_^xarywN&x#g^C)u=yv#+| zJ>F`$mvZeMW3~M7(4AW@{`}4@kHI6GU-yO&4q#0_Ywugedsv6$4ri!d8GEke=i%3r z-|IdrygA1eK2Q>D={MB!rw+9I6$O^Rc!agG`Eu4!vG2rM+QRO#V9Samui2|ZePeck zHG1*X7gjDx^@eh+b3#3=b3@&&ZlP_gB`)b{jXty`A-sDqYptf!ga6%(c>nIB!J0RQST(y73TxgNitJKYbE5}&E;0d`U08Fk8<~fUNA@hN zxq^KKcIPA6S77&m!kXQ>e=(B#dpaTS3AX(GXTg>O?9;G_GUl-7PJiI3>HfXhu9`i0{KiDCe|yE9 z{=Qt_*OP1c{KiMF_uFG%AHcPXT{Yu}1^tg)5cKcyxN6>9vtZ?Go?y*KoyY?|%$|F! zmTNn6k3BN?9Hp%6kGUtoRbzY@=58NcaIXK8*Sq;Yd+9v?{-!K+?rN$9!}|9N`n%=? 
z{c}bK{Z|bMnz{%wPiu2O%m3#bmw(e}w}1T*kAFJPTkLUt!aCX(tm#EN+PxFW{v}_j zvt7X&#$al8Hz9RDwhs3NYwp$kgGk+vt_MP*@&n4fe%R8gH;Kj@ro^?2DuJIF|W2CsS6_9x1D7kCe@{N6Kc} zBV{q|k+PWfNZw6*B=6UO!Ld3v`OYC9%6AVWx_P4)Qu!`GD&PH(%J)E|@~!@%e5-%N z>ONN2vAT`YWj{x6U-SKg^nc#}hW?K}O#XSl$v^rq`RDy6|LDWypZA;mqYtNl_pYValp>xWA1?TG1>f6GyJ)I(bJpny`(|rBypQOPa8;ik< zWcD#iVh`Fx_K)4VHDQoq0XO!yHa9+95>_Agb36gNT#VdHzb|9Ywla=C8gn@q9c=k@ zco3Wpg42=pqA$LcFevSkp#SD?2mPgipuhL|LBINJ?w|NB1{l3zpv%9m!0lf>nRTnQ z(MOaQ)+H~-mI`zCJzky|>2Q%LviZf^gYULOBv ztm)o^eq|#Tq6;7U$X8>pw14$J*>h}#zK79-FMS($c>t{ZJI8zAc;5#vdri&0#tUA4 z0uLI14c-N2UhiS;FS?9->Q28WJl#L_M?wGMx&y(8zQ;9&eaLd) zQOQ$-{>LT-{bM|xnwa}Cx?!s2e}AIO&wd6?IdHCbuXX$1yUyd!7-9K;o@M!Go^Sbw z_pjlt_D1yK4L``^f(@!L$j*M%MHEI^rhv!adr> zeJ*T!5MIImR-A=RqTLiHT7#;v=}mAl+4sd9`)Mb`XeT9b{Q}yDzD+iX_90%Tch?3j z{{pL{`F7D7ugkyVQp>M&7bmelA#I?H_TbDL4G^2C#%4ZS9|#2tgMRJXIin)D^1JM1 zwKI`6Pd!hfp0$Tfwu^5|z+*K}D+q^6z?SJf%*7b}dWPjc>hc<$=$%%f<^KS^)uD6e z8C~Dbb=6aOS=f;peoON^(IFFA>8j;IN<+b?_1!bs;<5FnUlxlJs|-C(ma4F4{7E4Zl#$7P=t_BNh|gC z>jVPwa^kbKl}8c?Ry<&dV6P?p3bjz3$Xuz9Yx~`{22^|mP#UGTt+ynAkOu+6doVHI zf9*N@8p_|25xO2={P2czS-mc9p_4Zkvd@phkfC;BY<-wb}tz;Wu6{=OH}M*XI^81*}j z-Z{Yi=^WATT6ylTsQ(P#LWVtPd6{$q_B3xv53@U2RY7V7Ha22rs+bGhjrSTQdHrhC zmv_?fY&vhmtkm@Mg4Fb!f>iR;te8hhDt%}yoyp#@97B6NBLnp<4rA^s=!E$xzS$C1 zsi)&=)JP6*9nZ&m40wQQ=?#tDwqi(Kh6P@5dZu z2j2HEi%|B*+T90}v)sogWV=t~jCXhUxi3zz7TrGtbFuWfsc*C8>t77B77>90%?8gj?+=XuUG=1<}&Tdqe|zcN({wA*-E@4d%-oybPPs z**$yX80U425%Ky|mDjs^r&`oGBz~`z)A-&Bo=d~~PC@^PCn=^td`^2hW|RRv?~lfu zHW>fU%X(@(=I*E~7^P5c*=^7zkcaYG*adq22wW?ElMIr45Pp+%om1^g7WP6{aQ$_C zaQ#I(k3r`N-u*SjUZ9JJHqFq%KKcu?%xR~}CXKOpj?<`o``xN`VnA^`O=~e-8;XTn!sA3p)Nci|siR?3N;V=d9deJ7?$q zX7Pp`IxhpX1W%2ku`L5VNoR<@(Rj{U{7!&tpUmm0cHURW0`+waWFgpnjeRBhuMR-} zTIgCY?F&-vM0;!@(H>*qPy2^e+8+-<`-fH9AODx6{TS$C(mNE_CHhiaSIA-k#xXr? 
z+Z_dn#bsbW6Z@gq&%%Bf_J!DUSrdK=`9tdY)c$K7Pm+uiUy6H^oTr1AGfk zEKKR{|9Uwm9b>#+$hnXU(sPo%AT=4=&Debi-lumowk%c{7EBqEC-I_5@Lt#4$AyqBOXXxq8g8XF+$tYsBqSQ$6 z{I#6#S6@TD|TC^*Zx;COu7l zVaY-s2wzfn0JvD1FJX<`rIrtlZ-LH7?3T~L4j})VmM(u9XdBM#Z5N~~!t8AqqASQ} zp#Ei{f2?X|YPz)`mHb|^=WfJ8$!{lrk@VU&`0CuQ>*k6DWJ9;ub7o_U_-s~D+Zs(j_+h?a5xnp-8$h~&UhMb46b@zPI%XO;FjKla1g|AbHEey{a zzOyhl9MAkJo-rK$`y6cieE3nqS25asirWOg$1>cbET^}R^F9KrO+JhhcA=~18Q+QG zsE5u^_|^*v|2D4Wl_{BjI{wU`*R{6~c@EppvHb_O|B1di^<2Moj=y1lKKiEA+ed!` zwVmZk8e1pgH8e*n6>~JyJDi7m(tM3_J2dVz*1+{^J}kpnYx6SQ++f0I4;9`2*~3L; zpB&1ai2eQefAc5z=f0!P&GP?=@ z9ea+v2KMjy?T*6R=A;hC?{NGs8!;z!Ir>$ojxo){#Te`TVZs%3eUCEC*SdO6&z(c# zH&Y$&)qGCZ^|R?3<2&l-(A59z&t?{7!*`vf`u!BI%+k#jh%Pji-g66l&YNjC4>4FL z%^m0-zt$Zieu;xFAb!A{lh4=dzzY(5oJ<@mfNx*$l0D}IY#U$NkTVNgcmJokEX5fl z=xE}B0=(O=v2Day&%{Q@slT)q%WqB&9$kPuz{}Vw5u09!?SU}VyTe8`_8{+`&4~HY zyIrpH`1_%jG|lPV82?cFLDy0INAP(X{(qU)sz6Tb2W}Wl^4fsCRf>w9gx^`X_dM(+ z?B7Sugzyl>iiu1d~_x8M|rVRf)BdFT9}#vc`L(jhTm+&>{O{V z(}{cYYl#=XhHJ%jqj3EcT%WzV*qIHxQD~isbqa;4meRD=@rWgBZSZX^_!b1+hrWYK z{52EzB5Vx{v*u=C%f?21rI=hHeh)`|hdJ1u{H&m;%pQjIFEfkY`{ZEmJBaW9bV3Sn z`Ll{P+rx`CL-#-V$>+IcpUla9672_}cV?qr2EA_va(kSY!B<6x^{cX#h`sYK40MVyc1 zYWos<&Uok`3&vz9?D2Ta!@q+u{4TcJ({3-i{hNiUw?7}KfS9UvdkL@nX{TQ3=BWLa_t2g9~#j)ww>^S}!;(RA} zKa%?iw&af<&V3EXkz3<>B$i?Wp!x76`*J2?dlEb}5q`;>o%iQ%UgABE#*uUaV!JI_ z$aQ9cA8E{b^BpOE=GjjCj=HlBe58e$P;yi~KxHnFEp*+|O%nb3M8++nGqJJV5aZseyA=1%_fq1=&wduZuoY(vAXMcLutS5=1(-?eY*S6A$d zyA%9nOTD4U_Ki8I;fPg}UV6_SQM3%a_QKA&{C=dv55k_2oP;6XOn&RZ#mjiCj`(jT z_%8?iN3muv|8@R;PT0JT=kMtB`IqZF&WEsh{`Bg+zjR$!^r}N0r%TPNq$i12$AqXa zJ9srTed*Pevr=z?u6!K2lJsRE^rKg2&PrX@M|36es{I0VWq01MFYC#fmqAaCQT1fA z%6nbu$vWh?LeV3E^<)wBzj%`qCzN|LB@7X@hTaU21(eRn>=aUFbvd9lNUw$#*0jDE!CU zpcD1JV_)$bFH{$H=lx7w=sRzTQ+44kgDy1YxXE8M^f4v!GXiTLth*k2 z7I96_A$-Ta9r`mJah-G5VI6DIK*idYS)PRB^RT{^a;er*wEyn}tZyAyN$XnoV_n4o zIfbv4rS+`maF3I?7p+@6bFbv8#oVOsUaYm`9uiX7!72GS3vmk9_(iP4qPW zHQUv&w136)#3-fpALCs=mA!rYg8KC!b9w1%>{k-{)gRwpAE5Qk=f3p5SljU~)&TPN z#C^^jyR*}K(K{Xn&FMYQVohio#;XZ6Fs;DwMq#767c-76uE}XyRMQ07H)HM5%QweV zG!I11Cox8O2H)rY53K!c!q~USgDPvW9mSfSD2&A#d`A%LI?8Ds@U!HyBYg+zozQuA#b%!?L;F9Mv-0W4V->ULCK)+^VYud6+~Wq^b2y$s zv^|8~4XA?ElSu%6-|)@W_Oy(!n?!!JU9 zAse(V{Qz_U@d4#;+7{(PmrR9D$$@T}0v$6MYfSETH4K~NgzizlD;NeH6a^g=jpH#m zFBa#=;X3G_MG3ApoF{b9Nzy@(m;DJHI;a*p$))Namptg)2&}{6ItYAt2)cuGP#yG$ z9eSu1a!lWruJ6aVE=mrdi#qhsKuv$t;oZ*SUZ-$R`j#!}BBtK24&O_BsgwKd$J&3r zE~0y#o*wR^aUy**0Atg5UAF5E=%W$;`{Np`gn9zkFwcmFMK#xe-|L}c?}mIgyd2?* z1%K?tw=?S_$Gb>IY29}NbVCEyL-F6xVfEmveHN)H8sA>Wn)H?ik|XFB+wWGcAHfDx zr=cwwze%^m5T*dnkBTU{r!jKLt5Kt(Dxw<~)l9*2S3xFMLnfOu!C&(C&eefWkN)gV z*D*jSuXGVWUNb3lA*PF@XU4N4zly4PgyS{;MRgR(d?^s-K_Lp68_-6B;XIfnUZ;tG0#&^44`wnyc zc_zNmiTvW5&wa~9>;3-U=b0|tbI)O{^C!N}fvgtd9ehcGtu?E^Z8j?sB^eRC$h7Ze+(JchMct0BLI$@C3#yJs`@$Ul09 zB{R>Pkl)X-cg8c<;{zm@1AQok&MX04sz3`DWX?y5y11rsG``!4ylf+MJgv*t zY!Uc6m@WDZ=tj2abMT^9R}bX6x*qail?GJz$5>5mZqIL8nC*Ih>EMd_s(l^_`-}x< zo~Kc#ViI_*Fwv?U06)xtZK|I;(G@PGC?g=}^>fMA^!KxY2j#)q^U|RcjPFDC^E%vv zzeA9F8~`opKBDiWTQYU}C_Ruqik#r`*F|~IDY?)sQ=wyWplhZ;7fpsPx*K&c!o0eu zyZ#XLfzcmAae~S>Ree-%QFRey@DSvbbP@N5Afwi#L7n(Rg97_QxCgCu_3?-PO8Tes zd-?f8ABA&&h`tkXi=15D2%9_!`sXG1LzCtVsIW@O)eW%CH)Fgf37vFUcFllJ8iDVr zTf>v9N5o-mC2ac0#N_IcS*-lNjHn9opS}Wl@Ed3F=2xXu=qeiT)DE5n9fI-f0oZN5 z|HSp);qk6FK-2r+XP$!2+h-Zn)Hnd^O5sP)w znSIdNqoH?v`V|HLXx&?>%{L(kdrUrUuD9t8m;Cz3<*dAM(}gtxLB^A z9jbl~gU*^1!MN{0zEd6il?eC=RlqF1XT^60V(o3Op$_R!pwnhW*xMH2TONh+_O_E~ zn+?5I5W(8!Ml;XMc-A(ZpT}?>!+8wnv9{sRfy@rY#^e;fD#otKk~> zE3glP;7>iS)|oLg55g_*1veCYl9rs z^dyj^Y`0 z&?PV6xeq2KC|2}^WPG+Jl 
z!P9qvckLw0*bj#fb}evc*Fok;7D*0?Z%GcZiGHLDXGME;8gRO8Y3NfabZ-H!s{*}A&S5XMlHY$0 z^HZw?o`B6*3ESETz9wJquX2=f1~$O+?_`ex#Tu^Rwo>>=hoS$;Zq!5Wh`-?9Rlfxr zxUdmE9=_@RRPi+xQ8IWHHt=c75boRk1h$_0b$F)c*YW51`*jB(qX)r*!k1KiyRBMG z2mNmV-)p{IC-)(`)4fP8MgOVqBmDJW$Nm2oF(bTN9pv0!|Mh7<&-Wcb#||B+i+hFU z=lR|1GWdBNeNe}|kN9~V{h~No@A-Kh&k3LSFX!ilLBGw0ej{IRIQ@ox6aL-=^%(hl zv(;nd_oYK$lg~F4`dRpUrD!8xkNmvlXwT>+Kd)Bx@klRM;h7iX=UH%%p{lPplz!{{ zJ@NsCua|}Abkoo4;3?8`q$A)b`0KgL?&oc#+LxxEcjfwdH1ElPypgY00zU0&Kacdo zLDknI{cup{?~#6>W90LZexPH*?<1d%e7^nYoACGQ&=$zgJAiX8wV%gjU-k7!_Lr&t zp78Z(-rJ}AJRZaA1wRiquW$Q#5^@U?Y5-dh^OF(#3quSr9C?KZWaD=Rep4MMZyn*} zN(pl<3pSBI5dN{KAB=nNIg8lrqn1GmJ@b0R4jBCDwnca1JtyLQC*ZxuBi4yL;V{(u zMcrds=Ysu}K^Lq;BARg8+;&CKWlgW~OUo6_FPDZcmzZA|{<^!4tAv$!3a}-y`+s#4 zHd;T@1Rc@{oin*Dd#yuaEzVY|vmIW!8hNPy#P$)k&#@hcjsotWawN{n#CdUlq}r5W zm6NcpGmUCtmLSHx+MYwXa`ucp$A+A+ZO!|6H%$8=FLBKWd1Gzw=ULSD}VvlJND<%_7AjuL2Xj0=a(tF z^Zq#FqrCr{@=@NO@cW4kPZy^=vwPWKcq-37hA zi*`6+rD>ksfhrz!p?nws?V%7sZ! zFCFeN`SXJ09kKQn=V+`eQ|p14^6%v*Q=fY2VA?0sKAHADbpHu;SzC^Lif_^dsjp<} zS5v>5{y*kdrvKlk=Tk$#s$o{ zfEgE{c$gVyxw7L{mrKtk0t-epixRU7QvVHG61JPL^~ZJ#wt?7g!!`)pH?UoU?G9{T z!S*d|Y1k&AR$d%%$a;M(;+i6kY4mqR9Mc##6md*r9Ea?Tk;f@!XpDOqV~AbF56Mse zHEIJfONoaKE%8tsdx2%{uM}*{vhY9RyR6@bdFl65=Slsl^D^+eWHrlKg;?}59LvW4 zvv6!YjuqnfGWFOCxONkcm!WMFj<3M+&GZ|`-MIF*IJN`-AHuQsaO^OC|5iP=9oJUT zbCdAg>3D7uo{QgAI8ON8?etvyKMTjk<5(enZ)$F&FPxoF#j<127{Gkzb$@ng95 z5Iq8&r9I`s(mf*M zPZq@4u#OM9-hteaBhmJO1GzZ|WZZ$=kfVRf2M*-q9FTDbaz75t{T#@xIUwT>|( z;}~ZjoRk5_V|*T6{{U)XT&({o{75~|oWgwZG`65T6QX_9-pM>apg-MR`UK^f5a}C` zXD+~>;eJW|BJP)Pe+2W8)nimY1Zx?MxWK9S#;b@`2|q;7TW)+WM8u%xp+5`Z_jFfZ zo7Z{qen!9SQqzU=Xp1`kMopRveLmgi`VjS7XZBUIlSex2~r z4>9)JXdkck<9r-ZbCBXFPO?u1xir#)pC96Q37ZoV{e$Q>tv^@k@u2h%^1cD-A4Ip4 z{$^Om(cOB_-nAOt`%VbO%%20++z)?_ z_P?Ut4}T7v`7eMs_raf|?H_3K!Jh+b{(pfv*YM}~e;@wW@aLeFpuG?N9PLK>alaOH z7vtfBKS#S53m^PBXewx};m^?~XsqGSK}SJbjz7o$f~FjQZql=y=WR@SmUF+!E4Deno>KJoQ`roG4dy-xfm?#Fk9KR5NOsbAH2F~^^o z`qk90VjVTD=j{XYDI;HU{JCyEs^QOdb5TE^N#M_QJ}JkaccMQZ4F!LW^^#ibU8Nge zmkzyZ#tpD;T{li5|IgP7=*CLm1O3LVPF=UI8#8U)dMD$?0azQ$ERm^ zh%CAvw&GQ4`%R8Yb=hLq6>0MmkC-CW=*jJ5bLbS)``B)#?2|a`I zOo;TQ>-~7<0{ofFhELbYgV8=}cvUsu5UPJ+UKiNTUMO8SkA{j*?_@mbK79_KuIsOn zK1O?Zr61?x*cCoqM-P5}Z{yQ-x>2KV5Z!mB9}i0ZAnzNH{y}tmpTnp3bp3PU%Qetz z(+u^`DR2KgaOpD)_0MUZiuT9RuHmVHbAJT);q}jPAD{Z?l($#|Ec!G<{d3A+pnVbA z1)iGMKc_s#^T43bK<#eS=Hd0v2`i1ZVroM@Zw)UEtjX`D8S0-C{sh}ew2OI+mhVT- z_>CEc`sb8K#&#C%BHpg$k&!q4!!%v}bIK!Q`-0j;{d3AAWBbz#wf;GN^ZMubU(<7- z4bfMlmrbP?+8@=?OPd#f#zc2N{5jf*?t)%g9s#r@`ugPQ&_=ZN!JmVUL{|-ej{k|K zCOy6L5tE)?I%xi)$)DYl_n`Q5Q~vXQ55391Ct>PS(>{^yH|Q~gK(eP!a|8MG7)HCiY`qjvn#`&lZ{#>1l`prT0_;Ym*s^QOr=+BF)e-52$ z#s$o{z*gu|{+&59&f@*fgc)bao8Cv`0+&nAUUGc7>hFrYozc#TIHoa<q;mg%Inubpf5NWke}%}S1rCAd0gaTw;g}!( z95m(i&-HmxwDJ1q)CRmh&p&{Uy#BespW}aC|6JhDO?rCkpPTgb);|Z|n*7;4c@Ky` zH|5`3$I#TLUVNl!pST}j+9wqsY1(`5_hSP1Pu!313V&|uS5v?0>%^M+)zq*0`saN> zzj9opjxRa>TsI%p@aMX@D94}ceI|iF*UdpW{=Ad<=S9^&hh8<~2B?3o8z&Jv;q}jT zV$t&b$UbSD`Q+y=qsK# z#Q7r5qv6wGGx@g`5rfq5>AJWW$EWMYqO15K`SB(`U58J<6!p(_d|(`Zj!)N(r*ZrQ z_CWB7zzZ4el6hY*F3Y6Dh0%e}??c6>Uj#qXbFNB!`ZTrvxxlBR_BpSAF7WB7ea`Ej z3w%0ipY!_XUE$Ns`+Dnsn{>EnI&>-H`W*g6&^w@v2g&(g5`6mQu79r6mqtI&Xs?X< zWnuq?-4}kO(f*tF_1Zm?4i`p;V7j5F`7WSt2!ap3V(OpkeESNL=tJ^1;(jZfF^)~e9`)V5n#__i%W8xsfF==DU!Ee;FofHnBAQni$19Dkd1es_rS3|X?S*QdwYHX zv)*D4!@Y5j0^HM%_by>2;}|Qsr%HD0J#+7$U8nLS*8=c`72C-ArO%FBVy##i8>Ngs zQvB>Z{C*1kC7LXZV)-$k$phe-G2j_D`cMSgJP4k72>fF^y11qsv<^S`M9rPxoiM!X zSW8&>LiA-d_=oszH2CMFoLG5Ej;lNj8r0$)_TwE+vv_6G0Ps+1mirA$yt4I9@KCPz z_v_yU5B(s^T@Z1Hdsh57_v``Ra^Iht>3--==6)nsazFN6i~BI%dmVV^M?a9=k6OZ& 
z4Jr?v#C=bJS57mVQm69JbQYmJW{FUq!$y6`0uR-KMzi9#Z7CbDZOd=LL+RnnJv2^o z5ASbrU;7o=JyIebidBe5XM&GvLC2ZkpJmv7gN@;Q3(hAVcpH3F3qGRvB|aJjnoR+% zR-un^pbgQi5zn=QpR&PEc~ZP`5d3tA_$mBb?qzYA?y~;OJsI}`?e;wR70JCyic^YJ zewqS)BDtIbo~HW{9}$mlqp=1r%>plp{*S~xN@?7|M>Dap@X{?+;3eu?HF)WI^zG>q zRvv?Xt1umhDnBhpR-Tk4@B*uJ$JjmA^n%o^2$nwrdkOm# zR`QoWc#3PD`Z9ZMMTGrX;^`spZdkwa@Dnu+!z9<^#}?Q8{A}^FADmcHW0eL}*I&n6 z=jHyD#CIu?to%{%z?e9eUn>u;+|Up2i?J9{oKmqd-m28$d3oixuA^i133kth7}hcj z^r(-td+Z4$p93lhyIltvs+VzG9?(?9AbX$@)5+Kt9KSDNkW$gw{(7A}u<{$YC$1Uz z)R$RnslQR^@2Jzo&rUhLv?f-HuC|0(bBAGbL)TJ2PeYedKbs*R{|P;}AsYR}b29Pl z?bsi~`y7|8l_@xveviifmG~IwzUazL@%EfGpUo=z-DibGuVTLz`{cbXxj)?dNbbk@ zzXkuF!2TWV2cZ2wK6xbf_4o}rHasH(w*I&rTbU_EH-R7WZ4s;-Jm>@uI@eeWQh#Ti znYtPKGVE>NcsR9e#GKT3@qY{cKaBkjI)2;3sXIo@O+B8-oSzSpoX1e_75CYbhWFXh z)}DV8||B+h?PcfTFC&E@wygzKt6E7C=6i|zt%+zB3;2ws^0o*D0IeK6Yv9x5kZ zV(MS}+m<~TXJ_NrCFhY_EzYBvvh(;|Va^kI;H`TjTxT4Su5)v(uJaE@xmu?)4|JWU z09zuv|5vwQ+X2~YlEW$+!FQ9}ve!B!))E(Febp)O=W$uCYyf|b#JQPt?w?=>)8$Wt zZFKLohn}ceC?zR2OOo;(mR$YPm*dx_VLV5dvV3ihtj=?6Yp@QU{IE5MmthT%h^vaV zSp0h*DgNuQ`1d~0uB{)!eP|69|K11ggHfF?{cXa8qbMi*}J; z)^JlceDA}i``!oPrqK2UwTbV25N;|3-}^{WzxRRv`S(8XzvkE5y!6%R1sZXBq5V-E zy|g%33cmLN`ueT6Li^)r7xdEd<~A>VweNj^=DeOdXzR1yDh1#B01f%~K8U9HpJ>Xz z_hHhrW4)D0Pm@0(pk0Z9fr*JcK^h>leR8 z?2yNARex8D-y%jxwY0=MLc}$VMQwdfka!~b>9oFT zlx6O(s(Nr#er1wmL7P--mf6t62`!H_b--EVPeDdm-AFsqHV|nlc>Q zgkvjkY%`8+!ZA0l*@5;$Xnzmwhtd99wS7B1XDyyH9nV>d=OlfYSA}D{={abhh4%4i zFGTxxwS7N52gf$y*a{rmjAI9JjPeOP(0&N*@1gxL+7GGi>-r%NW5KhAmhc!q<)|}s z-}q?$7IG+3@rkKv40(>ZziPpjoL!K5Gqx0Lw__WEZ5%cpvu|8fL-BeMyVv6Oi1|AQ zbj0n&IEuKoF`i`fZ;bI_EinzgpD%q>&4HSbhs!wbSdjS#Lq!w z-2orN0UyHwAH%V6MS7en>kjx44)_rc_z{jxYCB}z0bjuZU%>%iq2qkC!yjAJ)d&n;g3o0 z@Co|C{@;jA_yz;;`&OM#KzV%f354CLMa04#9LsPxEsS-QhDoA-hj&uZ;5(nl}XVALxAJ+++v&3BbY8oPp*E*Vne^ z6Q(DVbjEP+JRz=MhfWZ>K+Frc4p8R@KJ$Xu4nIoJ$0+AUxi<2zdA{#Fxx4h?{EBs0 zzjq+S84#twTsb&%KIu7=(daXBBJ4Pk-pM!R{nALrwW^;|TrPO@_bIW^LQpC7{b z>Ft*@*q8*eTlz5xq9?UJUZux_j!BUB4KgNvcH&~zcl8E7eoQy;@$Nr%ZeK{&pMxJR zF!4%B0H1A*iHSdBV&Y9qyqe=R=8()a2IiWL?yP|!O#D<86L0hr%=Iwl`l1uS6PfF^ z%=KC}6aVh%3ctyHcoYAwty9(N-ka+;^GsYOGjW+FE;H|sovg<;@$crkL#jhy&SA|t zYVB~k{^o78d30$lmBd_K14L(A5@nc4x!Nk9N z{brL6mzEB}^hHnea}a&etN3^0oPm4=F>fGT{3;zTzFEb^PYr^L_n8~$G0Y*(2{eqZ zc|NcEY1Dt+r4Q%Z%Z!8f^FalFiS?N}Uee>^pVRxCdR_|AHfyvp&kyK(b(cOtc_~Et z()AaMhxhYs1TT?qW5CIe=>jL;oE#V@ulHS6$9r*$8fMkRvHR2?y;A7HdAEBsr__7V=>&wh#VQtelBjPIHS zAG@U=n;?2pqy1HSJm}a2d0&3M{~86qIrGKf;YHnevF{Ch{AI5nkF{02F5EOj{dlY$ z=k?=f80yEPo!5^?yN3S+=Kc}fhu4qCeSGT2qvjB=A3x1dKOXJ8emvSmJ@g^Zf0%a% zY7+7K@iV-6-XEZCDcX4bcxn^%<56>$*N>lOs2`7Zjz>hhn0N8|e83nzhwY6ShWhbn zK@xC()PIuYoQa zy=*GI(Eg~7Ufho-x?{T;^!2G9PjtujINAlhcy5H~i|uyMQOk=HZLuw)HqeXb3W%;$ z?+G;I_2Y@A*oyH#uODyH(~Cbc=~>S6tR&ASe|AgWL#iJSnwt7xci!(!{aViP9j3j? 
zd&0C=9rcY(`ybSw;y!;@_<2*mmYe>o4kv5sS5v>L@vgoyzcSi=sAFrF6`pLAY zAFrE}^7TiZ%ttQ@KTmpfjdva^>bmgn%#po%-aB6v^XKnA|9;+&&~C;F%s9d8X57V$ zyQun?<55U2V7nA?tjncmFKPXFoxiK;&8T@AFqpp@%r() zIa*ioMDo+k`tg^je!On{jsA@>K5QH}k=qyiA>!r6xV3p-uisXVhQ9GX3kDt*+ z{dn`f<b$venBIq4N*8TGTJ&BpWqU*=&^s3PhG}<+zpDz3a;U5S; z)95Fd_x0LGlMWX~hhVy+r}-|3?(p*yg4L%D=0Eg_`tdqBH_Ek9j*aqbp6|^Y2!88| zKAc~>!p(Ow9&~*Gtg{~<2IDt<{oNj|AK!^x)_5pHJ8quexnCKSheD)p5FQHI54vLN z$Lr+CXkRsKY#`jdd0pUo{JlUI&dZ_V=R0{m-KWpjkJt6z$bUw=ait&U*V!$N=0Wu6m3}AYD z`tdqH!CVi6`tj!aB6Gc#4qIWa*XqEnnCstm=Y6SLPsn|E6aVg8Ki>4M%=Mf4Zyo7y zYvy{`zPY~GIEOOMolN|@ov#PGZ_S6^wHs@^Yq`a`Z*!fzFTU0E6MX#yWBl2iXZ>r= zvo4pOHSzCPH~!sNn@ZGyWR?;S8(QKiVI>}_33GwE=7>A>gi zS1AVGulCucs2{J(^+M(ySnp(F;Y~U?P-n~0SNNeznGcey?{5F;IRVza_~rI{5)02| z{z|GJuk#U%_%Wl;VB+7sw$h}-rKLkKeIYOg0#jg|-*=avgXjxCd{$55-;HyIUaudo zlXs(hYnWtx9bEH#ZM}GR>BIT?$cj9Ymv1`>#>(n=@Yw9$wUq7yF*T$DfiT ztDEGo%0^kLoZObZ)`4%Q?P_EBN96Fz)u>$ zr#4YDewtb{-e#y7k9J-&9_?bT#W4vpRQz#@p=LbVdChpVi}&Ps?rCbxc;L7R!@%*} zX!}?AR<+<0o^v$mnfCs7_b%xCZH5)Ni{Py442p`2g$-8#Tv`ts7ZQ?$CSNM2SADH?;wX3GDdUg9&SE~It z@}+Sus^Q~x^H9I}r>GCFn}729@H5o!m8GcPE5rZ%du90lqUytwUR~q$Tg{k&w~mzG zn3yqvypv{(#f%AHPI^-xn73XoJ$p&<@j8E3)5`|>ix{Rc&Y;zY*ToXF`tZ6qf-yeB z>x_32>mon>YOW7&%)Jpl-iVF=^@NV^H;>6=`I%Dj31?czH-eM13sP^!mV)heY(ub( z!{!sCM@-*2pd&^v#!Twb=QyXSHR4gv9Q1L(&U$h2_zih? z+%`?E5s%-HcgGzw)Ee>l?I-W%eRHVh6Eo@1qjcy}-g}7NLFC;p$KT_)_l z<$e0WCu+)AlClo?o5|2at1RvLl*4y~+fVHJA|o?pnf@DGGPX!H||exOm$a$LC1Cp7Qt&9j+w z=v_Jl(;Gd_&q4HtpP$g<`1ok(4fqFKZ>X44!s_MJcFYyr!@YBbG5tF9fzShdZlIHY zs;O$^Nu#VAW!pSo#W&f!vCNC559isgu<@Pvv~>N&)`<7R7OK2OKB%6LPRVnBbsAfz zd=#Q>*7(Rg-vKO#pD*5B`UK^p5a}C)k3#l^2nVV0k??H{oM4h~pAOPio_Gl^zc|E9U?xyIRt$CS~Y%0K05Iww^zW*k1h@1YXjqMV&zS&yov4h<_(Pb zrmG#>ZS(<5JU8m_UBucL!pb*~_MaDs7^g{Z6L<7Gb8XkZ%-R`K-g{Qwx$ke{=e0Gh z{M$V`jIF?bn7B?a-x=|9BKBd#D~mj%Nl$aF=cQPWXyi*HKbrV?U4404SG&f$PFbuI z<@MzWzxlj(t+KhM-b*hd?m^JYoY#8ofib@zYC0HmsOCCl)a%!+ySY;R*vqA7P5k`T zjh{DS<+F5HdH1(e{7Z;f`L$}!NW|^6n7xUe&ja>Xt4AX62x5*Se0ig8GU{@p?K9U9 zc(RX&8WkSI;*G8=e&BEaq%2KsN)|!{$1mrK>nnj ze?qjo8a>VPuNpl3x&XNM=C1|Ey}OeGu~TIR@KD6dus>Vy!9}z6uNMo z*GKU0WKXx~aPDIUsWm7JICqUV1KCj%_nybcw3l%2WS?japMKqeHXecOl72jb*cnY$ zuF~T{$0Nx5^7H-IA^6RAdjjv?YxUpTy*6hx>Qi)D|NYOwv6F2P6m z;Pd)WG4L0`&-9!C*jPW@n~IIi+jLKL-il|d^EUVUFmKapmP74*zJ5G?GaNPKwVIVF zs2`6S@*4KphWhcSA&)$qw|;yI>c^voJo0Ou*N->vOPD*y4JI8fnhsscyFS-{5%dls z?|#^Tp2WfvPW6pY>c_J_RzF^+SB-w4(XJVD&%#d-{(M>2P6m2&OlB zn(u<>4L?7jC-L!IZ(OSS@j6*I%C=FKjWTSW@4NP+EBbJr?FuK~$#~H9eYSplC$?GR zqY!Pmd4A`$^Ce%po8lk;$>`1wx8 zh3<1j)Q{J_gOO%NyK$u-=VRLyj$TI(e!f3$+t0`9mHP2IyQR@Rh#tMtj|Ux>AnzMw zT>R`rPvGfA-FUI@EquK@M0|a72>5zYhsJ09In|F}X6DU}`7|*vGvXVBoiX#~x_Xi( z*51V0tGG3z|LRkR&s+-{v0p?wS=Dq>-M&7b0P0~3F* zt(P_ZSJQv>tA}o`747qDMU8sZI6pP<_vV`YFU>XkW(@>>d)(RmrpF;Al$Q{9-xJh779W3??!jbkkN zsh_O;L(?^O&-^9UipFo+J(J_2ltw%+s@PhwE?axqGS7k(mVY+M?n$$;mW6HY`So)26zdZES~{J?>R}))kpT- z^WcOV;YoWoz{zIkD0TIvtb3mECbR<^3?WXH_^W-^WMR2x#DMEl4qP77#@rJ6J29DT}ObfgCvXclr^?uDYGggEV0!e{5Wm;DDm%$!j>g9Z!cKbI${38 zR*SVT^{dv|soA&9Ne#n(82&FDF*|j8TS`9eH112TPHj;c(P zqMAxP?fFxVEv~7RVwA9CyK@5iJjXgGwG92ADaR;ZO|d&S-8MUQIAd*ed?Jp`#yOkt zPVZ2A7;C#P68FubF&Nym4e#}4d;36=gF9HsJyCeCv*1~hogmS5%%qjr$S5 zuPh!|F)Rr(yHw@#L@%G$5|49UA6$9pI=hGXy@lwAeu3}T2)=K|bDxT2dnWZecWX3g zeK5u~xHOPt84dC?#@cKybdTDW#mnU%^y({r%{dnei3gbe&KMcHo z5WN4noWObCErlto;^LK;!26TI`)`5wAsdyuPPOM3tGpk9zQ3sPesuLqki`b@|A-9e z74;c|BuSYU7G2RW9%F@Pe<|TS$N7W23$N-+-~-C--mK z2>!Q#N9q1!n-|w?loI{qV3wSyOjPAyLJ&Ea06Bn-C?A_m;}ON>;J-)?L+*qAu61GdnD-Fm;gO4ML(Sh`(cIsXapZFkXSy0%(N}~HtgRt*ugy5!(7rZ{0Q%d2F^-fNaf)9WLl&!lLj9(IoFPuMqM)9PVotV^z`XaLWW4QYTqvld?i 
zduCPWS@YVn))q&ymM_3J&Q{1B`co`hxePu9`GoD?lO{zsGJJc9YyouD!=v&0N08OC zcrM9yeato0^^lJfXD zlT940%K6KX^Bdw~l;;d`K3bJ?l5e?Rl=8vSfj)L@D`bmgyiUGGmGl0ZoNtBxr*}T0 z%J%!PUo_qyE{&~d0)5D49f3?wDUPj(iH}isLY61T#VR?l{|m9VV7$JDzVDI-b9vqk zS=ou-Tky>Dkmn=v;OZlg=cAD4-H_)s#qkweAa76Mye*Kmr*!gs1oB)0d9H&zx5ySQ z&kG@kq#Nm(#6u3q^L`o^d|yc>zE|ZqJUoia@e#=JQyEbe;@%OGO|jaeDvm&oe~M?` z3VS^Xa%^3)WNi`tuYw%^m&K}l23zJiU;ONO@IKkHLdfxbuw}*U8g9$xL5{85md$G| zTR1PM9A|S|HllU;!V#^LR5@;kT#+qHMSs(A@AzW5;?GjN@~ik%toN3;+vQcN~y{rtPv9SEKg2VPT>Dw_Ut%nmi-gGmIm~Ayt#(GmeQa;vvD`=V8b0 zBKd|aI1*Wla1`z0MD8EreT08lPj(G5ME>DZs(;u#(C%q^`A!$vPV#kI@D8J37pLI= zt$04knj3OICN5678~Y-hLw+II&knyZ+Q%9~>w#WfF_67y*CqODVQ`C1hwMWavZK z8atOE=pD$=-(U-c3~g3r=q<=lI`n8IWN0#%p#`nO=Pw8%{-{<(XA5Q6 z!sz1gia*9jD~&8#sl+}_8q&0X*<@}9$tMwZuvLzz{22P5zMCZEW||zW^rKi-FhB4a zmNOZ4@RzW=&cZzD2^$SuxLx!aUDXeqe*-`T}#kGtdVf znN?a_v)9(gFwYNQQ~oK3l^=p^z@M+l$3}C!I+=NPUBmKk!~Bly9@!u(`GC;hd*MI0 zu>V3HSosBhr@+?j8qD%fF3fVZLY~gTPGlyrmIbgY@5^>i6l~o2lUaEFz{*ju(UZZ0 zlchmVJ7Awjz&=lbUwb2L?gBhRSsGn&AN=!0NzuwLLGMQ+Sc?<1+<<2^!G`nuVIIaH z{}TE`^Q`@<|9JpD)IoVb<>z?*A;?EM{7~_Jq8~H|$%G$Q4StHjc*No!&yfAW++|mM zoU$8p!`y^;B@gqiX|Tg%u|=X^3qkkYaS6)U2v%;x_dkoJ=%$UBCvK3~!(-rE!?*a$ zOkA@)F-Eb6*_|^JBxPtie0N+&x{HCA(^za%E#6gL+`nS>@g+6G=FM)MfGya(hWL2s zyy30mvC;DyaqU6bhWAUXd=l61`0?Ur-$QN5gNC`q{c?n|BUV=4RsE`W;8)Q(Mt*t+ zeig}BnU(yi=q8$1?}gr7xikr55Tz`F55CwkM4A63{27eHNz7}SkHHVN3~DUEJBu-i z!gz?WsFjl{Lmm&%EE@D*1^O#Xk}9TP+*g7AyW(S&2cXMxLHn<(^e5X*{=zXDhbUH_ zDJ3;kf&SnjPJbKjH5T+QfZk=G|HBE1N;>ojT}Sk{VcgO%CnEYwOZ!(0CHgOz)fzT$ zR_F9zFtk;kH?)$5$SJKAeE-%Rv7*m@jNH(0{fZrj*4>$_kbKWuQNuW2Ao> z=udt^Ht1i>k~I1guUg^HlJ8tc@}v5-BPB`mX*>McyCLV3An$iU?(c;BPlP`^0sicG z__G%7&klxckqk%UeorkO4EePxFJWKzpxw17!KzGxzv_T|ohQEt@92;vKE9a=@L}Qq zPKB>@FUII3WV;ozb?(6|7sjZx|q^Kr_ z`4jQQx1n=OF!vz7D93z*_=5Ck2VV?sN{5aKl`m$IT&jFQ&*|WcD2*>x1mKIZ?(xMQ z;)`}kZknmeCFcv!g~s5EkFs1Hd@)Glix|ZDQ!w5%UZk_1TAG3}vMRsCI3>X!wPLK( zFiz(%)^5mhn(UQj{g_!vo-tmvateI@;A2Mf{BQk`9Vh!?1)b;PS@XdQjeMM9q8{#FVD4k=JTbhq%AU?oIZ%UFjJ{J>j9RWI;@m52;_3jIgw-P-?+{ui$USzy= zL`S@J`E7-lFy8twkGH-~u~x)fpG2(ng5s^{f1r3P#ac7q+ zTQMG?##?>H1F@Dq6>kkh|L(?HagSi}RwMoYCE~3@Rzrxl>UpD2#aj)0LGiq9$6Ila zVDZ)ksevNV{gd?Rq=4JPBJGR~%E}My@ly*qiHo5P6h`5(X;Y#(Sn?BMd0z z_ua^MQr^;LNmhXED&HW*G?gId23*y{amfEkxYrB7=TW|M*^&Vjbj<Vfe(el&E@%H>XkW-;?qnwI&+=qB`q)9`RmB^`3F0>W-(J{!WG~oITaummV ziE;GJkq$u~EW|Oy*ipWEA;yk!U@^cmzKk4LZ1Iqa9OP#y2Udi!%fqkq4uE z=WZ!kIfh&d<){iUZi%?QATgTf!R#0>%7N7(pGo6IdF>1uFUyc72HMX`N>s9AnKHhU z@uCrK%UW)91n4#o@?B3D zBFOkjU?Dg*2(oEiGPvRm41(rs?Y(1!=r1- zwnrnE+H~W17wMXXlxGL;OvQVD4cjr`GDzm3v&xUfB`K71TLs>6^E~@to<~hnWuEe> zEy%H_BUd*TJVe(}ZjE?|c#eUGD9=tZUz%i7GLTOj)(H=B+YTPeM2?Sms1BIU{gC$q z;LG9Qp@VVAr5o~e6IC9X03M=a`uyBPm4_yPhj?xse3ZfS>w|q{o@m1JsE~WXLxe%o z<3?j3_X8pKV%%sPe~)pb@gqG@&-r7ZH@})RP^kw_q8+*Kv&gNEISQ;8?$>Vd;y(3b z*^yrzsODD#k0sOTbF%jhqsO~O0#7y{eDXB%xl=H9*JEo&UX$eBjj{7g6rv_Es3aj zGd4=GF0oXsiiHefeoA>kYbnMZx^)yVb(4Xs0FI>P2RNTFP=rZ}hL6w)J@8v#WL{00y7@kD(E4kIllAObcrvOz#+A=^U_!v~O+VthShA9Q`L52a>L$-%>Q8w&-8FZ0@aP-hcpZBvUn}Lb?O((kO1@QF?q>Dha z81OY=8V|v5JPf`*xUSe|{l>d0KfeQh{(Oz@82s9IRepX4{7m?&!|2BjHU#ew&M|v` zgWo|kr16r`*OS0#zrOt5b9bvapG+yaF&+BjdrPv`xm5iRjf7tM5ISlv_~$Hq(vk2>Nbh)Hlif}5+vH@9>k+&ld=k=YMn0&8 zf3hEX#k;P<%M0lNc)_92PqJ0Fs60TJ?S=4VY9XV$fx~Wxj2?q*H=o11 z;Jt_k7{;jXCu!5Oc4a8(;DI_|ME1jPBpx8&HJj`a8^n3wKtHAw0+&_T2@jAxngt$c zhnx}*$hiMO@ZIOIN1F^hp!a1q8+btP%a9$~jDD1X2MEtY>v)I<(08myxCc7y+t6jx zpwsf8+j8NfOofk<w+%jjw~dr#nCR*;(78u%gkJ@k+u~ts(cZ#&^cs~% z6DxPoJW174RZ7h)*G!AR_H*&#h(Iy&^WwbJQ#do1z!yI#WNmm&wmLrfps#R z$BR`vsFw@!#~Mht!q-0le)q;=$e$s;@veE<1l{&IbRyYArs}~*!6(hbh}Q;J)Z^Jl zuDc8V*UV;3GpxKGV;@Y20nRZ 
z*VnnP_A~H_zn?ak^U2r{{4|T|r)^Mm8RtXzYUHPd8F)kQpM@KEL+_t)eg$ukpT9v$ z)O6WG$UgZsq<0Q{***~StDcC>Y^7X6pN`E;HReWyFCQ>A!{3MIWJ1^U+1QLB#mvbo8TiIgwL;!>upwjq)pIampwLvG3>~vbQzoJ7(>`n((!#FHe(pC zWf-q+#b$85-k;fI7%#m)W5#9-@mZk*LW|9C{^^sk83PZIFFdq6u^C)H(GZ)_%lyPE zAvQz0fahXDip}t`ysWVq!-Hgp>^bzx=J{OzO@hHV+Xnjn#V>1SR zrPxd`e*G7T&1mCp1>am1u^Ano5RX_?KdsBy46f7rXB3;!^M>9(3pF-#e(oO;YnBjm zwjlN_yXGNZ!}D3FBSZDvqL+kKpl(9>Le!0T2D#JnSXQ2ZeAok6I~f;de|9(8X`St~ zBDRLz_x3pQm#5jnl(E3T&>G|Sm<2KS;uAaAPuIW4B>cAX-5^-9kav86Th+8TG2ngzw#!YeHQl`7(YYpeNwggZqlx##Z>MY>Z?E~$CLlA^4`_9(W) zEJfMD!pisd!+Rh%LOJ2psI#;W`5y&qUe6(4{s3|dYtWa6)&88&^`*+r+>jZ`pe1wk^-*Zrk#h z`n=QX^FE%$+@Iv*opXHO`O#&_Ta~bKdiNa|&-4?Kw`|3=<=7siu=VD5tGl5zK2r1IGVWamOW_Ykm8^RbsA zt+xkmy#7_cMnUnySK$M_ja4)eg^&AjoKrVQTt{jHhO2uu_K?m=Pkrnop{e; zDZFV4#`P%1cMJ9zQq{PsFotooxT<1H#vMZ+>HZrfb_<eF=#-D4Q})h|aR1}B zl5u}4NN|yE>5g8hMjk=vmDkX}b!vZqsO#$)V?UwqKAqv!Guy1;&UaEGoO>okI;$A^ zmK^PT&mQBfDv5Ac-B2>_AK(MhD@RXy^-3dna3l1}Yxiv1@|^mtAF0net3IbDpSkyb zKQJBELB9|!gq|Uqke;bM9(jxCKlPpZj6y8DpCog|=tnFtc5mR=&#^s`NoOI_GKVoG9c7h0Y;81KBE{3_d6QLprC)pmRtU z*wCN!aJxGj@|kygmaEFj+}o@r<374gb_tzB`iAt5QRl2ybq?jJh0Y<}ktOb>>Yirw zlb*#?y)y+gS*7Y3I+t{m&@ox6j)@6T#}Mu)kUoh+-$;*;PKiZsi~2?Sg!E5mI;CB< z>}h?_0v#hm*Mvdmgu_RP0LCX$^HUD|l)0Mv`|FsWLC5?QK0>_etB_vd{tCw7DDvmW zFcvoyvEvea7TcGH|MG3<&BM@}q#t)AS8_iXU*qxk(ge3<`&5A#3w+ueUJ z%5v|R0NY(4x!;>)aeq2nc5k=CXPE$9GYNVn2l`}QxclRr2=^f9lD|F>>HZr0ly#PP z_$cwp8`$<@BR}OV{1nt~*^`DmWHZLS9e%?R_$eOPv4!BRUo6OSQGY5P8SkRLEd9oK z*KZ=myFO;jQ&m!OM^#BFwq^HAUJ4W^jBMoNdPTy5+z>RzR>pt zL<%P|eX-Rlt*r@Iv<9pcv^W0+1SJ=)UNZ{i|5<0w8BT_TYp`Hvo@YP%%(?8#+H0@< z-D|J4cUIl%yTC2K0Jm)YYTatatni=cuP=>og}01%gF9T+%6lnSKY$FFOb~psnXsXk z5*92W{beC(4`fdi8;d-4Yo1}^5cpH-bS3;KbrSoTW9rjOiP$oy3-2BCe5>1dBd)&h zBQN6Xyg_kSPp2GG-aCj7Vp~ydVU-#%ipUKe`5{=PEx9p}eai%^>|t%S@`ET|$$)e%ks;;>+^#j8~C^+kT)Lf7GJ5Wglf0 z`B7`hk8ms5QL$Rqx2JMXWQf=Wq*As&NE$vRvdyQq2zEJW$(1Kfx#H1l=u1)TlBB1D zW8$%kUAL3LE+T70&TORoB6FIFMb^l*Eo%4QZd!#OZwB*bm zm4_DXwB*b?>YSr;rn55s|Da7KGGirjM&wfpvg4QJF$3As75Vc^@{;RrT;GhGxf?n2 zUF6KY*wlTGJzfO^zW>yti{4xF<3%fxH?JdaWNsA2HmW%{8bnzhp|4bc&1YYbADT0< zz_^?Lx8>)BzMYlN{HVbA;A>j=;Hdo2CwCPX&ra0BYtw44{oR+qH!>bGuGeKFZ~7r? zz&Njgao#A7%AgmKK^u@kFCl|AgK_=@#`)`@@@v-@`;5O%@goB=jDHM^Vw_AA zV4bv($Rv?JPHZFj$(+f~!=}pzU#naF?8Lg&?bTl}jo=xfgY@;{gDGYj^1aNreno6! z8QGKiNpOth5yLLDh15fE3%Gy0w2QQb?13hA66})6HJHS#lYF<6_ndW;eE)Cnkt@Zn zQ2P5guGa+=>Z?*;7xi@;a(@za7F&&q(~(2iFW7QOwp5T$5dUy6(w=v1*!6d`M zB>vVSqoO*VqbH zu*c*#zL^yM3-hdx;j_PiiMG>Dzn-ocb{fgOwQ#SQ`NrmWY}4R2{{CXy*k-eZZ8r2u z4!?PCh9PMSuK5FLOI-(l5XChE*+ZO8Jige(Hm&R6EbpW2=PPBFzN`tpvFdBTBmMVL z(zY=TxG;ukq#ok0CiDx%a>$-5?P!H* zPOOiyF^%9QktuD-lCc(+5qm%>gRQp_*)MWXFwMJ^H!533A}>ZDHwus+`N$FWU%hjK zshc@cAoD0U_dGmH;+rC16_v5A9EgM;hR@1`SOagV9_`W-(06Y6q7H5!B69mF9-bH^&PbPPVD^m3=!AcTe0hWGt24Tp@}=`H9$9qdnnxGiv*xiyA3gOW6W<`<*yqn* zvzNK~Zfq;}Ve8n84d;gK?&=otkFexUCUU0HczO5M_Pp3V; zpKt8XFEHvVFE_T7R}b4-{_wEZ%O4q5SKj5&I;HEO7t6aI+Mskh^hWuQhW)wx$HUf_ zcR%!drN^O{%6lAoP3d`Pb9v80e^M?w^q2C*!@9u(cAn1s+RCezylTm-R`Qy6W$4A9 zTotM%k6Ov&(Q864)qXy-`QrOq?JNFwW^`kg4vWez78$)ByDno)7=t19tU-X?@_=0Qj|wa@fXAMbKggbM-v$ zQC)Leat_&cjGP+-zo$9oabuBxP0(26n^V^_&C>OZcIcP=;FSrMe7pQw&5&!E_v9p@ zBT%(SaYgO0>3%Kp+Awrjp7Lu&-o@V+3`KpIv(Fy(D&v4)t)0|I${;#vNplQo4jLRy zKWxgt+G{rsLKjb(f+^)%(y?XWsp^ZQt~QnuS=N>elzatGy+XeeIr$M~jLN%F$gt~? 
zW!E9oMk3orAnytcWSxnn+S5nZp8!L}I# zJossbs|Rwd2Xd?ja;%4mpKMuX$}#YhIe&tlqv4~wuG7N*Wvz*c4&?r30t7XIYF^3b6~*LSu$*28{}&h{mAwQqdW6aI^}2DJg*^eF7?XMnAw{L=S}S<|w=wci(PRYhL^wN49vhw|GP%dUs1JAyx@K3Bk-B5yhq%Nmy8 zX?qPz*1eY4Ygp+1Wi3(aDfnCRe-AxT1#4O5tYyvZsT_WQ^{fZ7l*6JYddJLIvFbSnc0@wiH4 z$N|dg)DMNAkH`n<>kCX7A~>pIwaBDw@()rbb8X9m-VjW4?Aq2z>4(gEnfhn^U*gxZ zY#cQR992UZY#b%$wR}K%CExdmMXt29wsk7_shP9MJ}vxIg)9^O&}GPqHO$2VV4}-2 z0{SwrQV#cXGE{vzSgDljz8P*yKh#~n2l?{<;QjAf^5s4BLtR)uziQ2o7JU~zktttJ z5Nlo95;-}Z1A7cFDz<(HjZ@2<1^(eUJs?hpJuMWNR;?U66@}GdK z{(IPqKlwuFwU@sbdZYZO;Hm!^_LuVi9VYWJ8XD|$G#@*Uw$k2q=t{$xb}ebwI?}Es z?OM{VC2gU_E4zjp&%;YLUV4n{B_whzd;hVxN#xiJ%5l%FT6o5n^UQU1dwtAKTV&ZXQ7$Asp#Jd^dY zKaz&5kBJNf7mYtME4n@=We`24&Q`&085#mhJwCxxkJ5EWa=Sz@{w%VzmR zYi3oJZl{p3{v5RbG3$0R*hTciC??e3)cSY?SG=a?Pq5HW7aES|C6x)N!b4+?0?#_l}_0Iv~4H-{? zF?~$2<&u4-#sA_R_RSahEcPKQ@TnMrf5mRjia5d^CRMz*MCsJL44p;Ylmo-I;QKi- zLt`Hpzdos(7XCJNe@hvs9>;fQX{9T?qF4E~OS>n9m*Q77+~sMWimzauSHrglzXBzo z-_}zLPtKR?OkI5D#pX@Y68pFB;(M^HixPf{eQqA_?h60nYwqy!UY_tT@dbNAOAbF> zoD!C^prqV4b4JMZ>`NqNAJ5)IwTgFW7IoN4S}&2dhA=>=N&?e1FmhxuA~8S67EdQp~uEBrGJKLl)L?R_xBhjazDbwgZbzfe9% zG4l~W2QwdhNav8BnU7BmOZu;12VKYgiQL~wJeL?-(C~8T_G9W)!u}NEcco)zD0|L& z@jsYK8>Z8)UTIVO1{WRRYIP951MG=6m&v|1*jO)7QkyF%gL-?@Fv)wVk2X*} z%?ns(QSmwVscHHg{LZCb(q7^(E_scHmXdb4q72Q1hH0JLy7(E$w_Pv z|HEdQ+*i0Sex#Du3TTtx#ciJ3wagXW|7IFxl08>bv42hDdyVf+p46xl^(U0Dacle2~lD9#F` zeJ3lx|J~lZvI6-fS)!u~^qid4DeW%v`EKubvQoNFHlH2fot2srT02EGesh=0SV`Ki z5w>UFmG!$^w;^e-=>F}jmEPO3cIOsnZ6J?i{;%+UJ8Od$O?yN4+p=EbzdT>T^CbSu zv!8Kq1-?Pgd@b2{wm8MOjdUL%EY1FQmZV$SeNxtKoEf)*@8*)ehySJQ?K#&gz4x47%d1eA0IDKgT;U zE8j{pzk5km0srNB4$oEo%d=IlIKgZ zW3kjxp8UNeg8lk2hYW8Fi`bC(xn& zM+vRIQte=xxkDoNM{}-ITVu(KabtwC{SLp`4o{=S|K*CP+Uor4Q$HFV4DzzQ2NJ3cOg({~YeC+|!;f&JuoP911;o zxoWgOJ_XG2>9VBg_|%MFjy*m}+X)8BXKY##H$H_avy4x5jAwc9rBg1QdH<2~*`O=u z66S#mWW3u3Efv!~{P=sJw8Dp+Gu9#akaK!Y8z0UC=dnMdV6REtq8RVn_-4tNzD#j} zIXvN|_@XWEk{tdSzGBkvLZ2c&>=`GEYw_ohGE{&&>O6Kiqj*g)$?4+3atjaIct-NT zAFf^ew}kf2>qz{E?kGxg>H=?7odW(7-IL(Iatr?nZ3O$t8lR+TrpLXriT(C=0QP$l z%x3ooC-$@VM-}Who%xcvuij=!Zc8V7~ncBe^MKGU_@z~#L+H1>1G>7;2X9t@dSX8ZuTeTxJJjINw!yhTyofgW=9XJ2D=OJw*(c zV~z;{eLMbfBL8>LhJxYF27YUQd^vZq+t|~B(|V@XTLK zyE5H8r#-am`{p>6i4Wg)#^6`c{oLu)?V$&jlv3 z4RQJP4e0i^q1$6!8(bSOb$dBe_YdpOK9g_x%EJ9!>73tQ7Jdgj8pCeBD0X|o!fw5h z2ZG&l(%66bf;_&-)J1Q$Ta)Vy{otsw@Ow9EW?CP7tt|XeF?uIU-?akW-b(aoPmMyi zcVki*9YPo#f~nh+a$ko&O3Gc1POmFvzm&9#!D^DHiPaqCl6xYT>~j2`GQ30?;`189 z-VLI!3!twn{xHQ%%f@GtM)-rHb+Nx)h0IVbT-MASz?>^sdb!c~aZUq23__2UO50@9 zHjOb_tJPj~b3v8!yU}qi5giwOM&=O9z)pja7t7Rb!)%P!n?4l+^N3xzJX>B}7AwE( z2`l+Z+TzbD?+Ojjr@f|Gn9Tg1d)fH5F12-W=;P+%(@I}8_o9B1{3V^+;T_p?8cg?^ zU^!mczp21NAXyc$1~dCabJwb2b*{-VCv%f z=W8M1(?j=a#@pTVxc2KUli2^~jyz*$AU~7~-*@?nW_0_{JR@anYh9e0+A+Gg=kM)! 
zJoelqFS?zM$74^A_Zh$XW@|jQvan7B{ zE4sJ~MHhDh`60TvsQhrp=*})fPVW7@X6!f%y0b9*VRkG&b(=aK!Cf&KFY7QCKa1{6 z>Rv1R4_NpLS$KmPp4}&w(I;%|ReyZ!wdO?F z%Wq*X!A)XUC3(x*sPqL{8;$iz2kw%!Q5$!OE==B)^2@$RV6QcKU?={E<^I%D$6jkr z6??sM2H0z0PRC%cy$)U2>0qxfu+QQ!_F24!eS)I%g}l)FqQDsSE2U?DgwARMx**uY*1}sbkhbpB|qKR_ge5(3OQ9zYZ#WPS!!~K6kdT*L61b zV$IX+|Ezfm_PY98dGJ8K@rCK=J;7iv73LdT#}{<4K2Zt%>YJ+Z+$0y+xZ~G6R~9B4 zPmfPAq+X{g4@DOy-^n^BZ4k=snP>Db$~Uh3YC#9%4kfgzyK1Z~bWv8fS=OmLj<&lk zYm?(J+iPI9ADsovHkkdK1h<_X%+~Siog&+1y)!mGw1L?a)-~llS=SWIHehhxSy|Vt z8KfG=8*9#0?AG?W=JByxt98wjw|iTYuzUN=*}bh%z*)z$srVG^-qxI<-P;^K z(CwYFUqr(0&Gdsf3wCb{PYM5Zyxp7WySVi9?cNri3ieCby>*<=L&EOusLw;f?(KAa z8xnSJwtrc???S@vtz-QbhNDBQI?u3M+v~E&$8M)#_qK35`jdp+8+Nnjp55ERY})3y zb{idU_hw=$!DONj51snjtBL6nc5ey0xAuHb6LxQ>>aVMW5_WG1yEog{`lxSk!tU+# ze5#{9p>5-<cEkaFNe_$p!d2G%$YJCCzz_tr?eod|n%wB1`1d7g>g8({4~ zF?jJ*uvfzF?acT8NZ7qabz#zn6LxQBw%13(?k#E$f8055oUcN{?(OvV^Z0kQdmDp~ zjQE9)=G^J@SxJk>u*aDXUyQNr3*r$w5^Fy3Ch zg^M2av^=|zZ{+OR8~p*j8h^kcT;JsHq|dR=o-OdZ_1^e`>i8P$#$R+hYsadq_S!y# zQuIxd)-UVkE!ywztp9S^yhVYG&ib}gr!85y%xLrb@(#KK630wx7pr9 z?hex4ijCcX{}N%<*DKSOMH{#!FHJc z;sY?BdX9k}3rNQYO{%C{c$Rl@$nV!jvlm>U-={CYFLO9$u35HVQ57_q#9Ido6A2|~$!UGw+dulx3G5ckd2a@zklh@kk0Y5x=syvX80SOr(V?aU%{CmlO_Reb^XxYNI zg$edCw|=$4RqezEVfL4JBn}&txuWC373rsr4O&z!vNK6Le4$H=xRSNQR}<$D-=k=e zVvlxM(tet;Zoj{a{xoB1Aft<3mZm%<_lC%IM)bPW*G0cO8O(tn;a?ObBKy0o_b7+| z>{TM7LtN)m4!;plBC@|*T^HrBLYxZbDZ4;5 zoHNIzTyoZU?AZ@g<4^Mc2a2(Y*ok+_l9i{n@Q&b|8+@tyK*qh>8TVwYpTBJWA{m25^UR4sZlulfXtT?Rvx((h z8;?}cMnT5LP3%w6%sSd@(7>cQZShCiLh2WQ?lL}CTF>ivCg=RfIKGs7pHr1*UZcM9 zuJE3uA@2y?B#m0yQufUi%(Kt!uPGz1I~dmmSCxqG40RpIK73X1=?&C%0d)xa)AZ5c zDpT%J#}_Dbux`PkSF+i+!opU<592C&w@AL4svI`=;~}M=_RKX{9o#p!n;lZF_ zJ)VsQ*U7hI|9Rxu391q~2;HMNtibs2mVBdSLZ0#Q|FqCQwmdoqtOWq~HwmMqAYYmo);)o#*}@|18-cunL+o?Oej^h?=4QJzoa z-a2_szpR&g@TtrxoOo3FWjTG(iASY>4&Yuq9!=w3CEqWz`sf&|k4pbc1CPqwMEd0b z`1c0-rQKiepifG_3offP`>4znPSi(jyy)i}yN^EU=!4R?x=25yZ|-TQ@A~Aq)lZA8 zek$LIFN*Y2lcvze?Ca3At-d;_jlNoG_EnEAb&mGcqw}F9R$rBU1I947k@iX_zgNjm zWQ#mkebIK?pmFXpeRdCWVFLYfEWG$RHt`mCh!F7!XuzIwuO zb6M-Yniw||<7Q&q)DFKuAN-WZ&DUOis&!vXjGITsC&tahxOq0mO~K&PQkC(;1&b5Q zz9)uY8p1S!uchqaD$Y%CdAvn^3Dem_Zok`8bAbJCp0f5L^s=u`fPMe=tDW@Ts@lAr zy&h!GCpXx>FLRdL2-m4e`gOi!eWWi*zg~6gKlG)bhj!~_N^g~ON!HwJIRAjM?JDnm==Td9 z_j7N{+NCImSAbJWp-monh(1#AhwOXT9^D35bQ^TabW=>a4gZ(XO+AKgea{lzM4vDG zrV|Ul&8J;z;H5FdcM$J&D>eJrTWP;LwPsyaSty^jDy6ORuPM!XtD^Uz_46I~a=FJG za6fxXybA6PgBMF^E8)o?|EH-wlP62ibvxVc#AQ%BxC}~jd>PLDemA*jA${%%`d-Q# zyE?Yt(Z8Ji&g_e{^Ap!h`V92DQ=!`__PaBn+lhI;oqi{M%<6Y$Kct-|UNiC2?RTeL zhGYAk=siUK)H8Q>vG<_Dc{{SdS8g_ENf6s|LS$1aa(sYe{F{b+N@8pg`I4`so3f=G zc_ZW9Y`*pJji1noclIE^_A*XBQPuWX_QS1r75%JgNnvHGa&QM}>~W{@ernBd#xMnW zRe5mM7Qf=JFH(H^i(R$Q1JHdXa&W^nlNr0@Y#qig_+djQB~)q2zGbBISH>wBpT;l_ zOFh$xr7pn})JV-7 zbLjAYs!QLWqg0EnnVgeFTWzjX1)pT-6Tl}5dmP?)N&4a&eF5E-p@l|3%iAn0P|9__ zj5Y9^Crt^Vqo|%hd%Qw>9B^l{->eUu;jM8!SQhH{BL`jpM|fuUK6J^$j(fC?$E6&0 zrr(cuoO$#~Ru2Va|1;_&F!+9kGYu4@sbHC1VtVG z#tc_C{ZZBzRmMTtKmJ+LyqEA-(*3;KQ=KdO5T{;mrdfRPlSO$K*A(q=cdO|`T75{{ z?MOSC-i7)buI^@uLx6>|J5{RhqQ!RP#Rb;N8%9yGIMZ<;x3yz&SHdaPH;Lvhu>u zFh^f=ofiH@QS|!huWI36eJd}lv}~WALnuxj7S@PM%o0|CiFcUS!^6tQ zyiX`jR>B%_iFvJt%ZZnn*RF6a@eaan)?;ap&Be*?@C@eOOK2NeS9p(dy{}~IA8;L)vf+nR^4j5tl3HAO?jlO*-7M0 zEM?73B5z_TYj%=598_J=Gf+7Lp@hBvw`12X;!g7M09%E!IJ%#kVs#?90v3MzG z00@1X;nM-M(HPpMk~;b2YzEq>iE?V}y`S&T3y%X|?CPq8-?{iGP0N$M6ic@}X?U!h z<>Y6Vvz+`!lb@`UR;qs8Tqgw!jNvRM(PNCk&Pnny&)9?y@>K8DS`u{ew@;5C>D4L9 z)nbQMLO5E_iq%WyED!25hCVllIxQ$?ZI$|9`&O;NH~H|5{k}$-HE1bx)+lp6bsz0G z+n|y@u}q;nw3Gdfv`vNN4Xx##qLyDP{pS5~dB}`)(Q`cKq-;KPXLr_Gdmu}Ca{nUw 
zOD|-}#fIRX^de>aU7E7_G5St_*7K#G?D%nt@xcrw^41*H*p;qC(uXMHJ5&B#-f0-B zM1I8ixbr+7Jr%q@UP-Bc3oJeVpLvh`n0{5`RrS$eLTvD=8=v$Tw+B?cFaLi+y}g__ z&~T>`nMZ$Xm_|&yHcVC`Yu}x<tRiCPAk*1YOq^VSkd`(T& z_YWLnj3$1c&^TF(+)mu*f9^9rx1!A0*T1XLI4yebi+{E6bu$|8jNbd)>-QO#@!tOa z7aDVhbFSp2n$h@U@>;j#xyxPJ+5^MYwL$o%aj_El`dG~f^w-#P)7M;%ZN}TrW*BcR z$uvIsd%)QD=T6Yn*IbOe{b8zRd@-9dBgSh+)is()>vGOvdC997f6rEpH#m!F(=~2m z3GZv1MbTy(d>2{^jb{=IjpN&-OpD~gHw|-$J;d>C^h4T0p6kT&Jig5yB3*g@2(dhm zYr`MFGm_`a^3F|8(S78geWZIrj8lQmz(V$dnRgb5sAZhVdf-j>zgbiMq3?KwZPb{u0!Mu}40 zkgi2~F?JjpdZ8iqGOzb?8S6c6qY?Uwycb#D33>!GQuRv48qOQsEPGiT5qk%4TQl}b zcOwIvsM~9%49uwc3Np}pZ||GE$iPp~2i>fC^_P%?j}yNPJw3*KhWq|91oMncAtjm@$2VyHm3UUfD|-26CqGu#m{6&ds<G`z<-efQIGFPYO zY;-+;VVwBuH{HgS`HFE*mSXe^Dn@}vG0gMSq3eses`+g;dm-EJ%6syTtcSebOQFuu zcWTMoe&=q|8W@)smj%R51FqH*0bUFU0|3BUtY@)KlYc5H`8hJ5N-Y{ZQff=ZQjoLV{*0> zc5vfUx@yt?a8&V6~mZbbCGKb!BV#_Pnj znrf(&TWlNU`ZZ$cbM-)IKb-W%4p4#iwMzN5>9nbfde_tL(RtE;g6X~qw)+Mc?^dwh z*NsDWe$6;CC&ip2^;UW}SK;e0fq1OM;J#(>XcBXv7q}<s0?s*>-^ErEGhlSN!+#+%DU8cmFIYqu5R%zYnI9 zztE+gdS@%zZ>5Z)YaYY@8z}bx&h5%mwckqlva<<=*;#^-yvP`#hchinL(mV{k}zc4n8PdI(O0BY^6F|nH>?jiaz|?%pW#DtKG~m z-|{iPvA&(;@l3z1pJ)2FM|h@Bo{ShX1V_UPixak4Ap@-R?6Xc(=W@I_kd3 z&&VC&=fUt#d+FL`-_ATm_;9&q@@Zmz%3cvSO(wleC*6@Axc4V~Vm&m9b1y^r%jNe8KW-8kWy{apsBEpUWQp*g&1-wDwl{f}_k<5lbS*qlLOTiXR3Q_c z-^epNA7}mcN}GwykXYVv)KA7evwm^)Druu$VXIz>Ri|Zb)G1`uN#2pNwH2HEZ`Hw1 zY`-V>?KGubs_55VWU@xQgl9Rl&o*Ld9~GH4oOh)BCA81-Ra#-+K>4^4gypN2u$)y9 zSxU&^eO2lra)N$dLc2W9IMBzd^nQGV^1M8ky3)4>2+z-@eMHu&jG>agj9(??FZyHO zlt17<)&G)*j9;oXe#L(uUk0gr2w5#{l}G+kchzjO=(sMJA=^_otQen2n`Y*?4IY)T zxtMy{_w9NK-$~tU-jjL_P_(ry6&#dibgfGQT%v5Iy=R>OP+!>6J+P6LP$LvP}L|yy%ls zb-^<^@IsL9M5al8lCR`1-xY7oHv4}zaqxrt9v1xA1phU;b8803e2ja7uYH`)IgK^v zL4=B=x?vLu+QytBZzue7`DPkEs@q?M|8>*ajUTqTWmNz?O2 zYR|g}c3IKCIk3*;|22693?MXBp`FmQ4w~BSEZDvec(0Uj+r1@4BM7DUmSp4;=DvB` z^nC17^0!VxrcE?uTCp|mi;PkJa@T#rSHg>Z;Hy#zwSBVkC3s7r-9!^N3eBYN9K&Oj zqYpBml>QU{T}RzFdC)PIwwJEFXP0xkJ3+V8p__7?Urw)_=}O>pS;>s4mzd)!eneel z+))l{5)1u1VWWuLE))3%F6{?Dm1kd)C1dQ|TEXA_vYL=>eY*F9zxrgk3AMR>vnohW z^!T&8=VWPt*%K1qof~v1rA>Dn#8#BCbueS=>9&iclY_kJZejGDtZnwrcE9cn{gBtE z!z(hrN&2eDv)Y`jY;U$XKNPvS7hSXB@l6-rEU{!KV{uG&*20&Hr~C9A-pL{EbFWtD zd9%;-QsRCa`xf=PcS&K=n|-HWvhnev+qgFS6#0nE^fMpN@6~5|{xvySd3@ieT=P#{ zsQC-BU7s8AX7=YMZOqB&SH8qQY2gyTe5X1vh%HB*-`nzhuPIa8N^3T0&GomP&zU`w z@|9RJb}TeGULNsv5jux09JkcMaZdbJY|_{_UGa1w^qO7*j>~y7n{g?dYr%1|O$?U@ zeFVde6%1#N-Pu_s7JjR6&`vPU+sv12{3h5}-WPn6L)$M?l;3imyFSsnm(RV&t$U@` zy;ANix9&}{?#<=i6V^QwuUU9buzsvO<#YY=EWSL&+!t(7=P0MpMR-KU>t&R2o6wlp z#(5DY$mpXTE%eIfjKumCpwQ94yG!mk1v2HQkB&DDPHUa zRAsFfj3zj08D$tFSdTm_pr?}BI?n>yL-JIK?L4zRo+3M6$uqyIEG)cQDm+_tH2*x( zHuEQ4XZ{MhCdG?RO;vttmqXf2FsG8FJl(q&KGyKKU3bC4Qf{3(6cW;uwECboRnHNf z6+FqAv!s)*2eH$}*3Q&fbtA2%u4q~fnED@%z>Uc(;tVPVt*bM+e6w^cxD1^TJh15@&6xs$M|yOkcX6GM@#$VSjD%_ za=0jil%;PQWtsF?S$Hn;eg)xY|5Fy;fUMt5n7j4Tq7_^F7d`P<>+)4VBPrhwd|jWR zd~dn6$dUD~%?Wdk=_w*u)x&-H)5m@k!k(=o`z*H~3ZBDLO87VQlI(y%Cw$}f2!}JdpG;M zFQC0!c{tz9S7cY8?`if#Ua3eDfw=jvThvBYmb7kPxP z$ZW$);bR#K?fTkv^f5P=`pF!&bvm>Cr~2P4KeD{-a?L)bT+85fnNv6P*J#r`+BBaw zEuc+D(5544)9Yx{>uJ+bwCN4BsmTMTzW#LiVJv)Lju#d`2u*|^w!sfM@WT#tS#J?` z5_S=ybV`cxK`nH;B)F~c2Eww9L6Z;GZ5&{>`_7F6iw1MwvuRM#P4axxkm&PG{}Fw@ z>53xHt5+7;bQ1kn&epz!enrkUuXVI}9y})RSI}SkXzFyn$;d%BCN!4uGTKJl{COUC zZ~MKsz(;o5yS!@ex5{Vx^XSig;Q`UdNF9&XE86~h84IFmZ7|b1nQO_fgnrt(EYa_e zR~D6V?Nr;`*1a5A@7~CF&{kLDJL^Rs>qrSY8 zrs&?o!>c><&Z@hW*91R%D_E-?sYSM5s71t1?=5Trce*b$ZU4kRPwsz2TJMqO5^PI8 zyoEI^(%bRp0%O$)(tFF@i?q77O6!9^Vy}T+&KPWFda{(&#CqW^thJ3&doK2>UFsVz zVx2wJtBbwSsJF2l!p^FpGd3gG<>jWb<}Kq#YM1&T`jJsNsf!z~!1o;MmN*X-i|rMxHg{`fL2a%%x= 
z(5$&Py7LV$I&djd!xhj=uF3cP{u;I!N^?_ZW&FH+CuAh8sDm{CD`{sgB!}aE^eakH>YBI<0$hd?-j7#&Dx^7XIyzZ zQ{JvMZ?gt1W!`fcdx}u*R%t^AW6sxGZgZ#VzachhaGbOxo!#Uu>FlBYACu-b+G-#+ zI?l0FNB<s|m9aRSnxQ}VaoLvr(Cr{LN88=qDR=jpblPY( zm~u4!0%EfiWLk z>|*`lJ<@&$eD$%rQ_Y8JivEveZEaUqivF9gYQbJ^kADA5g}Af6_!fzi_51Hsf=TY4 z`tQao!BX}|xz+8{H=uv}eVVp*ceYYJgEGmwR+*C4JO_F2=G|nz>r5IR((6XryU9oB zD0SJ*I{Gg9-aE`iG(M?z$CN*@rgWnI3IZ!DokO79m&0B3}xTGh>i9qggY)!D#F|Duf&| z$Dt!`5ArA#d6dTUbl&svzMt=qMKd$8MUKj&kB~?2A&+*^7d`}Eyme7jCVc{3K87v^ z`HEc1#kRtfODmI&ci<_JOWDYf7I?(V9zi0LxIkM$C5Y5yO*(D=?;%J(vG>v z8j-0H_&qnho1Vq}TtY){1-0Q-%=(Yzk7mCoNo&Ox=6CKMEv@}AI-{-eK*oZ%;TKaT zG9HRd97TI|g|3?p92q~atlMJ4>TkgcEfPeIFL3qJzkzHNIX;>(=T;fV$Tx}WT>4&w z@gV}wWBZCc?;0o1dm>M|#q!IJ%JQTAfqFDj_SjfP8WF~yw$k}zT0V9y-D2gJvVYwn zE3`Ibh1u6g|5$l3j(%T6{~wDSC`2BNL0*hTUfh5!z9%X#)~m=16~6@+@3?u_!#7FD z3VavlsMYU*T|`#AkE{?mA-JUl86iFjzlMgll8*35G38o|JR8Nl$iyTjjRcc;=(jy$ z{r^L*d(&^znZK+;#&l0nsz3VhzAZhVz4`4;TBO`1^XS~Hc>ji-(4`-3VYjV}DMBC8 z*!-yLLVX6~kfgnlF(4{0k3AEhu`hlI_+!XiSFwJlI*%(?$|J>uPRgZOf?u1dV-qwP z%^Zn2;z9pX1%Jh)nkqF}|2g50gtrL?pdUDNc=HI8MGFh3>pJ_EJyg zqpL`+TRr8HxzdsSbM|d<) zHSQUL?*vaI`1gF`0kIFfqY1xtYn|$RDtn@==U-owF;~V{8B1k6mho4{S{YYmyp^$8 z#$p+FWt^39TE=A=gJsNp3G8k2YMZf&Zmw$rS0r%7r;FFll&{4%<^a0kgMbznFNN z`+{Eus|qd^Tq`(Mu&Ur&!Lx#21*=Mb6&x#=RmPDVnTxo}BTAq0h|C)XxcXfKVNw+iU5^72JRo-1E>GHpnfYqjX?=9XHf84p;TS3@Gy8C(e9Z8q}y9w`* zuH2LL)E$eCjF-8~aKH9$_!|7G)miWEMRyvVyW~aZF6cmH-XQdYro&W%%wc4{BJ&uT zv&cL~<}5OovF9x^7x3_I62Zp#_B_O%ci8ic`1A#9jy*y9+GCR z|6%^G9m3QTQoM>BUhiQwi@|0@4?$aDVhkUatUKU`!BGTWwqE4tHv6!dU`3-HG? zG~cv5^tzqOGp{6^FZWHlvd%1Y9SmLDE4REi$fD^y>=ss4Ykn_uoL8Z6t@tZhLkJT# z@@x;!DtWe%XM1?|D);vj<_27*jZR_l5p2e>=YYqH}AJj@1E7#nNRp6Rc zV3}25m{sEv>HK@r$LuG2qx~c-<4Ondlcc*E{426bbm8Set%DuqvQ-^RB# zrhS6@mDmpm_Vn#Oiaq_8S=bXBbCFw}=pR=SWKQ6eUt#2$J!ZF8AG<$vf%l~!B;Je4 zk9fM6{DB>T$kDKjPwntWTi>k)-^AMAINJW@hEgbNBTsP8Zu=*=C-Wd1+sR%ig6#%_ z?T**hWATx(+q$hdR^A%TC-RYUW+Df{5vww3_sq}wbF=-hRgm@zIoq!tdYJE9?KY3G zU&g|qjE9EelYYFKdol)w39Gp$IMBv*e#R%kbyc)Ud$=M-@9)zdtQAXpNL!pQ_f7gZ za7BCNmiJ<~f<9wnitkx82^JfS&A8CaDaS(C9L3VN@R;DRbLpN*5A1`5 zzx+=Aa+FuzDYP(_@KAd=Yp&15eqP`w=n?*cg`2Qtk6$|-jP5~b<-|=+x`i1ZY*{b` z+vjOuv{S`LiSJsE2R3f}wCQ5<7q(?4ju(7#n(b-!M@z1?hed46F$g?l$~5#{p-ZEf zLu_?zo^$fr3CEvy%4?68?bYYA^4x4c=lF91dYJFq?G%R@THyocPNwe18fyd#I@_g+ zIjqQl7=7c$yYuC~Ne}0EW799LJ|=(L^Vqm?+c`$dJ|i+lZ$h`y=6GYL?--wBv^odw zne=duH*J(x-m%A8!)R3t}dU&;mz4&0M^hL29ucxnG zBxS%>Ozb9m{eH=!`OP!8G^Q(&q1gKU=%ccS|A76bSM64hO}6nKw$rkw>rCv#WbayO zx9!;Wz0fDRN6M74?u)f_ufBskfAY7hAC+g4_7d#Cf?$z`6wcsG!5ol zZptR*5*=|9WtBa!q-Z_c>AGr1EP=Grz%(d~+Fj`N-?m^zQ7Nm0A-y zeB|kT+Dd%2@<%Gq+wa^V*!$`pS?{5Z{IpR9wtSh`@&&Nv>x3=e z1;+owPHnh`uO0UDPj*+vUv+52mMQGznU1aKCTuw?-Q{1t{Rh6qV&i%{dwMqFqwqP^ ztsB_oHDa&(P?9{)T-=Dg@I%SvUzWXhQ)#D0?0L=m*xlZapI0Mxl9?&kzsh&m-I~vO zD-i=**hXxOt9X{mp1qA1$}|7s#?IK(V*l3&uiOqzg%29Ok!I|0dkty#4{y;T&j@|N zd*+@@%mqJ4Pc>R-?~mB`wV6GQzn`N88~%vjquNcMA=mZM>)Yh|rRa63T(6H_e^0Jo zh+fZ>>)%AL@006iqt}zaqXi|syT1~R@9Gha?@Wru(^I2yNR7sKjEu%p2b0F7b-DAV zD8Wa%sKGgtT*1Znx`RKw(Gy%aEGhWlmC3=%+!V^It(~9krL5Z8d83rI4KEQF60aw| zk@yAT@x;F&{u=SK#0S!YkNw0KteEEymcN)0)PJC0SL>=4`$w^RT+J9OJ`XJ}`28|1 z5|J1i=8q-D-t-fR@u4^*F?O#1lo%V=W{EFkZ$XI%;ujz>J{Sjy8<*!9Zxg?@{03tu z@m}^G6}#$v#0Q}L4)-YIgE3kpA3w&W@1|VcnKrvo>85A!Y!mcbPPmu1>&FA%;# z_yOU63H5{-Jin874-#KOENv=jX$MlSZg6R9Qwgu&L$I0et9~qV$tnQK$oXb9h zvX^Fw(tT(nd0$8QMP}U41>Z(&uAdZt%|m%x@L{MPHAo8m!6aNdO3Ux!h>!h<X~I5nUnqfOg#Nq03tCn(e*mvR~eDZ7+g z+PZR(8l0;s)fMb(*@*4?vUFu=8D$+qIyaD>DLo<{7iWBs&d?y=z6{;jhj{J0Aie{%XN8OIS;Kv$_m8K1N6>ab&W79k(T_1=K#%R@FAB5Hq!khc?6Ut613-|G_TV2au-|PId zSMR3|CFHq8^*2}GFS-laA>*o!4MHwsYAWL?Wviab7&?tHSN3+6@_`o)hN#OPWQ@=# 
zoq4RBXCv$4cteICB<~m5|7Zhi-Q^x7Jlm&+=XP<0AL!*~8ObGQcd`F@n*IXm+(BQF zy~c|Vr(A7+ANz*;Z(H)LfxKj&|54OM@@$6ojQ3k^r(9?e*-zsZNw zD3kD^;2GpAGC;bzsCm(2cqeO<;) zob4xjtBU{J5SI_%AD{jf^n`B?jztEI!cR>0bgze}lEKR2SN1k|=X#~H{{DC^_~AG6 zg1bq#Ws(;B_>SoHp?kI9eoypzr=|tB_sR<{z!!}TEP~(ktHH0_<_dl*&mH{g?VjK` zH7R&YC^-m!JoIC?xo@che zTi28KJA~g8-j5u4`aQ72=n>j;`w6cRmJt>VL*9F$@|eBcH;ccQ@X>m{7rxwwoS$yW zX>Uz`XweKNPvuN=srNwo$xinFO@pq|Zg1l+I8Ed-<4DG}dBFf}-ziNCs`+`r$`Qya z#+3&>uAmoQd`OFqS9aS++wmWITLc?Hj2*HOJPGHrk<)6$wPUEY z1ahvxI8qQjb3^1@4f`?AqhHH@X6!3nFXtdUNc%_S+S<2%1jFq_u%FJsSH-&a%;Av?~ zk%yeA@QBEUDr8ayedJE&5&LP|yXd2W_rsKlwmXQtGtchOk&|+M~ygbOzx_hd?$MVv!AAUc7*JkDd%D6U=g1!2dJ}o9tJWm@AkdJM#HPc_eb}bcon(0 zjXDp4-^Qd`=URB((Q_@36?35#nC9w2#=a0?&{Y+yDv+NxW=k7fk*gu|h6}CX=Yiy% zM!v$=oINqDoc)LAI^?`B|5!(@b3(`y6X2Zcbukn)yFHH>*xJ5>NAyIkg#pGZ5b0jQ|G5In&_rBIeez< zwJVA3b%2CVUBXxKeBc3@>m>ZA68@{7mS6Ch`k5sBrxJeIVn=?i`MaMfUw4H6)Vwro zdYHR9@ksnNzCO|Y3M@Z}j`p7t`$6%UQXa;yiD3In<#Sz1aQaHk<^KwT?I*Q?>)iyW zkCgHV=@4xHsC=$V2~OXrx%^*2u>GPoaJ`#g`$UQE#Oc??eEeWt8$MDY%SY;-rkQQb zU!s0f&FGL~ehm1dd|G}}Rp=p{__96!9owJfw0*Ro&A)?x)I67E9~`C}~?ix)o==sNy(Tr z*4Y+q)K}j1q35#NB<3SUo5cL17zbj$Q59U71I-%YpS-qi?)$zLt^=S+p%_*b>|%Z#Us$v^m4nfxQ;+G+Y%+5Ay%M18Al-mv|uW;ifnI6I0F z+w-jwtRd?xGUmmPb(^rEX|w&~?2-T7{Hvt>LeBPUhaP5ovp#8$pFtTzE#E2`JHrIq zuc{S3WG*TA&>xRW?6x9~Z!5ti4t_eX{jF?V(q4Uz!6lZjmC)4ov$C;W*x_pxm8_b@#wl9qBx87=< z7FxDirv)SX8F!p=!@iz!)PDbz{MQrTom>7{X8%MFaJubc_Fqdb+GCj$>o{@Epgl)% zO;v6b*Tnm0#qdlUyeR$uMEqxu)9uygv+~>=kFXt&jbH8bN4vh}`*H0gn4=PV87Icr zbWs#z*n5oF{#i1AZVy}RWxTT6#g1e2j~h?Vm-{BY@ZXA!Up9T?>SOZ1V_t3J0)< zmt!!IlOFaMd5-#d9YYUCd0WvVZ9vw%GxYHiTd%f$9c}+LcG=0>e~q>5zwTx}YukHa zkeb*dn|;O-dt{%#dymVS$ym$Y%N|$H2iu-RS2v?-KT1(fzSS4}PxMcZr?Mqxs6_$1J~-$1J~-w(P#t#Yc5fe)g=AHMS7? zQPGFq&6>-+eY#;=YwzrmM8^>MQ1XYwYt zM>cOx>p)wusyJIPk)a9uueR;K%yzqgcC*%5MK9{q^;KCmVWDBLLZScd(JWY zFSCz1@vCJcV%vpnbl8Peoxm7h~g|)y+qBbz^g*y1IC~F(=*;8=Lm{ z$i_FH_2=ez)j{@QX8SlXh}}L83{usO-Ix=5gdKKbG5W`ix97`!lfGwYA7;|ki4$zP zI{XVa1%%(r-VHm)HYY}K+C_Z^?ZiwPImdjPMz&4|A4+>a^kc?+dknJ2{Di&O8@8?3 znc9b8UtTYGRnGkweK`M-3Fu|^E@|6FEPj8m!M1&vJuktoCSf0TeFz7S(51dolfs_5hPJ32Z%9d;3!*_WyclTq=8q z+y2h>`P?n+q0xi2mP5oB6aOps|BCTroc))*C)i$VPq4OZzw{2Y{gVApY`wi5C+xqJ z>h|{kinW`={>$DEY^-JbW$UNo_XBh4LlgUd$r-})GrI>CWTN{p_d7Tv+b_|jvgRee zqqcpL>8UjI3YE2I0#CEhy+?7w9F%f^TnPsA|d3HAhY+M3H6qAf#X?HafKny~-Ue6o)q z{UCD)!KTM4+kZ)$S@_Vl|B7Km%l2zscTQ;cCH!V%G~4bAerjd+<=7uA9+T~bC+)eib8j%`ntC|HAwTT3 z^;5)OG5e0hcjJ&R|33CzVwY&+8q0>m-0Re_C)nWJc5J&GemYgw7}g#i+4$wN{@j!s z9ckYs?PFsMt9@*Y5o*u2%h4{I9JXEN{LGdQarh$EF6a1tlfE5c-z9Xlae$Mq>>VGr zY`cV~PiVvRS+(sF`q(-fi$3O$2msVc)gsB<;IGmVH-jUw8Pw)xM(Gd!;ANe@xhWWt{)^ zUWxM`7mQO}3EM^2`OeD;`>%xkSHk{l+_ei6`-7cpHdP&M>uT!=&c@!I3Hz^ecYm;W zJFgv%vmZloc3!c4=v&QQqIO^D=X8R@j!cg2=s!ek_x8j(y1QIr1$F=!#+ICesY`enqtaBg7So=qK zlJ)p?#K9+N+ZEFdT5FYJ`x4uWEwR6s*dMf7AGG%!O6*(K+8)Q=A0wVV=K9S!w!fFG zcMjTpbT5ldmq*t-_nyYtj$_-g>$1o2n2kf+I=jtRHoX$>wdNh3$~eej@lKHXW~pG$Dd z9*3P)D7S4pt(eZN4W1Jl%~s=+Z66fJe>Og8kDi~E=VtqLko}g8A;Rt0X~`V8J?!8| z58-SV`#MJdIQ($F+&^ZFviU8pKJjCe^ZZ78oZoBNX^n-4Z9AYu0+BvE#eJ)DJuhC=v6u8Yg1>XFK+6uMMkHl>P@7ZI;->IN7K0O z%su4%_TE~=&SMjD-nh7YZEljX*{doGdgCvd+av$`O`Vm<0C2}g=~|?>rW_n|_{jJM zcj_4LEbS3_&s!I)Pa~fv=X|~8`>APqb6+j8IL)g!q-YTz<=8t=i|iSGozcYoDYMcR zH@nknZcX>r&rJ8}AGtGY8oo%GDN7Z0@e|bx>nC`WgAHF)BI%5CtG=T}vdL!|b!og$ zSzM5wsyE)JnspjMos^4L6}|1gsOBAa=Nd&F{>CWiB{I!lp+$^~wY3BJcE2mVehPJZ z+1E1ErD&@;I|E;Un^I=#lTCU<&$(-lhO zV2Zz9nWa741mBPDq-@UPdH$GOeaZ*-ZJDZ+KU|=C>vy@m^>4eC8ZY$@xIFs))K2;; zkFvQkNvVE8Q4YTC_SRG}4!n@0d?y{OoJ@IY4$OMEtKzGlM|z{6OE*>3r+9peZ&AFu z^8MSM|6Y2MzUtjs5C4;TCXwz2k5c^)cXxf8;&0x5=*ak2f6#gHOOzpJ+T`a|Z%)>Z 
zk;?Nq)A~RE0(5+mJ&a%Iqa2*5DC0B8>mt6N>`u{t?#j@oQ`aM|l;(A_i$0zMt>t+i zkEdui?bD0rb2@9`f;an4AMs|t>2JC-^!qQ;!ppZVFM4t76Gd}6E8$l;OL%v-a&Q@W z?4w>UxV+6JHtx%znfWKF!LPu4oh^{PpWjAoVkZ{poO(D!@9 z^O&bKT&P5Z&%UUXe_3ewk-NYUei9y<`AOLp;m@lQx?4L zx~Sf`FMaVU_^=WF=?Q=QcUo87=Ba@eKSg=zqGNbzJM@k6Q;!%wN#0JL+D9H!QsAYG zmZ53XQF!S|clrP>0-9`Vf3K<)izy+zx*Q;jhNal*n7~Si^<%nbfZO zF8YtqDXhBm%G3;fKD0Js;EOhw0x=>R;{ipAEtDJDCT_%-4|7XB+u`^G;-4 zL7I7VCh6on`W-U<^R%xs6+VMc)2Z)wBSL-R>7>m6I&;dtC*{re`MVe4!U+oXvxng=g!h3EU=(+^Wa zeX?DW{CR%NebIC6UyQzg7v;gXHBY8tKcqEnLr;D^BR%pv=Ie(8P0!(XWjlP57sJfi zR&0hF`SCh0=3V1O&4FFap$IfbTzjDgd*NLBz^G)r?1ol&P_|T+KTxPvBnIwG|}Gi{L}a@zcIQQ!4L!0r`C-EjJ=NVKsbr zns+>posdGki{PDs-h_Ryp7&0LPm3!u8-`&Y)WBmMwKaRc=JQ7m^4#y4YqAm6xi-Rf z@fP;l)9}^{jN!>e-?Y5758gVt=(fGJu8q*%kGEta+~*q*ISilNp8;Qa2Xvphfv?`h zM)&}}dcGfD{mREY$h&SMJb#n1eD%ULzVbJrtZ&j-z%AojE5aR%>QBRdlrkOrTv^QSSU*$ux%WFJRVz3GxWJ%@)-KDyvEHq2Fc`>$yKuUP{uM#dh5Pt%aI z=6lm?Px*#hd*w8;NaenZ4g4Elc2gmC@Q2tHdbSfj^izL2eBD`K`U;-u8wF&6_q5wMAliuASJJ_Um?~HHQ zTAPNv<=XB0Hf(*_V;)_Kk9{QVW85O9=gA*FK^cDZPjv0v-pQ2PRUYaq7MhGUKXYza zk$J7tH>RnRInl9V$==%ZOgooGKzrPbh+L*`F7oYS*)H#-a^9P7#%%N59BKDub%$AZ z99W&*@CZCnRQt%a`B7AR>t3~U1GdX4_^F`C^ekW>%`~4c@<_Tja@zPJ3y{C8qwllzY@O zSH^T*o%0ZOh{mXM{C&}4bmeva%Z*I(4UdSAim^M&u{-+ERZkPY6knISc8B=!!ygQc zO5fMOhY!O$9q7m;FD7|VYcZ8=%X{2h)S66lK{{S`b2ogs7~101!yA^s|DBBO)8KlS z--^-K;x|Knz4&cyYV-&2+oSLsy7-6F;kD5&zje6$wh?}i+>`EJ0MbvdUBKR&!1sUV zdvd$0d#}zZ_xZYu53JpKr})h`sQb19-`~300vX zb`borNrT@8x%?Jv+6ljfS1j851oKyRxA<)v{PuhJ?R~~y{H8f@g#QeDNBoAZEPmU! zD!ZYO@twN3+Vb0T)>baut9FXtI{4OhlHWSKo=A1Ni9BaKk%!Yg@SB0(rhcSnlKgg3 zb}PF6J@kpMAHUsbCmVRJh~In2Wbs>vFaH|9d1%jfv5^n-vyt~>BOBUZU~;eVo8D9W zCYk5=z+>3RKHgLLlj!d>*Io{KJfC7K&qh{1h|GQf*?m7Ud=|FyOl;*DOOfr?RvrqE zChg-+WO2uc4=JM;ZnKi|~ z?xN21_-fA5rWwfL_2{Dv+Eb(X!MqrUJpQrf2FTj6elYh-SP!gVj{L({O#Q9Q4ef_q zjELsdpf2 z(rS3)cGiXe-RFt)VrzUI`}OzmPbYdve6Bg?clms^8ES3XhvD-Hl(lcsqy22!347HB z@p%tEF4?t`t3%-L`ivowB9j(blL4QbA=tB1KhiVT`P|>sL%*J3Ju&Ks*X8Psd_IBo zp!j^9%jZ4VMORIZm8(VAWRH9bpHE;dSne5e&F0XUon_vCraZ+5@wKEpg=ae9(*(~9 zw>;D5&9(Bh6F#+aQ}PrUEMC0o@~L=8va~->`jM+0^y!T}^e%lZ{Vv(6wrO44`BZL$ z#wWiJsA;=Gd>w)Wu}#t z1>WKCM|x8==b=5+u^*Y%1b?gz1{%&h6=-uCD zbUfTSTD+i0H=j+J89De04e@uZtR?xOKpKn;x%O=(G#}wM*tzEg-w|M0>z3W{(Wp62V!&PtYHU7A|R<>vc zwrD1DEDL!y2)Q;G`Gy_3a!B+F@=QM6cZZt39lo62(;4_~ut!hvUkV-AV)%5Q>GYXC zjlJws`F4+YuC)H$lc}cf@YsZHTEm>}l&r-@9n3tdL~l+*?dVX z1;1IjtNZ9W@i6{}p8N4PbdEB8^4TWe<3@f?+N)L;QqB(Qd>8(f&sMf-l1{qB(>y)6 zSn?42_l{MeXXLx;{`l$~*{WJEbiIV#f_-`fnOMx4%8&euQr>vkg7A6Q-=#)txz_b1 z(|4=#*bZHhfzdAfW%$n`oyhqf)|yG1Rr4T=_dJPgKZVWP`K3^w>{iVQ*;PrqHHcj> z!HciUlhIvm?bZzJ*2vaP*bxQZjPBI^k*)X(9(|g9sW;P3*{y!&jb!y!%BftH*>DkA zGScghT*TJ)3-S5-(*lufYrBqZ>R7RKuk2Upw2Q;H=e@*OYJFXB3x0@y*q(Qf38c$5 z@!1Wf{+`3oqpt&XA+JE<% ziBp-6C$THGdp+H)_!mx^L|*sXeV*>*-1?2rhfYgtdJb98-=_U#Kb!V|Ytz1BawA2q z?ER;;b9)Os0Xw%IerjNYWc#TV|JBZ|Q6}^Y`Gs#a<`=S)iqI*tk!0J-AE)s3T)P)OPuji3In$nq@GZ58t2|$+WPV9kcETIi_f&Sm7^$xOsBR;BM!^7q1-mt6t#cMtA+Td$8fTy7l8{m|$s42AK;l0K9 z4J5ZUXTyFI*}+&2G%1lUp`)+n6F-74Z#F~HKSJI&`0}s^hGT~gyJim1a<$3s zZe$+!uwL@=enpECJozy4zG%^{d(|fKZD`vZ7cE_|8k0f}KxTWPu zula>+C(ZkEkN297D{1p4?cB4ek}aR+7ydH3U^Y78L3G0d=!pBVb!TDg&RmM_xaJqO z@*NqPl<(H&Wo;(8CYc*WuFEgngD*y|z5X z6w)LYu{Udx4U&_0lTVYJ{I`Kot7pFK>H~DZN7Q)^y&zjsx}XNT_#C{wk#fF{U$&F^ zAw6&&pM_+$=EFqv*=B6wbLa}$#p{zk3u_lAVl3C^N5TguJ#Y&CPp%I|W0>a|=zz*q z84apWI;Rp_huG2&Prp1Z`ek_h9)2CnVaf0hkl$Jho@Z`cfbV~eOvGNVmt80OW&v`| z@5zbGXMNp>d|d4t(!KOR{nip>_-bFE`{4tdwidWD+{=7yX1+8dZ=0ESqbaBPsWdCY z2jF*=49@~fSPwQM!~Y2xe%=>EhA-NCJGPMK)p_RCSCQdbFKMn^WUh?#W~G0~TsiC= zj(<9*_Y(HXdE|=hXYrbBwar?uYMn?MXJ>dLjo4bAMTvFd8s>{(R7a8vBXFm_+J{GTGX 
[GIT binary patch payload: base85-encoded literal data for a vendored binary file — not human-readable; omitted.]